# sdGetRawCurGroups.py
#-----------------------------------
'''
Purpose:
    Run SQL to get the curation group (selected/unselected) training sample set
    for literature triage.
    (minor) Data transformations include:
        replacing non-ascii chars with ' '
        replacing FIELDSEP and RECORDEND chars in the doc text with ' '
Outputs: a delimited file written to stdout.
    See sampleDataLib.ClassifiedSample for the output format.
'''
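#
# Example invocations (a sketch; see getArgs() below for the full option list,
# db server/name defaults come from PG_DBSERVER/PG_DBNAME):
#   sdGetRawCurGroups.py --group gxd --query selected_after  > gxd_selected.txt
#   sdGetRawCurGroups.py --group go --counts
#   sdGetRawCurGroups.py --group tumor --query unselected_after --limit 100
#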
#-----------------------------------
import sys
import os
import string
import re
import time
import argparse
import db
import utilsLib
import sampleDataLib
from ExtractedTextSet import ExtractedTextSet
#-----------------------------------
SAMPLE_OBJ_TYPE = sampleDataLib.CurGroupClassifiedSample
sampleSet = sampleDataLib.ClassifiedSampleSet(sampleObjType=SAMPLE_OBJ_TYPE)
# for the output delimited file
FIELDSEP = SAMPLE_OBJ_TYPE.getFieldSep()
RECORDEND = SAMPLE_OBJ_TYPE.getRecordEnd()
POS_CLASSNAME = SAMPLE_OBJ_TYPE.getClassNames()[SAMPLE_OBJ_TYPE.getY_positive()]
NEG_CLASSNAME = SAMPLE_OBJ_TYPE.getClassNames()[SAMPLE_OBJ_TYPE.getY_negative()]
#-----------------------------------
def getArgs():
parser = argparse.ArgumentParser( \
description='Get training samples for curation group select/unselect, write to stdout')
parser.add_argument('--test', dest='test', action='store_true',
required=False,
help="just run ad hoc test code")
parser.add_argument('--group', dest='group', action='store', required=True,
choices=['ap', 'gxd', 'go', 'tumor',], help='which curation group')
parser.add_argument('--query', dest='queryKey', action='store',
required=False, default='selected_after',
choices=['unselected_after', 'selected_after', 'selected_before'],
help='which subset of the ref samples to get, default: selected_after')
parser.add_argument('--counts', dest='counts', action='store_true',
required=False, help="don't get references, just get counts")
parser.add_argument('-l', '--limit', dest='nResults',
required=False, type=int, default=0, # 0 means ALL
help="limit SQL to n results. Default is no limit")
parser.add_argument('--textlength', dest='maxTextLength',
type=int, required=False, default=None,
help="only include 1st n chars of text fields & 1 rcd/line")
parser.add_argument('--norestrict', dest='restrictArticles',
action='store_false', required=False,
help="include all articles, default: skip review and non-peer reviewed")
parser.add_argument('-q', '--quiet', dest='verbose', action='store_false',
required=False, help="skip helpful messages to stderr")
defaultHost = os.environ.get('PG_DBSERVER', 'bhmgidevdb01')
defaultDatabase = os.environ.get('PG_DBNAME', 'prod')
parser.add_argument('-s', '--server', dest='server', action='store',
required=False, default=defaultHost,
help='db server. Shortcuts: prod, dev, test. (Default %s)' %
defaultHost)
parser.add_argument('-d', '--database', dest='database', action='store',
required=False, default=defaultDatabase,
help='which database. Example: mgd (Default %s)' % defaultDatabase)
args = parser.parse_args()
if args.server == 'prod':
args.host = 'bhmgidb01.jax.org'
args.db = 'prod'
elif args.server == 'dev':
args.host = 'mgi-testdb4.jax.org'
args.db = 'prod_dev'
elif args.server == 'test':
args.host = 'bhmgidevdb01.jax.org'
args.db = 'prod'
else:
args.host = args.server + '.jax.org'
args.db = args.database
return args
#-----------------------------------
args = getArgs()
db.set_sqlServer ( args.host)
db.set_sqlDatabase( args.db)
db.set_sqlUser ("mgd_public")
db.set_sqlPassword("mgdpub")
#-----------------------------------
class BaseRefSearch (object): # {
    """
    Is:   base class for a reference (article) search against the database
    Has:  all the SQL needed for the search, and the result set
    Does: encapsulates the SQL common to the specific searches that return
          result sets of references and counts/stats for those result sets
    """
####################
# SQL fragments used to build up queries
####################
SQLSEPARATOR = '||'
LIT_TRIAGE_DATE = "10/31/2017" # when we switched to new lit triage
START_DATE = "10/01/2016" # earliest date for refs to get
# before lit Triage
TUMOR_START_DATE = "07/01/2013" # date to get add'l tumor refs from
tmpTablesBuilt = False # only build the tmp tables once
#----------------
# SQL to build tmp tables
#----------------
BUILD_TMP_TABLES = [ \
# tmp table of references w/ extracted text.
        # Need this tmp table and indexes to make subsequent selects run fast.
'''
create temporary table tmp_refs
as
select distinct r._refs_key, r.creation_date
from bib_refs r join bib_workflow_data bd on (r._refs_key =bd._refs_key)
where bd.extractedtext is not null
''',
'''
create index tmp_idx2 on tmp_refs(_refs_key)
''',
# this index is important for speed since bib_refs does not have an
# index on creation_date
'''
create index tmp_idx3 on tmp_refs(creation_date)
''',
]
#----------------
# We get the data for a reference in 2 steps (separate SQL):
# (1) basic ref info
# (2) extracted text parts (body, references, star methods, ...)
# Then we concat the text parts in the right order to get the full ext text
# and then join this to the basic ref info.
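    #
    # Rough sketch of the two record shapes (field names come from the SQL
    # below; the values here are made up for illustration):
    #   ref record:      {'_refs_key': 12345, 'pubmed': '29876543',
    #                     'title': '...', 'abstract': '...', ...}
    #   ext text record: {'_refs_key': 12345, 'text_type': 'body',
    #                     'text_part': '...chunk of extracted text...'}
    # ExtractedTextSet groups the text parts by _refs_key, concatenates them,
    # and attaches the combined text to its ref record (used later as the
    # 'ext_text' field in sqlRecord2ClassifiedSample()).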
#----------------
# SQL Parts for getting basic ref info (not extracted text)
#----------------
REFINFO_SELECT = \
'''
select distinct r._refs_key,
'%s' as "known_class_name",
r.isdiscard, r.year,
to_char(r.creation_date, 'MM/DD/YYYY') as "creation_date",
r.isreviewarticle,
typeTerm.term as ref_type,
'ignore supp term' as supp_status,
-- suppTerm.term as supp_status,
r.journal, r.title, r.abstract,
a.accid pubmed,
bsv.ap_status,
bsv.gxd_status,
bsv.go_status,
bsv.tumor_status,
bsv.qtl_status
''' # "known_class_name" is a constant determined by whether this
# ref search returns positive or negative samples
# Skipping suppTerm for now since in bib_workflow_data there
# are multiple records that don't always agree on the
# supplemental status term. If we include these, (1) we get
        # multiple records returned in the join and (2) it would take some
# work to get the right status (the one associated with the
# extracted *body* text)
REFINFO_FROM = \
'''
from bib_refs r join tmp_refs tr on (r._refs_key = tr._refs_key)
join bib_workflow_data bd on (r._refs_key = bd._refs_key)
join bib_status_view bsv on (r._refs_key = bsv._refs_key)
-- join voc_term suppTerm on (bd._supplemental_key = suppTerm._term_key)
join voc_term typeTerm on (r._referencetype_key = typeTerm._term_key)
join acc_accession a on
(a._object_key = r._refs_key and a._logicaldb_key=29 -- pubmed
and a._mgitype_key=1 )
'''
RESTRICT_REF_TYPE = \
'''
and r._referencetype_key=31576687 -- peer reviewed article
and r.isreviewarticle != 1
'''
#----------------
# SQL Parts for getting extracted text parts so they can be catted together
#----------------
EXTTEXT_SELECT = \
'''
select bd._refs_key, bd.extractedtext as text_part, t.term as text_type
'''
EXTTEXT_FROM = \
'''
from bib_refs r join tmp_refs tr on (r._refs_key = tr._refs_key)
join bib_workflow_data bd on (r._refs_key = bd._refs_key)
join voc_term t on (bd._extractedtext_key = t._term_key)
join bib_status_view bsv on (r._refs_key = bsv._refs_key)
'''
#----------------
# SQL Parts for getting stats/counts of references
#----------------
COUNT_SELECT = ' select count(distinct r._refs_key) as num\n'
COUNT_FROM = \
'''
from bib_refs r join tmp_refs tr on (r._refs_key = tr._refs_key)
join bib_status_view bsv on (r._refs_key = bsv._refs_key)
join acc_accession a on (r._refs_key = a._object_key
and a._logicaldb_key = 29 and a._mgitype_key=1)
'''
    # Need the acc_accession join so we don't count refs that don't have
    # pubmed IDs.
#-----------------------------------
def __init__(self,
args,
ispositive, # True if refs from this search are in the pos class
):
self.args = args
self.knownClassName = self.determineKnownClassName(ispositive)
def determineKnownClassName(self, ispositive):
if ispositive:
knownClassName = POS_CLASSNAME
else:
knownClassName = NEG_CLASSNAME
return knownClassName
#@abstract
def getName(self):
return 'reference records from 1/1/2010' # example
#@abstract
def getWhereClauses(self):
return 'where 1=0'
#-----------------------------------
def getCount(self):
self.buildTmpTables()
results = self.runSQL(self.buildCountSQL(),
'getting %s count' % self.getName())
return results[-1][0]['num']
#-----------------------------------
def buildCountSQL(self):
if self.args.restrictArticles:
restrict = self.RESTRICT_REF_TYPE
else:
restrict = ''
return self.COUNT_SELECT + self.COUNT_FROM + self.getWhereClauses() \
+ restrict
#-----------------------------------
def getRefRecords(self):
"""
Run SQL for basic fields and extracted text fields, & join them.
Return list of records.
Each record represents one article w/ its basic fields & its
extracted text.
"""
self.buildTmpTables()
refQ, textQ = self.buildRefRecordsSQL()
rslts = self.runSQL(refQ, 'getting ref info for %s' % self.getName())
refRcds = rslts[-1]
rslts = self.runSQL(textQ, 'getting extracted text for %s' \
% self.getName())
extTextRcds = rslts[-1]
return self.joinExtractedText(refRcds, extTextRcds)
#-----------------------------------
def joinExtractedText(self, refRcds, extTextRcds):
startTime = time.time()
verbose( "Joining ref info to extracted text\n")
extTextSet = ExtractedTextSet( extTextRcds )
extTextSet.joinRefs2ExtText( refRcds, allowNoText=True )
verbose( "%8.3f seconds\n\n" % (time.time()-startTime))
return refRcds
#-----------------------------------
def buildRefRecordsSQL(self, ):
"""
Assemble SQL statements (strings) to run to get samples from db.
Return pair of SQL (basic fields query, ext text query)
"""
where = self.getWhereClauses()
if self.args.restrictArticles:
restrict = self.RESTRICT_REF_TYPE
else:
restrict = ''
if self.args.nResults > 0: limitSQL ="\nlimit %d\n" % self.args.nResults
else: limitSQL = ''
# set constant field in the select clause for the known class name
refInfoSelect = self.REFINFO_SELECT % self.knownClassName
refInfoSQL = refInfoSelect + self.REFINFO_FROM + where + \
restrict + limitSQL
extTextSQL = self.EXTTEXT_SELECT + self.EXTTEXT_FROM + where + \
restrict + limitSQL
return refInfoSQL, extTextSQL
#-----------------------------------
def buildTmpTables(self,):
if not BaseRefSearch.tmpTablesBuilt:
results = self.runSQL(self.BUILD_TMP_TABLES, 'Building temp tables')
BaseRefSearch.tmpTablesBuilt = True
#-----------------------------------
def runSQL(self, sql, label):
"""
Run an SQL stmt and return results
sql is list of SQLstmts or a single stmt (string)
"""
startTime = time.time()
verbose(label + '...')
        if isinstance(sql, str):
results = db.sql(sql.split(self.SQLSEPARATOR), 'auto')
else:
results = db.sql(sql, 'auto')
verbose( "SQL time: %8.3f seconds\n" % (time.time()-startTime) )
return results
#-----------------------------------
# ------------------ end BaseRefSearch # }
class UnSelectedAfterRefSearch(BaseRefSearch): # {
    """ IS: RefSearch for UNselected refs for a group after the new littriage process
    """
    def __init__(self, args, group, bsvFieldName):
        super(UnSelectedAfterRefSearch, self).__init__(args, False)
        self.group = group
        self.bsvFieldName = bsvFieldName    # bib_status_view field for group
def getName(self):
return '%s UNselected_after %s' % (self.group, self.LIT_TRIAGE_DATE)
def getWhereClauses(self):
return '''
-- UNselected after
where tr.creation_date > '%s'
and ( (r.isdiscard = 1 and r._createdby_key != 1609) --not littriage_discard
or bsv.%s = 'Rejected')
''' % (self.LIT_TRIAGE_DATE, self.bsvFieldName,)
# really want: "r.isdiscard=1 and this discard was set by a curator"
# (rather than automated process),
# but db doesn't keep track of who set discard,
# so "not created by littriage_discard loader" is reasonable approx
# ----------- }
class SelectedAfterRefSearch(BaseRefSearch): # {
    """ IS: RefSearch for selected refs for a group after the new littriage process
    """
    def __init__(self, args, group, bsvFieldName):
        super(SelectedAfterRefSearch, self).__init__(args, True)
        self.group = group
        self.bsvFieldName = bsvFieldName    # bib_status_view field for group
def getName(self):
return '%s selected_after %s' % (self.group, self.LIT_TRIAGE_DATE)
def getWhereClauses(self):
return '''
-- selected after
where tr.creation_date > '%s'
and r.isdiscard = 0 -- likely unnec., but ensures sel and UNsel disjoint
and bsv.%s in ( 'Chosen', 'Indexed', 'Full-coded')
''' % (self.LIT_TRIAGE_DATE, self.bsvFieldName)
# ----------- }
class GoSelectedAfterRefSearch(BaseRefSearch): # {
    """ IS: RefSearch for selected refs for GO after the new lit triage process
    """
    def __init__(self, args,):
        super(GoSelectedAfterRefSearch, self).__init__(args, True)
        self.group = 'GO'
        self.bsvFieldName = 'go_status'     # bib_status_view field for group
def getName(self):
return '%s selected_after %s' % (self.group, self.LIT_TRIAGE_DATE)
def getWhereClauses(self):
return '''
-- GO selected after
where tr.creation_date > '%s'
and r.isdiscard = 0 -- likely unnec., but ensures sel and UNsel disjoint
and bsv.%s in ('Chosen', 'Indexed', 'Full-coded')
and exists (select 1 from bib_workflow_status bs
where bs._refs_key = r._refs_key
and bs._group_key = 31576666 -- GO
and bs._status_key in (31576671, 31576673, 31576674)
-- Chosen, Indexed, Full-coded
and bs._createdby_key != 1571 -- pm2geneload
)
''' % (self.LIT_TRIAGE_DATE, self.bsvFieldName)
# ----------- }
class GoSelectedBeforeRefSearch(BaseRefSearch): # {
    """ IS: RefSearch for selected refs for GO before the new lit triage process
    """
    def __init__(self, args, startDate=None):
        super(GoSelectedBeforeRefSearch, self).__init__(args, True)
        self.group = 'GO'
        self.bsvFieldName = 'go_status'     # bib_status_view field for group
        self.startDate = startDate
def getName(self):
if self.startDate:
return '%s selected_before %s-%s' % \
(self.group, self.startDate, self.LIT_TRIAGE_DATE)
else:
return '%s selected_before %s' % (self.group, self.LIT_TRIAGE_DATE)
def getWhereClauses(self):
if self.startDate:
startDateClause = "and tr.creation_date >= '%s'" % self.startDate
else:
startDateClause = ''
return '''
-- GO selected before
where tr.creation_date <= '%s'
and r.isdiscard = 0 -- likely unnec., but ensures sel and UNsel disjoint
and bsv.%s in ('Chosen', 'Indexed', 'Full-coded')
and exists (select 1 from bib_workflow_status bs
where bs._refs_key = r._refs_key
and bs._group_key = 31576666 -- GO
and bs._status_key in (31576671, 31576673, 31576674)
-- Chosen, Indexed, Full-coded
and bs._createdby_key != 1571 -- pm2geneload
)
''' % (self.LIT_TRIAGE_DATE, self.bsvFieldName) + startDateClause
# ----------- }
class SelectedBeforeRefSearch(BaseRefSearch): # {
    """ IS: RefSearch for selected refs for a group before the new littriage process
    """
    def __init__(self, args, group, bsvFieldName, startDate=None):
        super(SelectedBeforeRefSearch, self).__init__(args, True)
        self.group = group
        self.bsvFieldName = bsvFieldName    # bib_status_view field for group
        self.startDate = startDate
def getName(self):
if self.startDate:
return '%s selected_before %s-%s' % \
(self.group, self.startDate, self.LIT_TRIAGE_DATE)
else:
return '%s selected_before %s' % (self.group, self.LIT_TRIAGE_DATE)
def getWhereClauses(self):
if self.startDate:
startDateClause = "and tr.creation_date >= '%s'" % self.startDate
else:
startDateClause = ''
return '''
-- selected before
where
bsv.%s in ('Chosen', 'Indexed', 'Full-coded')
            and tr.creation_date <= '%s'    -- i.e., before LIT_TRIAGE_DATE
''' % ( self.bsvFieldName, self.LIT_TRIAGE_DATE, ) + startDateClause
# ----------- }
dataSets = {
'ap' : {
'unselected_after': UnSelectedAfterRefSearch(args,'AP','ap_status'),
'selected_after' : SelectedAfterRefSearch(args,'AP','ap_status'),
'selected_before' : SelectedBeforeRefSearch(args,'AP','ap_status',
startDate='6/1/2015'),
},
'go' : {
'unselected_after': UnSelectedAfterRefSearch(args,'GO','go_status'),
'selected_after' : GoSelectedAfterRefSearch(args,),
'selected_before' : GoSelectedBeforeRefSearch(args,startDate='1/1/2014'),
},
'gxd': {
'unselected_after': UnSelectedAfterRefSearch(args,'GXD','gxd_status'),
'selected_after' : SelectedAfterRefSearch(args,'GXD','gxd_status'),
'selected_before' : SelectedBeforeRefSearch(args,'GXD','gxd_status'),
},
'tumor': {
'unselected_after': UnSelectedAfterRefSearch(args,'Tumor','tumor_status'),
'selected_after' : SelectedAfterRefSearch(args,'Tumor','tumor_status'),
'selected_before': SelectedBeforeRefSearch(args,'Tumor','tumor_status'),
},
}
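# Usage note: main() and writeCounts() below look up a search object via
# dataSets[args.group][args.queryKey] (e.g. dataSets['gxd']['selected_after'])
# and then call getRefRecords() or getCount() on it.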
#-----------------------------------
####################
def main():
####################
verbose( "Hitting database %s %s as mgd_public\n" % (args.host, args.db))
verbose( "Query option: %s\n" % args.group)
startTime = time.time()
if args.counts:
writeCounts(args)
else:
if args.restrictArticles:
verbose("Omitting review and non-peer reviewed articles\n")
else:
verbose("Including review and non-peer reviewed articles\n")
refSearch = dataSets[args.group].get(args.queryKey)
if refSearch:
results = refSearch.getRefRecords()
n = writeSamples(results)
verbose("%d samples written\n" % n)
else:
sys.stderr.write("'%s' is not a valid search for group %s\n" % \
(args.queryKey, args.group))
sys.stderr.write("Valid vals: %s\n" % \
str(list(dataSets[args.group].keys())))
return
verbose( "Total time: %8.3f seconds\n\n" % (time.time()-startTime))
#-----------------------------------
def writeCounts(args):
sys.stdout.write(time.ctime() + '\n')
if args.restrictArticles:
sys.stdout.write("Omitting review and non-peer reviewed articles\n")
else:
sys.stdout.write("Including review and non-peer reviewed articles\n")
counts = []
total = 0
searches = dataSets[args.group]
for sName in sorted(searches.keys()):
ds = searches[sName]
count = ds.getCount()
counts.append( {'name': ds.getName(), 'count': count} )
total += count
for countInfo in counts:
        percent = 100.0 * countInfo['count']/total if total else 0.0   # avoid div by zero
sys.stdout.write("%-43s %d\t%4.1f%%\n" % \
(countInfo['name'], countInfo['count'], percent))
return
#-----------------------------------
def writeSamples(results # list of records from SQL query (dicts)
):
"""
Write records to stdout
Return count of records written
"""
global sampleSet
for r in results:
sampleSet.addSample( sqlRecord2ClassifiedSample(r) )
sampleSet.setMetaItem('host', args.host)
sampleSet.setMetaItem('db', args.db)
sampleSet.setMetaItem('time', time.strftime("%Y/%m/%d-%H:%M:%S"))
sampleSet.write(sys.stdout, writeHeader=True, writeMeta=True)
return len(results)
#-----------------------------------
def sqlRecord2ClassifiedSample( r, # sql Result record
):
"""
Encapsulates knowledge of ClassifiedSample.setFields() field names
"""
newR = {}
if str(r['isdiscard']) == '0':
discardKeep = 'keep'
else:
discardKeep = 'discard'
newR['knownClassName']= str(r['known_class_name'])
newR['ID'] = str(r['pubmed'])
newR['creationDate'] = str(r['creation_date'])
newR['year'] = str(r['year'])
newR['journal'] = '_'.join(str(r['journal']).split(' '))
newR['title'] = cleanUpTextField(r, 'title')
newR['abstract'] = cleanUpTextField(r, 'abstract')
newR['extractedText'] = cleanUpTextField(r, 'ext_text')
if args.maxTextLength: newR['extractedText'] += '\n'
newR['discardKeep'] = discardKeep
newR['isReview'] = str(r['isreviewarticle'])
newR['refType'] = str(r['ref_type'])
newR['suppStatus'] = str(r['supp_status'])
newR['apStatus'] = str(r['ap_status'])
newR['gxdStatus'] = str(r['gxd_status'])
newR['goStatus'] = str(r['go_status'])
newR['tumorStatus'] = str(r['tumor_status'])
newR['qtlStatus'] = str(r['qtl_status'])
return SAMPLE_OBJ_TYPE().setFields(newR)
#-----------------------------------
def cleanUpTextField(rcd,
textFieldName,
):
# in case we omit this text field during debugging, check if defined
if rcd.has_key(textFieldName): # 2to3 note: keep this has_key() call
text = str(rcd[textFieldName])
else: text = ''
if args.maxTextLength: # handy for debugging
text = text[:args.maxTextLength]
text = text.replace('\n',' ')
text = utilsLib.removeNonAscii( cleanDelimiters( text))
return text
#-----------------------------------
def cleanDelimiters(text):
""" remove RECORDEND and FIELDSEPs from text (replace w/ ' ')
"""
new = text.replace(RECORDEND,' ').replace(FIELDSEP,' ')
return new
#-----------------------------------
def verbose(text):
if args.verbose:
sys.stderr.write(text)
sys.stderr.flush()
#-----------------------------------
if __name__ == "__main__":
if not (len(sys.argv) > 1 and sys.argv[1] == '--test'):
main()
else: # ad hoc test code
if True: # debug SQL
group = args.group
searches = dataSets[group]
for sName in searches.keys():
print('---------------')
ds = searches[sName]
print(ds.getName())
print(ds.buildCountSQL())
refSQL, textSQL = ds.buildRefRecordsSQL()
print(refSQL)
print(textSQL)