# clarity_to_csv_test.py
import unittest
import clarity_to_csv as ctc
import pandas as pd
from unittest.mock import MagicMock
import sqlparse
testquerydir = 'C:\\Users\\LynchSe\\Documents\\Repos\\rClarity_Tools_Selah\\clarity_to_csv_tests\\'
testdatadir = 'C:\\Users\\LynchSe\\Documents\\Data\\Clarity_Tools_Selah\\'
#TODO spin up a sqlite database here
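# A minimal sketch for the TODO above, assuming SQLAlchemy is installed (it appears
# to back ctc.get_clarity_engine()). The helper name and the PAT_ENC columns seeded
# here are illustrative assumptions, not part of clarity_to_csv; the idea is that the
# tests could eventually point at this in-memory engine instead of a live Clarity server.
def get_sqlite_test_engine():
    from sqlalchemy import create_engine, text
    engine = create_engine('sqlite://')  # in-memory database
    with engine.begin() as conn:
        conn.execute(text(
            "CREATE TABLE PAT_ENC (PAT_ID TEXT, PAT_ENC_CSN_ID TEXT, CONTACT_DATE TEXT)"))
        conn.execute(text(
            "INSERT INTO PAT_ENC VALUES ('1', '100', '2020-01-01'), ('2', '200', '2020-01-02')"))
    return engine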
def line_count(filename):
    with open(filename) as myfile:
        return len(myfile.readlines())
def remove_files(filenamelist):
    import os
    for filename in filenamelist:
        if filename is None:
            continue  # None means "no CSV wanted for this result set"; skip it
        elif os.path.isfile(filename):
            try:
                os.remove(filename)
            except OSError as e:
                print(e)
        else:
            print("Skipping removal because not recognized as a file - {}".format(filename))
class TestStuff(unittest.TestCase):
    def test_remove_file_not_there(self):
        # removing a file that does not exist should not raise an error
        remove_files(['poop.csv'])
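    # A small added sketch of a test for the line_count helper above; it writes a
    # temporary file rather than touching testdatadir, so nothing here depends on
    # Clarity. The method name is new and not part of the original suite.
    def test_line_count_helper(self):
        import tempfile
        import os
        with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as tmp:
            tmp.write('a,b\n1,2\n3,4\n')
            tmpname = tmp.name
        try:
            self.assertEqual(line_count(tmpname), 3)
        finally:
            os.remove(tmpname)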
    def test_integration_test(self):
        sqlfilename1 = testquerydir + "testCohort.sql"
        sqlfilename2 = testquerydir + "readTestCohort.sql"
        genericcsvs = [
            testdatadir + 'test1.csv',
            testdatadir + 'test2.csv',
        ]
        remove_files(genericcsvs)
        with ctc.get_clarity_engine().connect() as sqalconn:
            ctc.clarity_to_csv(sqlfilename1, genericcsvs, dbconn=sqalconn)
            ctc.clarity_to_csv(sqlfilename2, genericcsvs, dbconn=sqalconn)
    def test_save_to_dataframes(self):
        sqlfilename = testquerydir + "testCohort.sql"
        with ctc.get_clarity_engine().connect() as sqalconn:
            (df1, df2) = ctc.sqlfile_to_dfs(sqlfilename, sqalconn)
            self.assertEqual(len(df1), 3)
            self.assertEqual(len(df2), 2)
    def test_save_to_df_2col(self):
        sql_2col = '''SELECT TOP 10 PAT_ID, PAT_ENC_CSN_ID FROM PAT_ENC;
        SELECT TOP 5 CONTACT_DATE FROM PAT_ENC tablesample(0.01);
        '''
        with ctc.get_clarity_engine().connect() as sqalconn:
            (df1, df2) = ctc.sqltext_to_dfs(sql_2col, sqalconn)
            self.assertEqual(len(df1), 10)
            self.assertEqual(len(df2), 5)
    def test_comment_with_semicolon(self):
        sqltext = '''
        SELECT TOP 2 PAT_ID FROM PAT_ENC;
        /*
        SELECT 1; SELECT 2;
        */
        '''
        genericcsvs = [
            testdatadir + 'top2_pat_enc.csv'
        ]
        remove_files(genericcsvs)
        with ctc.get_clarity_engine().connect() as sqalconn:
            ctc.clarity_to_csv_inner(sqltext, genericcsvs, sqalconn)
    def test_none_csv(self):
        sqlfilename = testquerydir + "testCohort.sql"
        genericcsvs = [
            None,  # it should just skip this one
            testdatadir + 'two_empis.csv',
        ]
        remove_files(genericcsvs)
        with ctc.get_clarity_engine().connect() as sqalconn:
            ctc.clarity_to_csv(sqlfilename, genericcsvs, dbconn=sqalconn)
    def test_unicode_error(self):
        genericcsvs = [
            testdatadir + 'test_cohort.csv'
        ]
        remove_files(genericcsvs)
        sql_text = r'''
        SELECT * FROM
        X_COVID19_LAB_ORDERS l
        WHERE PAT_ID IN ('055948350', '041356734', '057338121');
        '''
        with ctc.get_clarity_engine().connect() as conn:
            ctc.clarity_to_csv_inner(sql_text, genericcsvs, conn)
    def test_simple(self):
        genericcsvs = [
            testdatadir + 'test1.csv',
            testdatadir + 'test2.csv'
        ]
        remove_files(genericcsvs)
        simple_sql_text = '''SELECT TOP 10 PAT_ID, PAT_ENC_CSN_ID FROM PAT_ENC;
        SELECT TOP 5 CONTACT_DATE FROM PAT_ENC tablesample(0.01);
        '''
        with ctc.get_clarity_engine().connect() as conn:
            ctc.clarity_to_csv_inner(simple_sql_text, genericcsvs, conn)
            self.assertEqual(line_count(testdatadir + 'test1.csv'), 11)
            self.assertEqual(line_count(testdatadir + 'test2.csv'), 6)
    def test_wrapper(self):
        sqlfilename = testquerydir + "tinyTestQuery.sql"
        genericcsvs = [
            testdatadir + 'test1.csv',
            testdatadir + 'test2.csv',
            testdatadir + 'test3.csv'
        ]
        remove_files(genericcsvs)
        ctc.clarity_to_csv(sqlfilename, genericcsvs)
    # I believe this is hanging
    def test_cohort(self):
        genericcsvs = [
            testdatadir + 'test_cohort.csv'
        ]
        remove_files(genericcsvs)
        # I think it hangs if I don't include 'IF EXISTS'; not sure why.
        sql_text = '''
        DROP TABLE IF EXISTS ##cohort;
        CREATE TABLE ##cohort (EMPI VARCHAR(90) NOT NULL);
        INSERT INTO ##cohort (EMPI) values ('8001111117'),('1000000000');
        SELECT * FROM ##cohort;
        '''
        with ctc.get_clarity_engine().connect() as conn:
            ctc.clarity_to_csv_inner(sql_text, genericcsvs, conn)
            self.assertEqual(line_count(testdatadir + 'test_cohort.csv'), 3)  # a header and two values
    # TODO - deal with the wrong number of CSVs being supplied
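    # A sketch of the TODO above, assuming clarity_to_csv_inner were changed to raise
    # ValueError when the number of CSV paths does not match the number of result sets
    # returned; that behavior is an assumption, not what the code does today, so the
    # sketch is left commented out.
    # def test_wrong_number_of_csvs(self):
    #     sql_text = 'SELECT TOP 2 PAT_ID FROM PAT_ENC;'
    #     with ctc.get_clarity_engine().connect() as conn:
    #         with self.assertRaises(ValueError):
    #             ctc.clarity_to_csv_inner(sql_text, [], conn)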
#%%
if __name__ == '__main__':
    t = TestStuff()
    t.test_save_to_dataframes()
    t.test_save_to_df_2col()
    # t.test_remove_file_not_there()
    # t.test_integration_test()
    # t.test_comment_with_semicolon()
    # t.test_none_csv()
    # t.test_unicode_error()
    # t.test_simple()
    # t.test_wrapper()
    # t.test_cohort()
    # unittest.main()
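# To run the whole suite instead of the ad-hoc calls above:
#   python -m unittest clarity_to_csv_test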