#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Adrià Mercader (https://github.com/amercader)
# Copyright (c) 2018, Peter Hanecak <hanecak@opendata.sk>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import configparser
import csv
import datetime
import json
import os
import pickle
import sys
import requests
USAGE = '''
datastore_updater.py setup
    Creates a dataset in the remote CKAN instance and adds an (initially
    empty) DataStore resource to it. It will print the resource id that
    you must write in your configuration file so that the DataStore
    table can be updated regularly with the `update` command.
datastore_updater.py update
    Reads the EKS "Zakazky" CSV files from the configured directory and
    upserts their records into the DataStore, starting from the last
    processed month. You need to add the resource_id printed by the
    `setup` command to your configuration file before running this one.
    You should run this command periodically, e.g. from a cron job.
'''
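# A minimal illustrative cron entry for the periodic `update` run; the
# install path below is a placeholder, not something this script assumes:
#   0 * * * * cd /path/to/eks-od-datastore-pusher && ./datastore_updater.py update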
BATCH_SIZE = 1000
STATE_FILE = 'datastore_updater.state'
# state keys
STATE_LAST_PROCESSED = 'last_processed'
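# Illustrative state contents (a pickled dict), e.g. right after the
# ZoznamZakaziekReport_2018-3_.csv file has been processed:
#   {'last_processed': '2018-3'}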
# structure description
ZAZKAZKY_STRUCTURE = [
{'id': 'IdentifikatorZakazky',
'type': 'text',
'csvindex': 0},
{'id': 'ZakazkaUrl',
'type': 'text',
'csvindex': 1},
{'id': 'StavZakazky',
'type': 'text',
'csvindex': 2},
{'id': 'PouzityPostup',
'type': 'text',
'csvindex': 3},
{'id': 'ObjednavatelDruh',
'type': 'text',
'csvindex': 4},
{'id': 'ObjednavatelObchodneMeno',
'type': 'text',
'csvindex': 5},
{'id': 'ObjednavatelICO',
'type': 'text',
'csvindex': 6},
{'id': 'ObjednavatelStat',
'type': 'text',
'csvindex': 7},
{'id': 'ObjednavatelObec',
'type': 'text',
'csvindex': 8},
{'id': 'ObjednavatelPSC',
'type': 'text',
'csvindex': 9},
{'id': 'ObjednavatelUlica',
'type': 'text',
'csvindex': 10},
{'id': 'DatumVyhlasenia',
'type': 'timestamp',
'csvindex': 11},
{'id': 'DatumZazmluvnenia',
'type': 'timestamp',
'csvindex': 12},
{'id': 'OpisnyFormularNazov',
'type': 'text',
'csvindex': 13},
{'id': 'OpisnyFormularKlucoveSlova',
'type': 'text',
'csvindex': 14},
{'id': 'OpisnyFormularCpv',
'type': 'text',
'csvindex': 15},
{'id': 'OpisnyFormularDruh',
'type': 'text',
'csvindex': 16},
{'id': 'OpisnyFormularKategoriaSluzieb',
'type': 'text',
'csvindex': 17},
{'id': 'OpisnyFormularFunkcnaSpecifikacia',
'type': 'text',
'csvindex': 18},
{'id': 'OpisnyFormularTechnickaSpecifikaciaTextova',
'type': 'text',
'csvindex': 19},
{'id': 'OpisnyFormularTechnickaSpecifikaciaCiselna',
'type': 'text',
'csvindex': 20},
{'id': 'MiestoPlneniaStat',
'type': 'text',
'csvindex': 21},
{'id': 'MiestoPlneniaKraj',
'type': 'text',
'csvindex': 22},
{'id': 'MiestoPlneniaOkres',
'type': 'text',
'csvindex': 23},
{'id': 'MiestoPlneniaObec',
'type': 'text',
'csvindex': 24},
{'id': 'MiestoPlneniaUlica',
'type': 'text',
'csvindex': 25},
{'id': 'LehotaPlneniaOd',
'type': 'timestamp',
'csvindex': 26},
{'id': 'LehotaPlneniaDo',
'type': 'timestamp',
'csvindex': 27},
{'id': 'LehotaPlneniaPresne',
'type': 'timestamp',
'csvindex': 28},
{'id': 'MnozstvoJednotka',
'type': 'text',
'csvindex': 29},
{'id': 'MnozstvoHodnota',
'type': 'float',
'csvindex': 30},
{'id': 'MaximalnaVyskaZdrojov',
'type': 'float',
'csvindex': 31},
{'id': 'ZmluvnyVztah',
'type': 'text',
'csvindex': 32},
{'id': 'FinancovanieEU',
'type': 'bool',
'csvindex': 33},
{'id': 'HodnotiaceKriterium',
'type': 'text',
'csvindex': 34},
{'id': 'LehotaNaPredkladaniePonuk',
'type': 'timestamp',
'csvindex': 35},
{'id': 'PocetNotifikovanychDodavatelov',
'type': 'integer',
'csvindex': 36},
{'id': 'VstupnaCena',
'type': 'float',
'csvindex': 37},
{'id': 'PocetSutaziacich',
'type': 'integer',
'csvindex': 38},
{'id': 'PocetPredlozenychPonuk',
'type': 'integer',
'csvindex': 39},
{'id': 'ZaciatokAukcie',
'type': 'timestamp',
'csvindex': 40},
{'id': 'TrvanieAukcie_Minut',
'type': 'integer',
'csvindex': 41},
{'id': 'PredlzovanieAukcie_Minut',
'type': 'integer',
'csvindex': 42},
{'id': 'ProtokolOPriebehuZadavaniaZakazky',
'type': 'text',
'csvindex': 43},
{'id': 'Priloha_c1_ZmluvnyFormularZakazky',
'type': 'text',
'csvindex': 44},
{'id': 'Priloha_c2_VyslednePoradieDodavatelov',
'type': 'text',
'csvindex': 45},
{'id': 'Priloha_c3_Zmluva',
'type': 'text',
'csvindex': 46},
{'id': 'Priloha_c4A_ZaznamOSystemovychUdalostiachZakazky',
'type': 'text',
'csvindex': 47},
{'id': 'Priloha_c4B_ZaznamOSystemovychUdalostiachElektronickejAukcie',
'type': 'text',
'csvindex': 48},
{'id': 'AnonymnyZmluvnyFormularZakazky',
'type': 'text',
'csvindex': 49},
{'id': 'ObjednavkovyFormularZakazky',
'type': 'text',
'csvindex': 50},
]
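# Each entry above binds one CSV column (by zero-based 'csvindex') to one
# DataStore field with an explicit type; e.g. CSV column 0 is loaded into
# the text field 'IdentifikatorZakazky', which setup() below also declares
# as the primary key.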
class EksZakazkyDatastoreUpdater:
def __init__(self):
self.state = {}
self.load_state()
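        # Illustrative config.ini layout (the keys match what is read below;
        # the values are placeholders):
        #   [main]
        #   ckan_url = https://ckan.example.org
        #   api_key = <your CKAN API key>
        #   resource_id = <printed by `datastore_updater.py setup`>
        #   directory_zakazky = <path to the directory with "Zakazky" CSVs>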
        config = configparser.ConfigParser()
        config.read('config.ini')
        for key in ('ckan_url', 'api_key',):
            if not config.get('main', key, fallback=None):
                exit('Please fill the {0} option in the config.ini file'
                     .format(key))
        self.ckan_url = config.get('main', 'ckan_url').rstrip('/')
        self.api_key = config.get('main', 'api_key')
        self.resource_id = config.get('main', 'resource_id', fallback=None)
        if not self.resource_id:
            exit('You need to add the resource id to your configuration file.\n'
                 'Did you run `datastore_updater.py setup` first?')
        self.directory_zakazky = config.get('main', 'directory_zakazky',
                                            fallback=None)
        if not self.directory_zakazky:
            exit('You need to add the path to the directory with "Zakazky" '
                 'files to your configuration file.')
def load_state(self):
if not os.path.isfile(STATE_FILE):
print('info: no previous state found (%s)' % STATE_FILE)
return
        with open(STATE_FILE, 'rb') as state_file:
            self.state = pickle.load(state_file)

    def save_state(self):
        with open(STATE_FILE, 'wb') as state_file:
            pickle.dump(self.state, state_file)
def exit(self, msg=USAGE):
print(msg)
sys.exit(1)
def setup(self):
"""Basic setup operation called from command line."""
# Create a dataset first
data = {
'name': 'eks-zakazky-datapusher-test8',
'title': 'EKS - Zakázky - datapusher test (alpha)',
'owner_org': 'opendata_sk', # TODO: take that from config.ini
'notes': '''
Target for https://github.com/OpenDataSk/eks-od-datastore-pusher during development and testing. Thus:
- it may contain bogus data
- data may vanish without warning
- BEWARE OF DRAGONS
''',
}
response = requests.post(
'{0}/api/action/package_create'.format(self.ckan_url),
data=json.dumps(data),
headers={'Content-type': 'application/json',
'Authorization': self.api_key},
            # FIXME: security vulnerability => move this to config.ini so that
            # those using self-signed certs can get stuff working, but those
            # with good certs can be safe by default!
            # (reference: http://docs.python-requests.org/en/master/user/advanced/?highlight=ssl#ssl-cert-verification)
verify=False)
if response.status_code != 200:
exit('Error creating dataset: {0}'.format(response.content))
dataset_id = response.json()['result']['id']
# Then create a resource, empty at the beginning
records = []
# Manually set the field types to ensure they are handled properly
        # TODO: These fields are for "Zakazky". Later we will extend this
        # also to other EKS sets (Zmluvy, ...)
fields = []
for item in ZAZKAZKY_STRUCTURE:
field = {
'id': item['id'],
'type': item['type']
}
fields.append(field)
# Push the records to the DataStore table. This will create a resource
# of type datastore.
data = {
'resource': {
'package_id': dataset_id,
'name': 'Zakazky',
'format': 'csv',
'notes': '''
Set of multiple CSVs merged together into one complete resource.
TODO: further details
'''
},
'records': records,
'fields': fields,
'primary_key': ['IdentifikatorZakazky'],
}
response = requests.post(
'{0}/api/action/datastore_create'.format(self.ckan_url),
data=json.dumps(data),
headers={'Content-type': 'application/json',
'Authorization': self.api_key},
            # FIXME: security vulnerability => move this to config.ini so that
            # those using self-signed certs can get stuff working, but those
            # with good certs can be safe by default!
            # (reference: http://docs.python-requests.org/en/master/user/advanced/?highlight=ssl#ssl-cert-verification)
verify=False)
if response.status_code != 200:
exit('Error: {0}'.format(response.content))
resource_id = response.json()['result']['resource_id']
print('''
Dataset and DataStore resource successfully created with {0} records.
Please add the resource id to your ini file:
resource_id={1}
'''.format(len(records), resource_id))
def find_oldest_csvdate(self):
"""Find oldest CSV file in the given directory.
"Oldest" does not mean selection based on file modification time but instead
based on year and month embedded in the file names.
For "Zakazky", following file naming is used:
ZoznamZakaziekReport_2018-3_.csv
ZoznamZakaziekReport_2018-4_.csv
So here, '2018-3' (a.k.a. "CVS date") would be returned."""
# list the self.directory_zakazky, skip directories, parse out
# available dates (YYYY-M) from file names
file_dates = []
        print('debug: directory listing: %s'
              % os.listdir(self.directory_zakazky))
for diritem in os.listdir(self.directory_zakazky):
if not os.path.isfile(os.path.join(self.directory_zakazky, diritem)):
continue
try:
zdate = datetime.datetime.strptime(diritem, 'ZoznamZakaziekReport_%Y-%m_.csv')
file_dates.append(zdate)
except ValueError:
print("debug: file %s does not match, skipping" % diritem)
        if not file_dates:
            exit('error: no "Zakazky" CSV files found in %s'
                 % self.directory_zakazky)
        file_dates.sort()
        return '{d.year}-{d.month}'.format(d=file_dates[0])
@staticmethod
def next_csvdate(csvdate):
"""Determine next CSV date.
Here, we simpe do "+1 month", assuming we do not have gaps in our
CSV copy and if file is not founf, then "we went too far into
future, no file available yet".
Example:
'2018-3' -> '2018-4'
"""
zdate = datetime.datetime.strptime(csvdate, '%Y-%m')
ztimedelta = datetime.timedelta(days=31)
return '{d.year}-{d.month}'.format(d = (zdate + ztimedelta))
@staticmethod
def csv_header_check(row):
"""Validate a header of CSV file, i.e. fail-safe check which should prevent
the script from loading improper data into datastore.
Checks should be sufficient to catch at least:
1) wrong CSV file (i.e. something completely unrelated)
2) CSV with new/changed structure (EKS may change stuff)"""
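        # For illustration: a valid header row parses into the field names
        # from ZAZKAZKY_STRUCTURE (the first one prefixed with a BOM) plus
        # one trailing empty string, roughly:
        #   ['\ufeffIdentifikatorZakazky', 'ZakazkaUrl', ..., '']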
# Length is -1 because EKS puts separator ',' at the end which creates
# one more empty column.
hlen = len(row) - 1
if hlen != len(ZAZKAZKY_STRUCTURE):
print('error: %d items in header found, %d expected' % (hlen, len(ZAZKAZKY_STRUCTURE)))
return False
for sitem in ZAZKAZKY_STRUCTURE:
# We strip \ufeff because it's in the CSV file header, thus
# "damaging" name of first column.
ritem = row[sitem['csvindex']].strip('"\ufeff')
if sitem['id'] != ritem:
print("error: '%s' expected in row %d, '%s' found"
% (sitem['id'], sitem['csvindex'], ritem))
return False
return True
@staticmethod
def convert_date(eks_date):
"""Convert date used by EKS to ISO date, e.g.:
'5.3.2018 9:00:00' -> '2018-03-05T09:00:00'
"""
if len(eks_date) <= 0:
return None
date = datetime.datetime.strptime(eks_date, '%d.%m.%Y %H:%M:%S')
        # FIXME: While we are at it, we may add a proper time zone (EKS is
        # presumably using "Europe/Bratislava") so as to have proper
        # timestamps.
return date.isoformat()
@staticmethod
def convert_float(eks_float):
"""Convert floats used by EKS (i.e. decinal separated with ',') into
proper JSON floats, e.g.:
'1,0000' -> 1.0
"""
if len(eks_float) <= 0:
return None
return float(eks_float.replace(',', '.'))
@staticmethod
def convert_int(eks_int):
"""We're getting "strings" from CSV, so strip quotes to get int
suitable for JSON, e.g.:
'504' -> 504
"""
if len(eks_int) <= 0:
return None
return eks_int.strip("'")
def upsert(self, records):
"""Upsert given records into data store."""
if len(records) == 0:
return
# Push the records to the DataStore table
data = {
'resource_id': self.resource_id,
'method': 'upsert',
'records': records,
}
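        # Illustrative JSON body as posted to CKAN's datastore_upsert action
        # (the record shown is a sketch, with an id taken from the example
        # in update_month() below):
        #   {"resource_id": "<resource_id>", "method": "upsert",
        #    "records": [{"IdentifikatorZakazky": "Z20187264", ...}]}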
response = requests.post(
'{0}/api/action/datastore_upsert'.format(self.ckan_url),
data=json.dumps(data),
headers={'Content-type': 'application/json',
'Authorization': self.api_key},
            # FIXME: security vulnerability => move this to config.ini so that
            # those using self-signed certs can get stuff working, but those
            # with good certs can be safe by default!
            # (reference: http://docs.python-requests.org/en/master/user/advanced/?highlight=ssl#ssl-cert-verification)
verify=False)
if response.status_code != 200:
exit('Error: {0}'.format(response.content))
print('debug: pushed %d items in a batch' % len(records))
def update_month(self, csvdate):
"""
Basic update operation for one month (i.e. one CSV file).
        csvdate: portion of the CSV file name with year and month (e.g. '2018-3')
        Returns:
        - True: file processed (and we may attempt the file for the next month)
        - False: file not found (and thus it looks like we're done)
"""
# prepare mapping from structure and some helpers
# TODO: This is "static", i.e. move it somewhere so that it runs
# only once, not "once for each CSV file".
mapping = {}
for item in ZAZKAZKY_STRUCTURE:
mapping[item['id']] = item['csvindex']
DATE_ITEM_NAMES = ['DatumVyhlasenia', 'DatumZazmluvnenia', 'LehotaPlneniaOd',
'LehotaPlneniaDo', 'LehotaPlneniaPresne', 'LehotaNaPredkladaniePonuk',
'ZaciatokAukcie']
FLOAT_ITEM_NAMES = ['MnozstvoHodnota', 'MaximalnaVyskaZdrojov', 'VstupnaCena']
INT_ITEM_NAMES = ['PocetNotifikovanychDodavatelov', 'PocetSutaziacich',
'PocetPredlozenychPonuk', 'TrvanieAukcie_Minut', 'PredlzovanieAukcie_Minut']
# some other hacks:
        # - some EKS items are too big, triggering
        #   "csv.Error: field larger than field limit"
csv.field_size_limit(262144)
# records to be inserted
records = []
# Load the CSV file
csvfn = '%s/ZoznamZakaziekReport_%s_.csv' % (self.directory_zakazky, csvdate)
if not os.path.exists(csvfn):
print("file %s not available, it looks like we are done" % csvfn)
return False
with open(csvfn, 'r') as csvfile:
print("loading %s ..." % csvfn)
itemreader = csv.reader(csvfile)
counter = 0
for row in itemreader:
counter += 1
if counter == 1:
if not self.csv_header_check(row):
exit('%s header check failed' % csvfn)
continue
# convert row from CSV into JSON row
rowjson = {}
for mitem in mapping:
rowjson[mitem] = row[mapping[mitem]]
# fix dates, floats, etc.:
for mitem in DATE_ITEM_NAMES:
rowjson[mitem] = self.convert_date(row[mapping[mitem]])
for mitem in FLOAT_ITEM_NAMES:
rowjson[mitem] = self.convert_float(row[mapping[mitem]])
for mitem in INT_ITEM_NAMES:
rowjson[mitem] = self.convert_int(row[mapping[mitem]])
                # TODO: add duplicate detection: for example,
                # ZoznamZakaziekReport_2018-3_.csv contains 'Z20187264' at
                # least three times. We push all occurrences to 'records'
                # here, but the DataStore (with 'IdentifikatorZakazky' as the
                # primary key and method 'upsert') overwrites the first
                # occurrence with the second, etc., so in the end only the
                # last item actually gets stored. It is not clear what to do
                # about that, but at least we should detect duplicates and
                # report their line numbers in a dedicated "problems" column?
# TODO: use the ID to obtain the row also from CKAN, so that we
# can properly create "created" and "modified" timestamps
records.append(rowjson)
# batching, to avoid pushing too much in one call
if len(records) >= BATCH_SIZE:
self.upsert(records)
records = []
        # upsert the remainder of the records, then record the state
self.upsert(records)
self.state[STATE_LAST_PROCESSED] = csvdate
self.save_state()
        print('DataStore resource successfully updated with %d records.'
              % (counter - 1))
return True
def update(self):
"""Basic update operation called from command line."""
# Load "state" (YYYY-M of last processed file); if not then
month_to_process = None
if STATE_LAST_PROCESSED in self.state:
month_to_process = self.state[STATE_LAST_PROCESSED]
if month_to_process is None:
month_to_process = self.find_oldest_csvdate()
# process "last processed" month assuming:
# 1) if it also still "current month": we will process all, pick
# updates, re-process again items/line maybe needlessly (but such
# waste is considered OK while it helps avoid more code)
# 2) if it is "last month": we weill process it "for the last time",
# picking up latest updates and then proceed to the next (i.e.
# current) month
counter = 0
while self.update_month(month_to_process):
counter += 1
# OK, get the name for "next month" and try it ...
month_to_process = self.next_csvdate(month_to_process)
print('%d files processed.' % counter)
return
if __name__ == '__main__':
    if len(sys.argv) < 2:
        exit(USAGE)
action = sys.argv[1]
    if action not in ('setup', 'update',):
        exit(USAGE)
eks_zakazky = EksZakazkyDatastoreUpdater()
if action == 'setup':
eks_zakazky.setup()
elif action == 'update':
eks_zakazky.update()