import os
import logging
from csv import DictReader
from itertools import groupby
from operator import itemgetter
from StringIO import StringIO
from datetime import datetime
from urllib2 import HTTPError, URLError
from urlparse import urlparse
from random import shuffle
from argparse import ArgumentParser
from time import time
from re import match
from requests import get, exceptions
from dateutil.tz import tzoffset
import feedparser
from feeds import get_first_working_feed_link
from app import db, Project, Organization, Story, Event, Error, Issue, Label, is_safe_name
# Logging Setup
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
# :NOTE: debug
# import warnings
# warnings.filterwarnings('error')
# org sources filenames
ORG_SOURCES_FILENAME = 'org_sources.csv'
TEST_ORG_SOURCES_FILENAME = 'test_org_sources.csv'
# API URL templates
MEETUP_API_URL = "https://api.meetup.com/2/events?status=past,upcoming&format=json&group_urlname={group_urlname}&key={key}"
GITHUB_USER_REPOS_API_URL = 'https://github.com/users/{username}/repos'
GITHUB_REPOS_API_URL = 'https://github.com/repos{repo_path}'
GITHUB_ISSUES_API_URL = 'https://github.com/repos{repo_path}/issues'
GITHUB_CONTENT_API_URL = 'https://github.com/repos{repo_path}/contents/{file_path}'
if 'GITHUB_TOKEN' in os.environ:
github_auth = (os.environ['GITHUB_TOKEN'], '')
else:
github_auth = None
if 'MEETUP_KEY' in os.environ:
meetup_key = os.environ['MEETUP_KEY']
else:
meetup_key = None
github_throttling = False
def get_github_api(url, headers=None):
'''
Make authenticated GitHub requests.
'''
    logging.info('Asking Github for {}{}'.format(url, ' ({})'.format(headers) if headers else ''))
got = get(url, auth=github_auth, headers=headers)
return got
def format_date(time_in_milliseconds, utc_offset_msec):
'''
Create a datetime object from a time in milliseconds from the epoch
'''
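    # Illustrative: format_date(1428612300000, -14400000) converts 20:45 UTC
    # to the naive local time at UTC-4, i.e. datetime(2015, 4, 9, 16, 45).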
tz = tzoffset(None, utc_offset_msec / 1000.0)
dt = datetime.fromtimestamp(time_in_milliseconds / 1000.0, tz)
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def format_location(venue):
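    ''' Join a Meetup venue dict into a single address string, e.g. a
        hypothetical venue might come out as "123 Main St, Suite 4, Oakland, CA, us".
    '''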
address = venue['address_1']
    if 'address_2' in venue and venue['address_2'] != '':
address = address + ', ' + venue['address_2']
if 'state' in venue:
return "{address}, {city}, {state}, {country}".format(address=address, city=venue['city'], state=venue['state'], country=venue['country'])
else:
return "{address}, {city}, {country}".format(address=address, city=venue['city'], country=venue['country'])
def get_meetup_events(organization, group_urlname):
'''
Get events associated with a group
'''
meetup_url = MEETUP_API_URL.format(group_urlname=group_urlname, key=meetup_key)
got = get(meetup_url)
    if got.status_code in range(400, 500):
logging.error("%s's meetup page cannot be found" % organization.name)
return []
else:
try:
results = got.json()['results']
events = []
            for event in results:
                event_dict = dict(organization_name=organization.name,
                                  name=event['name'],
                                  event_url=event['event_url'],
                                  start_time_notz=format_date(event['time'], event['utc_offset']),
                                  created_at=format_date(event['created'], event['utc_offset']),
                                  utc_offset=event['utc_offset'] / 1000.0)
                # Some events don't have locations. Check the raw API event,
                # not the dict we just built, so venues are actually found.
                if 'venue' in event:
                    event_dict['location'] = format_location(event['venue'])
                events.append(event_dict)
return events
except (TypeError, ValueError):
return []
def get_organizations(org_sources):
''' Collate all organizations from different sources.
'''
organizations = []
with open(org_sources) as file:
for org_source in file.read().splitlines():
if 'docs.google.com' in org_source:
organizations.extend(get_organizations_from_spreadsheet(org_source))
return organizations
def get_organizations_from_spreadsheet(org_source):
'''
Get a row for each organization from the Brigade Info spreadsheet.
Return a list of dictionaries, one for each row past the header.
'''
got = get(org_source)
#
    # Requests' response.text can silently mis-decode UTF-8 bytes as unicode.
    # Use response.content to get the plain bytes, then decode everything.
#
organizations = list(DictReader(StringIO(got.content)))
for (index, org) in enumerate(organizations):
organizations[index] = dict([(k.decode('utf8'), v.decode('utf8'))
for (k, v) in org.items()])
return organizations
def get_stories(organization):
'''
Get two recent stories from an rss feed.
'''
# If there is no given rss link, try the website url.
if organization.rss:
rss = organization.rss
else:
rss = organization.website
    try:
        url = get_first_working_feed_link(rss)
        # If no blog is found, give up.
        if not url:
            return None
    except (HTTPError, ValueError, URLError):
        return None
logging.info('Asking cyberspace for ' + url)
d = feedparser.parse(get(url).text)
#
# Return dictionaries for the two most recent entries.
#
return [dict(title=e.title, link=e.link, type=u'blog', organization_name=organization.name)
for e in d.entries[:2]]
def get_adjoined_json_lists(response, headers=None):
''' Github uses the Link header (RFC 5988) to do pagination.
If we see a Link header, assume we're dealing with lists
and concat them all together.
'''
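    # A paginated GitHub response carries a Link header shaped like:
    #   Link: <https://github.com/...?page=2>; rel="next",
    #         <https://github.com/...?page=5>; rel="last"
    # which requests exposes as response.links['next']['url'], etc.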
result = response.json()
if type(result) is list:
while 'next' in response.links:
response = get_github_api(response.links['next']['url'], headers=headers)
result += response.json()
return result
def get_projects(organization):
'''
Get a list of projects from CSV, TSV, JSON, or Github URL.
Convert to a dict.
TODO: Have this work for GDocs.
'''
# If projects_list is a GitHub organization
# Use the GitHub auth to request all the included repos.
# Follow next page links
_, host, path, _, _, _ = urlparse(organization.projects_list_url)
matched = match(r'(/orgs)?/(?P<name>[^/]+)/?$', path)
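    # e.g. both 'https://github.com/codeforamerica' and
    # 'https://github.com/orgs/codeforamerica' match, with
    # matched.group('name') == 'codeforamerica'.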
if host in ('www.github.com', 'github.com') and matched:
projects_url = GITHUB_USER_REPOS_API_URL.format(username=matched.group('name'))
try:
response = get_github_api(projects_url)
# Consider any status other than 2xx an error
            if response.status_code // 100 != 2:
                return []
projects = get_adjoined_json_lists(response)
except exceptions.RequestException:
# Something has gone wrong, probably a bad URL or site is down.
return []
    # Else it's a CSV or JSON list of projects
else:
projects_url = organization.projects_list_url
logging.info('Asking for ' + projects_url)
try:
response = get(projects_url)
# Consider any status other than 2xx an error
            if response.status_code // 100 != 2:
                return []
            # If it's a CSV
if "csv" in organization.projects_list_url and (('content-type' in response.headers and 'text/csv' in response.headers['content-type']) or 'content-type' not in response.headers):
data = response.content.splitlines()
projects = list(DictReader(data, dialect='excel'))
# convert all the values to unicode
for project in projects:
for project_key, project_value in project.items():
if project_key:
project_key = project_key.lower()
# some values might be lists
if type(project_value) is list:
project_value = [unicode(item.decode('utf8')) for item in project_value]
project[project_key] = project_value
else:
project[project_key] = unicode(project_value.decode('utf8'))
# Else just grab it as json
else:
try:
projects = response.json()
except ValueError:
# Not a json file.
return []
except exceptions.RequestException:
# Something has gone wrong, probably a bad URL or site is down.
return []
# If projects is just a list of GitHub urls, like Open Gov Hack Night
# turn it into a list of dicts with minimal project information
if len(projects) and type(projects[0]) in (str, unicode):
projects = [dict(code_url=item, organization_name=organization.name) for item in projects]
# If data is list of dicts, like BetaNYC or a GitHub org
elif len(projects) and type(projects[0]) is dict:
for project in projects:
project['organization_name'] = organization.name
if "homepage" in project:
project["link_url"] = project["homepage"]
if "html_url" in project:
project["code_url"] = project["html_url"]
for key in project.keys():
if key not in ['name', 'description', 'link_url', 'code_url', 'type', 'categories', 'organization_name', 'status']:
del project[key]
# Get any updates on the projects
projects = [update_project_info(proj) for proj in projects]
# Drop projects with no updates
projects = filter(None, projects)
# Add organization names along the way.
for project in projects:
project['organization_name'] = organization.name
return projects
def non_github_project_update_time(project):
    ''' If it's a non-GitHub project, we should check whether any of the fields
have been updated, such as the description.
Set the last_updated timestamp.
'''
existing_project = db.session.query(Project).filter(Project.name == project['name']).first()
if existing_project:
# project gets existing last_updated
project['last_updated'] = existing_project.last_updated
# unless one of the fields has been updated
for key, value in project.iteritems():
if project[key] != existing_project.__dict__[key]:
project['last_updated'] = datetime.now().strftime("%a, %d %b %Y %H:%M:%S %Z")
else:
# Set a date when we first see a non-github project
project['last_updated'] = datetime.now().strftime("%a, %d %b %Y %H:%M:%S %Z")
return project
def update_project_info(project):
    ''' Update info from GitHub, if it's missing.

        Modify the project dict and return it, or return None when the
        project should be dropped (a dead GitHub link, or no changes since
        the last update).

        Complete repository project details go into extras; for example,
        project details from GitHub can be found under "github_details",
        which is specifically expected to be used on this page:
        http://opengovhacknight.org/projects.html
    '''
if 'code_url' not in project:
project = non_github_project_update_time(project)
return project
_, host, path, _, _, _ = urlparse(project['code_url'])
if host != 'github.com':
project = non_github_project_update_time(project)
return project
# Get the Github attributes
if host == 'github.com':
repo_url = GITHUB_REPOS_API_URL.format(repo_path=path)
# If we've hit the GitHub rate limit, skip updating projects.
global github_throttling
if github_throttling:
return project
# find an existing project, filtering on code_url, organization_name, and project name (if we know it)
existing_filter = [Project.code_url == project['code_url'], Project.organization_name == project['organization_name']]
if 'name' in project and project['name']:
existing_filter.append(Project.name == project['name'])
existing_project = db.session.query(Project).filter(*existing_filter).first()
if existing_project:
# copy 'last_updated' values from the existing project to the project dict
project['last_updated'] = existing_project.last_updated
project['last_updated_issues'] = existing_project.last_updated_issues
project['last_updated_civic_json'] = existing_project.last_updated_civic_json
project['last_updated_root_files'] = existing_project.last_updated_root_files
# request project info from GitHub with the If-Modified-Since header
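            # A conditional request that comes back 304 doesn't count against
            # GitHub's rate limit, so re-checks are cheap.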
if existing_project.last_updated:
last_updated = datetime.strftime(existing_project.last_updated, "%a, %d %b %Y %H:%M:%S GMT")
got = get_github_api(repo_url, headers={"If-Modified-Since": last_updated})
# In rare cases, a project can be saved without a last_updated.
else:
got = get_github_api(repo_url)
else:
got = get_github_api(repo_url)
        if got.status_code in range(400, 500):
if got.status_code == 404:
logging.error(repo_url + ' doesn\'t exist.')
                # If it's a bad GitHub link, don't return it at all.
return None
elif got.status_code == 403:
logging.error("GitHub Rate Limit Remaining: " + str(got.headers["x-ratelimit-remaining"]))
error_dict = {
"error": u'IOError: We done got throttled by GitHub',
"time": datetime.now()
}
new_error = Error(**error_dict)
db.session.add(new_error)
# commit the error
db.session.commit()
github_throttling = True
return project
else:
raise IOError
# If the project has not been modified...
elif got.status_code == 304:
logging.info('Project {} has not been modified since last update'.format(repo_url))
# check whether any of the org spreadsheet values for the project have changed
spreadsheet_is_updated = False
for project_key in project:
check_value = project[project_key]
existing_value = existing_project.__dict__[project_key]
if check_value and check_value != existing_value:
spreadsheet_is_updated = True
elif not check_value and existing_value:
project[project_key] = existing_value
# Populate values from the civic.json if it exists/is updated
project, civic_json_is_updated = update_project_from_civic_json(project)
# if values have changed, copy untouched values from the existing project object and return it
if spreadsheet_is_updated or civic_json_is_updated:
logging.info('Project %s has been modified via spreadsheet or civic.json.', repo_url)
project['last_updated'] = datetime.now().strftime("%a, %d %b %Y %H:%M:%S %Z")
project['github_details'] = existing_project.github_details
return project
# nothing was updated, but make sure we keep the project
# :::here (project/true)
existing_project.keep = True
db.session.add(existing_project)
# commit the project
db.session.commit()
return None
# Save last_updated time header for future requests
project['last_updated'] = got.headers['Last-Modified']
all_github_attributes = got.json()
github_details = {}
for field in ('contributors_url', 'created_at', 'forks_count', 'homepage',
'html_url', 'id', 'language', 'open_issues', 'pushed_at',
'updated_at', 'watchers_count', 'name', 'description', 'stargazers_count'):
github_details[field] = all_github_attributes[field]
github_details['owner'] = dict()
for field in ('avatar_url', 'html_url', 'login', 'type'):
github_details['owner'][field] = all_github_attributes['owner'][field]
project['github_details'] = github_details
if 'name' not in project or not project['name']:
project['name'] = all_github_attributes['name']
if 'description' not in project or not project['description']:
project['description'] = all_github_attributes['description']
if 'link_url' not in project or not project['link_url']:
project['link_url'] = all_github_attributes['homepage']
#
# Populate project contributors from github_details[contributors_url]
#
project['github_details']['contributors'] = []
got = get_github_api(all_github_attributes['contributors_url'])
# Check if there are contributors
try:
for contributor in got.json():
                # Stop at GitHub's placeholder 'invalid-email-address' user.
if contributor['login'] == 'invalid-email-address':
break
project['github_details']['contributors'].append(dict())
for field in ('login', 'url', 'avatar_url', 'html_url', 'contributions'):
project['github_details']['contributors'][-1][field] = contributor[field]
# flag the owner with a boolean value
project['github_details']['contributors'][-1]['owner'] \
= bool(contributor['login'] == project['github_details']['owner']['login'])
        except (TypeError, ValueError, KeyError):
            # No contributors, or an unexpected response shape.
            pass
#
# Populate project participation from github_details[url] + "/stats/participation"
# Sometimes GitHub returns a blank dict instead of no participation.
#
got = get_github_api(all_github_attributes['url'] + '/stats/participation')
try:
project['github_details']['participation'] = got.json()['all']
        except (ValueError, KeyError):
            # Blank or malformed stats; fall back to a flat-zero history.
            project['github_details']['participation'] = [0] * 50
#
# Populate values from the civic.json if it exists/is updated
#
project, civic_json_is_updated = update_project_from_civic_json(project)
return project
def update_project_from_civic_json(project_dict, force=False):
''' Update and return the passed project dict with values from civic.json
'''
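    # Illustrative civic.json; only the 'status' field is consumed so far:
    #   { "status": "Production" }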
civic_json = get_civic_json_for_project(project_dict, force)
is_updated = False
existing_status = project_dict['status'] if 'status' in project_dict else u''
if 'status' in civic_json and existing_status != civic_json['status']:
project_dict['status'] = civic_json['status']
is_updated = True
# add other attributes from civic.json here
return project_dict, is_updated
def get_issues_for_project(project):
''' get the issues for a single project in dict format
without touching the database (used for testing)
'''
issues = []
# Get github issues api url
_, host, path, _, _, _ = urlparse(project.code_url)
issues_url = GITHUB_ISSUES_API_URL.format(repo_path=path)
# Ping github's api for project issues
got = get_github_api(issues_url, headers={'If-None-Match': project.last_updated_issues})
# Save each issue in response
responses = get_adjoined_json_lists(got, headers={'If-None-Match': project.last_updated_issues})
for issue in responses:
        # Type-check the issue; we are expecting a dictionary
if isinstance(issue, dict):
# Pull requests are returned along with issues. Skip them.
if "/pull/" in issue['html_url']:
continue
issue_dict = dict(title=issue['title'], html_url=issue['html_url'],
body=issue['body'], project_id=project.id, labels=issue['labels'])
issues.append(issue_dict)
else:
logging.error('Issue for project %s is not a dictionary', project.name)
return issues
def get_issues(org_name):
'''
    Get GitHub issues associated with each Organization's Projects.
'''
issues = []
# Only grab this organization's projects
projects = db.session.query(Project).filter(Project.organization_name == org_name).all()
# Populate issues for each project
for project in projects:
# Mark this project's issues for deletion
# :::here (issue/false)
db.session.execute(db.update(Issue, values={'keep': False}).where(Issue.project_id == project.id))
# Get github issues api url
_, host, path, _, _, _ = urlparse(project.code_url)
        # Only check issues if it's a GitHub project
if host != 'github.com':
continue
issues_url = GITHUB_ISSUES_API_URL.format(repo_path=path)
# Ping github's api for project issues
# :TODO: non-github projects are hitting here and shouldn't be!
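        # Send the stored ETag as If-None-Match; GitHub answers 304 with an
        # empty body when the issue list hasn't changed since we last asked.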
got = get_github_api(issues_url, headers={'If-None-Match': project.last_updated_issues})
# Verify that content has not been modified since last run
if got.status_code == 304:
# :::here (issue/true)
db.session.execute(db.update(Issue, values={'keep': True}).where(Issue.project_id == project.id))
logging.info('Issues %s have not changed since last update', issues_url)
        elif got.status_code not in range(400, 500):
# Update project's last_updated_issue field
project.last_updated_issues = unicode(got.headers['ETag'])
db.session.add(project)
responses = get_adjoined_json_lists(got, headers={'If-None-Match': project.last_updated_issues})
# Save each issue in response
for issue in responses:
                # Type-check the issue; we are expecting a dictionary
if isinstance(issue, dict):
# Pull requests are returned along with issues. Skip them.
if "/pull/" in issue['html_url']:
continue
issue_dict = dict(title=issue['title'], html_url=issue['html_url'],
body=issue['body'], project_id=project.id, labels=issue['labels'])
issues.append(issue_dict)
else:
logging.error('Issue for project %s is not a dictionary', project.name)
return issues
def get_root_directory_listing_for_project(project_dict, force=False):
''' Get a listing of the project's github repo root directory. Will return
an empty list if the listing hasn't changed since the last time we asked
unless force is True.
'''
# Get the API URL
_, host, path, _, _, _ = urlparse(project_dict['code_url'])
directory_url = GITHUB_CONTENT_API_URL.format(repo_path=path, file_path='')
listing = []
# Request the directory listing
request_headers = {}
if 'last_updated_root_files' in project_dict and not force:
request_headers['If-None-Match'] = project_dict['last_updated_root_files']
got = get_github_api(directory_url, headers=request_headers)
# Verify that content has not been modified since last run
if got.status_code == 304:
logging.info('root directory listing has not changed since last update for {}'.format(directory_url))
    elif got.status_code not in range(400, 500):
logging.info('root directory listing has changed for {}'.format(directory_url))
# Update the project's last_updated_root_files field
project_dict['last_updated_root_files'] = unicode(got.headers['ETag'])
# get the contents of the file
listing = got.json()
else:
logging.info('NO root directory listing found for {}'.format(directory_url))
return listing
def get_civic_json_exists_for_project(project_dict, force=False):
''' Return True if the passed project has a civic.json file in its root directory.
'''
directory_listing = get_root_directory_listing_for_project(project_dict, force)
exists = 'civic.json' in [item['name'] for item in directory_listing]
return exists
def get_civic_json_for_project(project_dict, force=False):
''' Get the contents of the civic.json at the project's github repo root, if it exists.
'''
civic = {}
# return an empty dict if civic.json doesn't exist (or hasn't been updated)
if not get_civic_json_exists_for_project(project_dict, force):
return civic
# Get the API URL
_, host, path, _, _, _ = urlparse(project_dict['code_url'])
civic_url = GITHUB_CONTENT_API_URL.format(repo_path=path, file_path='civic.json')
# Request the contents of the civic.json file
# without the 'Accept' header we'd get information about the
# file rather than the contents of the file
request_headers = {'Accept': 'application/vnd.github.v3.raw'}
if 'last_updated_civic_json' in project_dict and not force:
request_headers['If-None-Match'] = project_dict['last_updated_civic_json']
got = get_github_api(civic_url, headers=request_headers)
# Verify that content has not been modified since last run
if got.status_code == 304:
logging.info('Unchanged civic.json at {}'.format(civic_url))
    elif got.status_code not in range(400, 500):
logging.info('New civic.json at {}'.format(civic_url))
# Update the project's last_updated_civic_json field
project_dict['last_updated_civic_json'] = unicode(got.headers['ETag'])
try:
# get the contents of the file
civic = got.json()
except ValueError:
logging.error('Malformed civic.json at {}'.format(civic_url))
else:
logging.info('No civic.json at {}'.format(civic_url))
return civic
def count_people_totals(all_projects):
''' Create a list of people details based on project details.
Request additional data from Github API for each person.
See discussion at
https://github.com/codeforamerica/civic-json-worker/issues/18
'''
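    # e.g. a person who contributed to two of the projects ends up as one
    # user dict with repositories=2 and their contributions summed.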
users, contributors = [], []
for project in all_projects:
contributors.extend(project['contributors'])
#
# Sort by login; there will be duplicates!
#
contributors.sort(key=itemgetter('login'))
#
# Populate users array with groups of contributors.
#
for (_, _contributors) in groupby(contributors, key=itemgetter('login')):
user = dict(contributions=0, repositories=0)
for contributor in _contributors:
user['contributions'] += contributor['contributions']
user['repositories'] += 1
        #
        # Populate user hash with GitHub info, using the last contributor
        # in the group (every entry in a group shares the same login).
        #
got = get_github_api(contributor['url'])
contributor = got.json()
for field in (
'login', 'avatar_url', 'html_url',
'blog', 'company', 'location'):
user[field] = contributor.get(field, None)
users.append(user)
return users
def save_organization_info(session, org_dict):
''' Save a dictionary of organization info to the datastore session.
Return an app.Organization instance.
'''
# Select an existing organization by name.
filter = Organization.name == org_dict['name']
existing_org = session.query(Organization).filter(filter).first()
# :::here (organization/true)
# If this is a new organization, save and return it. The keep parameter is True by default.
if not existing_org:
new_organization = Organization(**org_dict)
session.add(new_organization)
return new_organization
# Timestamp the existing organization
existing_org.last_updated = time()
# :::here (organization/true)
existing_org.keep = True
# Update existing organization details.
for (field, value) in org_dict.items():
setattr(existing_org, field, value)
return existing_org
def save_project_info(session, proj_dict):
''' Save a dictionary of project info to the datastore session.
Return an app.Project instance.
'''
# Select the current project, filtering on name AND organization.
filter = Project.name == proj_dict['name'], Project.organization_name == proj_dict['organization_name']
existing_project = session.query(Project).filter(*filter).first()
# If this is a new project, save and return it.
if not existing_project:
new_project = Project(**proj_dict)
session.add(new_project)
return new_project
# Preserve the existing project
# :::here (project/true)
existing_project.keep = True
# Update existing project details
for (field, value) in proj_dict.items():
setattr(existing_project, field, value)
return existing_project
def save_issue(session, issue):
'''
Save a dictionary of issue info to the datastore session.
Return an app.Issue instance
'''
# Select the current issue, filtering on title AND project_id.
filter = Issue.title == issue['title'], Issue.project_id == issue['project_id']
existing_issue = session.query(Issue).filter(*filter).first()
# If this is a new issue save it
if not existing_issue:
new_issue = Issue(**issue)
session.add(new_issue)
else:
# Preserve the existing issue.
# :::here (issue/true)
existing_issue.keep = True
# Update existing issue details
existing_issue.title = issue['title']
existing_issue.body = issue['body']
existing_issue.html_url = issue['html_url']
existing_issue.project_id = issue['project_id']
def save_labels(session, issue):
'''
Save labels to issues
'''
# Select the current issue, filtering on title AND project_id.
filter = Issue.title == issue['title'], Issue.project_id == issue['project_id']
existing_issue = session.query(Issue).filter(*filter).first()
# Get list of existing and incoming label names (dupes will be filtered out in comparison process)
existing_label_names = [label.name for label in existing_issue.labels]
incoming_label_names = [label['name'] for label in issue['labels']]
# Add labels that are in the incoming list and not the existing list
add_label_names = list(set(incoming_label_names) - set(existing_label_names))
for label_dict in issue['labels']:
if label_dict['name'] in add_label_names:
# add the issue id to the labels
label_dict["issue_id"] = existing_issue.id
new_label = Label(**label_dict)
session.add(new_label)
# Delete labels that are not in the incoming list but are in the existing list
delete_label_names = list(set(existing_label_names) - set(incoming_label_names))
for label_name in delete_label_names:
session.query(Label).filter(Label.issue_id == existing_issue.id, Label.name == label_name).delete()
def save_event_info(session, event_dict):
'''
    Save a dictionary of event info to the datastore session, then return
    that event instance.
'''
# Select the current event, filtering on event_url and organization name.
filter = Event.event_url == event_dict['event_url'], \
Event.organization_name == event_dict['organization_name']
existing_event = session.query(Event).filter(*filter).first()
# If this is a new event, save and return it.
if not existing_event:
new_event = Event(**event_dict)
session.add(new_event)
return new_event
# Preserve the existing event.
# :::here (event/true)
existing_event.keep = True
# Update existing event details
for (field, value) in event_dict.items():
setattr(existing_event, field, value)
def save_story_info(session, story_dict):
'''
    Save a dictionary of story info to the datastore session, then return
    that story instance.
'''
# Select the current story, filtering on link and organization name.
filter = Story.organization_name == story_dict['organization_name'], \
Story.link == story_dict['link']
existing_story = session.query(Story).filter(*filter).first()
# If this is a new story, save and return it.
if not existing_story:
new_story = Story(**story_dict)
session.add(new_story)
return new_story
# Preserve the existing story.
# :::here (story/true)
existing_story.keep = True
# Update existing story details
for (field, value) in story_dict.items():
setattr(existing_story, field, value)
def get_event_group_identifier(events_url):
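    ''' Extract the Meetup group identifier from an events URL; for example,
        'https://www.meetup.com/Code-for-Miami/' gives 'Code-for-Miami'.
    '''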
parse_result = urlparse(events_url)
url_parts = parse_result.path.split('/')
identifier = url_parts.pop()
if not identifier:
identifier = url_parts.pop()
    if match('^[A-Za-z0-9-]+$', identifier):
return identifier
else:
return None
# Org sources is a file listing organization spreadsheets, one URL per line
# (currently only Google Docs CSV exports are read).
# Each spreadsheet lists the organizations you want included at /organizations.
# Spreadsheet columns should be: name, website, events_url, rss,
# projects_list_url, city, latitude, longitude, type
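# For example, org_sources.csv might hold one Google Docs CSV export URL per
# line (URL below is illustrative):
#   https://docs.google.com/spreadsheets/d/EXAMPLE_KEY/export?format=csv
# and each row of that spreadsheet describes one organization:
#   name,website,events_url,rss,projects_list_url,city,latitude,longitude,type
#   Code for ABC,http://example.org,,,https://github.com/example-org,Anytown,0.0,0.0,Brigade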
def main(org_name=None, org_sources=None):
''' Run update over all organizations. Optionally, update just one.
'''
# set org_sources
org_sources = org_sources or ORG_SOURCES_FILENAME
# Collect a set of fresh organization names.
organization_names = set()
# Retrieve all organizations and shuffle the list in place.
orgs_info = get_organizations(org_sources)
shuffle(orgs_info)
if org_name:
orgs_info = [org for org in orgs_info if org['name'] == org_name]
# Iterate over organizations and projects, saving them to db.session.
for org_info in orgs_info:
if not is_safe_name(org_info['name']):
error_dict = {
"error": unicode('ValueError: Bad organization name: "%s"' % org_info['name']),
"time": datetime.now()
}
new_error = Error(**error_dict)
db.session.add(new_error)
# commit the error
db.session.commit()
continue
try:
filter = Organization.name == org_info['name']
existing_org = db.session.query(Organization).filter(filter).first()
organization_names.add(org_info['name'])
# Mark everything associated with this organization for deletion at first.
# :::here (event/false, story/false, project/false, organization/false)
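            # Mark-and-sweep: anything still keep=False after this
            # organization is processed gets deleted at the end of the loop.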
db.session.execute(db.update(Event, values={'keep': False}).where(Event.organization_name == org_info['name']))
db.session.execute(db.update(Story, values={'keep': False}).where(Story.organization_name == org_info['name']))
db.session.execute(db.update(Project, values={'keep': False}).where(Project.organization_name == org_info['name']))
db.session.execute(db.update(Organization, values={'keep': False}).where(Organization.name == org_info['name']))
# commit the false keeps
db.session.commit()
# Empty lat longs are okay.
if 'latitude' in org_info:
if not org_info['latitude']:
org_info['latitude'] = None
if 'longitude' in org_info:
if not org_info['longitude']:
org_info['longitude'] = None
organization = save_organization_info(db.session, org_info)
organization_names.add(organization.name)
# flush the organization
db.session.flush()
if organization.rss or organization.website:
logging.info("Gathering all of %s's stories." % organization.name)
stories = get_stories(organization)
if stories:
for story_info in stories:
save_story_info(db.session, story_info)
# flush the stories
db.session.flush()
if organization.projects_list_url:
logging.info("Gathering all of %s's projects." % organization.name)
projects = get_projects(organization)
for proj_dict in projects:
save_project_info(db.session, proj_dict)
# flush the projects
db.session.flush()
if organization.events_url:
if not meetup_key:
logging.error("No Meetup.com key set.")
if 'meetup.com' not in organization.events_url:
logging.error("Only Meetup.com events work right now.")
else:
logging.info("Gathering all of %s's events." % organization.name)
identifier = get_event_group_identifier(organization.events_url)
if identifier:
for event in get_meetup_events(organization, identifier):
save_event_info(db.session, event)
# flush the events
db.session.flush()
else:
logging.error("%s does not have a valid events url" % organization.name)
# Get issues for all of the projects
logging.info("Gathering all of %s's open GitHub issues." % organization.name)
issues = get_issues(organization.name)
for issue in issues:
save_issue(db.session, issue)
# flush the issues
db.session.flush()
for issue in issues:
save_labels(db.session, issue)
# commit everything
db.session.commit()
# Remove everything marked for deletion.
# :::here (event/delete, story/delete, project/delete, issue/delete, organization/delete)
db.session.query(Event).filter(Event.keep == False).delete()
db.session.query(Story).filter(Story.keep == False).delete()
db.session.query(Issue).filter(Issue.keep == False).delete()
db.session.query(Project).filter(Project.keep == False).delete()
db.session.query(Organization).filter(Organization.keep == False).delete()
# commit objects deleted for keep=False
db.session.commit()
except:
# Raise the error, get out of main(), and don't commit the transaction.
raise
else:
            # Final commit before moving on to the next organization.
db.session.commit()
# prune orphaned organizations if no organization name was passed
if not org_name: