field             dtype          range
partition         stringclasses  3 values
func_name         stringlengths  1 to 134
docstring         stringlengths  1 to 46.9k
path              stringlengths  4 to 223
original_string   stringlengths  75 to 104k
code              stringlengths  75 to 104k
docstring_tokens  listlengths    1 to 1.97k
repo              stringlengths  7 to 55
language          stringclasses  1 value
url               stringlengths  87 to 315
code_tokens       listlengths    19 to 28.4k
sha               stringlengths  40 to 40
test
GitHub_Traffic.get_releases
Retrieves the releases for the given repo in JSON.
scripts/get_traffic.py
def get_releases(self, url='', headers={}, repo_name=''):
    """
    Retrieves the releases for the given repo in JSON.
    """
    url_releases = (url + '/releases')
    r = requests.get(url_releases, headers=headers)
    self.releases_json[repo_name] = r.json()
[ "Retrieves", "the", "releases", "for", "the", "given", "repo", "in", "JSON", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L134-L140
[ "def", "get_releases", "(", "self", ",", "url", "=", "''", ",", "headers", "=", "{", "}", ",", "repo_name", "=", "''", ")", ":", "url_releases", "=", "(", "url", "+", "'/releases'", ")", "r", "=", "requests", ".", "get", "(", "url_releases", ",", "headers", "=", "headers", ")", "self", ".", "releases_json", "[", "repo_name", "]", "=", "r", ".", "json", "(", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
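A minimal standalone sketch of the request get_releases issues; the repository URL is an arbitrary example and unauthenticated calls are rate-limited:

import requests

# Fetch the releases list for a public repo, as get_releases does;
# 'LLNL/spack' is an illustrative repo, not taken from this record.
url = 'https://api.github.com/repos/LLNL/spack'
headers = {'Accept': 'application/vnd.github.v3+json'}
releases = requests.get(url + '/releases', headers=headers).json()
print(len(releases), 'releases fetched')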
test
GitHub_Traffic.get_referrers
Retrieves the total referrers and unique referrers of all repos in json and then stores it in a dict.
scripts/get_traffic.py
def get_referrers(self, url='', headers={}, repo_name=''):
    """
    Retrieves the total referrers and unique referrers of all repos in
    json and then stores it in a dict.
    """
    #JSON
    url_referrers = (url + '/traffic/popular/referrers')
    r1 = requests.get(url_referrers, headers=headers)
    referrers_json = r1.json()
    self.referrers_json[repo_name] = referrers_json
    #CSV
    for referrer in referrers_json:
        ref_name = referrer['referrer']
        try:
            tuple_in = (referrer['count'], referrer['uniques'])#curr vals
            tuple = (self.referrers[ref_name][0] + tuple_in[0],#cal new vals
                     self.referrers[ref_name][1] + tuple_in[1])
            self.referrers[ref_name] = tuple#record new vals
        except KeyError:
            tuple = self.referrers[ref_name] = (referrer['count'],
                                                referrer['uniques'])
        self.referrers_lower[ref_name.lower()] = ref_name
[ "Retrieves", "the", "total", "referrers", "and", "unique", "referrers", "of", "all", "repos", "in", "json", "and", "then", "stores", "it", "in", "a", "dict", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L142-L163
[ "def", "get_referrers", "(", "self", ",", "url", "=", "''", ",", "headers", "=", "{", "}", ",", "repo_name", "=", "''", ")", ":", "#JSON", "url_referrers", "=", "(", "url", "+", "'/traffic/popular/referrers'", ")", "r1", "=", "requests", ".", "get", "(", "url_referrers", ",", "headers", "=", "headers", ")", "referrers_json", "=", "r1", ".", "json", "(", ")", "self", ".", "referrers_json", "[", "repo_name", "]", "=", "referrers_json", "#CSV", "for", "referrer", "in", "referrers_json", ":", "ref_name", "=", "referrer", "[", "'referrer'", "]", "try", ":", "tuple_in", "=", "(", "referrer", "[", "'count'", "]", ",", "referrer", "[", "'uniques'", "]", ")", "#curr vals", "tuple", "=", "(", "self", ".", "referrers", "[", "ref_name", "]", "[", "0", "]", "+", "tuple_in", "[", "0", "]", ",", "#cal new vals", "self", ".", "referrers", "[", "ref_name", "]", "[", "1", "]", "+", "tuple_in", "[", "1", "]", ")", "self", ".", "referrers", "[", "ref_name", "]", "=", "tuple", "#record new vals", "except", "KeyError", ":", "tuple", "=", "self", ".", "referrers", "[", "ref_name", "]", "=", "(", "referrer", "[", "'count'", "]", ",", "referrer", "[", "'uniques'", "]", ")", "self", ".", "referrers_lower", "[", "ref_name", ".", "lower", "(", ")", "]", "=", "ref_name" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
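The per-referrer totals above are accumulated with a try/except KeyError that doubles as initialization; the same pattern in a self-contained sketch with made-up rows:

# Accumulate (count, uniques) tuples per referrer name; the KeyError
# branch seeds the entry the first time a name is seen.
referrers = {}
rows = [{'referrer': 'Google', 'count': 5, 'uniques': 3},
        {'referrer': 'Google', 'count': 2, 'uniques': 1}]
for row in rows:
    name = row['referrer']
    try:
        referrers[name] = (referrers[name][0] + row['count'],
                           referrers[name][1] + row['uniques'])
    except KeyError:
        referrers[name] = (row['count'], row['uniques'])
print(referrers)  # {'Google': (7, 4)}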
test
GitHub_Traffic.get_data
Retrieves data from json and stores it in the supplied dict. Accepts 'clones' or 'views' as type.
scripts/get_traffic.py
def get_data(self, url='', headers={}, date=str(datetime.date.today()),
             dict_to_store={}, type='', repo_name=''):
    """
    Retrieves data from json and stores it in the supplied dict.
    Accepts 'clones' or 'views' as type.
    """
    #JSON
    url = (url + '/traffic/' + type)
    r3 = requests.get(url, headers=headers)
    json = r3.json()
    if type == 'views':
        self.views_json[repo_name] = json
    elif type == 'clones':
        self.clones_json[repo_name] = json
    #CSV
    for day in json[type]:
        timestamp_seconds = day['timestamp']/1000
        try:
            date_timestamp = datetime.datetime.utcfromtimestamp(
                timestamp_seconds).strftime('%Y-%m-%d')
            #do not add today's date, some views might not be recorded yet
            if date_timestamp != date:
                tuple_in = (day['count'], day['uniques'])
                tuple = (dict_to_store[timestamp_seconds][0] + tuple_in[0],
                         dict_to_store[timestamp_seconds][1] + tuple_in[1])
                dict_to_store[timestamp_seconds] = tuple
        except KeyError:
            tuple = dict_to_store[timestamp_seconds] = (day['count'],
                                                        day['uniques'])
[ "Retrieves", "data", "from", "json", "and", "stores", "it", "in", "the", "supplied", "dict", ".", "Accepts", "clones", "or", "views", "as", "type", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L174-L202
[ "def", "get_data", "(", "self", ",", "url", "=", "''", ",", "headers", "=", "{", "}", ",", "date", "=", "str", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", ",", "dict_to_store", "=", "{", "}", ",", "type", "=", "''", ",", "repo_name", "=", "''", ")", ":", "#JSON", "url", "=", "(", "url", "+", "'/traffic/'", "+", "type", ")", "r3", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "headers", ")", "json", "=", "r3", ".", "json", "(", ")", "if", "type", "==", "'views'", ":", "self", ".", "views_json", "[", "repo_name", "]", "=", "json", "elif", "type", "==", "'clones'", ":", "self", ".", "clones_json", "[", "repo_name", "]", "=", "json", "#CSV", "for", "day", "in", "json", "[", "type", "]", ":", "timestamp_seconds", "=", "day", "[", "'timestamp'", "]", "/", "1000", "try", ":", "date_timestamp", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "timestamp_seconds", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", "#do not add todays date, some views might not be recorded yet", "if", "date_timestamp", "!=", "date", ":", "tuple_in", "=", "(", "day", "[", "'count'", "]", ",", "day", "[", "'uniques'", "]", ")", "tuple", "=", "(", "dict_to_store", "[", "timestamp_seconds", "]", "[", "0", "]", "+", "tuple_in", "[", "0", "]", ",", "dict_to_store", "[", "timestamp_seconds", "]", "[", "1", "]", "+", "tuple_in", "[", "1", "]", ")", "dict_to_store", "[", "timestamp_seconds", "]", "=", "tuple", "except", "KeyError", ":", "tuple", "=", "dict_to_store", "[", "timestamp_seconds", "]", "=", "(", "day", "[", "'count'", "]", ",", "day", "[", "'uniques'", "]", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
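get_data divides GitHub's millisecond timestamps by 1000 before formatting; a quick standalone check of that conversion (the sample value is arbitrary):

import datetime

# GitHub traffic timestamps are milliseconds; utcfromtimestamp expects
# seconds, hence the division in get_data.
timestamp_ms = 1514764800000  # 2018-01-01T00:00:00Z
timestamp_seconds = timestamp_ms / 1000
print(datetime.datetime.utcfromtimestamp(timestamp_seconds)
      .strftime('%Y-%m-%d'))  # prints 2018-01-01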
test
GitHub_Traffic.write_json
Writes all traffic data to file in JSON form.
scripts/get_traffic.py
def write_json(self, date=(datetime.date.today()), organization='llnl',
               dict_to_write={}, path_ending_type=''):
    """
    Writes all traffic data to file in JSON form.
    """
    for repo in dict_to_write:
        if len(dict_to_write[repo]) != 0:#don't need to write out empty lists
            path = ('../github-data/' + organization + '/' + repo + '/'
                    + path_ending_type + '/' + str(date) + '.json')
            self.checkDir(path)
            with open(path, 'w') as out:
                out.write(json.dumps(dict_to_write[repo], sort_keys=True,
                                     indent=4, separators=(',', ': ')))
                out.close()
[ "Writes", "all", "traffic", "data", "to", "file", "in", "JSON", "form", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L204-L217
[ "def", "write_json", "(", "self", ",", "date", "=", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", ",", "organization", "=", "'llnl'", ",", "dict_to_write", "=", "{", "}", ",", "path_ending_type", "=", "''", ")", ":", "for", "repo", "in", "dict_to_write", ":", "if", "len", "(", "dict_to_write", "[", "repo", "]", ")", "!=", "0", ":", "#don't need to write out empty lists", "path", "=", "(", "'../github-data/'", "+", "organization", "+", "'/'", "+", "repo", "+", "'/'", "+", "path_ending_type", "+", "'/'", "+", "str", "(", "date", ")", "+", "'.json'", ")", "self", ".", "checkDir", "(", "path", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "out", ":", "out", ".", "write", "(", "json", ".", "dumps", "(", "dict_to_write", "[", "repo", "]", ",", "sort_keys", "=", "True", ",", "indent", "=", "4", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "out", ".", "close", "(", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
GitHub_Traffic.write_to_file
Writes all traffic data to file.
scripts/get_traffic.py
def write_to_file(self, referrers_file_path='', views_file_path='',
                  clones_file_path='', date=(datetime.date.today()),
                  organization='llnl', views_row_count=0,
                  clones_row_count=0):
    """
    Writes all traffic data to file.
    """
    self.write_referrers_to_file(file_path=referrers_file_path)
    self.write_data_to_file(file_path=views_file_path,
                            dict_to_write=self.views, name='views',
                            row_count=views_row_count)
    self.write_data_to_file(file_path=clones_file_path,
                            dict_to_write=self.clones, name='clones',
                            row_count=clones_row_count)
[ "Writes", "all", "traffic", "data", "to", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L219-L231
[ "def", "write_to_file", "(", "self", ",", "referrers_file_path", "=", "''", ",", "views_file_path", "=", "''", ",", "clones_file_path", "=", "''", ",", "date", "=", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", ",", "organization", "=", "'llnl'", ",", "views_row_count", "=", "0", ",", "clones_row_count", "=", "0", ")", ":", "self", ".", "write_referrers_to_file", "(", "file_path", "=", "referrers_file_path", ")", "self", ".", "write_data_to_file", "(", "file_path", "=", "views_file_path", ",", "dict_to_write", "=", "self", ".", "views", ",", "name", "=", "'views'", ",", "row_count", "=", "views_row_count", ")", "self", ".", "write_data_to_file", "(", "file_path", "=", "clones_file_path", ",", "dict_to_write", "=", "self", ".", "clones", ",", "name", "=", "'clones'", ",", "row_count", "=", "clones_row_count", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
GitHub_Traffic.check_data_redundancy
Checks the given csv file against the json data scraped for the given dict. It will remove all data retrieved that has already been recorded so we don't write redundant data to file. Returns count of rows from file.
scripts/get_traffic.py
def check_data_redundancy(self, file_path='', dict_to_check={}):
    """
    Checks the given csv file against the json data scraped for the given
    dict. It will remove all data retrieved that has already been recorded
    so we don't write redundant data to file. Returns count of rows from
    file.
    """
    count = 0
    exists = os.path.isfile(file_path)
    previous_dates = {}
    if exists:
        with open(file_path, 'r') as input:
            input.readline()#skip header line
            for row in csv.reader(input):
                timestamp = calendar.timegm(time.strptime(row[0],
                                                          '%Y-%m-%d'))
                if timestamp in dict_to_check:#our date is already recorded
                    del dict_to_check[timestamp]
                #calc current id max
                count += 1
            input.close()
    return count
[ "Checks", "the", "given", "csv", "file", "against", "the", "json", "data", "scraped", "for", "the", "given", "dict", ".", "It", "will", "remove", "all", "data", "retrieved", "that", "has", "already", "been", "recorded", "so", "we", "don", "t", "write", "redundant", "data", "to", "file", ".", "Returns", "count", "of", "rows", "from", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L233-L254
[ "def", "check_data_redundancy", "(", "self", ",", "file_path", "=", "''", ",", "dict_to_check", "=", "{", "}", ")", ":", "count", "=", "0", "exists", "=", "os", ".", "path", ".", "isfile", "(", "file_path", ")", "previous_dates", "=", "{", "}", "if", "exists", ":", "with", "open", "(", "file_path", ",", "'r'", ")", "as", "input", ":", "input", ".", "readline", "(", ")", "#skip header line", "for", "row", "in", "csv", ".", "reader", "(", "input", ")", ":", "timestamp", "=", "calendar", ".", "timegm", "(", "time", ".", "strptime", "(", "row", "[", "0", "]", ",", "'%Y-%m-%d'", ")", ")", "if", "timestamp", "in", "dict_to_check", ":", "#our date is already recorded", "del", "dict_to_check", "[", "timestamp", "]", "#calc current id max", "count", "+=", "1", "input", ".", "close", "(", ")", "return", "count" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
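The dedup key used above is a UTC epoch built from the CSV date column; the conversion in isolation (the date string is an arbitrary example):

import calendar
import time

# Parse a '%Y-%m-%d' date and convert it to a UTC epoch, matching the
# keys check_data_redundancy deletes from dict_to_check.
timestamp = calendar.timegm(time.strptime('2018-01-01', '%Y-%m-%d'))
print(timestamp)  # 1514764800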
test
GitHub_Traffic.write_data_to_file
Writes given dict to file.
scripts/get_traffic.py
def write_data_to_file(self, file_path='', date=str(datetime.date.today()),
                       organization='llnl', dict_to_write={}, name='',
                       row_count=0):
    """
    Writes given dict to file.
    """
    exists = os.path.isfile(file_path)
    with open(file_path, 'a') as out:
        if not exists:
            out.write('date,organization,' + name + ',unique_' + name
                      + ',id\n')
        sorted_dict = sorted(dict_to_write)
        for day in sorted_dict:
            day_formatted = datetime.datetime.utcfromtimestamp(
                day).strftime('%Y-%m-%d')
            out.write(day_formatted + ',' + organization + ','
                      + str(dict_to_write[day][0]) + ','
                      + str(dict_to_write[day][1]) + ','
                      + str(row_count) + '\n')
            row_count += 1
[ "Writes", "given", "dict", "to", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L256-L273
[ "def", "write_data_to_file", "(", "self", ",", "file_path", "=", "''", ",", "date", "=", "str", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", ",", "organization", "=", "'llnl'", ",", "dict_to_write", "=", "{", "}", ",", "name", "=", "''", ",", "row_count", "=", "0", ")", ":", "exists", "=", "os", ".", "path", ".", "isfile", "(", "file_path", ")", "with", "open", "(", "file_path", ",", "'a'", ")", "as", "out", ":", "if", "not", "exists", ":", "out", ".", "write", "(", "'date,organization,'", "+", "name", "+", "',unique_'", "+", "name", "+", "',id\\n'", ")", "sorted_dict", "=", "sorted", "(", "dict_to_write", ")", "for", "day", "in", "sorted_dict", ":", "day_formatted", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "day", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", "out", ".", "write", "(", "day_formatted", "+", "','", "+", "organization", "+", "','", "+", "str", "(", "dict_to_write", "[", "day", "]", "[", "0", "]", ")", "+", "','", "+", "str", "(", "dict_to_write", "[", "day", "]", "[", "1", "]", ")", "+", "','", "+", "str", "(", "row_count", ")", "+", "'\\n'", ")", "row_count", "+=", "1" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
GitHub_Traffic.write_referrers_to_file
Writes the referrers data to file.
scripts/get_traffic.py
def write_referrers_to_file(self, file_path='',
                            date=str(datetime.date.today()),
                            organization='llnl'):
    """
    Writes the referrers data to file.
    """
    self.remove_date(file_path=file_path, date=date)
    referrers_exists = os.path.isfile(file_path)
    with open(file_path, 'a') as out:
        if not referrers_exists:
            out.write('date,organization,referrer,count,count_log,uniques,'
                      + 'uniques_logged\n')
        sorted_referrers = sorted(self.referrers_lower)#sort based on lowercase
        for referrer in sorted_referrers:
            ref_name = self.referrers_lower[referrer]#grab real name from
            count = self.referrers[ref_name][0]
            uniques = self.referrers[ref_name][1]
            if count == 1:#so we don't display 0 for count of 1
                count = 1.5
            if uniques == 1:
                uniques = 1.5
            count_logged = math.log(count)
            uniques_logged = math.log(uniques)
            out.write(date + ',' + organization + ',' + ref_name + ','
                      + str(count) + ',' + str(count_logged) + ','
                      + str(uniques) + ',' + str(uniques_logged) + '\n')
        out.close()
[ "Writes", "the", "referrers", "data", "to", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L275-L300
[ "def", "write_referrers_to_file", "(", "self", ",", "file_path", "=", "''", ",", "date", "=", "str", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", ",", "organization", "=", "'llnl'", ")", ":", "self", ".", "remove_date", "(", "file_path", "=", "file_path", ",", "date", "=", "date", ")", "referrers_exists", "=", "os", ".", "path", ".", "isfile", "(", "file_path", ")", "with", "open", "(", "file_path", ",", "'a'", ")", "as", "out", ":", "if", "not", "referrers_exists", ":", "out", ".", "write", "(", "'date,organization,referrer,count,count_log,uniques,'", "+", "'uniques_logged\\n'", ")", "sorted_referrers", "=", "sorted", "(", "self", ".", "referrers_lower", ")", "#sort based on lowercase", "for", "referrer", "in", "sorted_referrers", ":", "ref_name", "=", "self", ".", "referrers_lower", "[", "referrer", "]", "#grab real name from", "count", "=", "self", ".", "referrers", "[", "ref_name", "]", "[", "0", "]", "uniques", "=", "self", ".", "referrers", "[", "ref_name", "]", "[", "1", "]", "if", "count", "==", "1", ":", "#so we don't display 0 for count of 1", "count", "=", "1.5", "if", "uniques", "==", "1", ":", "uniques", "=", "1.5", "count_logged", "=", "math", ".", "log", "(", "count", ")", "uniques_logged", "=", "math", ".", "log", "(", "uniques", ")", "out", ".", "write", "(", "date", "+", "','", "+", "organization", "+", "','", "+", "ref_name", "+", "','", "+", "str", "(", "count", ")", "+", "','", "+", "str", "(", "count_logged", ")", "+", "','", "+", "str", "(", "uniques", ")", "+", "','", "+", "str", "(", "uniques_logged", ")", "+", "'\\n'", ")", "out", ".", "close", "(", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
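The count == 1 substitution above exists because math.log(1) is 0, which would render a real count of 1 as zero on a log scale; verified standalone:

import math

# A raw count of 1 maps to 0.0 under log; the code swaps in 1.5 so the
# logged value stays visibly positive.
for count in (1, 1.5, 10):
    print(count, math.log(count))
# 1 0.0 / 1.5 0.4054... / 10 2.3025...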
test
process_json
Converts a DOE CODE .json file into DOE CODE projects Yields DOE CODE records from a DOE CODE .json file
scraper/doecode/__init__.py
def process_json(filename):
    """
    Converts a DOE CODE .json file into DOE CODE projects

    Yields DOE CODE records from a DOE CODE .json file
    """
    logger.debug('Processing DOE CODE json: %s', filename)

    doecode_json = json.load(open(filename))

    for record in doecode_json['records']:
        yield record
[ "Converts", "a", "DOE", "CODE", ".", "json", "file", "into", "DOE", "CODE", "projects", "Yields", "DOE", "CODE", "records", "from", "a", "DOE", "CODE", ".", "json", "file" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/doecode/__init__.py#L9-L20
[ "def", "process_json", "(", "filename", ")", ":", "logger", ".", "debug", "(", "'Processing DOE CODE json: %s'", ",", "filename", ")", "doecode_json", "=", "json", ".", "load", "(", "open", "(", "filename", ")", ")", "for", "record", "in", "doecode_json", "[", "'records'", "]", ":", "yield", "record" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
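process_json is a generator, so records stream lazily; a hedged consumption sketch where the path and the 'software_title' field name are assumptions, not values from this record:

# 'doecode.json' must contain a top-level 'records' list;
# 'software_title' is an assumed DOE CODE field name.
for record in process_json('doecode.json'):
    print(record.get('software_title', '<untitled>'))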
test
process_url
Yields DOE CODE records from a DOE CODE .json URL response Converts a DOE CODE API .json URL response into DOE CODE projects
scraper/doecode/__init__.py
def process_url(url, key):
    """
    Yields DOE CODE records from a DOE CODE .json URL response

    Converts a DOE CODE API .json URL response into DOE CODE projects
    """
    logger.debug('Fetching DOE CODE JSON: %s', url)

    if key is None:
        raise ValueError('DOE CODE API Key value is missing!')

    response = requests.get(url, headers={"Authorization": "Basic " + key})
    doecode_json = response.json()

    for record in doecode_json['records']:
        yield record
[ "Yields", "DOE", "CODE", "records", "from", "a", "DOE", "CODE", ".", "json", "URL", "response", "Converts", "a", "DOE", "CODE", "API", ".", "json", "URL", "response", "into", "DOE", "CODE", "projects" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/doecode/__init__.py#L23-L38
[ "def", "process_url", "(", "url", ",", "key", ")", ":", "logger", ".", "debug", "(", "'Fetching DOE CODE JSON: %s'", ",", "url", ")", "if", "key", "is", "None", ":", "raise", "ValueError", "(", "'DOE CODE API Key value is missing!'", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "{", "\"Authorization\"", ":", "\"Basic \"", "+", "key", "}", ")", "doecode_json", "=", "response", ".", "json", "(", ")", "for", "record", "in", "doecode_json", "[", "'records'", "]", ":", "yield", "record" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
process
Yields DOE CODE records based on provided input sources param: filename (str): Path to a DOE CODE .json file url (str): URL for a DOE CODE server json file key (str): API Key for connecting to DOE CODE server
scraper/doecode/__init__.py
def process(filename=None, url=None, key=None):
    """
    Yields DOE CODE records based on provided input sources

    param:
        filename (str): Path to a DOE CODE .json file
        url (str): URL for a DOE CODE server json file
        key (str): API Key for connecting to DOE CODE server
    """
    if filename is not None:
        yield from process_json(filename)
    elif url and key:
        yield from process_url(url, key)
[ "Yeilds", "DOE", "CODE", "records", "based", "on", "provided", "input", "sources" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/doecode/__init__.py#L41-L54
[ "def", "process", "(", "filename", "=", "None", ",", "url", "=", "None", ",", "key", "=", "None", ")", ":", "if", "filename", "is", "not", "None", ":", "yield", "from", "process_json", "(", "filename", ")", "elif", "url", "and", "key", ":", "yield", "from", "process_url", "(", "url", ",", "key", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
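process gives the filename precedence: the url/key branch only runs when filename is None. A hedged dispatch sketch with placeholder values:

# File input wins when both sources are supplied; url and key are only
# consulted when filename is None. All values are placeholders.
for record in process(filename='doecode.json'):
    pass  # handle each DOE CODE record
# for record in process(url='https://example.gov/api/records', key='<KEY>'):
#     ...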
test
GitHub_Users_Emails.login
Performs a login and sets the Github object via given credentials. If credentials are empty or incorrect then prompts user for credentials. Stores the authentication token in a CREDENTIALS_FILE used for future logins. Handles Two Factor Authentication.
scripts/get_users_emails.py
def login(self, username='', password=''):
    """
    Performs a login and sets the Github object via given credentials. If
    credentials are empty or incorrect then prompts user for credentials.
    Stores the authentication token in a CREDENTIALS_FILE used for future
    logins. Handles Two Factor Authentication.
    """
    try:
        token = ''
        id = ''
        if not os.path.isfile('CREDENTIALS_FILE'):
            if(username == '' or password == ''):
                username = raw_input('Username: ')
                password = getpass.getpass('Password: ')
            note = 'GitHub Organization Stats App'
            note_url = 'http://software.llnl.gov/'
            scopes = ['user', 'repo']
            auth = github3.authorize(username, password, scopes, note,
                                     note_url,
                                     two_factor_callback=self.prompt_2fa)
            token = auth.token
            id = auth.id
            with open('CREDENTIALS_FILE', 'w+') as fd:
                fd.write(token + '\n')
                fd.write(str(id))
                fd.close()
        else:
            with open('CREDENTIALS_FILE', 'r') as fd:
                token = fd.readline().strip()
                id = fd.readline().strip()
                fd.close()
        print "Logging in."
        self.logged_in_gh = github3.login(token=token,
                                          two_factor_callback=self.prompt_2fa)
        self.logged_in_gh.user().to_json()
    except (ValueError, AttributeError, github3.models.GitHubError) as e:
        print 'Bad credentials. Try again.'
        self.login()
[ "Performs", "a", "login", "and", "sets", "the", "Github", "object", "via", "given", "credentials", ".", "If", "credentials", "are", "empty", "or", "incorrect", "then", "prompts", "user", "for", "credentials", ".", "Stores", "the", "authentication", "token", "in", "a", "CREDENTIALS_FILE", "used", "for", "future", "logins", ".", "Handles", "Two", "Factor", "Authentication", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_users_emails.py#L28-L64
[ "def", "login", "(", "self", ",", "username", "=", "''", ",", "password", "=", "''", ")", ":", "try", ":", "token", "=", "''", "id", "=", "''", "if", "not", "os", ".", "path", ".", "isfile", "(", "'CREDENTIALS_FILE'", ")", ":", "if", "(", "username", "==", "''", "or", "password", "==", "''", ")", ":", "username", "=", "raw_input", "(", "'Username: '", ")", "password", "=", "getpass", ".", "getpass", "(", "'Password: '", ")", "note", "=", "'GitHub Organization Stats App'", "note_url", "=", "'http://software.llnl.gov/'", "scopes", "=", "[", "'user'", ",", "'repo'", "]", "auth", "=", "github3", ".", "authorize", "(", "username", ",", "password", ",", "scopes", ",", "note", ",", "note_url", ",", "two_factor_callback", "=", "self", ".", "prompt_2fa", ")", "token", "=", "auth", ".", "token", "id", "=", "auth", ".", "id", "with", "open", "(", "'CREDENTIALS_FILE'", ",", "'w+'", ")", "as", "fd", ":", "fd", ".", "write", "(", "token", "+", "'\\n'", ")", "fd", ".", "write", "(", "str", "(", "id", ")", ")", "fd", ".", "close", "(", ")", "else", ":", "with", "open", "(", "'CREDENTIALS_FILE'", ",", "'r'", ")", "as", "fd", ":", "token", "=", "fd", ".", "readline", "(", ")", ".", "strip", "(", ")", "id", "=", "fd", ".", "readline", "(", ")", ".", "strip", "(", ")", "fd", ".", "close", "(", ")", "print", "\"Logging in.\"", "self", ".", "logged_in_gh", "=", "github3", ".", "login", "(", "token", "=", "token", ",", "two_factor_callback", "=", "self", ".", "prompt_2fa", ")", "self", ".", "logged_in_gh", ".", "user", "(", ")", ".", "to_json", "(", ")", "except", "(", "ValueError", ",", "AttributeError", ",", "github3", ".", "models", ".", "GitHubError", ")", "as", "e", ":", "print", "'Bad credentials. Try again.'", "self", ".", "login", "(", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
GitHub_Users_Emails.get_mems_of_org
Retrieves the emails of the members of the organization. Note this only gets public emails. Private emails would need authentication for each user.
scripts/get_users_emails.py
def get_mems_of_org(self):
    """
    Retrieves the emails of the members of the organization. Note this
    only gets public emails. Private emails would need authentication
    for each user.
    """
    print 'Getting members\' emails.'
    for member in self.org_retrieved.iter_members():
        login = member.to_json()['login']
        user_email = self.logged_in_gh.user(login).to_json()['email']
        if user_email is not None:
            self.emails[login] = user_email
        else:#user has no public email
            self.emails[login] = 'none'
        #used for sorting regardless of case
        self.logins_lower[login.lower()] = login
[ "Retrieves", "the", "emails", "of", "the", "members", "of", "the", "organization", ".", "Note", "this", "Only", "gets", "public", "emails", ".", "Private", "emails", "would", "need", "authentication", "for", "each", "user", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_users_emails.py#L87-L102
[ "def", "get_mems_of_org", "(", "self", ")", ":", "print", "'Getting members\\' emails.'", "for", "member", "in", "self", ".", "org_retrieved", ".", "iter_members", "(", ")", ":", "login", "=", "member", ".", "to_json", "(", ")", "[", "'login'", "]", "user_email", "=", "self", ".", "logged_in_gh", ".", "user", "(", "login", ")", ".", "to_json", "(", ")", "[", "'email'", "]", "if", "user_email", "is", "not", "None", ":", "self", ".", "emails", "[", "login", "]", "=", "user_email", "else", ":", "#user has no public email", "self", ".", "emails", "[", "login", "]", "=", "'none'", "#used for sorting regardless of case", "self", ".", "logins_lower", "[", "login", ".", "lower", "(", ")", "]", "=", "login" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
GitHub_Users_Emails.write_to_file
Writes the user emails to file.
scripts/get_users_emails.py
def write_to_file(self, file_path=''):
    """
    Writes the user emails to file.
    """
    with open(file_path, 'w+') as out:
        out.write('user, email\n')
        sorted_names = sorted(self.logins_lower)#sort based on lowercase
        for login in sorted_names:
            out.write(self.logins_lower[login] + ','
                      + self.emails[self.logins_lower[login]] + '\n')
        out.close()
[ "Writes", "the", "user", "emails", "to", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_users_emails.py#L104-L114
[ "def", "write_to_file", "(", "self", ",", "file_path", "=", "''", ")", ":", "with", "open", "(", "file_path", ",", "'w+'", ")", "as", "out", ":", "out", ".", "write", "(", "'user, email\\n'", ")", "sorted_names", "=", "sorted", "(", "self", ".", "logins_lower", ")", "#sort based on lowercase", "for", "login", "in", "sorted_names", ":", "out", ".", "write", "(", "self", ".", "logins_lower", "[", "login", "]", "+", "','", "+", "self", ".", "emails", "[", "self", ".", "logins_lower", "[", "login", "]", "]", "+", "'\\n'", ")", "out", ".", "close", "(", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
connect
Return a connected Bitbucket session
scraper/bitbucket/__init__.py
def connect(url, username, password):
    """
    Return a connected Bitbucket session
    """
    bb_session = stashy.connect(url, username, password)

    logger.info('Connected to: %s as %s', url, username)

    return bb_session
[ "Return", "a", "connected", "Bitbucket", "session" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/bitbucket/__init__.py#L7-L16
[ "def", "connect", "(", "url", ",", "username", ",", "password", ")", ":", "bb_session", "=", "stashy", ".", "connect", "(", "url", ",", "username", ",", "password", ")", "logger", ".", "info", "(", "'Connected to: %s as %s'", ",", "url", ",", "username", ")", "return", "bb_session" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
get_stargazers
Return a list of the stargazers of a GitHub repo Includes both the 'starred_at' and 'user' data. param: url url is the 'stargazers_url' of the form: https://api.github.com/repos/LLNL/spack/stargazers
scripts/stars.py
def get_stargazers(url, session=None):
    """
    Return a list of the stargazers of a GitHub repo

    Includes both the 'starred_at' and 'user' data.

    param: url
        url is the 'stargazers_url' of the form:
        https://api.github.com/repos/LLNL/spack/stargazers
    """
    headers = {'Accept': 'application/vnd.github.v3.star+json'}
    url = url + '?per_page=100&page=%s'

    page = 1
    gazers = []

    # Fetch pages until an empty page is returned; seeding json_data here
    # also fixes the original use-before-assignment of json_data.
    json_data = github.get(url % page, headers=headers).json()
    #{rel: url for url, rel in LINK_REGEX.findall(r.headers['Link'])}
    while json_data:
        gazers.extend(json_data)
        page += 1
        json_data = github.get(url % page, headers=headers).json()

    return gazers
[ "Return", "a", "list", "of", "the", "stargazers", "of", "a", "GitHub", "repo" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/stars.py#L15-L40
[ "def", "get_stargazers", "(", "url", ",", "session", "=", "None", ")", ":", "headers", "=", "{", "'Accept'", ":", "'application/vnd.github.v3.star+json'", "}", "url", "=", "url", "+", "'?per_page=100&page=%s'", "page", "=", "1", "gazers", "=", "[", "]", "response", "=", "github", ".", "get", "(", "url", "%", "page", ",", "headers", "=", "headers", ")", "gazers", ".", "extend", "(", "response", ".", "json", "(", ")", ")", "#{rel: url for url, rel in LINK_REGEX.findall(r.headers['Link'])}", "while", "json_data", ":", "gazers", ".", "extend", "(", "json_data", ")", "page", "+=", "1", "json_data", "=", "github", ".", "get", "(", "url", "%", "page", ",", "headers", "=", "headers", ")", ".", "json", "(", ")", "return", "gazers" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
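The pagination idiom, fetching page N until an empty page returns, also works with plain requests; a self-contained sketch against the same endpoint (unauthenticated calls are heavily rate-limited, so a large repo may not finish):

import requests

# Page through stargazers until GitHub returns an empty list.
url = ('https://api.github.com/repos/LLNL/spack/stargazers'
       '?per_page=100&page=%s')
headers = {'Accept': 'application/vnd.github.v3.star+json'}
gazers, page = [], 1
json_data = requests.get(url % page, headers=headers).json()
while json_data:
    gazers.extend(json_data)
    page += 1
    json_data = requests.get(url % page, headers=headers).json()
print(len(gazers), 'stargazers')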
test
connect
Return a connected GitLab session ``token`` should be a ``private_token`` from Gitlab
scraper/gitlab/__init__.py
def connect(url='https://gitlab.com', token=None):
    """
    Return a connected GitLab session

    ``token`` should be a ``private_token`` from Gitlab
    """
    if token is None:
        token = os.environ.get('GITLAB_API_TOKEN', None)

    gl_session = gitlab.Gitlab(url, token)

    try:
        gl_session.version()
    except gitlab.exceptions.GitlabAuthenticationError:
        raise RuntimeError('Invalid or missing GITLAB_API_TOKEN')

    logger.info('Connected to: %s', url)

    return gl_session
[ "Return", "a", "connected", "GitLab", "session" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/gitlab/__init__.py#L9-L28
[ "def", "connect", "(", "url", "=", "'https://gitlab.com'", ",", "token", "=", "None", ")", ":", "if", "token", "is", "None", ":", "token", "=", "os", ".", "environ", ".", "get", "(", "'GITLAB_API_TOKEN'", ",", "None", ")", "gl_session", "=", "gitlab", ".", "Gitlab", "(", "url", ",", "token", ")", "try", ":", "gl_session", ".", "version", "(", ")", "except", "(", "gitlab", ".", "execeptions", ".", "GitlabAuthenticationError", ")", ":", "raise", "RuntimeError", "(", "'Invalid or missing GITLAB_API_TOKEN'", ")", "logger", ".", "info", "(", "'Connected to: %s'", ",", "url", ")", "return", "gl_session" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
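connect falls back to the GITLAB_API_TOKEN environment variable when no token is passed; a hedged usage sketch with a placeholder token:

import os

# Export the token, then call connect() with defaults; the value is a
# placeholder, not a real credential.
os.environ['GITLAB_API_TOKEN'] = '<YOUR_PRIVATE_TOKEN>'
gl_session = connect()  # defaults to https://gitlab.com
print(gl_session.version())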
test
query_repos
Yields Gitlab project objects for all projects in GitLab
scraper/gitlab/__init__.py
def query_repos(gl_session, repos=None):
    """
    Yields Gitlab project objects for all projects in GitLab
    """
    if repos is None:
        repos = []

    for repo in repos:
        yield gl_session.projects.get(repo)

    if not repos:
        for project in gl_session.projects.list(as_list=False):
            yield project
[ "Yields", "Gitlab", "project", "objects", "for", "all", "projects", "in", "Bitbucket" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/gitlab/__init__.py#L31-L44
[ "def", "query_repos", "(", "gl_session", ",", "repos", "=", "None", ")", ":", "if", "repos", "is", "None", ":", "repos", "=", "[", "]", "for", "repo", "in", "repos", ":", "yield", "gl_session", ".", "projects", ".", "get", "(", "repo", ")", "if", "not", "repos", ":", "for", "project", "in", "gl_session", ".", "projects", ".", "list", "(", "as_list", "=", "False", ")", ":", "yield", "project" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
git_repo_to_sloc
Given a Git repository URL, returns number of lines of code based on cloc Reference: - cloc: https://github.com/AlDanial/cloc - https://www.omg.org/spec/AFP/ - Another potential way to calculate effort Sample cloc output: { "header": { "cloc_url": "github.com/AlDanial/cloc", "cloc_version": "1.74", "elapsed_seconds": 0.195950984954834, "n_files": 27, "n_lines": 2435, "files_per_second": 137.78956000769, "lines_per_second": 12426.5769858787 }, "C++": { "nFiles": 7, "blank": 121, "comment": 314, "code": 371 }, "C/C++ Header": { "nFiles": 8, "blank": 107, "comment": 604, "code": 191 }, "CMake": { "nFiles": 11, "blank": 49, "comment": 465, "code": 165 }, "Markdown": { "nFiles": 1, "blank": 18, "comment": 0, "code": 30 }, "SUM": { "blank": 295, "comment": 1383, "code": 757, "nFiles": 27 } }
scraper/util.py
def git_repo_to_sloc(url):
    """
    Given a Git repository URL, returns number of lines of code based on cloc

    Reference:
        - cloc: https://github.com/AlDanial/cloc
        - https://www.omg.org/spec/AFP/
            - Another potential way to calculate effort

    Sample cloc output:
        {
            "header": {
                "cloc_url": "github.com/AlDanial/cloc",
                "cloc_version": "1.74",
                "elapsed_seconds": 0.195950984954834,
                "n_files": 27,
                "n_lines": 2435,
                "files_per_second": 137.78956000769,
                "lines_per_second": 12426.5769858787
            },
            "C++": {
                "nFiles": 7,
                "blank": 121,
                "comment": 314,
                "code": 371
            },
            "C/C++ Header": {
                "nFiles": 8,
                "blank": 107,
                "comment": 604,
                "code": 191
            },
            "CMake": {
                "nFiles": 11,
                "blank": 49,
                "comment": 465,
                "code": 165
            },
            "Markdown": {
                "nFiles": 1,
                "blank": 18,
                "comment": 0,
                "code": 30
            },
            "SUM": {
                "blank": 295,
                "comment": 1383,
                "code": 757,
                "nFiles": 27
            }
        }
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        logger.debug('Cloning: url=%s tmp_dir=%s', url, tmp_dir)
        tmp_clone = os.path.join(tmp_dir, 'clone-dir')
        cmd = ['git', 'clone', '--depth=1', url, tmp_clone]
        execute(cmd)

        cmd = ['cloc', '--json', tmp_clone]
        out, _ = execute(cmd)

    try:
        json_start = out.find('{"header"')
        json_blob = out[json_start:].replace('\\n', '').replace('\'', '')
        cloc_json = json.loads(json_blob)
        sloc = cloc_json['SUM']['code']
    except json.decoder.JSONDecodeError:
        logger.debug('Error Decoding: url=%s, out=%s', url, out)
        sloc = 0

    logger.debug('SLOC: url=%s, sloc=%d', url, sloc)

    return sloc
[ "Given", "a", "Git", "repository", "URL", "returns", "number", "of", "lines", "of", "code", "based", "on", "cloc" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/util.py#L83-L158
[ "def", "git_repo_to_sloc", "(", "url", ")", ":", "with", "tempfile", ".", "TemporaryDirectory", "(", ")", "as", "tmp_dir", ":", "logger", ".", "debug", "(", "'Cloning: url=%s tmp_dir=%s'", ",", "url", ",", "tmp_dir", ")", "tmp_clone", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "'clone-dir'", ")", "cmd", "=", "[", "'git'", ",", "'clone'", ",", "'--depth=1'", ",", "url", ",", "tmp_clone", "]", "execute", "(", "cmd", ")", "cmd", "=", "[", "'cloc'", ",", "'--json'", ",", "tmp_clone", "]", "out", ",", "_", "=", "execute", "(", "cmd", ")", "try", ":", "json_start", "=", "out", ".", "find", "(", "'{\"header\"'", ")", "json_blob", "=", "out", "[", "json_start", ":", "]", ".", "replace", "(", "'\\\\n'", ",", "''", ")", ".", "replace", "(", "'\\''", ",", "''", ")", "cloc_json", "=", "json", ".", "loads", "(", "json_blob", ")", "sloc", "=", "cloc_json", "[", "'SUM'", "]", "[", "'code'", "]", "except", "json", ".", "decoder", ".", "JSONDecodeError", ":", "logger", ".", "debug", "(", "'Error Decoding: url=%s, out=%s'", ",", "url", ",", "out", ")", "sloc", "=", "0", "logger", ".", "debug", "(", "'SLOC: url=%s, sloc=%d'", ",", "url", ",", "sloc", ")", "return", "sloc" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
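A usage sketch for git_repo_to_sloc; it assumes git and cloc are on PATH, and the repository URL is only an example:

# Shallow-clones into a temp dir, runs cloc --json, and returns the
# 'SUM' code count (0 when cloc output cannot be parsed).
sloc = git_repo_to_sloc('https://github.com/LLNL/scraper.git')
print('total source lines:', sloc)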
test
compute_labor_hours
Compute the labor hours, given a count of source lines of code The intention is to use the COCOMO II model to compute this value. References: - http://csse.usc.edu/tools/cocomoii.php - http://docs.python-guide.org/en/latest/scenarios/scrape/
scraper/util.py
def compute_labor_hours(sloc, month_hours='cocomo_book'):
    """
    Compute the labor hours, given a count of source lines of code

    The intention is to use the COCOMO II model to compute this value.

    References:
    - http://csse.usc.edu/tools/cocomoii.php
    - http://docs.python-guide.org/en/latest/scenarios/scrape/
    """
    # Calculation of hours in a month
    if month_hours == 'hours_per_year':
        # Use number of working hours in a year:
        # (40 Hours / week) * (52 weeks / year) / (12 months / year) ~= 173.33
        HOURS_PER_PERSON_MONTH = 40.0 * 52 / 12
    else:
        # Use value from COCOMO II Book (month_hours=='cocomo_book'):
        # Reference: https://dl.acm.org/citation.cfm?id=557000
        # This is the value used by the Code.gov team:
        # https://github.com/GSA/code-gov/blob/master/LABOR_HOUR_CALC.md
        HOURS_PER_PERSON_MONTH = 152.0

    cocomo_url = 'http://csse.usc.edu/tools/cocomoii.php'
    page = requests.post(cocomo_url, data={'new_size': sloc})

    try:
        person_months = float(EFFORT_REGEX.search(page.text).group(1))
    except AttributeError:
        logger.error('Unable to find Person Months in page text: sloc=%s',
                     sloc)
        # If there is no match, .search(..) returns None
        person_months = 0

    labor_hours = person_months * HOURS_PER_PERSON_MONTH
    logger.debug('sloc=%d labor_hours=%d', sloc, labor_hours)

    return labor_hours
[ "Compute", "the", "labor", "hours", "given", "a", "count", "of", "source", "lines", "of", "code" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/util.py#L161-L196
[ "def", "compute_labor_hours", "(", "sloc", ",", "month_hours", "=", "'cocomo_book'", ")", ":", "# Calculation of hours in a month", "if", "month_hours", "==", "'hours_per_year'", ":", "# Use number of working hours in a year:", "# (40 Hours / week) * (52 weeks / year) / (12 months / year) ~= 173.33", "HOURS_PER_PERSON_MONTH", "=", "40.0", "*", "52", "/", "12", "else", ":", "# Use value from COCOMO II Book (month_hours=='cocomo_book'):", "# Reference: https://dl.acm.org/citation.cfm?id=557000", "# This is the value used by the Code.gov team:", "# https://github.com/GSA/code-gov/blob/master/LABOR_HOUR_CALC.md", "HOURS_PER_PERSON_MONTH", "=", "152.0", "cocomo_url", "=", "'http://csse.usc.edu/tools/cocomoii.php'", "page", "=", "requests", ".", "post", "(", "cocomo_url", ",", "data", "=", "{", "'new_size'", ":", "sloc", "}", ")", "try", ":", "person_months", "=", "float", "(", "EFFORT_REGEX", ".", "search", "(", "page", ".", "text", ")", ".", "group", "(", "1", ")", ")", "except", "AttributeError", ":", "logger", ".", "error", "(", "'Unable to find Person Months in page text: sloc=%s'", ",", "sloc", ")", "# If there is no match, and .search(..) returns None", "person_months", "=", "0", "labor_hours", "=", "person_months", "*", "HOURS_PER_PERSON_MONTH", "logger", ".", "debug", "(", "'sloc=%d labor_hours=%d'", ",", "sloc", ",", "labor_hours", ")", "return", "labor_hours" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
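Once person months are in hand, the conversion is one multiplication; for example, with the two month-hour constants defined above:

# 10 person months under each mode; the figure is an example, not a
# value scraped from the COCOMO tool.
person_months = 10.0
print(person_months * 152.0)            # 1520.0 hours (cocomo_book)
print(person_months * 40.0 * 52 / 12)   # ~1733.3 hours (hours_per_year)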
test
_prune_dict_null_str
Prune the "None" or empty string values from dictionary items
scraper/util.py
def _prune_dict_null_str(dictionary):
    """
    Prune the "None" or empty string values from dictionary items
    """
    for key, value in list(dictionary.items()):
        if value is None or str(value) == '':
            del dictionary[key]

        if isinstance(value, dict):
            dictionary[key] = _prune_dict_null_str(dictionary[key])

    return dictionary
[ "Prune", "the", "None", "or", "emptry", "string", "values", "from", "dictionary", "items" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/util.py#L209-L220
[ "def", "_prune_dict_null_str", "(", "dictionary", ")", ":", "for", "key", ",", "value", "in", "list", "(", "dictionary", ".", "items", "(", ")", ")", ":", "if", "value", "is", "None", "or", "str", "(", "value", ")", "==", "''", ":", "del", "dictionary", "[", "key", "]", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "dictionary", "[", "key", "]", "=", "_prune_dict_null_str", "(", "dictionary", "[", "key", "]", ")", "return", "dictionary" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
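A standalone check of the pruning behavior, including the recursive descent into nested dicts (sample data made up):

# None and empty-string values are dropped; nested dicts are pruned
# recursively via the isinstance(value, dict) branch.
data = {'a': None, 'b': '', 'c': 'keep', 'd': {'e': None, 'f': 1}}
print(_prune_dict_null_str(data))  # {'c': 'keep', 'd': {'f': 1}}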
test
GitHubQueryManager._readGQL
Read a 'pretty' formatted GraphQL query file into a one-line string. Removes line breaks and comments. Condenses white space. Args: filePath (str): A relative or absolute path to a file containing a GraphQL query. File may use comments and multi-line formatting. .. _GitHub GraphQL Explorer: https://developer.github.com/v4/explorer/ verbose (Optional[bool]): If False, prints will be suppressed. Defaults to False. Returns: str: A single line GraphQL query.
scraper/github/queryManager.py
def _readGQL(self, filePath, verbose=False):
    """Read a 'pretty' formatted GraphQL query file into a one-line string.

    Removes line breaks and comments. Condenses white space.

    Args:
        filePath (str): A relative or absolute path to a file containing
            a GraphQL query.
            File may use comments and multi-line formatting.
            .. _GitHub GraphQL Explorer:
                https://developer.github.com/v4/explorer/
        verbose (Optional[bool]): If False, prints will be suppressed.
            Defaults to False.

    Returns:
        str: A single line GraphQL query.

    """
    if not os.path.isfile(filePath):
        raise RuntimeError("Query file '%s' does not exist." % (filePath))
    lastModified = os.path.getmtime(filePath)
    absPath = os.path.abspath(filePath)
    if absPath == self.__queryPath and lastModified == self.__queryTimestamp:
        _vPrint(verbose, "Using cached query '%s'"
                % (os.path.basename(self.__queryPath)))
        query_in = self.__query
    else:
        _vPrint(verbose, "Reading '%s' ... " % (filePath), end="", flush=True)
        with open(filePath, "r") as q:
            # Strip all comments and newlines.
            query_in = re.sub(r'#.*(\n|\Z)', '\n', q.read())
            # Condense extra whitespace.
            query_in = re.sub(r'\s+', ' ', query_in)
            # Remove any leading or trailing whitespace.
            query_in = re.sub(r'(\A\s+)|(\s+\Z)', '', query_in)
        _vPrint(verbose, "File read!")
        self.__queryPath = absPath
        self.__queryTimestamp = lastModified
        self.__query = query_in
    return query_in
[ "Read", "a", "pretty", "formatted", "GraphQL", "query", "file", "into", "a", "one", "-", "line", "string", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L101-L139
[ "def", "_readGQL", "(", "self", ",", "filePath", ",", "verbose", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filePath", ")", ":", "raise", "RuntimeError", "(", "\"Query file '%s' does not exist.\"", "%", "(", "filePath", ")", ")", "lastModified", "=", "os", ".", "path", ".", "getmtime", "(", "filePath", ")", "absPath", "=", "os", ".", "path", ".", "abspath", "(", "filePath", ")", "if", "absPath", "==", "self", ".", "__queryPath", "and", "lastModified", "==", "self", ".", "__queryTimestamp", ":", "_vPrint", "(", "verbose", ",", "\"Using cached query '%s'\"", "%", "(", "os", ".", "path", ".", "basename", "(", "self", ".", "__queryPath", ")", ")", ")", "query_in", "=", "self", ".", "__query", "else", ":", "_vPrint", "(", "verbose", ",", "\"Reading '%s' ... \"", "%", "(", "filePath", ")", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "with", "open", "(", "filePath", ",", "\"r\"", ")", "as", "q", ":", "# Strip all comments and newlines.", "query_in", "=", "re", ".", "sub", "(", "r'#.*(\\n|\\Z)'", ",", "'\\n'", ",", "q", ".", "read", "(", ")", ")", "# Condense extra whitespace.", "query_in", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "' '", ",", "query_in", ")", "# Remove any leading or trailing whitespace.", "query_in", "=", "re", ".", "sub", "(", "r'(\\A\\s+)|(\\s+\\Z)'", ",", "''", ",", "query_in", ")", "_vPrint", "(", "verbose", ",", "\"File read!\"", ")", "self", ".", "__queryPath", "=", "absPath", "self", ".", "__queryTimestamp", "=", "lastModified", "self", ".", "__query", "=", "query_in", "return", "query_in" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
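For illustration, a minimal standalone sketch of the same three-step regex cleanup that _readGQL performs; the sample query string is made up for the example:

import re

def flatten_gql(text):
    # Mirror _readGQL's cleanup: strip comments, condense whitespace, trim ends.
    text = re.sub(r'#.*(\n|\Z)', '\n', text)
    text = re.sub(r'\s+', ' ', text)
    return re.sub(r'(\A\s+)|(\s+\Z)', '', text)

sample = """
query {            # a comment that will be stripped
  viewer { login }
}
"""
print(flatten_gql(sample))  # -> query { viewer { login } }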
test
GitHubQueryManager.queryGitHubFromFile
Submit a GitHub GraphQL query from a file. Can only be used with GraphQL queries. For REST queries, see the 'queryGitHub' method. Args: filePath (str): A relative or absolute path to a file containing a GraphQL query. File may use comments and multi-line formatting. .. _GitHub GraphQL Explorer: https://developer.github.com/v4/explorer/ gitvars (Optional[Dict]): All query variables. Defaults to empty. GraphQL Only. verbosity (Optional[int]): Changes output verbosity levels. If < 0, all extra printouts are suppressed. If == 0, normal print statements are displayed. If > 0, additional status print statements are displayed. Defaults to 0. **kwargs: Keyword arguments for the 'queryGitHub' method. Returns: Dict: A JSON style dictionary.
scraper/github/queryManager.py
def queryGitHubFromFile(self, filePath, gitvars={}, verbosity=0, **kwargs): """Submit a GitHub GraphQL query from a file. Can only be used with GraphQL queries. For REST queries, see the 'queryGitHub' method. Args: filePath (str): A relative or absolute path to a file containing a GraphQL query. File may use comments and multi-line formatting. .. _GitHub GraphQL Explorer: https://developer.github.com/v4/explorer/ gitvars (Optional[Dict]): All query variables. Defaults to empty. GraphQL Only. verbosity (Optional[int]): Changes output verbosity levels. If < 0, all extra printouts are suppressed. If == 0, normal print statements are displayed. If > 0, additional status print statements are displayed. Defaults to 0. **kwargs: Keyword arguments for the 'queryGitHub' method. Returns: Dict: A JSON style dictionary. """ gitquery = self._readGQL(filePath, verbose=(verbosity >= 0)) return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, **kwargs)
def queryGitHubFromFile(self, filePath, gitvars={}, verbosity=0, **kwargs): """Submit a GitHub GraphQL query from a file. Can only be used with GraphQL queries. For REST queries, see the 'queryGitHub' method. Args: filePath (str): A relative or absolute path to a file containing a GraphQL query. File may use comments and multi-line formatting. .. _GitHub GraphQL Explorer: https://developer.github.com/v4/explorer/ gitvars (Optional[Dict]): All query variables. Defaults to empty. GraphQL Only. verbosity (Optional[int]): Changes output verbosity levels. If < 0, all extra printouts are suppressed. If == 0, normal print statements are displayed. If > 0, additional status print statements are displayed. Defaults to 0. **kwargs: Keyword arguments for the 'queryGitHub' method. Returns: Dict: A JSON style dictionary. """ gitquery = self._readGQL(filePath, verbose=(verbosity >= 0)) return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, **kwargs)
[ "Submit", "a", "GitHub", "GraphQL", "query", "from", "a", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L141-L168
[ "def", "queryGitHubFromFile", "(", "self", ",", "filePath", ",", "gitvars", "=", "{", "}", ",", "verbosity", "=", "0", ",", "*", "*", "kwargs", ")", ":", "gitquery", "=", "self", ".", "_readGQL", "(", "filePath", ",", "verbose", "=", "(", "verbosity", ">=", "0", ")", ")", "return", "self", ".", "queryGitHub", "(", "gitquery", ",", "gitvars", "=", "gitvars", ",", "verbosity", "=", "verbosity", ",", "*", "*", "kwargs", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
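A hedged usage sketch: the query file path and variable names below are hypothetical, and the manager instance is assumed to pick up its GitHub API token from the environment.

from scraper.github.queryManager import GitHubQueryManager

qm = GitHubQueryManager()  # assumes a GitHub API token is available to the manager
result = qm.queryGitHubFromFile(
    "queries/repo_info.gql",                       # hypothetical query file
    gitvars={"owner": "LLNL", "name": "scraper"},  # hypothetical variables
    verbosity=-1)                                  # suppress extra printouts
print(result["data"])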
test
GitHubQueryManager.queryGitHub
Submit a GitHub query. Args: gitquery (str): The query or endpoint itself. Examples: query: 'query { viewer { login } }' endpoint: '/user' gitvars (Optional[Dict]): All query variables. Defaults to empty. GraphQL Only. verbosity (Optional[int]): Changes output verbosity levels. If < 0, all extra printouts are suppressed. If == 0, normal print statements are displayed. If > 0, additional status print statements are displayed. Defaults to 0. paginate (Optional[bool]): Pagination will be completed automatically if True. Defaults to False. cursorVar (Optional[str]): Key in 'gitvars' that represents the pagination cursor. Defaults to None. GraphQL Only. keysToList (Optional[List[str]]): Ordered list of keys needed to retrieve the list in the query results to be extended by pagination. Defaults to empty. Example: ['data', 'viewer', 'repositories', 'nodes'] GraphQL Only. rest (Optional[bool]): If True, uses the REST API instead of GraphQL. Defaults to False. requestCount (Optional[int]): Counter for repeated requests. pageNum (Optional[int]): Counter for pagination. For user readable log messages only, does not affect data. Returns: Dict: A JSON style dictionary.
scraper/github/queryManager.py
def queryGitHub(self, gitquery, gitvars={}, verbosity=0, paginate=False, cursorVar=None, keysToList=[], rest=False, requestCount=0, pageNum=0): """Submit a GitHub query. Args: gitquery (str): The query or endpoint itself. Examples: query: 'query { viewer { login } }' endpoint: '/user' gitvars (Optional[Dict]): All query variables. Defaults to empty. GraphQL Only. verbosity (Optional[int]): Changes output verbosity levels. If < 0, all extra printouts are suppressed. If == 0, normal print statements are displayed. If > 0, additional status print statements are displayed. Defaults to 0. paginate (Optional[bool]): Pagination will be completed automatically if True. Defaults to False. cursorVar (Optional[str]): Key in 'gitvars' that represents the pagination cursor. Defaults to None. GraphQL Only. keysToList (Optional[List[str]]): Ordered list of keys needed to retrieve the list in the query results to be extended by pagination. Defaults to empty. Example: ['data', 'viewer', 'repositories', 'nodes'] GraphQL Only. rest (Optional[bool]): If True, uses the REST API instead of GraphQL. Defaults to False. requestCount (Optional[int]): Counter for repeated requests. pageNum (Optional[int]): Counter for pagination. For user readable log messages only, does not affect data. Returns: Dict: A JSON style dictionary. """ requestCount += 1 if pageNum < 0: # no negative page numbers pageNum = 0 pageNum += 1 if paginate: _vPrint((verbosity >= 0), "Page %d" % (pageNum)) _vPrint((verbosity >= 0), "Sending %s query..." % ("REST" if rest else "GraphQL")) response = self._submitQuery(gitquery, gitvars=gitvars, verbose=(verbosity > 0), rest=rest) _vPrint((verbosity >= 0), "Checking response...") _vPrint((verbosity >= 0), response["headDict"]["http"]) statusNum = response["statusNum"] # Decrement page count before error checks to properly reflect any repeated queries pageNum -= 1 # Make sure the query limit didn't run out try: apiStatus = { "limit": int(response["headDict"]["X-RateLimit-Limit"]), "remaining": int(response["headDict"]["X-RateLimit-Remaining"]), "reset": int(response["headDict"]["X-RateLimit-Reset"]) } _vPrint((verbosity >= 0), "API Status %s" % (json.dumps(apiStatus))) if not apiStatus["remaining"] > 0: _vPrint((verbosity >= 0), "API usage limit reached during query.") self._awaitReset(apiStatus["reset"]) _vPrint((verbosity >= 0), "Repeating query...") return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=(requestCount - 1), pageNum=pageNum) except KeyError: # Handles error cases that don't return X-RateLimit data _vPrint((verbosity >= 0), "Failed to check API Status.") # Check for accepted but not yet processed, usually due to un-cached data if statusNum == 202: if requestCount >= self.maxRetry: raise RuntimeError("Query attempted but failed %d times.\n%s\n%s" % (self.maxRetry, response["headDict"]["http"], response["result"])) else: self._countdown(self.__retryDelay, printString="Query accepted but not yet processed.\nTrying again in %*dsec...", verbose=(verbosity >= 0)) return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum) # Check for server error responses if statusNum == 502 or statusNum == 503: if requestCount >= self.maxRetry: raise RuntimeError("Query attempted but failed %d times.\n%s\n%s" % (self.maxRetry, response["headDict"]["http"], response["result"])) else: self._countdown(self.__retryDelay, printString="Server error. Trying again in %*dsec...", verbose=(verbosity >= 0)) return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum) # Check for other error responses if statusNum >= 400 or statusNum == 204: raise RuntimeError("Request got an Error response.\n%s\n%s" % (response["headDict"]["http"], response["result"])) _vPrint((verbosity >= 0), "Data received!") outObj = json.loads(response["result"]) # Check for GraphQL API errors (e.g. repo not found) if not rest and "errors" in outObj: if requestCount >= self.maxRetry: raise RuntimeError("Query attempted but failed %d times.\n%s\n%s" % (self.maxRetry, response["headDict"]["http"], response["result"])) elif len(outObj["errors"]) == 1 and len(outObj["errors"][0]) == 1: # Poorly defined error type, usually intermittent, try again. _vPrint((verbosity >= 0), "GraphQL API error.\n%s" % (json.dumps(outObj["errors"]))) self._countdown(self.__retryDelay, printString="Unknown API error.\nTrying again in %*dsec...", verbose=(verbosity >= 0)) return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum) else: raise RuntimeError("GraphQL API error.\n%s" % (json.dumps(outObj["errors"]))) # Re-increment page count before the next page query pageNum += 1 # Pagination if paginate: if rest and response["linkDict"]: if "next" in response["linkDict"]: nextObj = self.queryGitHub(response["linkDict"]["next"], gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=0, pageNum=pageNum) outObj.extend(nextObj) elif not rest: if not cursorVar: raise ValueError("Must specify argument 'cursorVar' to use GraphQL auto-pagination.") if not len(keysToList) > 0: raise ValueError("Must specify argument 'keysToList' as a non-empty list to use GraphQL auto-pagination.") aPage = outObj for key in keysToList[0:-1]: aPage = aPage[key] gitvars[cursorVar] = aPage["pageInfo"]["endCursor"] if aPage["pageInfo"]["hasNextPage"]: nextObj = self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=0, pageNum=pageNum) newPage = nextObj for key in keysToList[0:-1]: newPage = newPage[key] aPage[keysToList[-1]].extend(newPage[keysToList[-1]]) aPage.pop("pageInfo", None) return outObj
def queryGitHub(self, gitquery, gitvars={}, verbosity=0, paginate=False, cursorVar=None, keysToList=[], rest=False, requestCount=0, pageNum=0): """Submit a GitHub query. Args: gitquery (str): The query or endpoint itself. Examples: query: 'query { viewer { login } }' endpoint: '/user' gitvars (Optional[Dict]): All query variables. Defaults to empty. GraphQL Only. verbosity (Optional[int]): Changes output verbosity levels. If < 0, all extra printouts are suppressed. If == 0, normal print statements are displayed. If > 0, additional status print statements are displayed. Defaults to 0. paginate (Optional[bool]): Pagination will be completed automatically if True. Defaults to False. cursorVar (Optional[str]): Key in 'gitvars' that represents the pagination cursor. Defaults to None. GraphQL Only. keysToList (Optional[List[str]]): Ordered list of keys needed to retrieve the list in the query results to be extended by pagination. Defaults to empty. Example: ['data', 'viewer', 'repositories', 'nodes'] GraphQL Only. rest (Optional[bool]): If True, uses the REST API instead of GraphQL. Defaults to False. requestCount (Optional[int]): Counter for repeated requests. pageNum (Optional[int]): Counter for pagination. For user readable log messages only, does not affect data. Returns: Dict: A JSON style dictionary. """ requestCount += 1 if pageNum < 0: # no negative page numbers pageNum = 0 pageNum += 1 if paginate: _vPrint((verbosity >= 0), "Page %d" % (pageNum)) _vPrint((verbosity >= 0), "Sending %s query..." % ("REST" if rest else "GraphQL")) response = self._submitQuery(gitquery, gitvars=gitvars, verbose=(verbosity > 0), rest=rest) _vPrint((verbosity >= 0), "Checking response...") _vPrint((verbosity >= 0), response["headDict"]["http"]) statusNum = response["statusNum"] # Decrement page count before error checks to properly reflect any repeated queries pageNum -= 1 # Make sure the query limit didn't run out try: apiStatus = { "limit": int(response["headDict"]["X-RateLimit-Limit"]), "remaining": int(response["headDict"]["X-RateLimit-Remaining"]), "reset": int(response["headDict"]["X-RateLimit-Reset"]) } _vPrint((verbosity >= 0), "API Status %s" % (json.dumps(apiStatus))) if not apiStatus["remaining"] > 0: _vPrint((verbosity >= 0), "API usage limit reached during query.") self._awaitReset(apiStatus["reset"]) _vPrint((verbosity >= 0), "Repeating query...") return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=(requestCount - 1), pageNum=pageNum) except KeyError: # Handles error cases that don't return X-RateLimit data _vPrint((verbosity >= 0), "Failed to check API Status.") # Check for accepted but not yet processed, usually due to un-cached data if statusNum == 202: if requestCount >= self.maxRetry: raise RuntimeError("Query attempted but failed %d times.\n%s\n%s" % (self.maxRetry, response["headDict"]["http"], response["result"])) else: self._countdown(self.__retryDelay, printString="Query accepted but not yet processed.\nTrying again in %*dsec...", verbose=(verbosity >= 0)) return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum) # Check for server error responses if statusNum == 502 or statusNum == 503: if requestCount >= self.maxRetry: raise RuntimeError("Query attempted but failed %d times.\n%s\n%s" % (self.maxRetry, response["headDict"]["http"], response["result"])) else: self._countdown(self.__retryDelay, printString="Server error. Trying again in %*dsec...", verbose=(verbosity >= 0)) return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum) # Check for other error responses if statusNum >= 400 or statusNum == 204: raise RuntimeError("Request got an Error response.\n%s\n%s" % (response["headDict"]["http"], response["result"])) _vPrint((verbosity >= 0), "Data received!") outObj = json.loads(response["result"]) # Check for GraphQL API errors (e.g. repo not found) if not rest and "errors" in outObj: if requestCount >= self.maxRetry: raise RuntimeError("Query attempted but failed %d times.\n%s\n%s" % (self.maxRetry, response["headDict"]["http"], response["result"])) elif len(outObj["errors"]) == 1 and len(outObj["errors"][0]) == 1: # Poorly defined error type, usually intermittent, try again. _vPrint((verbosity >= 0), "GraphQL API error.\n%s" % (json.dumps(outObj["errors"]))) self._countdown(self.__retryDelay, printString="Unknown API error.\nTrying again in %*dsec...", verbose=(verbosity >= 0)) return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum) else: raise RuntimeError("GraphQL API error.\n%s" % (json.dumps(outObj["errors"]))) # Re-increment page count before the next page query pageNum += 1 # Pagination if paginate: if rest and response["linkDict"]: if "next" in response["linkDict"]: nextObj = self.queryGitHub(response["linkDict"]["next"], gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=0, pageNum=pageNum) outObj.extend(nextObj) elif not rest: if not cursorVar: raise ValueError("Must specify argument 'cursorVar' to use GraphQL auto-pagination.") if not len(keysToList) > 0: raise ValueError("Must specify argument 'keysToList' as a non-empty list to use GraphQL auto-pagination.") aPage = outObj for key in keysToList[0:-1]: aPage = aPage[key] gitvars[cursorVar] = aPage["pageInfo"]["endCursor"] if aPage["pageInfo"]["hasNextPage"]: nextObj = self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=0, pageNum=pageNum) newPage = nextObj for key in keysToList[0:-1]: newPage = newPage[key] aPage[keysToList[-1]].extend(newPage[keysToList[-1]]) aPage.pop("pageInfo", None) return outObj
[ "Submit", "a", "GitHub", "query", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L170-L299
[ "def", "queryGitHub", "(", "self", ",", "gitquery", ",", "gitvars", "=", "{", "}", ",", "verbosity", "=", "0", ",", "paginate", "=", "False", ",", "cursorVar", "=", "None", ",", "keysToList", "=", "[", "]", ",", "rest", "=", "False", ",", "requestCount", "=", "0", ",", "pageNum", "=", "0", ")", ":", "requestCount", "+=", "1", "if", "pageNum", "<", "0", ":", "# no negative page numbers", "pageNum", "=", "0", "pageNum", "+=", "1", "if", "paginate", ":", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"Page %d\"", "%", "(", "pageNum", ")", ")", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"Sending %s query...\"", "%", "(", "\"REST\"", "if", "rest", "else", "\"GraphQL\"", ")", ")", "response", "=", "self", ".", "_submitQuery", "(", "gitquery", ",", "gitvars", "=", "gitvars", ",", "verbose", "=", "(", "verbosity", ">", "0", ")", ",", "rest", "=", "rest", ")", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"Checking response...\"", ")", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "response", "[", "\"headDict\"", "]", "[", "\"http\"", "]", ")", "statusNum", "=", "response", "[", "\"statusNum\"", "]", "# Decrement page count before error checks to properly reflect any repeated queries", "pageNum", "-=", "1", "# Make sure the query limit didn't run out", "try", ":", "apiStatus", "=", "{", "\"limit\"", ":", "int", "(", "response", "[", "\"headDict\"", "]", "[", "\"X-RateLimit-Limit\"", "]", ")", ",", "\"remaining\"", ":", "int", "(", "response", "[", "\"headDict\"", "]", "[", "\"X-RateLimit-Remaining\"", "]", ")", ",", "\"reset\"", ":", "int", "(", "response", "[", "\"headDict\"", "]", "[", "\"X-RateLimit-Reset\"", "]", ")", "}", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"API Status %s\"", "%", "(", "json", ".", "dumps", "(", "apiStatus", ")", ")", ")", "if", "not", "apiStatus", "[", "\"remaining\"", "]", ">", "0", ":", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"API usage limit reached during query.\"", ")", "self", ".", "_awaitReset", "(", "apiStatus", "[", "\"reset\"", "]", ")", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"Repeating query...\"", ")", "return", "self", ".", "queryGitHub", "(", "gitquery", ",", "gitvars", "=", "gitvars", ",", "verbosity", "=", "verbosity", ",", "paginate", "=", "paginate", ",", "cursorVar", "=", "cursorVar", ",", "keysToList", "=", "keysToList", ",", "rest", "=", "rest", ",", "requestCount", "=", "(", "requestCount", "-", "1", ")", ",", "pageNum", "=", "pageNum", ")", "except", "KeyError", ":", "# Handles error cases that don't return X-RateLimit data", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"Failed to check API Status.\"", ")", "# Check for accepted but not yet processed, usually due to un-cached data", "if", "statusNum", "==", "202", ":", "if", "requestCount", ">=", "self", ".", "maxRetry", ":", "raise", "RuntimeError", "(", "\"Query attempted but failed %d times.\\n%s\\n%s\"", "%", "(", "self", ".", "maxRetry", ",", "response", "[", "\"headDict\"", "]", "[", "\"http\"", "]", ",", "response", "[", "\"result\"", "]", ")", ")", "else", ":", "self", ".", "_countdown", "(", "self", ".", "__retryDelay", ",", "printString", "=", "\"Query accepted but not yet processed. 
Trying again in %*dsec...\"", ",", "verbose", "=", "(", "verbosity", ">=", "0", ")", ")", "return", "self", ".", "queryGitHub", "(", "gitquery", ",", "gitvars", "=", "gitvars", ",", "verbosity", "=", "verbosity", ",", "paginate", "=", "paginate", ",", "cursorVar", "=", "cursorVar", ",", "keysToList", "=", "keysToList", ",", "rest", "=", "rest", ",", "requestCount", "=", "requestCount", ",", "pageNum", "=", "pageNum", ")", "# Check for server error responses", "if", "statusNum", "==", "502", "or", "statusNum", "==", "503", ":", "if", "requestCount", ">=", "self", ".", "maxRetry", ":", "raise", "RuntimeError", "(", "\"Query attempted but failed %d times.\\n%s\\n%s\"", "%", "(", "self", ".", "maxRetry", ",", "response", "[", "\"headDict\"", "]", "[", "\"http\"", "]", ",", "response", "[", "\"result\"", "]", ")", ")", "else", ":", "self", ".", "_countdown", "(", "self", ".", "__retryDelay", ",", "printString", "=", "\"Server error. Trying again in %*dsec...\"", ",", "verbose", "=", "(", "verbosity", ">=", "0", ")", ")", "return", "self", ".", "queryGitHub", "(", "gitquery", ",", "gitvars", "=", "gitvars", ",", "verbosity", "=", "verbosity", ",", "paginate", "=", "paginate", ",", "cursorVar", "=", "cursorVar", ",", "keysToList", "=", "keysToList", ",", "rest", "=", "rest", ",", "requestCount", "=", "requestCount", ",", "pageNum", "=", "pageNum", ")", "# Check for other error responses", "if", "statusNum", ">=", "400", "or", "statusNum", "==", "204", ":", "raise", "RuntimeError", "(", "\"Request got an Error response.\\n%s\\n%s\"", "%", "(", "response", "[", "\"headDict\"", "]", "[", "\"http\"", "]", ",", "response", "[", "\"result\"", "]", ")", ")", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"Data received!\"", ")", "outObj", "=", "json", ".", "loads", "(", "response", "[", "\"result\"", "]", ")", "# Check for GraphQL API errors (e.g. repo not found)", "if", "not", "rest", "and", "\"errors\"", "in", "outObj", ":", "if", "requestCount", ">=", "self", ".", "maxRetry", ":", "raise", "RuntimeError", "(", "\"Query attempted but failed %d times.\\n%s\\n%s\"", "%", "(", "self", ".", "maxRetry", ",", "response", "[", "\"headDict\"", "]", "[", "\"http\"", "]", ",", "response", "[", "\"result\"", "]", ")", ")", "elif", "len", "(", "outObj", "[", "\"errors\"", "]", ")", "==", "1", "and", "len", "(", "outObj", "[", "\"errors\"", "]", "[", "0", "]", ")", "==", "1", ":", "# Poorly defined error type, usually intermittent, try again.", "_vPrint", "(", "(", "verbosity", ">=", "0", ")", ",", "\"GraphQL API error.\\n%s\"", "%", "(", "json", ".", "dumps", "(", "outObj", "[", "\"errors\"", "]", ")", ")", ")", "self", ".", "_countdown", "(", "self", ".", "__retryDelay", ",", "printString", "=", "\"Unknown API error. 
Trying again in %*dsec...\"", ",", "verbose", "=", "(", "verbosity", ">=", "0", ")", ")", "return", "self", ".", "queryGitHub", "(", "gitquery", ",", "gitvars", "=", "gitvars", ",", "verbosity", "=", "verbosity", ",", "paginate", "=", "paginate", ",", "cursorVar", "=", "cursorVar", ",", "keysToList", "=", "keysToList", ",", "rest", "=", "rest", ",", "requestCount", "=", "requestCount", ",", "pageNum", "=", "pageNum", ")", "else", ":", "raise", "RuntimeError", "(", "\"GraphQL API error.\\n%s\"", "%", "(", "json", ".", "dumps", "(", "outObj", "[", "\"errors\"", "]", ")", ")", ")", "# Re-increment page count before the next page query", "pageNum", "+=", "1", "# Pagination", "if", "paginate", ":", "if", "rest", "and", "response", "[", "\"linkDict\"", "]", ":", "if", "\"next\"", "in", "response", "[", "\"linkDict\"", "]", ":", "nextObj", "=", "self", ".", "queryGitHub", "(", "response", "[", "\"linkDict\"", "]", "[", "\"next\"", "]", ",", "gitvars", "=", "gitvars", ",", "verbosity", "=", "verbosity", ",", "paginate", "=", "paginate", ",", "cursorVar", "=", "cursorVar", ",", "keysToList", "=", "keysToList", ",", "rest", "=", "rest", ",", "requestCount", "=", "0", ",", "pageNum", "=", "pageNum", ")", "outObj", ".", "extend", "(", "nextObj", ")", "elif", "not", "rest", ":", "if", "not", "cursorVar", ":", "raise", "ValueError", "(", "\"Must specify argument 'cursorVar' to use GraphQL auto-pagination.\"", ")", "if", "not", "len", "(", "keysToList", ")", ">", "0", ":", "raise", "ValueError", "(", "\"Must specify argument 'keysToList' as a non-empty list to use GraphQL auto-pagination.\"", ")", "aPage", "=", "outObj", "for", "key", "in", "keysToList", "[", "0", ":", "-", "1", "]", ":", "aPage", "=", "aPage", "[", "key", "]", "gitvars", "[", "cursorVar", "]", "=", "aPage", "[", "\"pageInfo\"", "]", "[", "\"endCursor\"", "]", "if", "aPage", "[", "\"pageInfo\"", "]", "[", "\"hasNextPage\"", "]", ":", "nextObj", "=", "self", ".", "queryGitHub", "(", "gitquery", ",", "gitvars", "=", "gitvars", ",", "verbosity", "=", "verbosity", ",", "paginate", "=", "paginate", ",", "cursorVar", "=", "cursorVar", ",", "keysToList", "=", "keysToList", ",", "rest", "=", "rest", ",", "requestCount", "=", "0", ",", "pageNum", "=", "pageNum", ")", "newPage", "=", "nextObj", "for", "key", "in", "keysToList", "[", "0", ":", "-", "1", "]", ":", "newPage", "=", "newPage", "[", "key", "]", "aPage", "[", "keysToList", "[", "-", "1", "]", "]", ".", "extend", "(", "newPage", "[", "keysToList", "[", "-", "1", "]", "]", ")", "aPage", ".", "pop", "(", "\"pageInfo\"", ",", "None", ")", "return", "outObj" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
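A sketch showing how 'paginate', 'cursorVar', and 'keysToList' fit together for GraphQL auto-pagination; the query text and variable names are illustrative rather than taken from the source, and 'qm' is the manager instance from the earlier sketch:

query = ('query ($owner: String!, $cursor: String) { '
         'repositoryOwner(login: $owner) { '
         'repositories(first: 100, after: $cursor) { '
         'nodes { name } pageInfo { endCursor hasNextPage } } } }')
repos = qm.queryGitHub(
    query,
    gitvars={"owner": "LLNL", "cursor": None},
    paginate=True,
    cursorVar="cursor",  # key in gitvars that receives each endCursor
    keysToList=["data", "repositoryOwner", "repositories", "nodes"])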
test
GitHubQueryManager._submitQuery
Send a curl request to GitHub. Args: gitquery (str): The query or endpoint itself. Examples: query: 'query { viewer { login } }' endpoint: '/user' gitvars (Optional[Dict]): All query variables. Defaults to empty. verbose (Optional[bool]): If False, stderr prints will be suppressed. Defaults to False. rest (Optional[bool]): If True, uses the REST API instead of GraphQL. Defaults to False. Returns: { 'statusNum' (int): The HTTP status code. 'headDict' (Dict[str]): The response headers. 'linkDict' (Dict[str]): Link based pagination data. 'result' (str): The body of the response. }
scraper/github/queryManager.py
def _submitQuery(self, gitquery, gitvars={}, verbose=False, rest=False): """Send a curl request to GitHub. Args: gitquery (str): The query or endpoint itself. Examples: query: 'query { viewer { login } }' endpoint: '/user' gitvars (Optional[Dict]): All query variables. Defaults to empty. verbose (Optional[bool]): If False, stderr prints will be suppressed. Defaults to False. rest (Optional[bool]): If True, uses the REST API instead of GraphQL. Defaults to False. Returns: { 'statusNum' (int): The HTTP status code. 'headDict' (Dict[str]): The response headers. 'linkDict' (Dict[str]): Link based pagination data. 'result' (str): The body of the response. } """ errOut = DEVNULL if not verbose else None authhead = 'Authorization: bearer ' + self.__githubApiToken bashcurl = 'curl -iH TMPauthhead -X POST -d TMPgitquery https://api.github.com/graphql' if not rest \ else 'curl -iH TMPauthhead https://api.github.com' + gitquery bashcurl_list = bashcurl.split() bashcurl_list[2] = authhead if not rest: gitqueryJSON = json.dumps({'query': gitquery, 'variables': json.dumps(gitvars)}) bashcurl_list[6] = gitqueryJSON fullResponse = check_output(bashcurl_list, stderr=errOut).decode() _vPrint(verbose, "\n" + fullResponse) fullResponse = fullResponse.split('\r\n\r\n') heads = fullResponse[0].split('\r\n') if len(fullResponse) > 1: result = fullResponse[1] else: result = "" http = heads[0].split() statusNum = int(http[1]) # Parse headers into a useful dictionary headDict = {} headDict["http"] = heads[0] for header in heads[1:]: h = header.split(': ') headDict[h[0]] = h[1] # Parse any Link headers even further linkDict = None if "Link" in headDict: linkProperties = headDict["Link"].split(', ') propDict = {} for item in linkProperties: divided = re.split(r'<https://api.github.com|>; rel="|"', item) propDict[divided[2]] = divided[1] linkDict = propDict return {'statusNum': statusNum, 'headDict': headDict, 'linkDict': linkDict, 'result': result}
def _submitQuery(self, gitquery, gitvars={}, verbose=False, rest=False): """Send a curl request to GitHub. Args: gitquery (str): The query or endpoint itself. Examples: query: 'query { viewer { login } }' endpoint: '/user' gitvars (Optional[Dict]): All query variables. Defaults to empty. verbose (Optional[bool]): If False, stderr prints will be suppressed. Defaults to False. rest (Optional[bool]): If True, uses the REST API instead of GraphQL. Defaults to False. Returns: { 'statusNum' (int): The HTTP status code. 'headDict' (Dict[str]): The response headers. 'linkDict' (Dict[str]): Link based pagination data. 'result' (str): The body of the response. } """ errOut = DEVNULL if not verbose else None authhead = 'Authorization: bearer ' + self.__githubApiToken bashcurl = 'curl -iH TMPauthhead -X POST -d TMPgitquery https://api.github.com/graphql' if not rest \ else 'curl -iH TMPauthhead https://api.github.com' + gitquery bashcurl_list = bashcurl.split() bashcurl_list[2] = authhead if not rest: gitqueryJSON = json.dumps({'query': gitquery, 'variables': json.dumps(gitvars)}) bashcurl_list[6] = gitqueryJSON fullResponse = check_output(bashcurl_list, stderr=errOut).decode() _vPrint(verbose, "\n" + fullResponse) fullResponse = fullResponse.split('\r\n\r\n') heads = fullResponse[0].split('\r\n') if len(fullResponse) > 1: result = fullResponse[1] else: result = "" http = heads[0].split() statusNum = int(http[1]) # Parse headers into a useful dictionary headDict = {} headDict["http"] = heads[0] for header in heads[1:]: h = header.split(': ') headDict[h[0]] = h[1] # Parse any Link headers even further linkDict = None if "Link" in headDict: linkProperties = headDict["Link"].split(', ') propDict = {} for item in linkProperties: divided = re.split(r'<https://api.github.com|>; rel="|"', item) propDict[divided[2]] = divided[1] linkDict = propDict return {'statusNum': statusNum, 'headDict': headDict, 'linkDict': linkDict, 'result': result}
[ "Send", "a", "curl", "request", "to", "GitHub", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L301-L364
[ "def", "_submitQuery", "(", "self", ",", "gitquery", ",", "gitvars", "=", "{", "}", ",", "verbose", "=", "False", ",", "rest", "=", "False", ")", ":", "errOut", "=", "DEVNULL", "if", "not", "verbose", "else", "None", "authhead", "=", "'Authorization: bearer '", "+", "self", ".", "__githubApiToken", "bashcurl", "=", "'curl -iH TMPauthhead -X POST -d TMPgitquery https://api.github.com/graphql'", "if", "not", "rest", "else", "'curl -iH TMPauthhead https://api.github.com'", "+", "gitquery", "bashcurl_list", "=", "bashcurl", ".", "split", "(", ")", "bashcurl_list", "[", "2", "]", "=", "authhead", "if", "not", "rest", ":", "gitqueryJSON", "=", "json", ".", "dumps", "(", "{", "'query'", ":", "gitquery", ",", "'variables'", ":", "json", ".", "dumps", "(", "gitvars", ")", "}", ")", "bashcurl_list", "[", "6", "]", "=", "gitqueryJSON", "fullResponse", "=", "check_output", "(", "bashcurl_list", ",", "stderr", "=", "errOut", ")", ".", "decode", "(", ")", "_vPrint", "(", "verbose", ",", "\"\\n\"", "+", "fullResponse", ")", "fullResponse", "=", "fullResponse", ".", "split", "(", "'\\r\\n\\r\\n'", ")", "heads", "=", "fullResponse", "[", "0", "]", ".", "split", "(", "'\\r\\n'", ")", "if", "len", "(", "fullResponse", ")", ">", "1", ":", "result", "=", "fullResponse", "[", "1", "]", "else", ":", "result", "=", "\"\"", "http", "=", "heads", "[", "0", "]", ".", "split", "(", ")", "statusNum", "=", "int", "(", "http", "[", "1", "]", ")", "# Parse headers into a useful dictionary", "headDict", "=", "{", "}", "headDict", "[", "\"http\"", "]", "=", "heads", "[", "0", "]", "for", "header", "in", "heads", "[", "1", ":", "]", ":", "h", "=", "header", ".", "split", "(", "': '", ")", "headDict", "[", "h", "[", "0", "]", "]", "=", "h", "[", "1", "]", "# Parse any Link headers even further", "linkDict", "=", "None", "if", "\"Link\"", "in", "headDict", ":", "linkProperties", "=", "headDict", "[", "\"Link\"", "]", ".", "split", "(", "', '", ")", "propDict", "=", "{", "}", "for", "item", "in", "linkProperties", ":", "divided", "=", "re", ".", "split", "(", "r'<https://api.github.com|>; rel=\"|\"'", ",", "item", ")", "propDict", "[", "divided", "[", "2", "]", "]", "=", "divided", "[", "1", "]", "linkDict", "=", "propDict", "return", "{", "'statusNum'", ":", "statusNum", ",", "'headDict'", ":", "headDict", ",", "'linkDict'", ":", "linkDict", ",", "'result'", ":", "result", "}" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
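To make the Link-header parsing above concrete, here is the same re.split applied to one hypothetical header entry:

import re

item = '<https://api.github.com/user/repos?page=2>; rel="next"'
divided = re.split(r'<https://api.github.com|>; rel="|"', item)
# divided == ['', '/user/repos?page=2', 'next', '']
print({divided[2]: divided[1]})  # {'next': '/user/repos?page=2'}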
test
GitHubQueryManager._awaitReset
Wait until the given UTC timestamp. Args: utcTimeStamp (int): A UTC format timestamp. verbose (Optional[bool]): If False, all extra printouts will be suppressed. Defaults to True.
scraper/github/queryManager.py
def _awaitReset(self, utcTimeStamp, verbose=True): """Wait until the given UTC timestamp. Args: utcTimeStamp (int): A UTC format timestamp. verbose (Optional[bool]): If False, all extra printouts will be suppressed. Defaults to True. """ resetTime = pytz.utc.localize(datetime.utcfromtimestamp(utcTimeStamp)) _vPrint(verbose, "--- Current Timestamp") _vPrint(verbose, " %s" % (time.strftime('%c'))) now = pytz.utc.localize(datetime.utcnow()) waitTime = round((resetTime - now).total_seconds()) + 1 _vPrint(verbose, "--- Current UTC Timestamp") _vPrint(verbose, " %s" % (now.strftime('%c'))) _vPrint(verbose, "--- GITHUB NEEDS A BREAK Until UTC Timestamp") _vPrint(verbose, " %s" % (resetTime.strftime('%c'))) self._countdown(waitTime, printString="--- Waiting %*d seconds...", verbose=verbose) _vPrint(verbose, "--- READY!")
def _awaitReset(self, utcTimeStamp, verbose=True): """Wait until the given UTC timestamp. Args: utcTimeStamp (int): A UTC format timestamp. verbose (Optional[bool]): If False, all extra printouts will be suppressed. Defaults to True. """ resetTime = pytz.utc.localize(datetime.utcfromtimestamp(utcTimeStamp)) _vPrint(verbose, "--- Current Timestamp") _vPrint(verbose, " %s" % (time.strftime('%c'))) now = pytz.utc.localize(datetime.utcnow()) waitTime = round((resetTime - now).total_seconds()) + 1 _vPrint(verbose, "--- Current UTC Timestamp") _vPrint(verbose, " %s" % (now.strftime('%c'))) _vPrint(verbose, "--- GITHUB NEEDS A BREAK Until UTC Timestamp") _vPrint(verbose, " %s" % (resetTime.strftime('%c'))) self._countdown(waitTime, printString="--- Waiting %*d seconds...", verbose=verbose) _vPrint(verbose, "--- READY!")
[ "Wait", "until", "the", "given", "UTC", "timestamp", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L366-L385
[ "def", "_awaitReset", "(", "self", ",", "utcTimeStamp", ",", "verbose", "=", "True", ")", ":", "resetTime", "=", "pytz", ".", "utc", ".", "localize", "(", "datetime", ".", "utcfromtimestamp", "(", "utcTimeStamp", ")", ")", "_vPrint", "(", "verbose", ",", "\"--- Current Timestamp\"", ")", "_vPrint", "(", "verbose", ",", "\" %s\"", "%", "(", "time", ".", "strftime", "(", "'%c'", ")", ")", ")", "now", "=", "pytz", ".", "utc", ".", "localize", "(", "datetime", ".", "utcnow", "(", ")", ")", "waitTime", "=", "round", "(", "(", "resetTime", "-", "now", ")", ".", "total_seconds", "(", ")", ")", "+", "1", "_vPrint", "(", "verbose", ",", "\"--- Current UTC Timestamp\"", ")", "_vPrint", "(", "verbose", ",", "\" %s\"", "%", "(", "now", ".", "strftime", "(", "'%c'", ")", ")", ")", "_vPrint", "(", "verbose", ",", "\"--- GITHUB NEEDS A BREAK Until UTC Timestamp\"", ")", "_vPrint", "(", "verbose", ",", "\" %s\"", "%", "(", "resetTime", ".", "strftime", "(", "'%c'", ")", ")", ")", "self", ".", "_countdown", "(", "waitTime", ",", "printString", "=", "\"--- Waiting %*d seconds...\"", ",", "verbose", "=", "verbose", ")", "_vPrint", "(", "verbose", ",", "\"--- READY!\"", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
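A small sketch of the same wait-time arithmetic, using a hypothetical reset timestamp sixty seconds in the future:

import time
from datetime import datetime
import pytz

utcTimeStamp = time.time() + 60  # hypothetical X-RateLimit-Reset value
resetTime = pytz.utc.localize(datetime.utcfromtimestamp(utcTimeStamp))
now = pytz.utc.localize(datetime.utcnow())
waitTime = round((resetTime - now).total_seconds()) + 1  # pad by one second
print("Would wait %d seconds." % waitTime)  # roughly 61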
test
GitHubQueryManager._countdown
Makes a pretty countdown. Args: waitTime (Optional[int]): The number of seconds to count down. If <= 0, the manager's default retry delay is used instead. Defaults to 0. printString (Optional[str]): A counter message to display. Defaults to 'Waiting %*d seconds...' verbose (Optional[bool]): If False, all extra printouts will be suppressed. Defaults to True.
scraper/github/queryManager.py
def _countdown(self, waitTime=0, printString="Waiting %*d seconds...", verbose=True): """Makes a pretty countdown. Args: waitTime (Optional[int]): The number of seconds to count down. If <= 0, the manager's default retry delay is used instead. Defaults to 0. printString (Optional[str]): A counter message to display. Defaults to 'Waiting %*d seconds...' verbose (Optional[bool]): If False, all extra printouts will be suppressed. Defaults to True. """ if waitTime <= 0: waitTime = self.__retryDelay for remaining in range(waitTime, 0, -1): _vPrint(verbose, "\r" + printString % (len(str(waitTime)), remaining), end="", flush=True) time.sleep(1) if verbose: _vPrint(verbose, "\r" + printString % (len(str(waitTime)), 0))
def _countdown(self, waitTime=0, printString="Waiting %*d seconds...", verbose=True): """Makes a pretty countdown. Args: waitTime (Optional[int]): The number of seconds to count down. If <= 0, the manager's default retry delay is used instead. Defaults to 0. printString (Optional[str]): A counter message to display. Defaults to 'Waiting %*d seconds...' verbose (Optional[bool]): If False, all extra printouts will be suppressed. Defaults to True. """ if waitTime <= 0: waitTime = self.__retryDelay for remaining in range(waitTime, 0, -1): _vPrint(verbose, "\r" + printString % (len(str(waitTime)), remaining), end="", flush=True) time.sleep(1) if verbose: _vPrint(verbose, "\r" + printString % (len(str(waitTime)), 0))
[ "Makes", "a", "pretty", "countdown", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L387-L407
[ "def", "_countdown", "(", "self", ",", "waitTime", "=", "0", ",", "printString", "=", "\"Waiting %*d seconds...\"", ",", "verbose", "=", "True", ")", ":", "if", "waitTime", "<=", "0", ":", "waitTime", "=", "self", ".", "__retryDelay", "for", "remaining", "in", "range", "(", "waitTime", ",", "0", ",", "-", "1", ")", ":", "_vPrint", "(", "verbose", ",", "\"\\r\"", "+", "printString", "%", "(", "len", "(", "str", "(", "waitTime", ")", ")", ",", "remaining", ")", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "time", ".", "sleep", "(", "1", ")", "if", "verbose", ":", "_vPrint", "(", "verbose", ",", "\"\\r\"", "+", "printString", "%", "(", "len", "(", "str", "(", "waitTime", ")", ")", ",", "0", ")", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
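A standalone version of the same countdown loop for illustration, counting three seconds instead of the manager's private retry delay:

import time

waitTime = 3
printString = "Waiting %*d seconds..."
for remaining in range(waitTime, 0, -1):
    # '%*d' pads the count to the width of the starting value
    print("\r" + printString % (len(str(waitTime)), remaining), end="", flush=True)
    time.sleep(1)
print("\r" + printString % (len(str(waitTime)), 0))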
test
DataManager.fileLoad
Load a JSON data file into the internal JSON data dictionary. Current internal data will be overwritten. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to True.
scraper/github/queryManager.py
def fileLoad(self, filePath=None, updatePath=True): """Load a JSON data file into the internal JSON data dictionary. Current internal data will be overwritten. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to True. """ if not filePath: filePath = self.filePath if not os.path.isfile(filePath): raise FileNotFoundError("Data file '%s' does not exist." % (filePath)) else: print("Importing existing data file '%s' ... " % (filePath), end="", flush=True) with open(filePath, "r") as q: data_raw = q.read() print("Imported!") self.data = json.loads(data_raw) if updatePath: self.filePath = filePath
def fileLoad(self, filePath=None, updatePath=True): """Load a JSON data file into the internal JSON data dictionary. Current internal data will be overwritten. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to True. """ if not filePath: filePath = self.filePath if not os.path.isfile(filePath): raise FileNotFoundError("Data file '%s' does not exist." % (filePath)) else: print("Importing existing data file '%s' ... " % (filePath), end="", flush=True) with open(filePath, "r") as q: data_raw = q.read() print("Imported!") self.data = json.loads(data_raw) if updatePath: self.filePath = filePath
[ "Load", "a", "JSON", "data", "file", "into", "the", "internal", "JSON", "data", "dictionary", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L454-L478
[ "def", "fileLoad", "(", "self", ",", "filePath", "=", "None", ",", "updatePath", "=", "True", ")", ":", "if", "not", "filePath", ":", "filePath", "=", "self", ".", "filePath", "if", "not", "os", ".", "path", ".", "isfile", "(", "filePath", ")", ":", "raise", "FileNotFoundError", "(", "\"Data file '%s' does not exist.\"", "%", "(", "filePath", ")", ")", "else", ":", "print", "(", "\"Importing existing data file '%s' ... \"", "%", "(", "filePath", ")", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "with", "open", "(", "filePath", ",", "\"r\"", ")", "as", "q", ":", "data_raw", "=", "q", ".", "read", "(", ")", "print", "(", "\"Imported!\"", ")", "self", ".", "data", "=", "json", ".", "loads", "(", "data_raw", ")", "if", "updatePath", ":", "self", ".", "filePath", "=", "filePath" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
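A hedged usage sketch; the path is hypothetical and must point at an existing JSON file, and the constructor is assumed to accept a file path, as the stored-path behavior above suggests:

from scraper.github.queryManager import DataManager

dm = DataManager("data/repos.json")  # hypothetical path to an existing file
dm.fileLoad()                        # loads the stored path into dm.data
print(sorted(dm.data.keys()))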
test
DataManager.fileSave
Write the internal JSON data dictionary to a JSON data file. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to False.
scraper/github/queryManager.py
def fileSave(self, filePath=None, updatePath=False): """Write the internal JSON data dictionary to a JSON data file. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to False. """ if not filePath: filePath = self.filePath if not os.path.isfile(filePath): print("Data file '%s' does not exist, will create new file." % (filePath)) if not os.path.exists(os.path.split(filePath)[0]): os.makedirs(os.path.split(filePath)[0]) dataJsonString = json.dumps(self.data, indent=4, sort_keys=True) print("Writing to file '%s' ... " % (filePath), end="", flush=True) with open(filePath, "w") as fileout: fileout.write(dataJsonString) print("Wrote file!") if updatePath: self.filePath = filePath
def fileSave(self, filePath=None, updatePath=False): """Write the internal JSON data dictionary to a JSON data file. If no file path is provided, the stored data file path will be used. Args: filePath (Optional[str]): A relative or absolute path to a '.json' file. Defaults to None. updatePath (Optional[bool]): Specifies whether or not to update the stored data file path. Defaults to False. """ if not filePath: filePath = self.filePath if not os.path.isfile(filePath): print("Data file '%s' does not exist, will create new file." % (filePath)) if not os.path.exists(os.path.split(filePath)[0]): os.makedirs(os.path.split(filePath)[0]) dataJsonString = json.dumps(self.data, indent=4, sort_keys=True) print("Writing to file '%s' ... " % (filePath), end="", flush=True) with open(filePath, "w") as fileout: fileout.write(dataJsonString) print("Wrote file!") if updatePath: self.filePath = filePath
[ "Write", "the", "internal", "JSON", "data", "dictionary", "to", "a", "JSON", "data", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/queryManager.py#L480-L504
[ "def", "fileSave", "(", "self", ",", "filePath", "=", "None", ",", "updatePath", "=", "False", ")", ":", "if", "not", "filePath", ":", "filePath", "=", "self", ".", "filePath", "if", "not", "os", ".", "path", ".", "isfile", "(", "filePath", ")", ":", "print", "(", "\"Data file '%s' does not exist, will create new file.\"", "%", "(", "filePath", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "split", "(", "filePath", ")", "[", "0", "]", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "split", "(", "filePath", ")", "[", "0", "]", ")", "dataJsonString", "=", "json", ".", "dumps", "(", "self", ".", "data", ",", "indent", "=", "4", ",", "sort_keys", "=", "True", ")", "print", "(", "\"Writing to file '%s' ... \"", "%", "(", "filePath", ")", ",", "end", "=", "\"\"", ",", "flush", "=", "True", ")", "with", "open", "(", "filePath", ",", "\"w\"", ")", "as", "fileout", ":", "fileout", ".", "write", "(", "dataJsonString", ")", "print", "(", "\"Wrote file!\"", ")", "if", "updatePath", ":", "self", ".", "filePath", "=", "filePath" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
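Continuing the sketch above, saving back to the stored path or to a hypothetical new one:

dm.data["example/repo"] = {"stars": 42}   # hypothetical record
dm.fileSave()                             # writes to the stored file path
dm.fileSave(filePath="data/backup.json")  # hypothetical alternate path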
test
create_tfs_connection
Creates the TFS Connection Context
scraper/tfs/__init__.py
def create_tfs_connection(url, token): """ Creates the TFS Connection Context """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_credentials = BasicAuthentication('', token) tfs_connection = VssConnection(base_url=url, creds=tfs_credentials) return tfs_connection
def create_tfs_connection(url, token): """ Creates the TFS Connection Context """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_credentials = BasicAuthentication('', token) tfs_connection = VssConnection(base_url=url, creds=tfs_credentials) return tfs_connection
[ "Creates", "the", "TFS", "Connection", "Context" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L21-L30
[ "def", "create_tfs_connection", "(", "url", ",", "token", ")", ":", "if", "token", "is", "None", ":", "token", "=", "os", ".", "environ", ".", "get", "(", "'TFS_API_TOKEN'", ",", "None", ")", "tfs_credentials", "=", "BasicAuthentication", "(", "''", ",", "token", ")", "tfs_connection", "=", "VssConnection", "(", "base_url", "=", "url", ",", "creds", "=", "tfs_credentials", ")", "return", "tfs_connection" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
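A minimal usage sketch; the server URL is hypothetical, and passing token=None falls back to the TFS_API_TOKEN environment variable as shown above:

from scraper.tfs import create_tfs_connection

connection = create_tfs_connection('https://tfs.example.com/tfs', token=None)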
test
create_tfs_project_analysis_client
Create a project_analysis_client.py client for a Team Foundation Server Enterprise connection instance. This is helpful for understanding project languages, but currently blank for all our test conditions. If token is not provided, will attempt to use the TFS_API_TOKEN environment variable if present.
scraper/tfs/__init__.py
def create_tfs_project_analysis_client(url, token=None): """ Create a project_analysis_client.py client for a Team Foundation Server Enterprise connection instance. This is helpful for understanding project languages, but currently blank for all our test conditions. If token is not provided, will attempt to use the TFS_API_TOKEN environment variable if present. """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) project_analysis_client = tfs_connection.get_client('vsts.project_analysis.v4_1.project_analysis_client.ProjectAnalysisClient') if project_analysis_client is None: msg = 'Unable to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return project_analysis_client
def create_tfs_project_analysis_client(url, token=None): """ Create a project_analysis_client.py client for a Team Foundation Server Enterprise connection instance. This is helpful for understanding project languages, but currently blank for all our test conditions. If token is not provided, will attempt to use the TFS_API_TOKEN environment variable if present. """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) project_analysis_client = tfs_connection.get_client('vsts.project_analysis.v4_1.project_analysis_client.ProjectAnalysisClient') if project_analysis_client is None: msg = 'Unable to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return project_analysis_client
[ "Create", "a", "project_analysis_client", ".", "py", "client", "for", "a", "Team", "Foundation", "Server", "Enterprise", "connection", "instance", ".", "This", "is", "helpful", "for", "understanding", "project", "languages", "but", "currently", "blank", "for", "all", "our", "test", "conditions", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L33-L51
[ "def", "create_tfs_project_analysis_client", "(", "url", ",", "token", "=", "None", ")", ":", "if", "token", "is", "None", ":", "token", "=", "os", ".", "environ", ".", "get", "(", "'TFS_API_TOKEN'", ",", "None", ")", "tfs_connection", "=", "create_tfs_connection", "(", "url", ",", "token", ")", "project_analysis_client", "=", "tfs_connection", ".", "get_client", "(", "'vsts.project_analysis.v4_1.project_analysis_client.ProjectAnalysisClient'", ")", "if", "project_analysis_client", "is", "None", ":", "msg", "=", "'Unable to connect to TFS Enterprise (%s) with provided token.'", "raise", "RuntimeError", "(", "msg", ",", "url", ")", "return", "project_analysis_client" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
create_tfs_core_client
Create a core_client.py client for a Team Foundation Server Enterprise connection instance. If token is not provided, will attempt to use the TFS_API_TOKEN environment variable if present.
scraper/tfs/__init__.py
def create_tfs_core_client(url, token=None): """ Create a core_client.py client for a Team Foundation Server Enterprise connection instance. If token is not provided, will attempt to use the TFS_API_TOKEN environment variable if present. """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) tfs_client = tfs_connection.get_client('vsts.core.v4_1.core_client.CoreClient') if tfs_client is None: msg = 'Unable to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return tfs_client
def create_tfs_core_client(url, token=None): """ Create a core_client.py client for a Team Foundation Server Enterprise connection instance. If token is not provided, will attempt to use the TFS_API_TOKEN environment variable if present. """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) tfs_client = tfs_connection.get_client('vsts.core.v4_1.core_client.CoreClient') if tfs_client is None: msg = 'Unable to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return tfs_client
[ "Create", "a", "core_client", ".", "py", "client", "for", "a", "Team", "Foundation", "Server", "Enterprise", "connection", "instance" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L54-L71
[ "def", "create_tfs_core_client", "(", "url", ",", "token", "=", "None", ")", ":", "if", "token", "is", "None", ":", "token", "=", "os", ".", "environ", ".", "get", "(", "'TFS_API_TOKEN'", ",", "None", ")", "tfs_connection", "=", "create_tfs_connection", "(", "url", ",", "token", ")", "tfs_client", "=", "tfs_connection", ".", "get_client", "(", "'vsts.core.v4_1.core_client.CoreClient'", ")", "if", "tfs_client", "is", "None", ":", "msg", "=", "'Unable to connect to TFS Enterprise (%s) with provided token.'", "raise", "RuntimeError", "(", "msg", ",", "url", ")", "return", "tfs_client" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
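A usage sketch with a hypothetical server URL; get_project_collections is the same call that get_all_projects below relies on:

from scraper.tfs import create_tfs_core_client

core_client = create_tfs_core_client('https://tfs.example.com/tfs')  # hypothetical URL
for collection in core_client.get_project_collections(top=10):
    print(collection.name)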
test
create_tfs_git_client
Creates a TFS Git Client to pull Git repo info
scraper/tfs/__init__.py
def create_tfs_git_client(url, token=None): """ Creates a TFS Git Client to pull Git repo info """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) tfs_git_client = tfs_connection.get_client('vsts.git.v4_1.git_client.GitClient') if tfs_git_client is None: msg = 'Unable to create TFS Git Client, failed to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return tfs_git_client
def create_tfs_git_client(url, token=None): """ Creates a TFS Git Client to pull Git repo info """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) tfs_git_client = tfs_connection.get_client('vsts.git.v4_1.git_client.GitClient') if tfs_git_client is None: msg = 'Unable to create TFS Git Client, failed to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return tfs_git_client
[ "Creates", "a", "TFS", "Git", "Client", "to", "pull", "Git", "repo", "info" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L74-L88
[ "def", "create_tfs_git_client", "(", "url", ",", "token", "=", "None", ")", ":", "if", "token", "is", "None", ":", "token", "=", "os", ".", "environ", ".", "get", "(", "'TFS_API_TOKEN'", ",", "None", ")", "tfs_connection", "=", "create_tfs_connection", "(", "url", ",", "token", ")", "tfs_git_client", "=", "tfs_connection", ".", "get_client", "(", "'vsts.git.v4_1.git_client.GitClient'", ")", "if", "tfs_git_client", "is", "None", ":", "msg", "=", "'Unable to create TFS Git Client, failed to connect to TFS Enterprise (%s) with provided token.'", "raise", "RuntimeError", "(", "msg", ",", "url", ")", "return", "tfs_git_client" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
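A usage sketch; the URL is hypothetical, and the get_repositories call on the vsts Git client is an assumption not shown in the source:

from scraper.tfs import create_tfs_git_client

git_client = create_tfs_git_client('https://tfs.example.com/tfs/DefaultCollection')
for repo in git_client.get_repositories():  # assumed vsts GitClient method
    print(repo.name)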
test
create_tfs_tfvc_client
Creates a TFS TFVC Client to pull TFVC repo info
scraper/tfs/__init__.py
def create_tfs_tfvc_client(url, token=None): """ Creates a TFS TFVC Client to pull TFVC repo info """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) tfs_tfvc_client = tfs_connection.get_client('vsts.tfvc.v4_1.tfvc_client.TfvcClient') if tfs_tfvc_client is None: msg = 'Unable to create TFS TFVC Client, failed to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return tfs_tfvc_client
def create_tfs_tfvc_client(url, token=None): """ Creates a TFS TFVC Client to pull TFVC repo info """ if token is None: token = os.environ.get('TFS_API_TOKEN', None) tfs_connection = create_tfs_connection(url, token) tfs_tfvc_client = tfs_connection.get_client('vsts.tfvc.v4_1.tfvc_client.TfvcClient') if tfs_tfvc_client is None: msg = 'Unable to create TFS TFVC Client, failed to connect to TFS Enterprise (%s) with provided token.' raise RuntimeError(msg, url) return tfs_tfvc_client
[ "Creates", "a", "TFS", "TFVC", "Client", "to", "pull", "TFVC", "repo", "info" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L91-L105
[ "def", "create_tfs_tfvc_client", "(", "url", ",", "token", "=", "None", ")", ":", "if", "token", "is", "None", ":", "token", "=", "os", ".", "environ", ".", "get", "(", "'TFS_API_TOKEN'", ",", "None", ")", "tfs_connection", "=", "create_tfs_connection", "(", "url", ",", "token", ")", "tfs_tfvc_client", "=", "tfs_connection", ".", "get_client", "(", "'vsts.tfvc.v4_1.tfvc_client.TfvcClient'", ")", "if", "tfs_tfvc_client", "is", "None", ":", "msg", "=", "'Unable to create TFS Git Client, failed to connect to TFS Enterprise (%s) with provided token.'", "raise", "RuntimeError", "(", "msg", ",", "url", ")", "return", "tfs_tfvc_client" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
test
get_all_projects
Returns a list of all projects with their collection info from the server. Currently limited functionality to only return the first 1000 projects. #TODO refactor to add multiple calls to api to retrieve all projects if more exist beyond top.
scraper/tfs/__init__.py
def get_all_projects(url, token, top=HARD_CODED_TOP):
    """
    Returns a list of all projects with their collection info from the server.
    Currently limited functionality to only return the first 1000 projects.
    #TODO refactor to add multiple calls to api to retrieve all projects if more exist beyond top.
    """
    project_list = []
    tfs_client = create_tfs_core_client(url, token)
    collections = tfs_client.get_project_collections(top=top)
    for collection in collections:
        collection_client = create_tfs_core_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
        logger.debug('Retrieving Projects for Project Collection: {collection_name}'.format(collection_name=collection.name))
        # Retrieves all projects in the project collection
        projects = collection_client.get_projects(top=HARD_CODED_TOP)
        # get_projects only gets the project references, have to call get_project_history_entries to get last update info for projects
        # Only calling this once per collection as it's an expensive API call, will refactor later if there is a better API call to use
        collection_history_list = collection_client.get_project_history_entries()
        for project in projects:
            # get_projects only gets team project ref objects,
            # have to call get_project to get the team project object which includes the TFS Web Url for the project
            logger.debug('Retrieving Team Project for Project: {project_name}'.format(project_name=project.name))
            projectInfo = collection_client.get_project(project.id, True, True)
            tfsProject = TFSProject(projectInfo, collection)
            logger.debug('Retrieving Last Updated and Created Info for Project: {project_name}'.format(project_name=project.name))
            tfsProject.projectLastUpdateInfo = get_project_last_update_time(collection_history_list, project.id)
            tfsProject.projectCreateInfo = get_project_create_time(collection_history_list, project.id)
            project_list.append(tfsProject)
    return project_list

def get_all_projects(url, token, top=HARD_CODED_TOP):
    """
    Returns a list of all projects with their collection info from the server.
    Currently limited functionality to only return the first 1000 projects.
    #TODO refactor to add multiple calls to api to retrieve all projects if more exist beyond top.
    """
    project_list = []
    tfs_client = create_tfs_core_client(url, token)
    collections = tfs_client.get_project_collections(top=top)
    for collection in collections:
        collection_client = create_tfs_core_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
        logger.debug('Retrieving Projects for Project Collection: {collection_name}'.format(collection_name=collection.name))
        # Retrieves all projects in the project collection
        projects = collection_client.get_projects(top=HARD_CODED_TOP)
        # get_projects only gets the project references, have to call get_project_history_entries to get last update info for projects
        # Only calling this once per collection as it's an expensive API call, will refactor later if there is a better API call to use
        collection_history_list = collection_client.get_project_history_entries()
        for project in projects:
            # get_projects only gets team project ref objects,
            # have to call get_project to get the team project object which includes the TFS Web Url for the project
            logger.debug('Retrieving Team Project for Project: {project_name}'.format(project_name=project.name))
            projectInfo = collection_client.get_project(project.id, True, True)
            tfsProject = TFSProject(projectInfo, collection)
            logger.debug('Retrieving Last Updated and Created Info for Project: {project_name}'.format(project_name=project.name))
            tfsProject.projectLastUpdateInfo = get_project_last_update_time(collection_history_list, project.id)
            tfsProject.projectCreateInfo = get_project_create_time(collection_history_list, project.id)
            project_list.append(tfsProject)
    return project_list
[ "Returns", "a", "list", "of", "all", "projects", "with", "their", "collection", "info", "from", "the", "server", ".", "Currently", "limited", "functionality", "to", "only", "return", "the", "first", "1000", "projects", ".", "#TODO", "refactor", "to", "add", "multiple", "calls", "to", "api", "to", "retrieve", "all", "projects", "if", "more", "exist", "beyond", "top", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L108-L141
[ "def", "get_all_projects", "(", "url", ",", "token", ",", "top", "=", "HARD_CODED_TOP", ")", ":", "project_list", "=", "[", "]", "tfs_client", "=", "create_tfs_core_client", "(", "url", ",", "token", ")", "collections", "=", "tfs_client", ".", "get_project_collections", "(", "top", "=", "top", ")", "for", "collection", "in", "collections", ":", "collection_client", "=", "create_tfs_core_client", "(", "'{url}/{collection_name}'", ".", "format", "(", "url", "=", "url", ",", "collection_name", "=", "collection", ".", "name", ")", ",", "token", ")", "logger", ".", "debug", "(", "'Retrieving Projects for Project Collection: {collection_name}'", ".", "format", "(", "collection_name", "=", "collection", ".", "name", ")", ")", "# Retrieves all projects in the project collection", "projects", "=", "collection_client", ".", "get_projects", "(", "top", "=", "HARD_CODED_TOP", ")", "# get_projects only gets the project references, have to call get_project_history_entries to get last update info for projects", "# Only calling this once per collection as its an expensive API call, wil refactor later if there is a better API call to use", "collection_history_list", "=", "collection_client", ".", "get_project_history_entries", "(", ")", "for", "project", "in", "projects", ":", "# get_projects only gets team project ref objects,", "# have to call get_project to get the team project object which includes the TFS Web Url for the project", "logger", ".", "debug", "(", "'Retrieving Team Project for Project: {project_name}'", ".", "format", "(", "project_name", "=", "project", ".", "name", ")", ")", "projectInfo", "=", "collection_client", ".", "get_project", "(", "project", ".", "id", ",", "True", ",", "True", ")", "tfsProject", "=", "TFSProject", "(", "projectInfo", ",", "collection", ")", "logger", ".", "debug", "(", "'Retrieving Last Updated and Created Info for Project: {project_name}'", ".", "format", "(", "project_name", "=", "project", ".", "name", ")", ")", "tfsProject", ".", "projectLastUpdateInfo", "=", "get_project_last_update_time", "(", "collection_history_list", ",", "project", ".", "id", ")", "tfsProject", ".", "projectCreateInfo", "=", "get_project_create_time", "(", "collection_history_list", ",", "project", ".", "id", ")", "project_list", ".", "append", "(", "tfsProject", ")", "return", "project_list" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
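A hedged driver sketch for get_all_projects, with a hypothetical URL; passing token=None falls back to the TFS_API_TOKEN environment variable, and the attribute names below follow the assignments shown in the loop above:

from scraper.tfs import get_all_projects

for tfs_project in get_all_projects('https://tfs.example.com/tfs', token=None):
    # projectCreateInfo / projectLastUpdateInfo are set in get_all_projects itself
    print(tfs_project.projectCreateInfo, tfs_project.projectLastUpdateInfo)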
test
get_git_repos
Returns a list of all git repos for the supplied project within the supplied collection
scraper/tfs/__init__.py
def get_git_repos(url, token, collection, project):
    """
    Returns a list of all git repos for the supplied project within the supplied collection
    """
    git_client = create_tfs_git_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
    logger.debug('Retrieving Git Repos for Project: {project_name}'.format(project_name=project.name))
    return git_client.get_repositories(project.id)

def get_git_repos(url, token, collection, project):
    """
    Returns a list of all git repos for the supplied project within the supplied collection
    """
    git_client = create_tfs_git_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
    logger.debug('Retrieving Git Repos for Project: {project_name}'.format(project_name=project.name))
    return git_client.get_repositories(project.id)
[ "Returns", "a", "list", "of", "all", "git", "repos", "for", "the", "supplied", "project", "within", "the", "supplied", "collection" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L144-L150
[ "def", "get_git_repos", "(", "url", ",", "token", ",", "collection", ",", "project", ")", ":", "git_client", "=", "create_tfs_git_client", "(", "'{url}/{collection_name}'", ".", "format", "(", "url", "=", "url", ",", "collection_name", "=", "collection", ".", "name", ")", ",", "token", ")", "logger", ".", "debug", "(", "'Retrieving Git Repos for Project: {project_name}'", ".", "format", "(", "project_name", "=", "project", ".", "name", ")", ")", "return", "git_client", ".", "get_repositories", "(", "project", ".", "id", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
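get_git_repos expects the collection and project objects produced by the core client, so a hedged end-to-end walk looks like this (URL hypothetical, token taken from TFS_API_TOKEN when None; every call below mirrors one made in the records above):

from scraper.tfs import create_tfs_core_client, get_git_repos

url, token = 'https://tfs.example.com/tfs', None  # hypothetical server
tfs_client = create_tfs_core_client(url, token)
for collection in tfs_client.get_project_collections():
    collection_client = create_tfs_core_client('{0}/{1}'.format(url, collection.name), token)
    for project in collection_client.get_projects():
        for repo in get_git_repos(url, token, collection, project):
            print(repo.name)  # .name is an assumption about the vsts repo model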
test
get_tfvc_repos
Returns a list of all tfvc branches for the supplied project within the supplied collection
scraper/tfs/__init__.py
def get_tfvc_repos(url, token, collection, project):
    """
    Returns a list of all tfvc branches for the supplied project within the supplied collection
    """
    branch_list = []
    tfvc_client = create_tfs_tfvc_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
    logger.debug('Retrieving Tfvc Branches for Project: {project_name}'.format(project_name=project.name))
    branches = tfvc_client.get_branches(project.id, True, True, False, True)
    if branches:
        branch_list.extend(branches)
    else:
        logger.debug('No Tfvc Branches in Project: {project_name}'.format(project_name=project.name))
    return branch_list

def get_tfvc_repos(url, token, collection, project):
    """
    Returns a list of all tfvc branches for the supplied project within the supplied collection
    """
    branch_list = []
    tfvc_client = create_tfs_tfvc_client('{url}/{collection_name}'.format(url=url, collection_name=collection.name), token)
    logger.debug('Retrieving Tfvc Branches for Project: {project_name}'.format(project_name=project.name))
    branches = tfvc_client.get_branches(project.id, True, True, False, True)
    if branches:
        branch_list.extend(branches)
    else:
        logger.debug('No Tfvc Branches in Project: {project_name}'.format(project_name=project.name))
    return branch_list
[ "Returns", "a", "list", "of", "all", "tfvc", "branches", "for", "the", "supplied", "project", "within", "the", "supplied", "collection" ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/tfs/__init__.py#L153-L167
[ "def", "get_tfvc_repos", "(", "url", ",", "token", ",", "collection", ",", "project", ")", ":", "branch_list", "=", "[", "]", "tfvc_client", "=", "create_tfs_tfvc_client", "(", "'{url}/{collection_name}'", ".", "format", "(", "url", "=", "url", ",", "collection_name", "=", "collection", ".", "name", ")", ",", "token", ")", "logger", ".", "debug", "(", "'Retrieving Tfvc Branches for Project: {project_name}'", ".", "format", "(", "project_name", "=", "project", ".", "name", ")", ")", "branches", "=", "tfvc_client", ".", "get_branches", "(", "project", ".", "id", ",", "True", ",", "True", ",", "False", ",", "True", ")", "if", "branches", ":", "branch_list", ".", "extend", "(", "branches", ")", "else", ":", "logger", ".", "debug", "(", "'No Tfvcc Branches in Project: {project_name}'", ".", "format", "(", "project_name", "=", "project", ".", "name", ")", ")", "return", "branch_list" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
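The TFVC counterpart follows the same pattern; branch objects come from the vsts TfvcClient, so the .path attribute here is an assumption about that model:

from scraper.tfs import get_tfvc_repos

for branch in get_tfvc_repos(url, token, collection, project):
    print(branch.path)  # assumed TfvcBranch attribute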
test
GitHub_LLNL_Year_Commits.get_year_commits
Does setup such as login, printing API info, and waiting for GitHub to build the commit statistics. Then gets the last year of commits and prints them to file.
scripts/get_year_commits.py
def get_year_commits(self, username='', password='', organization='llnl', force=True):
    """
    Does setup such as login, printing API info, and waiting for GitHub
    to build the commit statistics. Then gets the last year of commits
    and prints them to file.
    """
    date = str(datetime.date.today())
    file_path = ('year_commits.csv')
    if force or not os.path.isfile(file_path):
        my_github.login(username, password)
        calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
        print 'Rate Limit: ' + str(calls_beginning)
        my_github.get_org(organization)
        my_github.repos(building_stats=True)
        print "Letting GitHub build statistics."
        time.sleep(30)
        print "Trying again."
        my_github.repos(building_stats=False)
        my_github.calc_total_commits(starting_commits=35163)
        my_github.write_to_file()
        calls_remaining = self.logged_in_gh.ratelimit_remaining
        calls_used = calls_beginning - calls_remaining
        print ('Rate Limit Remaining: ' + str(calls_remaining) +
               '\nUsed ' + str(calls_used) + ' API calls.')

def get_year_commits(self, username='', password='', organization='llnl', force=True):
    """
    Does setup such as login, printing API info, and waiting for GitHub
    to build the commit statistics. Then gets the last year of commits
    and prints them to file.
    """
    date = str(datetime.date.today())
    file_path = ('year_commits.csv')
    if force or not os.path.isfile(file_path):
        my_github.login(username, password)
        calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
        print 'Rate Limit: ' + str(calls_beginning)
        my_github.get_org(organization)
        my_github.repos(building_stats=True)
        print "Letting GitHub build statistics."
        time.sleep(30)
        print "Trying again."
        my_github.repos(building_stats=False)
        my_github.calc_total_commits(starting_commits=35163)
        my_github.write_to_file()
        calls_remaining = self.logged_in_gh.ratelimit_remaining
        calls_used = calls_beginning - calls_remaining
        print ('Rate Limit Remaining: ' + str(calls_remaining) +
               '\nUsed ' + str(calls_used) + ' API calls.')
[ "Does", "setup", "such", "as", "login", "printing", "API", "info", "and", "waiting", "for", "GitHub", "to", "build", "the", "commit", "statistics", ".", "Then", "gets", "the", "last", "year", "of", "commits", "and", "prints", "them", "to", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_year_commits.py#L11-L34
[ "def", "get_year_commits", "(", "self", ",", "username", "=", "''", ",", "password", "=", "''", ",", "organization", "=", "'llnl'", ",", "force", "=", "True", ")", ":", "date", "=", "str", "(", "datetime", ".", "date", ".", "today", "(", ")", ")", "file_path", "=", "(", "'year_commits.csv'", ")", "if", "force", "or", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "my_github", ".", "login", "(", "username", ",", "password", ")", "calls_beginning", "=", "self", ".", "logged_in_gh", ".", "ratelimit_remaining", "+", "1", "print", "'Rate Limit: '", "+", "str", "(", "calls_beginning", ")", "my_github", ".", "get_org", "(", "organization", ")", "my_github", ".", "repos", "(", "building_stats", "=", "True", ")", "print", "\"Letting GitHub build statistics.\"", "time", ".", "sleep", "(", "30", ")", "print", "\"Trying again.\"", "my_github", ".", "repos", "(", "building_stats", "=", "False", ")", "my_github", ".", "calc_total_commits", "(", "starting_commits", "=", "35163", ")", "my_github", ".", "write_to_file", "(", ")", "calls_remaining", "=", "self", ".", "logged_in_gh", ".", "ratelimit_remaining", "calls_used", "=", "calls_beginning", "-", "calls_remaining", "print", "(", "'Rate Limit Remaining: '", "+", "str", "(", "calls_remaining", ")", "+", "'\\nUsed '", "+", "str", "(", "calls_used", ")", "+", "' API calls.'", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
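A hedged, Python 2-era driver for the class above (the method uses print statements and a module-level my_github, matching the code); the credentials are placeholders and the constructor signature is an assumption:

my_github = GitHub_LLNL_Year_Commits()  # constructor args, if any, are an assumption
my_github.get_year_commits(username='someuser', password='somepass',
                           organization='llnl', force=True)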
test
GitHub_LLNL_Year_Commits.repos
Retrieves the last year of commits for the organization and stores them in weeks (UNIX time) associated with number of commits that week.
scripts/get_year_commits.py
def repos(self, building_stats=False):
    """
    Retrieves the last year of commits for the organization and stores
    them in weeks (UNIX time) associated with number of commits that week.
    """
    print 'Getting repos.'
    for repo in self.org_retrieved.iter_repos():
        for activity in repo.iter_commit_activity():
            if not building_stats:
                self.commits_dict_list.append(activity)

def repos(self, building_stats=False):
    """
    Retrieves the last year of commits for the organization and stores
    them in weeks (UNIX time) associated with number of commits that week.
    """
    print 'Getting repos.'
    for repo in self.org_retrieved.iter_repos():
        for activity in repo.iter_commit_activity():
            if not building_stats:
                self.commits_dict_list.append(activity)
[ "Retrieves", "the", "last", "year", "of", "commits", "for", "the", "organization", "and", "stores", "them", "in", "weeks", "(", "UNIX", "time", ")", "associated", "with", "number", "of", "commits", "that", "week", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_year_commits.py#L95-L104
[ "def", "repos", "(", "self", ",", "building_stats", "=", "False", ")", ":", "print", "'Getting repos.'", "for", "repo", "in", "self", ".", "org_retrieved", ".", "iter_repos", "(", ")", ":", "for", "activity", "in", "repo", ".", "iter_commit_activity", "(", ")", ":", "if", "not", "building_stats", ":", "self", ".", "commits_dict_list", ".", "append", "(", "activity", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
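For reference, each activity entry appended here is a GitHub weekly commit-activity dict; the values below are made up, but the keys match the GitHub statistics API:

# {'week': 1498348800,              # UNIX timestamp of the week's start
#  'total': 12,                     # commits across the whole week
#  'days': [0, 3, 2, 4, 1, 2, 0]}   # per-day counts, Sunday first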
test
GitHub_LLNL_Year_Commits.calc_total_commits
Uses the weekly commits and traverses back through the last year, each week subtracting the weekly commits and storing them. It needs an initial starting commits number, which should be taken from the most up to date number from github_stats.py output.
scripts/get_year_commits.py
def calc_total_commits(self, starting_commits=0):
    """
    Uses the weekly commits and traverses back through the last year,
    each week subtracting the weekly commits and storing them. It needs
    an initial starting commits number, which should be taken from the
    most up to date number from github_stats.py output.
    """
    for week_of_commits in self.commits_dict_list:
        try:
            self.commits[week_of_commits['week']] -= week_of_commits['total']
        except KeyError:
            total = self.commits[week_of_commits['week']] \
                = -week_of_commits['total']
    self.sorted_weeks = sorted(self.commits)
    #reverse because lower numbered weeks are older in time.
    #we traverse from most recent to oldest
    for week in reversed(self.sorted_weeks):
        self.commits[week] = self.commits[week] + starting_commits
        starting_commits = self.commits[week]

def calc_total_commits(self, starting_commits=0):
    """
    Uses the weekly commits and traverses back through the last year,
    each week subtracting the weekly commits and storing them. It needs
    an initial starting commits number, which should be taken from the
    most up to date number from github_stats.py output.
    """
    for week_of_commits in self.commits_dict_list:
        try:
            self.commits[week_of_commits['week']] -= week_of_commits['total']
        except KeyError:
            total = self.commits[week_of_commits['week']] \
                = -week_of_commits['total']
    self.sorted_weeks = sorted(self.commits)
    #reverse because lower numbered weeks are older in time.
    #we traverse from most recent to oldest
    for week in reversed(self.sorted_weeks):
        self.commits[week] = self.commits[week] + starting_commits
        starting_commits = self.commits[week]
[ "Uses", "the", "weekly", "commits", "and", "traverses", "back", "through", "the", "last", "year", "each", "week", "subtracting", "the", "weekly", "commits", "and", "storing", "them", ".", "It", "needs", "an", "initial", "starting", "commits", "number", "which", "should", "be", "taken", "from", "the", "most", "up", "to", "date", "number", "from", "github_stats", ".", "py", "output", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_year_commits.py#L106-L125
[ "def", "calc_total_commits", "(", "self", ",", "starting_commits", "=", "0", ")", ":", "for", "week_of_commits", "in", "self", ".", "commits_dict_list", ":", "try", ":", "self", ".", "commits", "[", "week_of_commits", "[", "'week'", "]", "]", "-=", "week_of_commits", "[", "'total'", "]", "except", "KeyError", ":", "total", "=", "self", ".", "commits", "[", "week_of_commits", "[", "'week'", "]", "]", "=", "-", "week_of_commits", "[", "'total'", "]", "self", ".", "sorted_weeks", "=", "sorted", "(", "self", ".", "commits", ")", "#reverse because lower numbered weeks are older in time.", "#we traverse from most recent to oldest", "for", "week", "in", "reversed", "(", "self", ".", "sorted_weeks", ")", ":", "self", ".", "commits", "[", "week", "]", "=", "self", ".", "commits", "[", "week", "]", "+", "starting_commits", "starting_commits", "=", "self", ".", "commits", "[", "week", "]" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
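A worked example of the roll-back arithmetic with made-up numbers: suppose the weekly totals are {w1: 5, w2: 3, w3: 2} with w3 the most recent week, and starting_commits=100. The first loop stores the negatives {w1: -5, w2: -3, w3: -2}; the reversed traversal then yields commits[w3] = -2 + 100 = 98, commits[w2] = -3 + 98 = 95, and commits[w1] = -5 + 95 = 90. In other words, each week ends up mapped to the cumulative commit total as of the start of that week.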
test
GitHub_LLNL_Year_Commits.write_to_file
Writes the weeks with associated commits to file.
scripts/get_year_commits.py
def write_to_file(self):
    """
    Writes the weeks with associated commits to file.
    """
    with open('../github_stats_output/last_year_commits.csv', 'w+') as output:
        output.write('date,organization,repos,members,teams,' +
                     'unique_contributors,total_contributors,forks,' +
                     'stargazers,pull_requests,open_issues,has_readme,' +
                     'has_license,pull_requests_open,pull_requests_closed,' +
                     'commits\n')
        #no reverse this time to print oldest first
        previous_commits = 0
        for week in self.sorted_weeks:
            if str(self.commits[week]) != previous_commits:#delete dups
                week_formatted = datetime.datetime.utcfromtimestamp(
                    week
                ).strftime('%Y-%m-%d')
                output.write(week_formatted +
                             ',llnl,0,0,0,0,0,0,0,0,0,0,0,0,0,' +
                             str(self.commits[week]) + '\n')
                previous_commits = str(self.commits[week])

def write_to_file(self):
    """
    Writes the weeks with associated commits to file.
    """
    with open('../github_stats_output/last_year_commits.csv', 'w+') as output:
        output.write('date,organization,repos,members,teams,' +
                     'unique_contributors,total_contributors,forks,' +
                     'stargazers,pull_requests,open_issues,has_readme,' +
                     'has_license,pull_requests_open,pull_requests_closed,' +
                     'commits\n')
        #no reverse this time to print oldest first
        previous_commits = 0
        for week in self.sorted_weeks:
            if str(self.commits[week]) != previous_commits:#delete dups
                week_formatted = datetime.datetime.utcfromtimestamp(
                    week
                ).strftime('%Y-%m-%d')
                output.write(week_formatted +
                             ',llnl,0,0,0,0,0,0,0,0,0,0,0,0,0,' +
                             str(self.commits[week]) + '\n')
                previous_commits = str(self.commits[week])
[ "Writes", "the", "weeks", "with", "associated", "commits", "to", "file", "." ]
LLNL/scraper
python
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_year_commits.py#L127-L146
[ "def", "write_to_file", "(", "self", ")", ":", "with", "open", "(", "'../github_stats_output/last_year_commits.csv'", ",", "'w+'", ")", "as", "output", ":", "output", ".", "write", "(", "'date,organization,repos,members,teams,'", "+", "'unique_contributors,total_contributors,forks,'", "+", "'stargazers,pull_requests,open_issues,has_readme,'", "+", "'has_license,pull_requests_open,pull_requests_closed,'", "+", "'commits\\n'", ")", "#no reverse this time to print oldest first", "previous_commits", "=", "0", "for", "week", "in", "self", ".", "sorted_weeks", ":", "if", "str", "(", "self", ".", "commits", "[", "week", "]", ")", "!=", "previous_commits", ":", "#delete dups", "week_formatted", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "week", ")", ".", "strftime", "(", "'%Y-%m-%d'", ")", "output", ".", "write", "(", "week_formatted", "+", "',llnl,0,0,0,0,0,0,0,0,0,0,0,0,0,'", "+", "str", "(", "self", ".", "commits", "[", "week", "]", ")", "+", "'\\n'", ")", "previous_commits", "=", "str", "(", "self", ".", "commits", "[", "week", "]", ")" ]
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
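The writer above produces rows shaped like the following (dates and counts made up; the 13 zero columns pad the unused fields so that commits lands in the final column):

date,organization,repos,members,teams,unique_contributors,total_contributors,forks,stargazers,pull_requests,open_issues,has_readme,has_license,pull_requests_open,pull_requests_closed,commits
2017-06-18,llnl,0,0,0,0,0,0,0,0,0,0,0,0,0,34890
2017-06-25,llnl,0,0,0,0,0,0,0,0,0,0,0,0,0,35163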
test
configure
Instantiates and configures backends. :arg list-of-dicts backends: the backend configuration as a list of dicts where each dict specifies a separate backend. Each backend dict consists of two things: 1. ``class`` with a value that is either a Python class or a dotted Python path to one 2. ``options`` dict with options for the backend in question to configure it See the documentation for the backends you're using to know what is configurable in the options dict. :arg raise_errors bool: whether or not to raise an exception if something happens in configuration; if it doesn't raise an exception, it'll log the exception For example, this sets up a :py:class:`markus.backends.logging.LoggingMetrics` backend:: markus.configure([ { 'class': 'markus.backends.logging.LoggingMetrics', 'options': { 'logger_name': 'metrics' } } ]) You can set up as many backends as you like. .. Note:: During application startup, Markus should get configured before the app starts generating metrics. Any metrics generated before Markus is configured will get dropped. However, anything can call :py:func:`markus.get_metrics` and get a :py:class:`markus.main.MetricsInterface` before Markus has been configured including at module load time.
markus/main.py
def configure(backends, raise_errors=False):
    """Instantiates and configures backends.

    :arg list-of-dicts backends: the backend configuration as a list of dicts
        where each dict specifies a separate backend.

        Each backend dict consists of two things:

        1. ``class`` with a value that is either a Python class or a dotted
           Python path to one

        2. ``options`` dict with options for the backend in question to
           configure it

        See the documentation for the backends you're using to know what is
        configurable in the options dict.

    :arg raise_errors bool: whether or not to raise an exception if something
        happens in configuration; if it doesn't raise an exception, it'll log
        the exception

    For example, this sets up a
    :py:class:`markus.backends.logging.LoggingMetrics` backend::

        markus.configure([
            {
                'class': 'markus.backends.logging.LoggingMetrics',
                'options': {
                    'logger_name': 'metrics'
                }
            }
        ])

    You can set up as many backends as you like.

    .. Note::

       During application startup, Markus should get configured before the app
       starts generating metrics. Any metrics generated before Markus is
       configured will get dropped.

       However, anything can call :py:func:`markus.get_metrics` and get a
       :py:class:`markus.main.MetricsInterface` before Markus has been
       configured including at module load time.

    """
    good_backends = []

    for backend in backends:
        clspath = backend['class']
        options = backend.get('options', {})

        if isinstance(clspath, str):
            modpath, clsname = split_clspath(clspath)
            try:
                __import__(modpath)
                module = sys.modules[modpath]
                cls = getattr(module, clsname)
            except Exception:
                logger.exception('Exception while importing %s', clspath)
                if raise_errors:
                    raise
                continue
        else:
            cls = clspath

        try:
            good_backends.append(cls(options))
        except Exception:
            logger.exception(
                'Exception thrown while instantiating %s, %s',
                clspath, options
            )
            if raise_errors:
                raise

    _change_metrics(good_backends)

def configure(backends, raise_errors=False):
    """Instantiates and configures backends.

    :arg list-of-dicts backends: the backend configuration as a list of dicts
        where each dict specifies a separate backend.

        Each backend dict consists of two things:

        1. ``class`` with a value that is either a Python class or a dotted
           Python path to one

        2. ``options`` dict with options for the backend in question to
           configure it

        See the documentation for the backends you're using to know what is
        configurable in the options dict.

    :arg raise_errors bool: whether or not to raise an exception if something
        happens in configuration; if it doesn't raise an exception, it'll log
        the exception

    For example, this sets up a
    :py:class:`markus.backends.logging.LoggingMetrics` backend::

        markus.configure([
            {
                'class': 'markus.backends.logging.LoggingMetrics',
                'options': {
                    'logger_name': 'metrics'
                }
            }
        ])

    You can set up as many backends as you like.

    .. Note::

       During application startup, Markus should get configured before the app
       starts generating metrics. Any metrics generated before Markus is
       configured will get dropped.

       However, anything can call :py:func:`markus.get_metrics` and get a
       :py:class:`markus.main.MetricsInterface` before Markus has been
       configured including at module load time.

    """
    good_backends = []

    for backend in backends:
        clspath = backend['class']
        options = backend.get('options', {})

        if isinstance(clspath, str):
            modpath, clsname = split_clspath(clspath)
            try:
                __import__(modpath)
                module = sys.modules[modpath]
                cls = getattr(module, clsname)
            except Exception:
                logger.exception('Exception while importing %s', clspath)
                if raise_errors:
                    raise
                continue
        else:
            cls = clspath

        try:
            good_backends.append(cls(options))
        except Exception:
            logger.exception(
                'Exception thrown while instantiating %s, %s',
                clspath, options
            )
            if raise_errors:
                raise

    _change_metrics(good_backends)
[ "Instantiate", "and", "configures", "backends", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/main.py#L51-L129
[ "def", "configure", "(", "backends", ",", "raise_errors", "=", "False", ")", ":", "good_backends", "=", "[", "]", "for", "backend", "in", "backends", ":", "clspath", "=", "backend", "[", "'class'", "]", "options", "=", "backend", ".", "get", "(", "'options'", ",", "{", "}", ")", "if", "isinstance", "(", "clspath", ",", "str", ")", ":", "modpath", ",", "clsname", "=", "split_clspath", "(", "clspath", ")", "try", ":", "__import__", "(", "modpath", ")", "module", "=", "sys", ".", "modules", "[", "modpath", "]", "cls", "=", "getattr", "(", "module", ",", "clsname", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "'Exception while importing %s'", ",", "clspath", ")", "if", "raise_errors", ":", "raise", "continue", "else", ":", "cls", "=", "clspath", "try", ":", "good_backends", ".", "append", "(", "cls", "(", "options", ")", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "'Exception thrown while instantiating %s, %s'", ",", "clspath", ",", "options", ")", "if", "raise_errors", ":", "raise", "_change_metrics", "(", "good_backends", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
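Pulling the docstring examples together, a typical startup sequence looks like this (taken directly from the examples shown in these records, nothing assumed):

import markus

markus.configure([
    {
        'class': 'markus.backends.logging.LoggingMetrics',
        'options': {'logger_name': 'metrics'}
    }
])
metrics = markus.get_metrics('myapp')
metrics.incr('thing1', value=1)  # emits stat 'myapp.thing1'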
test
get_metrics
Return MetricsInterface instance with specified name. The name is used as the prefix for all keys generated with this :py:class:`markus.main.MetricsInterface`. The :py:class:`markus.main.MetricsInterface` is not tied to metrics backends. The list of active backends are globally configured. This allows us to create :py:class:`markus.main.MetricsInterface` classes without having to worry about bootstrapping order of the app. :arg class/instance/str thing: The name to use as a key prefix. If this is a class, it uses the dotted Python path. If this is an instance, it uses the dotted Python path plus ``str(instance)``. :arg str extra: Any extra bits to add to the end of the name. :returns: a ``MetricsInterface`` instance Examples: >>> from markus import get_metrics Create a MetricsInterface with the name "myapp" and generate a count with stat "myapp.thing1" and value 1: >>> metrics = get_metrics('myapp') >>> metrics.incr('thing1', value=1) Create a MetricsInterface with the prefix of the Python module it's being called in: >>> metrics = get_metrics(__name__) Create a MetricsInterface with the prefix as the qualname of the class: >>> class Foo: ... def __init__(self): ... self.metrics = get_metrics(self) Create a prefix of the class path plus some identifying information: >>> class Foo: ... def __init__(self, myname): ... self.metrics = get_metrics(self, extra=myname) ... >>> foo = Foo('jim') Assume that ``Foo`` is defined in the ``myapp`` module. Then this will generate the name ``myapp.Foo.jim``.
markus/main.py
def get_metrics(thing, extra=''):
    """Return MetricsInterface instance with specified name.

    The name is used as the prefix for all keys generated with this
    :py:class:`markus.main.MetricsInterface`.

    The :py:class:`markus.main.MetricsInterface` is not tied to metrics
    backends. The list of active backends are globally configured. This
    allows us to create :py:class:`markus.main.MetricsInterface` classes
    without having to worry about bootstrapping order of the app.

    :arg class/instance/str thing: The name to use as a key prefix.

        If this is a class, it uses the dotted Python path. If this is an
        instance, it uses the dotted Python path plus ``str(instance)``.

    :arg str extra: Any extra bits to add to the end of the name.

    :returns: a ``MetricsInterface`` instance

    Examples:

    >>> from markus import get_metrics

    Create a MetricsInterface with the name "myapp" and generate a count with
    stat "myapp.thing1" and value 1:

    >>> metrics = get_metrics('myapp')
    >>> metrics.incr('thing1', value=1)

    Create a MetricsInterface with the prefix of the Python module it's being
    called in:

    >>> metrics = get_metrics(__name__)

    Create a MetricsInterface with the prefix as the qualname of the class:

    >>> class Foo:
    ...     def __init__(self):
    ...         self.metrics = get_metrics(self)

    Create a prefix of the class path plus some identifying information:

    >>> class Foo:
    ...     def __init__(self, myname):
    ...         self.metrics = get_metrics(self, extra=myname)
    ...
    >>> foo = Foo('jim')

    Assume that ``Foo`` is defined in the ``myapp`` module. Then this will
    generate the name ``myapp.Foo.jim``.

    """
    thing = thing or ''

    if not isinstance(thing, str):
        # If it's not a str, it's either a class or an instance. Handle
        # accordingly.
        if type(thing) == type:
            thing = '%s.%s' % (thing.__module__, thing.__name__)
        else:
            thing = '%s.%s' % (
                thing.__class__.__module__, thing.__class__.__name__
            )

    if extra:
        thing = '%s.%s' % (thing, extra)

    return MetricsInterface(thing)

def get_metrics(thing, extra=''):
    """Return MetricsInterface instance with specified name.

    The name is used as the prefix for all keys generated with this
    :py:class:`markus.main.MetricsInterface`.

    The :py:class:`markus.main.MetricsInterface` is not tied to metrics
    backends. The list of active backends are globally configured. This
    allows us to create :py:class:`markus.main.MetricsInterface` classes
    without having to worry about bootstrapping order of the app.

    :arg class/instance/str thing: The name to use as a key prefix.

        If this is a class, it uses the dotted Python path. If this is an
        instance, it uses the dotted Python path plus ``str(instance)``.

    :arg str extra: Any extra bits to add to the end of the name.

    :returns: a ``MetricsInterface`` instance

    Examples:

    >>> from markus import get_metrics

    Create a MetricsInterface with the name "myapp" and generate a count with
    stat "myapp.thing1" and value 1:

    >>> metrics = get_metrics('myapp')
    >>> metrics.incr('thing1', value=1)

    Create a MetricsInterface with the prefix of the Python module it's being
    called in:

    >>> metrics = get_metrics(__name__)

    Create a MetricsInterface with the prefix as the qualname of the class:

    >>> class Foo:
    ...     def __init__(self):
    ...         self.metrics = get_metrics(self)

    Create a prefix of the class path plus some identifying information:

    >>> class Foo:
    ...     def __init__(self, myname):
    ...         self.metrics = get_metrics(self, extra=myname)
    ...
    >>> foo = Foo('jim')

    Assume that ``Foo`` is defined in the ``myapp`` module. Then this will
    generate the name ``myapp.Foo.jim``.

    """
    thing = thing or ''

    if not isinstance(thing, str):
        # If it's not a str, it's either a class or an instance. Handle
        # accordingly.
        if type(thing) == type:
            thing = '%s.%s' % (thing.__module__, thing.__name__)
        else:
            thing = '%s.%s' % (
                thing.__class__.__module__, thing.__class__.__name__
            )

    if extra:
        thing = '%s.%s' % (thing, extra)

    return MetricsInterface(thing)
[ "Return", "MetricsInterface", "instance", "with", "specified", "name", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/main.py#L396-L464
[ "def", "get_metrics", "(", "thing", ",", "extra", "=", "''", ")", ":", "thing", "=", "thing", "or", "''", "if", "not", "isinstance", "(", "thing", ",", "str", ")", ":", "# If it's not a str, it's either a class or an instance. Handle", "# accordingly.", "if", "type", "(", "thing", ")", "==", "type", ":", "thing", "=", "'%s.%s'", "%", "(", "thing", ".", "__module__", ",", "thing", ".", "__name__", ")", "else", ":", "thing", "=", "'%s.%s'", "%", "(", "thing", ".", "__class__", ".", "__module__", ",", "thing", ".", "__class__", ".", "__name__", ")", "if", "extra", ":", "thing", "=", "'%s.%s'", "%", "(", "thing", ",", "extra", ")", "return", "MetricsInterface", "(", "thing", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
MetricsInterface.timing
Record a timing value. Record the length of time of something to be added to a set of values from which a statistical distribution is derived. Depending on the backend, you might end up with count, average, median, 95% and max for a set of timing values. This is useful for analyzing how long things take to occur. For example, how long it takes for a function to run, to upload files, or for a database query to execute. :arg string stat: A period delimited alphanumeric key. :arg int value: A timing in milliseconds. :arg list-of-strings tags: Each string in the tag consists of a key and a value separated by a colon. Tags can make it easier to break down metrics for analysis. For example ``['env:stage', 'compressed:yes']``. For example: >>> import time >>> import markus >>> metrics = markus.get_metrics('foo') >>> def upload_file(payload): ... start_time = time.perf_counter() # this is in seconds ... # upload the file ... timing = (time.perf_counter() - start_time) * 1000.0 # convert to ms ... metrics.timing('upload_file_time', value=timing) .. Note:: If you're timing a function or a block of code, it's probably more convenient to use :py:meth:`markus.main.MetricsInterface.timer` or :py:meth:`markus.main.MetricsInterface.timer_decorator`.
markus/main.py
def timing(self, stat, value, tags=None):
    """Record a timing value.

    Record the length of time of something to be added to a set of values
    from which a statistical distribution is derived.

    Depending on the backend, you might end up with count, average, median,
    95% and max for a set of timing values.

    This is useful for analyzing how long things take to occur. For example,
    how long it takes for a function to run, to upload files, or for a
    database query to execute.

    :arg string stat: A period delimited alphanumeric key.

    :arg int value: A timing in milliseconds.

    :arg list-of-strings tags: Each string in the tag consists of a key and a
        value separated by a colon. Tags can make it easier to break down
        metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    For example:

    >>> import time
    >>> import markus

    >>> metrics = markus.get_metrics('foo')

    >>> def upload_file(payload):
    ...     start_time = time.perf_counter()  # this is in seconds
    ...     # upload the file
    ...     timing = (time.perf_counter() - start_time) * 1000.0  # convert to ms
    ...     metrics.timing('upload_file_time', value=timing)

    .. Note::

       If you're timing a function or a block of code, it's probably more
       convenient to use :py:meth:`markus.main.MetricsInterface.timer` or
       :py:meth:`markus.main.MetricsInterface.timer_decorator`.

    """
    full_stat = self._full_stat(stat)
    for backend in _get_metrics_backends():
        backend.timing(full_stat, value=value, tags=tags)

def timing(self, stat, value, tags=None):
    """Record a timing value.

    Record the length of time of something to be added to a set of values
    from which a statistical distribution is derived.

    Depending on the backend, you might end up with count, average, median,
    95% and max for a set of timing values.

    This is useful for analyzing how long things take to occur. For example,
    how long it takes for a function to run, to upload files, or for a
    database query to execute.

    :arg string stat: A period delimited alphanumeric key.

    :arg int value: A timing in milliseconds.

    :arg list-of-strings tags: Each string in the tag consists of a key and a
        value separated by a colon. Tags can make it easier to break down
        metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    For example:

    >>> import time
    >>> import markus

    >>> metrics = markus.get_metrics('foo')

    >>> def upload_file(payload):
    ...     start_time = time.perf_counter()  # this is in seconds
    ...     # upload the file
    ...     timing = (time.perf_counter() - start_time) * 1000.0  # convert to ms
    ...     metrics.timing('upload_file_time', value=timing)

    .. Note::

       If you're timing a function or a block of code, it's probably more
       convenient to use :py:meth:`markus.main.MetricsInterface.timer` or
       :py:meth:`markus.main.MetricsInterface.timer_decorator`.

    """
    full_stat = self._full_stat(stat)
    for backend in _get_metrics_backends():
        backend.timing(full_stat, value=value, tags=tags)
[ "Record", "a", "timing", "value", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/main.py#L229-L273
[ "def", "timing", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "full_stat", "=", "self", ".", "_full_stat", "(", "stat", ")", "for", "backend", "in", "_get_metrics_backends", "(", ")", ":", "backend", ".", "timing", "(", "full_stat", ",", "value", "=", "value", ",", "tags", "=", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
MetricsInterface.timer
Contextmanager for easily computing timings. :arg string stat: A period delimited alphanumeric key. :arg list-of-strings tags: Each string in the tag consists of a key and a value separated by a colon. Tags can make it easier to break down metrics for analysis. For example ``['env:stage', 'compressed:yes']``. For example: >>> mymetrics = get_metrics(__name__) >>> def long_function(): ... with mymetrics.timer('long_function'): ... # perform some thing we want to keep metrics on ... pass .. Note:: All timings generated with this are in milliseconds.
markus/main.py
def timer(self, stat, tags=None):
    """Contextmanager for easily computing timings.

    :arg string stat: A period delimited alphanumeric key.

    :arg list-of-strings tags: Each string in the tag consists of a key and a
        value separated by a colon. Tags can make it easier to break down
        metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    For example:

    >>> mymetrics = get_metrics(__name__)

    >>> def long_function():
    ...     with mymetrics.timer('long_function'):
    ...         # perform some thing we want to keep metrics on
    ...         pass

    .. Note::

       All timings generated with this are in milliseconds.

    """
    if six.PY3:
        start_time = time.perf_counter()
    else:
        start_time = time.time()

    yield

    if six.PY3:
        end_time = time.perf_counter()
    else:
        end_time = time.time()

    delta = end_time - start_time
    self.timing(stat, value=delta * 1000.0, tags=tags)

def timer(self, stat, tags=None):
    """Contextmanager for easily computing timings.

    :arg string stat: A period delimited alphanumeric key.

    :arg list-of-strings tags: Each string in the tag consists of a key and a
        value separated by a colon. Tags can make it easier to break down
        metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    For example:

    >>> mymetrics = get_metrics(__name__)

    >>> def long_function():
    ...     with mymetrics.timer('long_function'):
    ...         # perform some thing we want to keep metrics on
    ...         pass

    .. Note::

       All timings generated with this are in milliseconds.

    """
    if six.PY3:
        start_time = time.perf_counter()
    else:
        start_time = time.time()

    yield

    if six.PY3:
        end_time = time.perf_counter()
    else:
        end_time = time.time()

    delta = end_time - start_time
    self.timing(stat, value=delta * 1000.0, tags=tags)
[ "Contextmanager", "for", "easily", "computing", "timings", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/main.py#L320-L359
[ "def", "timer", "(", "self", ",", "stat", ",", "tags", "=", "None", ")", ":", "if", "six", ".", "PY3", ":", "start_time", "=", "time", ".", "perf_counter", "(", ")", "else", ":", "start_time", "=", "time", ".", "time", "(", ")", "yield", "if", "six", ".", "PY3", ":", "end_time", "=", "time", ".", "perf_counter", "(", ")", "else", ":", "end_time", "=", "time", ".", "time", "(", ")", "delta", "=", "end_time", "-", "start_time", "self", ".", "timing", "(", "stat", ",", "value", "=", "delta", "*", "1000.0", ",", "tags", "=", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
MetricsInterface.timer_decorator
Timer decorator for easily computing timings. :arg string stat: A period delimited alphanumeric key. :arg list-of-strings tags: Each string in the tag consists of a key and a value separated by a colon. Tags can make it easier to break down metrics for analysis. For example ``['env:stage', 'compressed:yes']``. For example: >>> mymetrics = get_metrics(__name__) >>> @mymetrics.timer_decorator('long_function') ... def long_function(): ... # perform some thing we want to keep metrics on ... pass .. Note:: All timings generated with this are in milliseconds.
markus/main.py
def timer_decorator(self, stat, tags=None):
    """Timer decorator for easily computing timings.

    :arg string stat: A period delimited alphanumeric key.

    :arg list-of-strings tags: Each string in the tag consists of a key and a
        value separated by a colon. Tags can make it easier to break down
        metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    For example:

    >>> mymetrics = get_metrics(__name__)

    >>> @mymetrics.timer_decorator('long_function')
    ... def long_function():
    ...     # perform some thing we want to keep metrics on
    ...     pass

    .. Note::

       All timings generated with this are in milliseconds.

    """
    def _inner(fun):
        @wraps(fun)
        def _timer_decorator(*args, **kwargs):
            with self.timer(stat, tags):
                return fun(*args, **kwargs)
        return _timer_decorator
    return _inner

def timer_decorator(self, stat, tags=None):
    """Timer decorator for easily computing timings.

    :arg string stat: A period delimited alphanumeric key.

    :arg list-of-strings tags: Each string in the tag consists of a key and a
        value separated by a colon. Tags can make it easier to break down
        metrics for analysis.

        For example ``['env:stage', 'compressed:yes']``.

    For example:

    >>> mymetrics = get_metrics(__name__)

    >>> @mymetrics.timer_decorator('long_function')
    ... def long_function():
    ...     # perform some thing we want to keep metrics on
    ...     pass

    .. Note::

       All timings generated with this are in milliseconds.

    """
    def _inner(fun):
        @wraps(fun)
        def _timer_decorator(*args, **kwargs):
            with self.timer(stat, tags):
                return fun(*args, **kwargs)
        return _timer_decorator
    return _inner
[ "Timer", "decorator", "for", "easily", "computing", "timings", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/main.py#L361-L393
[ "def", "timer_decorator", "(", "self", ",", "stat", ",", "tags", "=", "None", ")", ":", "def", "_inner", "(", "fun", ")", ":", "@", "wraps", "(", "fun", ")", "def", "_timer_decorator", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "timer", "(", "stat", ",", "tags", ")", ":", "return", "fun", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_timer_decorator", "return", "_inner" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
StatsdMetrics.incr
Increment a counter.
markus/backends/statsd.py
def incr(self, stat, value=1, tags=None):
    """Increment a counter."""
    self.client.incr(stat=stat, count=value)

def incr(self, stat, value=1, tags=None):
    """Increment a counter."""
    self.client.incr(stat=stat, count=value)
[ "Increment", "a", "counter", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/statsd.py#L84-L86
[ "def", "incr", "(", "self", ",", "stat", ",", "value", "=", "1", ",", "tags", "=", "None", ")", ":", "self", ".", "client", ".", "incr", "(", "stat", "=", "stat", ",", "count", "=", "value", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
StatsdMetrics.timing
Measure a timing for statistical distribution.
markus/backends/statsd.py
def timing(self, stat, value, tags=None):
    """Measure a timing for statistical distribution."""
    self.client.timing(stat=stat, delta=value)

def timing(self, stat, value, tags=None):
    """Measure a timing for statistical distribution."""
    self.client.timing(stat=stat, delta=value)
[ "Measure", "a", "timing", "for", "statistical", "distribution", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/statsd.py#L92-L94
[ "def", "timing", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "client", ".", "timing", "(", "stat", "=", "stat", ",", "delta", "=", "value", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
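A hedged configuration sketch for the statsd backend; the class path matches the records above, but the option names are recalled from the Markus docs and should be verified there:

import markus

markus.configure([
    {
        'class': 'markus.backends.statsd.StatsdMetrics',
        'options': {
            'statsd_host': 'localhost',  # assumed option name
            'statsd_port': 8125,         # assumed option name
        }
    }
])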
test
DatadogMetrics.incr
Increment a counter.
markus/backends/datadog.py
def incr(self, stat, value=1, tags=None):
    """Increment a counter."""
    self.client.increment(metric=stat, value=value, tags=tags)

def incr(self, stat, value=1, tags=None):
    """Increment a counter."""
    self.client.increment(metric=stat, value=value, tags=tags)
[ "Increment", "a", "counter", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/datadog.py#L72-L74
[ "def", "incr", "(", "self", ",", "stat", ",", "value", "=", "1", ",", "tags", "=", "None", ")", ":", "self", ".", "client", ".", "increment", "(", "metric", "=", "stat", ",", "value", "=", "value", ",", "tags", "=", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
DatadogMetrics.gauge
Set a gauge.
markus/backends/datadog.py
def gauge(self, stat, value, tags=None):
    """Set a gauge."""
    self.client.gauge(metric=stat, value=value, tags=tags)

def gauge(self, stat, value, tags=None):
    """Set a gauge."""
    self.client.gauge(metric=stat, value=value, tags=tags)
[ "Set", "a", "gauge", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/datadog.py#L76-L78
[ "def", "gauge", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "client", ".", "gauge", "(", "metric", "=", "stat", ",", "value", "=", "value", ",", "tags", "=", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
DatadogMetrics.timing
Measure a timing for statistical distribution.
markus/backends/datadog.py
def timing(self, stat, value, tags=None):
    """Measure a timing for statistical distribution."""
    self.client.timing(metric=stat, value=value, tags=tags)

def timing(self, stat, value, tags=None):
    """Measure a timing for statistical distribution."""
    self.client.timing(metric=stat, value=value, tags=tags)
[ "Measure", "a", "timing", "for", "statistical", "distribution", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/datadog.py#L80-L82
[ "def", "timing", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "client", ".", "timing", "(", "metric", "=", "stat", ",", "value", "=", "value", ",", "tags", "=", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
DatadogMetrics.histogram
Measure a value for statistical distribution.
markus/backends/datadog.py
def histogram(self, stat, value, tags=None):
    """Measure a value for statistical distribution."""
    self.client.histogram(metric=stat, value=value, tags=tags)

def histogram(self, stat, value, tags=None):
    """Measure a value for statistical distribution."""
    self.client.histogram(metric=stat, value=value, tags=tags)
[ "Measure", "a", "value", "for", "statistical", "distribution", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/datadog.py#L84-L86
[ "def", "histogram", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "client", ".", "histogram", "(", "metric", "=", "stat", ",", "value", "=", "value", ",", "tags", "=", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
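The Datadog backend is configured the same way; again the class path comes from the records above, while the option names are assumptions to verify against the Markus docs:

import markus

markus.configure([
    {
        'class': 'markus.backends.datadog.DatadogMetrics',
        'options': {
            'statsd_host': 'localhost',   # assumed option name
            'statsd_port': 8125,          # assumed option name
            'statsd_namespace': 'myapp',  # assumed option name
        }
    }
])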
test
generate_tag
Generate a tag for use with the tag backends. The key and value (if there is one) are sanitized according to the following rules: 1. after the first character, all characters must be alphanumeric, underscore, minus, period, or slash--invalid characters are converted to "_" 2. lowercase If a value is provided, the final tag is `key:value`. The final tag must start with a letter. If it doesn't, an "a" is prepended. The final tag is truncated to 200 characters. If the final tag is "device", "host", or "source", then a "_" will be appended to the end. :arg str key: the key to use :arg str value: the value (if any) :returns: the final tag Examples: >>> generate_tag('yellow') 'yellow' >>> generate_tag('rule', 'is_yellow') 'rule:is_yellow' Example with ``incr``: >>> import markus >>> mymetrics = markus.get_metrics(__name__) >>> mymetrics.incr('somekey', value=1, ... tags=[generate_tag('rule', 'is_yellow')])
markus/utils.py
def generate_tag(key, value=None):
    """Generate a tag for use with the tag backends.

    The key and value (if there is one) are sanitized according to the
    following rules:

    1. after the first character, all characters must be alphanumeric,
       underscore, minus, period, or slash--invalid characters are converted
       to "_"

    2. lowercase

    If a value is provided, the final tag is `key:value`.

    The final tag must start with a letter. If it doesn't, an "a" is
    prepended.

    The final tag is truncated to 200 characters.

    If the final tag is "device", "host", or "source", then a "_" will be
    appended to the end.

    :arg str key: the key to use

    :arg str value: the value (if any)

    :returns: the final tag

    Examples:

    >>> generate_tag('yellow')
    'yellow'
    >>> generate_tag('rule', 'is_yellow')
    'rule:is_yellow'

    Example with ``incr``:

    >>> import markus
    >>> mymetrics = markus.get_metrics(__name__)
    >>> mymetrics.incr('somekey', value=1,
    ...                tags=[generate_tag('rule', 'is_yellow')])

    """
    # Verify the types
    if not isinstance(key, six.string_types):
        raise ValueError('key must be a string type, but got %r instead' % key)
    if not isinstance(value, six.string_types + (NONE_TYPE,)):
        raise ValueError('value must be None or a string type, but got %r instead' % value)

    # Sanitize the key
    key = BAD_TAG_CHAR_REGEXP.sub('_', key).strip()

    # Build the tag
    if value is None or not value.strip():
        tag = key
    else:
        value = BAD_TAG_CHAR_REGEXP.sub('_', value).strip()
        tag = '%s:%s' % (key, value)

    if tag and not tag[0].isalpha():
        tag = 'a' + tag

    # Lowercase and truncate
    tag = tag.lower()[:200]

    # Add _ if it's a reserved word
    if tag in ['device', 'host', 'source']:
        tag = tag + '_'

    return tag

def generate_tag(key, value=None):
    """Generate a tag for use with the tag backends.

    The key and value (if there is one) are sanitized according to the
    following rules:

    1. after the first character, all characters must be alphanumeric,
       underscore, minus, period, or slash--invalid characters are converted
       to "_"

    2. lowercase

    If a value is provided, the final tag is `key:value`.

    The final tag must start with a letter. If it doesn't, an "a" is
    prepended.

    The final tag is truncated to 200 characters.

    If the final tag is "device", "host", or "source", then a "_" will be
    appended to the end.

    :arg str key: the key to use

    :arg str value: the value (if any)

    :returns: the final tag

    Examples:

    >>> generate_tag('yellow')
    'yellow'
    >>> generate_tag('rule', 'is_yellow')
    'rule:is_yellow'

    Example with ``incr``:

    >>> import markus
    >>> mymetrics = markus.get_metrics(__name__)
    >>> mymetrics.incr('somekey', value=1,
    ...                tags=[generate_tag('rule', 'is_yellow')])

    """
    # Verify the types
    if not isinstance(key, six.string_types):
        raise ValueError('key must be a string type, but got %r instead' % key)
    if not isinstance(value, six.string_types + (NONE_TYPE,)):
        raise ValueError('value must be None or a string type, but got %r instead' % value)

    # Sanitize the key
    key = BAD_TAG_CHAR_REGEXP.sub('_', key).strip()

    # Build the tag
    if value is None or not value.strip():
        tag = key
    else:
        value = BAD_TAG_CHAR_REGEXP.sub('_', value).strip()
        tag = '%s:%s' % (key, value)

    if tag and not tag[0].isalpha():
        tag = 'a' + tag

    # Lowercase and truncate
    tag = tag.lower()[:200]

    # Add _ if it's a reserved word
    if tag in ['device', 'host', 'source']:
        tag = tag + '_'

    return tag
[ "Generate", "a", "tag", "for", "use", "with", "the", "tag", "backends", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/utils.py#L16-L84
[ "def", "generate_tag", "(", "key", ",", "value", "=", "None", ")", ":", "# Verify the types", "if", "not", "isinstance", "(", "key", ",", "six", ".", "string_types", ")", ":", "raise", "ValueError", "(", "'key must be a string type, but got %r instead'", "%", "key", ")", "if", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", "+", "(", "NONE_TYPE", ",", ")", ")", ":", "raise", "ValueError", "(", "'value must be None or a string type, but got %r instead'", "%", "value", ")", "# Sanitize the key", "key", "=", "BAD_TAG_CHAR_REGEXP", ".", "sub", "(", "'_'", ",", "key", ")", ".", "strip", "(", ")", "# Build the tag", "if", "value", "is", "None", "or", "not", "value", ".", "strip", "(", ")", ":", "tag", "=", "key", "else", ":", "value", "=", "BAD_TAG_CHAR_REGEXP", ".", "sub", "(", "'_'", ",", "value", ")", ".", "strip", "(", ")", "tag", "=", "'%s:%s'", "%", "(", "key", ",", "value", ")", "if", "tag", "and", "not", "tag", "[", "0", "]", ".", "isalpha", "(", ")", ":", "tag", "=", "'a'", "+", "tag", "# Lowercase and truncate", "tag", "=", "tag", ".", "lower", "(", ")", "[", ":", "200", "]", "# Add _ if it's a reserved word", "if", "tag", "in", "[", "'device'", ",", "'host'", ",", "'source'", "]", ":", "tag", "=", "tag", "+", "'_'", "return", "tag" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
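A quick illustration of the sanitization rules above, as a hedged sketch; the import path is assumed from the file path shown (markus/utils.py), and the expected outputs follow directly from the rules in the docstring:

from markus.utils import generate_tag

generate_tag('Content-Type', 'text/HTML')  # 'content-type:text/html' -- minus and slash survive, case is folded
generate_tag('9lives')                     # 'a9lives' -- an "a" is prepended so the tag starts with a letter
generate_tag('host')                       # 'host_'   -- reserved words get a trailing underscore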
test
LoggingMetrics.incr
Increment a counter.
markus/backends/logging.py
def incr(self, stat, value=1, tags=None):
    """Increment a counter."""
    self._log('incr', stat, value, tags)
[ "Increment", "a", "counter", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L74-L76
[ "def", "incr", "(", "self", ",", "stat", ",", "value", "=", "1", ",", "tags", "=", "None", ")", ":", "self", ".", "_log", "(", "'incr'", ",", "stat", ",", "value", ",", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
LoggingMetrics.gauge
Set a gauge.
markus/backends/logging.py
def gauge(self, stat, value, tags=None):
    """Set a gauge."""
    self._log('gauge', stat, value, tags)
[ "Set", "a", "gauge", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L78-L80
[ "def", "gauge", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "_log", "(", "'gauge'", ",", "stat", ",", "value", ",", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
LoggingMetrics.timing
Report a timing.
markus/backends/logging.py
def timing(self, stat, value, tags=None):
    """Report a timing."""
    self._log('timing', stat, value, tags)
[ "Report", "a", "timing", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L82-L84
[ "def", "timing", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "_log", "(", "'timing'", ",", "stat", ",", "value", ",", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
LoggingMetrics.histogram
Report a histogram.
markus/backends/logging.py
def histogram(self, stat, value, tags=None):
    """Report a histogram."""
    self._log('histogram', stat, value, tags)
[ "Report", "a", "histogram", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L86-L88
[ "def", "histogram", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "_log", "(", "'histogram'", ",", "stat", ",", "value", ",", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
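The four one-line methods above all funnel into LoggingMetrics._log. A hedged sketch of how they are reached in practice, assuming markus's configure(backends=[...]) entry point takes a backend class path as its documentation shows:

import markus

markus.configure(backends=[{'class': 'markus.backends.logging.LoggingMetrics'}])
metrics = markus.get_metrics(__name__)

metrics.incr('requests', value=1)               # -> LoggingMetrics.incr
metrics.gauge('queue_depth', value=12)          # -> LoggingMetrics.gauge
metrics.timing('render_ms', value=34.5)         # -> LoggingMetrics.timing
metrics.histogram('payload_bytes', value=2048)  # -> LoggingMetrics.histogram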
test
LoggingRollupMetrics.rollup
Roll up stats and log them.
markus/backends/logging.py
def rollup(self):
    """Roll up stats and log them."""
    now = time.time()
    if now < self.next_rollup:
        return

    self.next_rollup = now + self.flush_interval

    for key, values in sorted(self.incr_stats.items()):
        self.logger.info(
            '%s INCR %s: count:%d|rate:%d/%d',
            self.leader, key, len(values), sum(values), self.flush_interval
        )
        self.incr_stats[key] = []

    for key, values in sorted(self.gauge_stats.items()):
        if values:
            self.logger.info(
                '%s GAUGE %s: count:%d|current:%s|min:%s|max:%s',
                self.leader, key, len(values), values[-1], min(values), max(values),
            )
        else:
            self.logger.info('%s (gauge) %s: no data', self.leader, key)
        self.gauge_stats[key] = []

    for key, values in sorted(self.histogram_stats.items()):
        if values:
            self.logger.info(
                (
                    '%s HISTOGRAM %s: '
                    'count:%d|min:%.2f|avg:%.2f|median:%.2f|ninety-five:%.2f|max:%.2f'
                ),
                self.leader, key, len(values),
                min(values), statistics.mean(values), statistics.median(values),
                values[int(len(values) * 95 / 100)], max(values)
            )
        else:
            self.logger.info('%s (histogram) %s: no data', self.leader, key)
        self.histogram_stats[key] = []
[ "Roll", "up", "stats", "and", "log", "them", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L156-L210
[ "def", "rollup", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "if", "now", "<", "self", ".", "next_rollup", ":", "return", "self", ".", "next_rollup", "=", "now", "+", "self", ".", "flush_interval", "for", "key", ",", "values", "in", "sorted", "(", "self", ".", "incr_stats", ".", "items", "(", ")", ")", ":", "self", ".", "logger", ".", "info", "(", "'%s INCR %s: count:%d|rate:%d/%d'", ",", "self", ".", "leader", ",", "key", ",", "len", "(", "values", ")", ",", "sum", "(", "values", ")", ",", "self", ".", "flush_interval", ")", "self", ".", "incr_stats", "[", "key", "]", "=", "[", "]", "for", "key", ",", "values", "in", "sorted", "(", "self", ".", "gauge_stats", ".", "items", "(", ")", ")", ":", "if", "values", ":", "self", ".", "logger", ".", "info", "(", "'%s GAUGE %s: count:%d|current:%s|min:%s|max:%s'", ",", "self", ".", "leader", ",", "key", ",", "len", "(", "values", ")", ",", "values", "[", "-", "1", "]", ",", "min", "(", "values", ")", ",", "max", "(", "values", ")", ",", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "'%s (gauge) %s: no data'", ",", "self", ".", "leader", ",", "key", ")", "self", ".", "gauge_stats", "[", "key", "]", "=", "[", "]", "for", "key", ",", "values", "in", "sorted", "(", "self", ".", "histogram_stats", ".", "items", "(", ")", ")", ":", "if", "values", ":", "self", ".", "logger", ".", "info", "(", "(", "'%s HISTOGRAM %s: '", "'count:%d|min:%.2f|avg:%.2f|median:%.2f|ninety-five:%.2f|max:%.2f'", ")", ",", "self", ".", "leader", ",", "key", ",", "len", "(", "values", ")", ",", "min", "(", "values", ")", ",", "statistics", ".", "mean", "(", "values", ")", ",", "statistics", ".", "median", "(", "values", ")", ",", "values", "[", "int", "(", "len", "(", "values", ")", "*", "95", "/", "100", ")", "]", ",", "max", "(", "values", ")", ")", "else", ":", "self", ".", "logger", ".", "info", "(", "'%s (histogram) %s: no data'", ",", "self", ".", "leader", ",", "key", ")", "self", ".", "histogram_stats", "[", "key", "]", "=", "[", "]" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
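One caveat with rollup above: the "ninety-five" figure indexes into values in insertion order, so it is only a true 95th percentile if the samples happen to arrive sorted. A minimal corrected sketch (the helper name is illustrative, not part of the original code), using the same index formula on a sorted copy:

def p95(values):
    """95th percentile of a non-empty list, computed on a sorted copy."""
    ordered = sorted(values)
    return ordered[int(len(ordered) * 95 / 100)]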
test
LoggingRollupMetrics.incr
Increment a counter.
markus/backends/logging.py
def incr(self, stat, value=1, tags=None):
    """Increment a counter."""
    self.rollup()

    # FIXME(willkg): what to do with tags?
    self.incr_stats.setdefault(stat, []).append(value)
[ "Increment", "a", "counter", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L212-L217
[ "def", "incr", "(", "self", ",", "stat", ",", "value", "=", "1", ",", "tags", "=", "None", ")", ":", "self", ".", "rollup", "(", ")", "# FIXME(willkg): what to do with tags?", "self", ".", "incr_stats", ".", "setdefault", "(", "stat", ",", "[", "]", ")", ".", "append", "(", "value", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
LoggingRollupMetrics.gauge
Set a gauge.
markus/backends/logging.py
def gauge(self, stat, value, tags=None):
    """Set a gauge."""
    self.rollup()

    # FIXME(willkg): what to do with tags?
    self.gauge_stats.setdefault(stat, []).append(value)
[ "Set", "a", "gauge", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L219-L224
[ "def", "gauge", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "rollup", "(", ")", "# FIXME(willkg): what to do with tags?", "self", ".", "gauge_stats", ".", "setdefault", "(", "stat", ",", "[", "]", ")", ".", "append", "(", "value", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
LoggingRollupMetrics.timing
Measure a timing for statistical distribution. Note: timing is a special case of histogram.
markus/backends/logging.py
def timing(self, stat, value, tags=None):
    """Measure a timing for statistical distribution.

    Note: timing is a special case of histogram.

    """
    self.histogram(stat, value, tags)
[ "Measure", "a", "timing", "for", "statistical", "distribution", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L226-L232
[ "def", "timing", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "histogram", "(", "stat", ",", "value", ",", "tags", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
LoggingRollupMetrics.histogram
Measure a value for statistical distribution.
markus/backends/logging.py
def histogram(self, stat, value, tags=None):
    """Measure a value for statistical distribution."""
    self.rollup()

    # FIXME(willkg): what to do with tags?
    self.histogram_stats.setdefault(stat, []).append(value)
[ "Measure", "a", "value", "for", "statistical", "distribution", "." ]
willkg/markus
python
https://github.com/willkg/markus/blob/0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33/markus/backends/logging.py#L234-L239
[ "def", "histogram", "(", "self", ",", "stat", ",", "value", ",", "tags", "=", "None", ")", ":", "self", ".", "rollup", "(", ")", "# FIXME(willkg): what to do with tags?", "self", ".", "histogram_stats", ".", "setdefault", "(", "stat", ",", "[", "]", ")", ".", "append", "(", "value", ")" ]
0cfbe67fb7ccfa7488b0120d21ddc0cdc1f8ed33
test
order_enum
Make an annotation value that can be used to sort by an enum field.

``field``
    The name of an EnumChoiceField.
``members``
    An iterable of Enum members in the order to sort by.

Use like:

.. code-block:: python

    desired_order = [MyEnum.bar, MyEnum.baz, MyEnum.foo]
    ChoiceModel.objects\
        .annotate(my_order=order_enum('choice', desired_order))\
        .order_by('my_order')

As Enums are iterable, ``members`` can be the Enum itself if the default
ordering is desired:

.. code-block:: python

    ChoiceModel.objects\
        .annotate(my_order=order_enum('choice', MyEnum))\
        .order_by('my_order')

.. warning:: On Python 2, Enums may not have a consistent order,
    depending upon how they were defined. You can set an explicit
    order using ``__order__`` to fix this. See the ``enum34`` docs
    for more information.

Any enum members not present in the list of members will be sorted to
the end of the results.
enumchoicefield/utils.py
def order_enum(field, members):
    """
    Make an annotation value that can be used to sort by an enum field.

    ``field``
        The name of an EnumChoiceField.
    ``members``
        An iterable of Enum members in the order to sort by.

    Use like:

    .. code-block:: python

        desired_order = [MyEnum.bar, MyEnum.baz, MyEnum.foo]
        ChoiceModel.objects\\
            .annotate(my_order=order_enum('choice', desired_order))\\
            .order_by('my_order')

    As Enums are iterable, ``members`` can be the Enum itself if the
    default ordering is desired:

    .. code-block:: python

        ChoiceModel.objects\\
            .annotate(my_order=order_enum('choice', MyEnum))\\
            .order_by('my_order')

    .. warning:: On Python 2, Enums may not have a consistent order,
        depending upon how they were defined. You can set an explicit
        order using ``__order__`` to fix this. See the ``enum34`` docs
        for more information.

    Any enum members not present in the list of members will be sorted
    to the end of the results.
    """
    members = list(members)
    return Case(
        *(When(**{field: member, 'then': i})
          for i, member in enumerate(members)),
        default=len(members),
        output_field=IntegerField())
[ "Make", "an", "annotation", "value", "that", "can", "be", "used", "to", "sort", "by", "an", "enum", "field", "." ]
timheap/django-enumchoicefield
python
https://github.com/timheap/django-enumchoicefield/blob/59e230f8eed086c87ac6a9243448d2cd9adfc250/enumchoicefield/utils.py#L6-L49
[ "def", "order_enum", "(", "field", ",", "members", ")", ":", "members", "=", "list", "(", "members", ")", "return", "Case", "(", "*", "(", "When", "(", "*", "*", "{", "field", ":", "member", ",", "'then'", ":", "i", "}", ")", "for", "i", ",", "member", "in", "enumerate", "(", "members", ")", ")", ",", "default", "=", "len", "(", "members", ")", ",", "output_field", "=", "IntegerField", "(", ")", ")" ]
59e230f8eed086c87ac6a9243448d2cd9adfc250
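Spelled out, the Case/When expression that order_enum returns for a three-member ordering looks like this; MyEnum and the 'choice' field name are the hypothetical names from the docstring above:

from django.db.models import Case, IntegerField, When

# order_enum('choice', [MyEnum.bar, MyEnum.baz, MyEnum.foo]) builds:
Case(
    When(choice=MyEnum.bar, then=0),
    When(choice=MyEnum.baz, then=1),
    When(choice=MyEnum.foo, then=2),
    default=3,  # members missing from the list sort to the end
    output_field=IntegerField(),
)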
test
EnumChoiceField.from_db_value
Convert a string from the database into an Enum value
enumchoicefield/fields.py
def from_db_value(self, value, expression, connection, context):
    """
    Convert a string from the database into an Enum value
    """
    if value is None:
        return value
    return self.enum[value]
[ "Convert", "a", "string", "from", "the", "database", "into", "an", "Enum", "value" ]
timheap/django-enumchoicefield
python
https://github.com/timheap/django-enumchoicefield/blob/59e230f8eed086c87ac6a9243448d2cd9adfc250/enumchoicefield/fields.py#L43-L49
[ "def", "from_db_value", "(", "self", ",", "value", ",", "expression", ",", "connection", ",", "context", ")", ":", "if", "value", "is", "None", ":", "return", "value", "return", "self", ".", "enum", "[", "value", "]" ]
59e230f8eed086c87ac6a9243448d2cd9adfc250
test
EnumChoiceField.to_python
Convert a string from a form into an Enum value.
enumchoicefield/fields.py
def to_python(self, value):
    """
    Convert a string from a form into an Enum value.
    """
    if value is None:
        return value
    if isinstance(value, self.enum):
        return value
    return self.enum[value]
[ "Convert", "a", "string", "from", "a", "form", "into", "an", "Enum", "value", "." ]
timheap/django-enumchoicefield
python
https://github.com/timheap/django-enumchoicefield/blob/59e230f8eed086c87ac6a9243448d2cd9adfc250/enumchoicefield/fields.py#L51-L59
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "value", "if", "isinstance", "(", "value", ",", "self", ".", "enum", ")", ":", "return", "value", "return", "self", ".", "enum", "[", "value", "]" ]
59e230f8eed086c87ac6a9243448d2cd9adfc250
test
EnumChoiceField.get_prep_value
Convert an Enum value into a string for the database
enumchoicefield/fields.py
def get_prep_value(self, value):
    """
    Convert an Enum value into a string for the database
    """
    if value is None:
        return None
    if isinstance(value, self.enum):
        return value.name
    # Note: the conversion flag is "!r"; the original "{value:r}" is an
    # invalid format spec and raises "Unknown format code 'r'".
    raise ValueError("Unknown value {value!r} of type {cls}".format(
        value=value, cls=type(value)))
[ "Convert", "an", "Enum", "value", "into", "a", "string", "for", "the", "database" ]
timheap/django-enumchoicefield
python
https://github.com/timheap/django-enumchoicefield/blob/59e230f8eed086c87ac6a9243448d2cd9adfc250/enumchoicefield/fields.py#L61-L70
[ "def", "get_prep_value", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "None", "if", "isinstance", "(", "value", ",", "self", ".", "enum", ")", ":", "return", "value", ".", "name", "raise", "ValueError", "(", "\"Unknown value {value!r} of type {cls}\"", ".", "format", "(", "value", "=", "value", ",", "cls", "=", "type", "(", "value", ")", ")", ")" ]
59e230f8eed086c87ac6a9243448d2cd9adfc250
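Taken together, the three methods above round-trip an Enum member through the database by its name. A minimal sketch with a hypothetical enum and model; the top-level import path is an assumption based on the file path shown (enumchoicefield/fields.py):

import enum

from django.db import models
from enumchoicefield import EnumChoiceField

class Fruit(enum.Enum):
    apple = "Apple"
    pear = "Pear"

class Basket(models.Model):
    fruit = EnumChoiceField(Fruit, default=Fruit.apple)

# get_prep_value(Fruit.pear) writes the member *name* 'pear' to the column;
# from_db_value('pear', ...) restores the member via Fruit['pear'].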
test
count_id
0 -> no terms idd
1 -> most term idd are shared in root morphem
2 -> most term idd are shared in flexing morphem
3 -> most term idd are shared root <-> flexing (crossed)

:param w0:
:param w1:
:return:
ieml/distance/order.py
def count_id(w0):
    """
    0 -> no terms idd
    1 -> most term idd are shared in root morphem
    2 -> most term idd are shared in flexing morphem
    3 -> most term idd are shared root <-> flexing (crossed)
    :param w0:
    :param w1:
    :return:
    """
    def f(w1):
        count = [set(w0.root).intersection(w1.root),
                 set(w0.flexing).intersection(w1.flexing),
                 set(w0.root).intersection(w1.flexing) | set(w1.root).intersection(w0.flexing)]

        if any(count):
            return max((1, 2, 3), key=lambda i: len(count[i - 1]))
        else:
            return 0

    return f
[ "0", "-", ">", "no", "terms", "idd", "1", "-", ">", "most", "term", "idd", "are", "shared", "in", "root", "morphem", "2", "-", ">", "most", "term", "idd", "are", "shared", "in", "flexing", "morphem", "3", "-", ">", "most", "term", "idd", "are", "shared", "root", "<", "-", ">", "flexing", "(", "crossed", ")", ":", "param", "w0", ":", ":", "param", "w1", ":", ":", "return", ":" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/distance/order.py#L25-L46
[ "def", "count_id", "(", "w0", ")", ":", "def", "f", "(", "w1", ")", ":", "count", "=", "[", "set", "(", "w0", ".", "root", ")", ".", "intersection", "(", "w1", ".", "root", ")", ",", "set", "(", "w0", ".", "flexing", ")", ".", "intersection", "(", "w1", ".", "flexing", ")", ",", "set", "(", "w0", ".", "root", ")", ".", "intersection", "(", "w1", ".", "flexing", ")", "|", "set", "(", "w1", ".", "root", ")", ".", "intersection", "(", "w0", ".", "flexing", ")", "]", "if", "any", "(", "count", ")", ":", "return", "max", "(", "(", "1", ",", "2", ",", "3", ")", ",", "key", "=", "lambda", "i", ":", "len", "(", "count", "[", "i", "-", "1", "]", ")", ")", "else", ":", "return", "0", "return", "f" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
count_relations
0 -> no terms idd
1 -> most term idd are shared in root morphem
2 -> most term idd are shared in flexing morphem
3 -> most term idd are shared root <-> flexing (crossed)

:param w0:
:param w1:
:return:
ieml/distance/order.py
def count_relations(w0):
    """
    0 -> no terms idd
    1 -> most term idd are shared in root morphem
    2 -> most term idd are shared in flexing morphem
    3 -> most term idd are shared root <-> flexing (crossed)
    :param w0:
    :param w1:
    :return:
    """
    root_w0_relations = set(chain.from_iterable(relations[t.index, :].indices for t in w0.root))
    flexing_w0_relations = set(chain.from_iterable(relations[t.index, :].indices for t in w0.flexing))

    def f(w1):
        root_w1 = set(t.index for t in w1.root)
        flexing_w1 = set(t.index for t in w1.flexing)

        count = [root_w0_relations.intersection(root_w1),
                 flexing_w0_relations.intersection(flexing_w1),
                 root_w0_relations.intersection(flexing_w1) | flexing_w0_relations.intersection(root_w1)]

        if any(count):
            return max((1, 2, 3), key=lambda i: len(count[i - 1]))
        else:
            return 0

    return f
[ "0", "-", ">", "no", "terms", "idd", "1", "-", ">", "most", "term", "idd", "are", "shared", "in", "root", "morphem", "2", "-", ">", "most", "term", "idd", "are", "shared", "in", "flexing", "morphem", "3", "-", ">", "most", "term", "idd", "are", "shared", "root", "<", "-", ">", "flexing", "(", "crossed", ")", ":", "param", "w0", ":", ":", "param", "w1", ":", ":", "return", ":" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/distance/order.py#L51-L77
[ "def", "count_relations", "(", "w0", ")", ":", "root_w0_relations", "=", "set", "(", "chain", ".", "from_iterable", "(", "relations", "[", "t", ".", "index", ",", ":", "]", ".", "indices", "for", "t", "in", "w0", ".", "root", ")", ")", "flexing_w0_relations", "=", "set", "(", "chain", ".", "from_iterable", "(", "relations", "[", "t", ".", "index", ",", ":", "]", ".", "indices", "for", "t", "in", "w0", ".", "flexing", ")", ")", "def", "f", "(", "w1", ")", ":", "root_w1", "=", "set", "(", "t", ".", "index", "for", "t", "in", "w1", ".", "root", ")", "flexing_w1", "=", "set", "(", "t", ".", "index", "for", "t", "in", "w1", ".", "flexing", ")", "count", "=", "[", "root_w0_relations", ".", "intersection", "(", "root_w1", ")", ",", "flexing_w0_relations", ".", "intersection", "(", "flexing_w1", ")", ",", "root_w0_relations", ".", "intersection", "(", "flexing_w1", ")", "|", "flexing_w0_relations", ".", "intersection", "(", "root_w1", ")", "]", "if", "any", "(", "count", ")", ":", "return", "max", "(", "(", "1", ",", "2", ",", "3", ")", ",", "key", "=", "lambda", "i", ":", "len", "(", "count", "[", "i", "-", "1", "]", ")", ")", "else", ":", "return", "0", "return", "f" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
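Both count_id and count_relations are curried: each takes a reference word and returns a scoring function over candidate words. The returned score encodes *where* the sharing occurs rather than how much, so it is natural to bucket candidates by it. A hedged sketch (w0 and candidates are hypothetical Word objects carrying .root and .flexing morphemes):

score = count_id(w0)

buckets = {0: [], 1: [], 2: [], 3: []}
for w1 in candidates:
    # 0: nothing shared, 1: root/root, 2: flexing/flexing, 3: crossed
    buckets[score(w1)].append(w1)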
test
PathParser.t_parse
Parses the input string, and returns a reference to the created AST's root
ieml/grammar/paths/parser/parser.py
def t_parse(self, s):
    """Parses the input string, and returns a reference to the created AST's root"""
    # self.root = None
    # self.path = s
    with self.lock:
        try:
            return self.parser.parse(s, lexer=self.lexer, debug=False)
        except CannotParse as e:
            e.s = s
            raise e
[ "Parses", "the", "input", "string", "and", "returns", "a", "reference", "to", "the", "created", "AST", "s", "root" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/paths/parser/parser.py#L27-L36
[ "def", "t_parse", "(", "self", ",", "s", ")", ":", "# self.root = None", "# self.path = s", "with", "self", ".", "lock", ":", "try", ":", "return", "self", ".", "parser", ".", "parse", "(", "s", ",", "lexer", "=", "self", ".", "lexer", ",", "debug", "=", "False", ")", "except", "CannotParse", "as", "e", ":", "e", ".", "s", "=", "s", "raise", "e" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
PathParser.p_path
path : additive_path
ieml/grammar/paths/parser/parser.py
def p_path(self, p):
    """path : additive_path"""
    if len(p[1].children) == 1:
        p[0] = p[1].children[0]
    else:
        p[0] = p[1]
[ "path", ":", "additive_path" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/paths/parser/parser.py#L46-L51
[ "def", "p_path", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", "[", "1", "]", ".", "children", ")", "==", "1", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", ".", "children", "[", "0", "]", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
PathParser.p_path_sum
path_sum : ctx_path | path_sum PLUS ctx_path
ieml/grammar/paths/parser/parser.py
def p_path_sum(self, p):
    """path_sum : ctx_path
                | path_sum PLUS ctx_path"""
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[3]]
[ "path_sum", ":", "ctx_path", "|", "path_sum", "PLUS", "ctx_path" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/paths/parser/parser.py#L57-L63
[ "def", "p_path_sum", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "]", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "3", "]", "]" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
PathParser.p_ctx_path
ctx_path : ctx_coords
ieml/grammar/paths/parser/parser.py
def p_ctx_path(self, p):
    """ctx_path : ctx_coords"""
    if len(p[1]) == 1:
        p[0] = p[1][0]
    else:
        p[0] = ContextPath(p[1])
[ "ctx_path", ":", "ctx_coords" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/paths/parser/parser.py#L65-L70
[ "def", "p_ctx_path", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", "[", "1", "]", ")", "==", "1", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "[", "0", "]", "else", ":", "p", "[", "0", "]", "=", "ContextPath", "(", "p", "[", "1", "]", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
PathParser.p_ctx_coords
ctx_coords : multiplicative_path | ctx_coords COLON multiplicative_path
ieml/grammar/paths/parser/parser.py
def p_ctx_coords(self, p):
    """ctx_coords : multiplicative_path
                  | ctx_coords COLON multiplicative_path"""
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[3]]
[ "ctx_coords", ":", "multiplicative_path", "|", "ctx_coords", "COLON", "multiplicative_path" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/paths/parser/parser.py#L72-L78
[ "def", "p_ctx_coords", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "]", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "3", "]", "]" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
PathParser.p_product
product : additive_path_p | coordinate | product additive_path_p | product coordinate
ieml/grammar/paths/parser/parser.py
def p_product(self, p):
    """product : additive_path_p
               | coordinate
               | product additive_path_p
               | product coordinate"""
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[2]]
[ "product", ":", "additive_path_p", "|", "coordinate", "|", "product", "additive_path_p", "|", "product", "coordinate" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/paths/parser/parser.py#L84-L92
[ "def", "p_product", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "]", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "2", "]", "]" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
PathParser.p_coordinate
coordinate : COORD_KIND | COORD_KIND COORD_INDEX
ieml/grammar/paths/parser/parser.py
def p_coordinate(self, p):
    """coordinate : COORD_KIND
                  | COORD_KIND COORD_INDEX"""
    if len(p) == 2:
        p[0] = Coordinate(p[1])
    else:
        p[0] = Coordinate(p[1], int(p[2]))
[ "coordinate", ":", "COORD_KIND", "|", "COORD_KIND", "COORD_INDEX" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/paths/parser/parser.py#L98-L105
[ "def", "p_coordinate", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "Coordinate", "(", "p", "[", "1", "]", ")", "else", ":", "p", "[", "0", "]", "=", "Coordinate", "(", "p", "[", "1", "]", ",", "int", "(", "p", "[", "2", "]", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
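Read together, the ply grammar rules above describe this shape (an informal summary using the production names from the docstrings):

path       := additive_path
path_sum   := ctx_path ('+' ctx_path)*
ctx_coords := multiplicative_path (':' multiplicative_path)*
product    := (additive_path_p | coordinate)+
coordinate := COORD_KIND COORD_INDEX?

So a coordinate is a kind token with an optional integer index, ':' chains context levels into a ContextPath, and '+' sums alternative paths.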
test
diff
Display the terms added and removed between two versions

:param version0:
:param version1:
:return:
scripts/version.py
def diff(version0, version1):
    """
    Display the terms added and removed between two versions
    :param version0:
    :param version1:
    :return:
    """
    version0.load()
    version1.load()

    deleted = set(version0.terms) - set(version1.terms)
    added = set(version1.terms) - set(version0.terms)

    print("====\n\tfrom: {0}".format(str(version0)))
    print("\n".join(("-{0} -- {1}".format(str(d), version0.translations['en'][d]) for d in deleted)))

    print("====\n\tto: {0}".format(str(version1)))
    print("\n".join(("+{0} -- {1}".format(str(d), version1.translations['en'][d]) for d in added)))
[ "Display", "the", "terms", "added", "and", "removed", "between", "two", "versions", ":", "param", "version0", ":", ":", "param", "version1", ":", ":", "return", ":" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/version.py#L5-L22
[ "def", "diff", "(", "version0", ",", "version1", ")", ":", "version0", ".", "load", "(", ")", "version1", ".", "load", "(", ")", "deleted", "=", "set", "(", "version0", ".", "terms", ")", "-", "set", "(", "version1", ".", "terms", ")", "added", "=", "set", "(", "version1", ".", "terms", ")", "-", "set", "(", "version0", ".", "terms", ")", "print", "(", "\"====\\n\\tfrom: {0}\"", ".", "format", "(", "str", "(", "version0", ")", ")", ")", "print", "(", "\"\\n\"", ".", "join", "(", "(", "\"-{0} -- {1}\"", ".", "format", "(", "str", "(", "d", ")", ",", "version0", ".", "translations", "[", "'en'", "]", "[", "d", "]", ")", "for", "d", "in", "deleted", ")", ")", ")", "print", "(", "\"====\\n\\tto: {0}\"", ".", "format", "(", "str", "(", "version1", ")", ")", ")", "print", "(", "\"\\n\"", ".", "join", "(", "(", "\"+{0} -- {1}\"", ".", "format", "(", "str", "(", "d", ")", ",", "version1", ".", "translations", "[", "'en'", "]", "[", "d", "]", ")", "for", "d", "in", "added", ")", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
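A hedged usage sketch for diff; the two version objects are placeholders, since the exact DictionaryVersion constructor arguments are not shown here:

v0 = DictionaryVersion(...)  # older dictionary version
v1 = DictionaryVersion(...)  # newer dictionary version
diff(v0, v1)  # prints "-term -- translation" lines, then "+term -- translation" lines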
test
DictionaryVersion.load
Download the dictionary version and cache the retrieved file.

:return: None
ieml/dictionary/version.py
def load(self):
    """
    Download the dictionary version and cache the retrieved file.
    :return: None
    """
    if self.loaded:
        return

    file_name = "%s.json" % str(self)
    file = os.path.join(VERSIONS_FOLDER, file_name)

    if not os.path.isfile(file):
        DICTIONARY_BUCKET_URL = get_configuration().get('VERSIONS', 'versionsurl')
        url = urllib.parse.urljoin(DICTIONARY_BUCKET_URL, file_name)
        logger.log(logging.INFO, "Downloading dictionary %s at %s" % (file_name, url))
        urlretrieve(url, file)

    with open(file, 'r') as fp:
        self.__setstate__(json.load(fp))
[ "Download", "the", "dictionary", "version", "and", "cache", "the", "retrieved", "file", ".", ":", "return", ":", "None" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/dictionary/version.py#L145-L164
[ "def", "load", "(", "self", ")", ":", "if", "self", ".", "loaded", ":", "return", "file_name", "=", "\"%s.json\"", "%", "str", "(", "self", ")", "file", "=", "os", ".", "path", ".", "join", "(", "VERSIONS_FOLDER", ",", "file_name", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "file", ")", ":", "DICTIONARY_BUCKET_URL", "=", "get_configuration", "(", ")", ".", "get", "(", "'VERSIONS'", ",", "'versionsurl'", ")", "url", "=", "urllib", ".", "parse", ".", "urljoin", "(", "DICTIONARY_BUCKET_URL", ",", "file_name", ")", "logger", ".", "log", "(", "logging", ".", "INFO", ",", "\"Downloading dictionary %s at %s\"", "%", "(", "file_name", ",", "url", ")", ")", "urlretrieve", "(", "url", ",", "file", ")", "with", "open", "(", "file", ",", "'r'", ")", "as", "fp", ":", "self", ".", "__setstate__", "(", "json", ".", "load", "(", "fp", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
_rotate_sc_additive
s.-S:.U:.-'l.-S:.O:.-'n.-S:.U:.-',+M:.-'M:.-'n.-S:.U:.-', => n.-S:.U:.-'s.-S:.U:.-'l.-S:.O:.-',+n.-S:.U:.-'M:.-'M:.-',
scripts/dictionary_tools.py
def _rotate_sc_additive(s):
    """s.-S:.U:.-'l.-S:.O:.-'n.-S:.U:.-',+M:.-'M:.-'n.-S:.U:.-',
       => n.-S:.U:.-'s.-S:.U:.-'l.-S:.O:.-',+n.-S:.U:.-'M:.-'M:.-',"""
    if isinstance(s, AdditiveScript):
        return AdditiveScript([_rotate_sc(_s) for _s in s])
    else:
        return _rotate_sc(s)
[ "s", ".", "-", "S", ":", ".", "U", ":", ".", "-", "l", ".", "-", "S", ":", ".", "O", ":", ".", "-", "n", ".", "-", "S", ":", ".", "U", ":", ".", "-", "+", "M", ":", ".", "-", "M", ":", ".", "-", "n", ".", "-", "S", ":", ".", "U", ":", ".", "-", "=", ">", "n", ".", "-", "S", ":", ".", "U", ":", ".", "-", "s", ".", "-", "S", ":", ".", "U", ":", ".", "-", "l", ".", "-", "S", ":", ".", "O", ":", ".", "-", "+", "n", ".", "-", "S", ":", ".", "U", ":", ".", "-", "‘M", ":", ".", "-", "‘M", ":", ".", "-", "‘" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L53-L61
[ "def", "_rotate_sc_additive", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "AdditiveScript", ")", ":", "return", "AdditiveScript", "(", "[", "_rotate_sc", "(", "_s", ")", "for", "_s", "in", "s", "]", ")", "else", ":", "return", "_rotate_sc", "(", "s", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
_promote_and_split
E:F:.O:M:.t.- => E:.-F:.O:M:.-t.-'
E:F:.M:M:.l.- => E:.-F:.M:M:.-l.-'
scripts/dictionary_tools.py
def _promote_and_split(s):
    """E:F:.O:M:.t.- => E:.-F:.O:M:.-t.-'
       E:F:.M:M:.l.- => E:.-F:.M:M:.-l.-'"""
    subst, attr, mode = s
    subst0, subst1, _mode = subst
    assert isinstance(_mode, NullScript)

    return m(m(m(subst0)),
             m(m(subst1), attr),
             m(mode))
[ "E", ":", "F", ":", ".", "O", ":", "M", ":", ".", "t", ".", "-", "=", ">", "E", ":", ".", "-", "F", ":", ".", "O", ":", "M", ":", ".", "-", "t", ".", "-", "‘", "E", ":", "F", ":", ".", "M", ":", "M", ":", ".", "l", ".", "-", "=", ">", "E", ":", ".", "-", "F", ":", ".", "M", ":", "M", ":", ".", "-", "l", ".", "-", "‘" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L78-L87
[ "def", "_promote_and_split", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "subst0", ",", "subst1", ",", "_mode", "=", "subst", "assert", "isinstance", "(", "_mode", ",", "NullScript", ")", "return", "m", "(", "m", "(", "m", "(", "subst0", ")", ")", ",", "m", "(", "m", "(", "subst1", ")", ",", "attr", ")", ",", "m", "(", "mode", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
_transfer_substance
E:O:.-M:O:.-t.o.-' => E:.-O:.M:O:.-t.o.-‘
scripts/dictionary_tools.py
def _transfer_substance(s):
    """E:O:.-M:O:.-t.o.-' => E:.-O:.M:O:.-t.o.-'"""
    subst, attr, mode = s
    attr0, attr1, attr2 = attr
    assert isinstance(attr1, NullScript) and isinstance(attr2, NullScript)

    subst, subst1, subst2 = subst
    assert isinstance(subst1, NullScript) and isinstance(subst2, NullScript)

    subst0, subst1, subst2 = subst
    assert isinstance(subst2, NullScript)

    return m(m(m(subst0)), m(m(subst1), attr0), mode)
[ "E", ":", "O", ":", ".", "-", "M", ":", "O", ":", ".", "-", "t", ".", "o", ".", "-", "=", ">", "E", ":", ".", "-", "O", ":", ".", "M", ":", "O", ":", ".", "-", "t", ".", "o", ".", "-", "‘" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L89-L101
[ "def", "_transfer_substance", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "attr0", ",", "attr1", ",", "attr2", "=", "attr", "assert", "isinstance", "(", "attr1", ",", "NullScript", ")", "and", "isinstance", "(", "attr2", ",", "NullScript", ")", "subst", ",", "subst1", ",", "subst2", "=", "subst", "assert", "isinstance", "(", "subst1", ",", "NullScript", ")", "and", "isinstance", "(", "subst2", ",", "NullScript", ")", "subst0", ",", "subst1", ",", "subst2", "=", "subst", "assert", "isinstance", "(", "subst2", ",", "NullScript", ")", "return", "m", "(", "m", "(", "m", "(", "subst0", ")", ")", ",", "m", "(", "m", "(", "subst1", ")", ",", "attr0", ")", ",", "mode", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
_add_mode_t
O:O:.O:O:.- => O:O:.O:O:.t.-
scripts/dictionary_tools.py
def _add_mode_t(s):
    """O:O:.O:O:.- => O:O:.O:O:.t.-"""
    subst, attr, mode = s
    assert isinstance(mode, NullScript)
    return m(subst, attr, script('t.'))
[ "O", ":", "O", ":", ".", "O", ":", "O", ":", ".", "-", "=", ">", "O", ":", "O", ":", ".", "O", ":", "O", ":", ".", "t", ".", "-" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L106-L112
[ "def", "_add_mode_t", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "assert", "isinstance", "(", "mode", ",", "NullScript", ")", "return", "m", "(", "subst", ",", "attr", ",", "script", "(", "'t.'", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
_insert_f_additive
i.B:.-+u.M:.-O:.-' => i.f.B:.-+u.f.M:.-O:.-
scripts/dictionary_tools.py
def _insert_f_additive(s):
    """i.B:.-+u.M:.-O:.-' => i.f.B:.-+u.f.M:.-O:.-'"""
    subst, attr, mode = s
    assert isinstance(mode, NullScript)
    if isinstance(subst, AdditiveScript):
        subst = AdditiveScript([_insert_attr_f(_s) for _s in subst])
    else:
        subst = _insert_attr_f(subst)

    return m(subst, attr)
[ "i", ".", "B", ":", ".", "-", "+", "u", ".", "M", ":", ".", "-", "O", ":", ".", "-", "=", ">", "i", ".", "f", ".", "B", ":", ".", "-", "+", "u", ".", "f", ".", "M", ":", ".", "-", "O", ":", ".", "-" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L120-L130
[ "def", "_insert_f_additive", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "assert", "isinstance", "(", "mode", ",", "NullScript", ")", "if", "isinstance", "(", "subst", ",", "AdditiveScript", ")", ":", "subst", "=", "AdditiveScript", "(", "[", "_insert_attr_f", "(", "_s", ")", "for", "_s", "in", "subst", "]", ")", "else", ":", "subst", "=", "_insert_attr_f", "(", "subst", ")", "return", "m", "(", "subst", ",", "attr", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
_fix_typo
M:.-O:.-'M:.-wa.e.-'t.x.-s.y.-', => M:.-O:.-'M:.-wa.e.-'t.-x.-s.y.-',
scripts/dictionary_tools.py
def _fix_typo(s):
    """M:.-O:.-'M:.-wa.e.-'t.x.-s.y.-', => M:.-O:.-'M:.-wa.e.-'t.-x.-s.y.-',"""
    subst, attr, mode = s
    return m(subst, attr, script("t.-x.-s.y.-'"))
[ "M", ":", ".", "-", "O", ":", ".", "-", "M", ":", ".", "-", "wa", ".", "e", ".", "-", "t", ".", "x", ".", "-", "s", ".", "y", ".", "-", "=", ">", "M", ":", ".", "-", "O", ":", ".", "-", "M", ":", ".", "-", "wa", ".", "e", ".", "-", "t", ".", "-", "x", ".", "-", "s", ".", "y", ".", "-" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L132-L135
[ "def", "_fix_typo", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "return", "m", "(", "subst", ",", "attr", ",", "script", "(", "\"t.-x.-s.y.-'\"", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
translate_script
Translate the root paradigms given as the keys of the argument, each with the transform function given as its value.

:param to_translate:
:return:
scripts/dictionary_tools.py
def translate_script(to_translate):
    """
    Translate the root paradigms given as the keys of the argument, each
    with the transform function given as its value.
    :param to_translate:
    :return:
    """
    version = DictionaryVersion(latest_dictionary_version())
    version.load()

    to_remove = []
    to_add = {
        'terms': [],
        'roots': [],
        'inhibitions': {},
        'translations': {l: {} for l in LANGUAGES}
    }

    for root, func in to_translate.items():
        root = script(root)
        terms = list(filter(lambda s: s in root, map(script, version.terms)))

        new_root = func(root)
        new_terms = [func(s) for s in terms]

        to_add['terms'].extend(map(str, new_terms))
        to_add['roots'].append(str(new_root))
        to_add['inhibitions'].update({str(new_root): version.inhibitions[root]})
        for l in LANGUAGES:
            to_add['translations'][l].update(
                {str(func(s)): version.translations[l][s] for s in terms})

        to_remove.extend(map(str, terms))

    return create_dictionary_version(version, add=to_add, remove=to_remove)
[ "translate", "the", "root", "paradigms", "in", "key", "in", "argument", "with", "the", "function", "in", "value", ":", "param", "to_translate", ":", ":", "return", ":" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L180-L211
[ "def", "translate_script", "(", "to_translate", ")", ":", "version", "=", "DictionaryVersion", "(", "latest_dictionary_version", "(", ")", ")", "version", ".", "load", "(", ")", "to_remove", "=", "[", "]", "to_add", "=", "{", "'terms'", ":", "[", "]", ",", "'roots'", ":", "[", "]", ",", "'inhibitions'", ":", "{", "}", ",", "'translations'", ":", "{", "l", ":", "{", "}", "for", "l", "in", "LANGUAGES", "}", "}", "for", "root", ",", "func", "in", "to_translate", ".", "items", "(", ")", ":", "root", "=", "script", "(", "root", ")", "terms", "=", "list", "(", "filter", "(", "lambda", "s", ":", "s", "in", "root", ",", "map", "(", "script", ",", "version", ".", "terms", ")", ")", ")", "new_root", "=", "func", "(", "root", ")", "new_terms", "=", "[", "func", "(", "s", ")", "for", "s", "in", "terms", "]", "to_add", "[", "'terms'", "]", ".", "extend", "(", "map", "(", "str", ",", "new_terms", ")", ")", "to_add", "[", "'roots'", "]", ".", "append", "(", "str", "(", "new_root", ")", ")", "to_add", "[", "'inhibitions'", "]", ".", "update", "(", "{", "str", "(", "new_root", ")", ":", "version", ".", "inhibitions", "[", "root", "]", "}", ")", "for", "l", "in", "LANGUAGES", ":", "to_add", "[", "'translations'", "]", "[", "l", "]", ".", "update", "(", "{", "str", "(", "func", "(", "s", ")", ")", ":", "version", ".", "translations", "[", "l", "]", "[", "s", "]", "for", "s", "in", "terms", "}", ")", "to_remove", ".", "extend", "(", "map", "(", "str", ",", "terms", ")", ")", "return", "create_dictionary_version", "(", "version", ",", "add", "=", "to_add", ",", "remove", "=", "to_remove", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
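translate_script expects a mapping from a root paradigm string to the transform to apply to that root and every term it contains. A hedged sketch reusing _add_mode_t from above; the root string comes from that function's own docstring, though whether it is registered as a root paradigm in the loaded dictionary version is an assumption:

new_version = translate_script({
    "O:O:.O:O:.-": _add_mode_t,  # rewrite this root and its terms with the extra 't.' mode
})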
test
translate_mouvements_et_milieux
i.f.B:.-+u.f.M:.-O:.-' -> i.B:.-+u.M:.-O:.-
scripts/dictionary_tools.py
def translate_mouvements_et_milieux(s):
    """i.f.B:.-+u.f.M:.-O:.-' -> i.B:.-+u.M:.-O:.-'"""
    subst, attr, mode = s
    assert isinstance(mode, NullScript)
    if isinstance(subst, AdditiveScript):
        subst = AdditiveScript([_remove_attr_f(_s) for _s in subst])
    else:
        subst = _remove_attr_f(subst)

    return m(subst, attr)
[ "i", ".", "f", ".", "B", ":", ".", "-", "+", "u", ".", "f", ".", "M", ":", ".", "-", "O", ":", ".", "-", "-", ">", "i", ".", "B", ":", ".", "-", "+", "u", ".", "M", ":", ".", "-", "O", ":", ".", "-" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L300-L310
[ "def", "translate_mouvements_et_milieux", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "assert", "isinstance", "(", "mode", ",", "NullScript", ")", "if", "isinstance", "(", "subst", ",", "AdditiveScript", ")", ":", "subst", "=", "AdditiveScript", "(", "[", "_remove_attr_f", "(", "_s", ")", "for", "_s", "in", "subst", "]", ")", "else", ":", "subst", "=", "_remove_attr_f", "(", "subst", ")", "return", "m", "(", "subst", ",", "attr", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
translate_competence_en_curr_data
M:.-O:.-'M:.-wa.e.-'t.-x.-s.y.-', => t.-x.-s.y.-' wa.e.-', M:M:.-',O:.-',_
scripts/dictionary_tools.py
def translate_competence_en_curr_data(s):
    """M:.-O:.-'M:.-wa.e.-'t.-x.-s.y.-', => t.-x.-s.y.-' wa.e.-', M:M:.-',O:.-',_"""
    subst, attr, mode = s
    attr_s, attr_a, attr_m = attr
    assert isinstance(attr_m, NullScript)
    subst_s, subst_a, subst_m = subst
    assert isinstance(subst_m, NullScript)
    first_M = subst_s.children[0].children[0]

    return m(m(mode, m(attr_a)),
             m(m(m(m(first_M, attr_s.children[0].children[0])))),
             m(m(subst_a)))
[ "M", ":", ".", "-", "O", ":", ".", "-", "M", ":", ".", "-", "wa", ".", "e", ".", "-", "t", ".", "-", "x", ".", "-", "s", ".", "y", ".", "-", "=", ">", "t", ".", "-", "x", ".", "-", "s", ".", "y", ".", "-", "wa", ".", "e", ".", "-", "M", ":", "M", ":", ".", "-", "O", ":", ".", "-", "_" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L313-L323
[ "def", "translate_competence_en_curr_data", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "attr_s", ",", "attr_a", ",", "attr_m", "=", "attr", "assert", "isinstance", "(", "attr_m", ",", "NullScript", ")", "subst_s", ",", "subst_a", ",", "subst_m", "=", "subst", "assert", "isinstance", "(", "subst_m", ",", "NullScript", ")", "first_M", "=", "subst_s", ".", "children", "[", "0", "]", ".", "children", "[", "0", "]", "return", "m", "(", "m", "(", "mode", ",", "m", "(", "attr_a", ")", ")", ",", "m", "(", "m", "(", "m", "(", "m", "(", "first_M", ",", "attr_s", ".", "children", "[", "0", "]", ".", "children", "[", "0", "]", ")", ")", ")", ")", ",", "m", "(", "m", "(", "subst_a", ")", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
translate_noetic
M:.O:.-O:.O:.-B:.T:.n.-' => s.M:O:.O:O:.-
scripts/dictionary_tools.py
def translate_noetic(s):
    """M:.O:.-O:.O:.-B:.T:.n.-' => s.M:O:.O:O:.-"""
    subst, attr, mode = s
    return m(script('s.'),
             m(subst.children[0].children[0], subst.children[1].children[0]),
             m(attr.children[0].children[0], attr.children[1].children[0]))
[ "M", ":", ".", "O", ":", ".", "-", "O", ":", ".", "O", ":", ".", "-", "B", ":", ".", "T", ":", ".", "n", ".", "-", "=", ">", "s", ".", "M", ":", "O", ":", ".", "O", ":", "O", ":", ".", "-" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L335-L340
[ "def", "translate_noetic", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "return", "m", "(", "script", "(", "'s.'", ")", ",", "m", "(", "subst", ".", "children", "[", "0", "]", ".", "children", "[", "0", "]", ",", "subst", ".", "children", "[", "1", "]", ".", "children", "[", "0", "]", ")", ",", "m", "(", "attr", ".", "children", "[", "0", "]", ".", "children", "[", "0", "]", ",", "attr", ".", "children", "[", "1", "]", ".", "children", "[", "0", "]", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
translate_tisse_intl_col
O:M:.-O:M:.-we.h.-' => O:M:.-'O:M:.-'s.o.-k.o.-',
scripts/dictionary_tools.py
def translate_tisse_intl_col(s):
    """O:M:.-O:M:.-we.h.-' => O:M:.-'O:M:.-'s.o.-k.o.-',"""
    subst, attr, mode = s
    return m(m(subst), m(attr), script("s.o.-k.o.-'"))
[ "O", ":", "M", ":", ".", "-", "O", ":", "M", ":", ".", "-", "we", ".", "h", ".", "-", "=", ">", "O", ":", "M", ":", ".", "-", "O", ":", "M", ":", ".", "-", "s", ".", "o", ".", "-", "k", ".", "o", ".", "-" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L343-L346
[ "def", "translate_tisse_intl_col", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "return", "m", "(", "m", "(", "subst", ")", ",", "m", "(", "attr", ")", ",", "script", "(", "\"s.o.-k.o.-'\"", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
translate_formes_visuelles
s.u.-'O:M:.-'O:.-',+s.u.-'M:O:.-O:.-'M:.-', => b.-S:.U:.-'O:M:.-'O:.-', + b.-S:.U:.-'M:O:.-O:.-'M:.-',
scripts/dictionary_tools.py
def translate_formes_visuelles(s):
    """s.u.-'O:M:.-'O:.-',+s.u.-'M:O:.-O:.-'M:.-',
       => b.-S:.U:.-'O:M:.-'O:.-', + b.-S:.U:.-'M:O:.-O:.-'M:.-',"""
    def set_bSU_subst(s):
        subst, attr, mode = s
        return m(script("b.-S:.U:.-'"), attr, mode)

    if isinstance(s, AdditiveScript):
        return AdditiveScript([set_bSU_subst(i) for i in s.children])
    else:
        return set_bSU_subst(s)
[ "s", ".", "u", ".", "-", "O", ":", "M", ":", ".", "-", "O", ":", ".", "-", "+", "s", ".", "u", ".", "-", "M", ":", "O", ":", ".", "-", "O", ":", ".", "-", "M", ":", ".", "-", "=", ">", "b", ".", "-", "S", ":", ".", "U", ":", ".", "-", "O", ":", "M", ":", ".", "-", "O", ":", ".", "-", "+", "b", ".", "-", "S", ":", ".", "U", ":", ".", "-", "M", ":", "O", ":", ".", "-", "O", ":", ".", "-", "M", ":", ".", "-" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L349-L359
[ "def", "translate_formes_visuelles", "(", "s", ")", ":", "def", "set_bSU_subst", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "return", "m", "(", "script", "(", "\"b.-S:.U:.-'\"", ")", ",", "attr", ",", "mode", ")", "if", "isinstance", "(", "s", ",", "AdditiveScript", ")", ":", "return", "AdditiveScript", "(", "[", "set_bSU_subst", "(", "i", ")", "for", "i", "in", "s", ".", "children", "]", ")", "else", ":", "return", "set_bSU_subst", "(", "s", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
translate_ecosystem_intl_col
O:.M:.- => s.o.-k.o.-'M:O:.-',
scripts/dictionary_tools.py
def translate_ecosystem_intl_col(s):
    """O:.M:.- => s.o.-k.o.-'M:O:.-',"""
    subst, attr, mode = s
    return m(script("s.o.-k.o.-'"),
             m(m(m(attr.children[0], subst.children[0]))))
def translate_ecosystem_intl_col(s):
    """O:.M:.- => s.o.-k.o.-'M:O:.-',"""
    subst, attr, mode = s
    return m(script("s.o.-k.o.-'"), m(m(m(attr.children[0], subst.children[0]))))
[ "O", ":", ".", "M", ":", ".", "-", "=", ">", "s", ".", "o", ".", "-", "k", ".", "o", ".", "-", "M", ":", "O", ":", ".", "-" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L371-L375
[ "def", "translate_ecosystem_intl_col", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "return", "m", "(", "script", "(", "\"s.o.-k.o.-'\"", ")", ",", "m", "(", "m", "(", "m", "(", "attr", ".", "children", "[", "0", "]", ",", "subst", ".", "children", "[", "0", "]", ")", ")", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
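Note the reversal here: for an input such as O:.M:.- the substance is O:. and the attribute M:., and m(attr.children[0], subst.children[0]) rebuilds their first children in attribute-first order, which yields the M:O:.- of the docstring. A hedged trace, with imports as in the first sketch and the intermediate layers assumed:

old = script("O:.M:.-")                          # the docstring's source expression
subst, attr, mode = old                          # subst = O:., attr = M:.
inner = m(attr.children[0], subst.children[0])   # swapped order builds M:O:.
new = m(script("s.o.-k.o.-'"), m(m(inner)))      # lift to the docstring's layer
print(str(new))                                  # expected: s.o.-k.o.-'M:O:.-',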
test
translate_ecosystem_intl_col_tern
O:.M:.-M:.-' => s.o.-k.o.-'M:O:.-',M:.-',_
scripts/dictionary_tools.py
def translate_ecosystem_intl_col_tern(s):
    """O:.M:.-M:.-' => s.o.-k.o.-'M:O:.-',M:.-',_"""
    subst, attr, mode = s
    return m(translate_ecosystem_intl_col(subst), m(m(attr)))
def translate_ecosystem_intl_col_tern(s):
    """O:.M:.-M:.-' => s.o.-k.o.-'M:O:.-',M:.-',_"""
    subst, attr, mode = s
    return m(translate_ecosystem_intl_col(subst), m(m(attr)))
[ "O", ":", ".", "M", ":", ".", "-", "M", ":", ".", "-", "=", ">", "s", ".", "o", ".", "-", "k", ".", "o", ".", "-", "‘M", ":", "O", ":", ".", "-", "‘", "M", ":", ".", "-", "_" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/scripts/dictionary_tools.py#L378-L382
[ "def", "translate_ecosystem_intl_col_tern", "(", "s", ")", ":", "subst", ",", "attr", ",", "mode", "=", "s", "return", "m", "(", "translate_ecosystem_intl_col", "(", "subst", ")", ",", "m", "(", "m", "(", "attr", ")", ")", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
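The ternary variant simply delegates: the substance goes through translate_ecosystem_intl_col above and the attribute is lifted two layers with m(m(attr)). A hedged trace using the docstring's expression (imports as in the first sketch):

old = script("O:.M:.-M:.-'")
subst, attr, mode = old
new = m(translate_ecosystem_intl_col(subst), m(m(attr)))
print(str(new))   # expected per the docstring: s.o.-k.o.-'M:O:.-',M:.-',_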
test
IEMLParser.parse
Parses the input string, and returns a reference to the created AST's root
ieml/grammar/parser/parser.py
def parse(self, s):
    """Parses the input string, and returns a reference to the created AST's root"""
    with self.lock:
        try:
            return self.parser.parse(s, lexer=self.lexer)
        except InvalidIEMLObjectArgument as e:
            raise CannotParse(s, str(e))
        except CannotParse as e:
            e.s = s
            raise e
def parse(self, s):
    """Parses the input string, and returns a reference to the created AST's root"""
    with self.lock:
        try:
            return self.parser.parse(s, lexer=self.lexer)
        except InvalidIEMLObjectArgument as e:
            raise CannotParse(s, str(e))
        except CannotParse as e:
            e.s = s
            raise e
[ "Parses", "the", "input", "string", "and", "returns", "a", "reference", "to", "the", "created", "AST", "s", "root" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L73-L82
[ "def", "parse", "(", "self", ",", "s", ")", ":", "with", "self", ".", "lock", ":", "try", ":", "return", "self", ".", "parser", ".", "parse", "(", "s", ",", "lexer", "=", "self", ".", "lexer", ")", "except", "InvalidIEMLObjectArgument", "as", "e", ":", "raise", "CannotParse", "(", "s", ",", "str", "(", "e", ")", ")", "except", "CannotParse", "as", "e", ":", "e", ".", "s", "=", "s", "raise", "e" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
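IEMLParser.parse is the public entry point; the lock serialises access to the shared PLY parser, and any InvalidIEMLObjectArgument raised while building the AST is re-wrapped as CannotParse with the offending string attached as e.s. A hedged usage sketch; the constructor call and the CannotParse import location are assumptions:

from ieml.grammar.parser.parser import IEMLParser, CannotParse  # assumed exports

parser = IEMLParser()
try:
    ast = parser.parse("[([wa.])]")   # illustrative IEML string, not from the record
    print(ast)
except CannotParse as e:
    print("could not parse:", e.s)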
test
IEMLParser.p_literal_list
literal_list : literal_list LITERAL | LITERAL
ieml/grammar/parser/parser.py
def p_literal_list(self, p):
    """literal_list : literal_list LITERAL
                    | LITERAL"""
    if len(p) == 3:
        p[0] = p[1] + [p[2][1:-1]]
    else:
        p[0] = [p[1][1:-1]]
def p_literal_list(self, p):
    """literal_list : literal_list LITERAL
                    | LITERAL"""
    if len(p) == 3:
        p[0] = p[1] + [p[2][1:-1]]
    else:
        p[0] = [p[1][1:-1]]
[ "literal_list", ":", "literal_list", "LITERAL", "|", "LITERAL" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L95-L102
[ "def", "p_literal_list", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "2", "]", "[", "1", ":", "-", "1", "]", "]", "else", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "[", "1", ":", "-", "1", "]", "]" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
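In PLY, each p_* method's docstring is the grammar fragment and p is the production: p[0] is the rule's value, p[1..] are the matched symbols, and len(p) reveals which alternative fired (3 for the recursive branch here, 2 for the base case). The [1:-1] slices strip the delimiter characters that the LITERAL token is assumed to carry. A self-contained toy reproducing the same left-recursive list idiom (requires the ply package):

import ply.lex as lex
import ply.yacc as yacc

tokens = ('LITERAL',)
t_LITERAL = r'<[^>]*>'   # toy literal: <...>, delimiters kept in the token value
t_ignore = ' \t'

def t_error(t):
    t.lexer.skip(1)

def p_literal_list(p):
    """literal_list : literal_list LITERAL
                    | LITERAL"""
    if len(p) == 3:
        p[0] = p[1] + [p[2][1:-1]]   # recursive alternative: extend, strip <>
    else:
        p[0] = [p[1][1:-1]]          # base case: single-element list

def p_error(p):
    raise SyntaxError("bad input: %r" % (p,))

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('<a> <b> <c>', lexer=lexer))   # -> ['a', 'b', 'c']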
test
IEMLParser.p_word
word : TERM | LBRACKET TERM RBRACKET | LBRACKET TERM RBRACKET literal_list
ieml/grammar/parser/parser.py
def p_word(self, p):
    """word : TERM
            | LBRACKET TERM RBRACKET
            | LBRACKET TERM RBRACKET literal_list"""
    try:
        term = self._get_term(p[1 if len(p) == 2 else 2])
    except TermNotFoundInDictionary as e:
        raise CannotParse(self._ieml, str(e))

    if len(p) == 5:
        p[0] = Word(term, literals=p[4])
    else:
        p[0] = Word(term)
def p_word(self, p):
    """word : TERM
            | LBRACKET TERM RBRACKET
            | LBRACKET TERM RBRACKET literal_list"""
    try:
        term = self._get_term(p[1 if len(p) == 2 else 2])
    except TermNotFoundInDictionary as e:
        raise CannotParse(self._ieml, str(e))

    if len(p) == 5:
        p[0] = Word(term, literals=p[4])
    else:
        p[0] = Word(term)
[ "word", ":", "TERM", "|", "LBRACKET", "TERM", "RBRACKET", "|", "LBRACKET", "TERM", "RBRACKET", "literal_list" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L105-L117
[ "def", "p_word", "(", "self", ",", "p", ")", ":", "try", ":", "term", "=", "self", ".", "_get_term", "(", "p", "[", "1", "if", "len", "(", "p", ")", "==", "2", "else", "2", "]", ")", "except", "TermNotFoundInDictionary", "as", "e", ":", "raise", "CannotParse", "(", "self", ".", "_ieml", ",", "str", "(", "e", ")", ")", "if", "len", "(", "p", ")", "==", "5", ":", "p", "[", "0", "]", "=", "Word", "(", "term", ",", "literals", "=", "p", "[", "4", "]", ")", "else", ":", "p", "[", "0", "]", "=", "Word", "(", "term", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
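The index expression p[1 if len(p) == 2 else 2] in p_word picks the TERM out of whichever alternative matched: position 1 for a bare TERM, position 2 whenever an LBRACKET precedes it. A condensed stand-alone check, with lists standing in for PLY's production object (slot 0 is the result slot):

def term_index(p):
    # mirrors p_word's selection of the TERM symbol
    return p[1 if len(p) == 2 else 2]

print(term_index([None, 'wa.']))                      # bare TERM            -> 'wa.'
print(term_index([None, '[', 'wa.', ']']))            # bracketed TERM       -> 'wa.'
print(term_index([None, '[', 'wa.', ']', ['lit']]))   # bracketed + literals -> 'wa.'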
test
IEMLParser.p_proposition_sum
word_sum : word_sum PLUS word | word clauses_sum : clauses_sum PLUS clause | clause superclauses_sum : superclauses_sum PLUS superclause | superclause
ieml/grammar/parser/parser.py
def p_proposition_sum(self, p):
    """word_sum : word_sum PLUS word
                | word
       clauses_sum : clauses_sum PLUS clause
                | clause
       superclauses_sum : superclauses_sum PLUS superclause
                | superclause"""
    # closed_proposition_list : closed_proposition_list closed_proposition
    #                         | closed_proposition"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    elif len(p) == 3:
        p[0] = p[1] + [p[2]]
    else:
        p[0] = [p[1]]
def p_proposition_sum(self, p):
    """word_sum : word_sum PLUS word
                | word
       clauses_sum : clauses_sum PLUS clause
                | clause
       superclauses_sum : superclauses_sum PLUS superclause
                | superclause"""
    # closed_proposition_list : closed_proposition_list closed_proposition
    #                         | closed_proposition"""
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
    elif len(p) == 3:
        p[0] = p[1] + [p[2]]
    else:
        p[0] = [p[1]]
[ "word_sum", ":", "word_sum", "PLUS", "word", "|", "word", "clauses_sum", ":", "clauses_sum", "PLUS", "clause", "|", "clause", "superclauses_sum", ":", "superclauses_sum", "PLUS", "superclause", "|", "superclause" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L119-L134
[ "def", "p_proposition_sum", "(", "self", ",", "p", ")", ":", "# closed_proposition_list : closed_proposition_list closed_proposition", "# | closed_proposition\"\"\"", "if", "len", "(", "p", ")", "==", "4", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "3", "]", "]", "elif", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "2", "]", "]", "else", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "]" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
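One action serves three sum nonterminals here, and with the alternatives actually listed, len(p) is either 4 (the X PLUS Y branch) or 2 (the base case); the elif len(p) == 3 branch would only match the commented-out juxtaposition rule and appears to be dead code in the active grammar. The accumulation itself is the usual left fold into a Python list:

# hedged, PLY-free illustration of the fold performed by this action
acc = ['w1']                 # base case: p[0] = [p[1]]
for word in ['w2', 'w3']:
    acc = acc + [word]       # recursive case: p[0] = p[1] + [p[3]]
print(acc)                   # -> ['w1', 'w2', 'w3']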
test
IEMLParser.p_topic
topic : LBRACKET morpheme RBRACKET | LBRACKET morpheme RBRACKET literal_list | LBRACKET morpheme TIMES morpheme RBRACKET | LBRACKET morpheme TIMES morpheme RBRACKET literal_list
ieml/grammar/parser/parser.py
def p_topic(self, p):
    """topic : LBRACKET morpheme RBRACKET
             | LBRACKET morpheme RBRACKET literal_list
             | LBRACKET morpheme TIMES morpheme RBRACKET
             | LBRACKET morpheme TIMES morpheme RBRACKET literal_list"""
    if len(p) == 4:
        p[0] = Topic(root=tuple(p[2]), flexing=())
    elif len(p) == 5:
        p[0] = Topic(root=tuple(p[2]), flexing=(), literals=p[4])
    elif len(p) == 6:
        p[0] = Topic(root=tuple(p[2]), flexing=tuple(p[4]))
    else:
        p[0] = Topic(root=tuple(p[2]), flexing=tuple(p[4]), literals=p[6])
def p_topic(self, p):
    """topic : LBRACKET morpheme RBRACKET
             | LBRACKET morpheme RBRACKET literal_list
             | LBRACKET morpheme TIMES morpheme RBRACKET
             | LBRACKET morpheme TIMES morpheme RBRACKET literal_list"""
    if len(p) == 4:
        p[0] = Topic(root=tuple(p[2]), flexing=())
    elif len(p) == 5:
        p[0] = Topic(root=tuple(p[2]), flexing=(), literals=p[4])
    elif len(p) == 6:
        p[0] = Topic(root=tuple(p[2]), flexing=tuple(p[4]))
    else:
        p[0] = Topic(root=tuple(p[2]), flexing=tuple(p[4]), literals=p[6])
[ "topic", ":", "LBRACKET", "morpheme", "RBRACKET", "|", "LBRACKET", "morpheme", "RBRACKET", "literal_list", "|", "LBRACKET", "morpheme", "TIMES", "morpheme", "RBRACKET", "|", "LBRACKET", "morpheme", "TIMES", "morpheme", "RBRACKET", "literal_list" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L140-L153
[ "def", "p_topic", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "4", ":", "p", "[", "0", "]", "=", "Topic", "(", "root", "=", "tuple", "(", "p", "[", "2", "]", ")", ",", "flexing", "=", "(", ")", ")", "elif", "len", "(", "p", ")", "==", "5", ":", "p", "[", "0", "]", "=", "Topic", "(", "root", "=", "tuple", "(", "p", "[", "2", "]", ")", ",", "flexing", "=", "(", ")", ",", "literals", "=", "p", "[", "4", "]", ")", "elif", "len", "(", "p", ")", "==", "6", ":", "p", "[", "0", "]", "=", "Topic", "(", "root", "=", "tuple", "(", "p", "[", "2", "]", ")", ",", "flexing", "=", "tuple", "(", "p", "[", "4", "]", ")", ")", "else", ":", "p", "[", "0", "]", "=", "Topic", "(", "root", "=", "tuple", "(", "p", "[", "2", "]", ")", ",", "flexing", "=", "tuple", "(", "p", "[", "4", "]", ")", ",", "literals", "=", "p", "[", "6", "]", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
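p_topic distinguishes its four alternatives by production length alone: 4 symbols for a bare morpheme, 5 with literals, 6 for root TIMES flexing, 7 for both. A hedged mirror of the dispatch that returns the keyword arguments Topic would receive:

def topic_kwargs(p):
    # p mimics PLY's production list; slot 0 is the (unused) result slot
    if len(p) == 4:
        return dict(root=tuple(p[2]), flexing=())
    if len(p) == 5:
        return dict(root=tuple(p[2]), flexing=(), literals=p[4])
    if len(p) == 6:
        return dict(root=tuple(p[2]), flexing=tuple(p[4]))
    return dict(root=tuple(p[2]), flexing=tuple(p[4]), literals=p[6])

print(topic_kwargs([None, '[', ['wa.'], ']']))   # -> {'root': ('wa.',), 'flexing': ()}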
test
IEMLParser.p_fact
fact : LBRACKET clauses_sum RBRACKET | LBRACKET clauses_sum RBRACKET literal_list
ieml/grammar/parser/parser.py
def p_fact(self, p):
    """fact : LBRACKET clauses_sum RBRACKET
            | LBRACKET clauses_sum RBRACKET literal_list"""
    if len(p) == 4:
        p[0] = Fact(p[2])
    else:
        p[0] = Fact(p[2], literals=p[4])
def p_fact(self, p):
    """fact : LBRACKET clauses_sum RBRACKET
            | LBRACKET clauses_sum RBRACKET literal_list"""
    if len(p) == 4:
        p[0] = Fact(p[2])
    else:
        p[0] = Fact(p[2], literals=p[4])
[ "fact", ":", "LBRACKET", "clauses_sum", "RBRACKET", "|", "LBRACKET", "clauses_sum", "RBRACKET", "literal_list" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L173-L179
[ "def", "p_fact", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "4", ":", "p", "[", "0", "]", "=", "Fact", "(", "p", "[", "2", "]", ")", "else", ":", "p", "[", "0", "]", "=", "Fact", "(", "p", "[", "2", "]", ",", "literals", "=", "p", "[", "4", "]", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
test
IEMLParser.p_theory
theory : LBRACKET superclauses_sum RBRACKET | LBRACKET superclauses_sum RBRACKET literal_list
ieml/grammar/parser/parser.py
def p_theory(self, p):
    """theory : LBRACKET superclauses_sum RBRACKET
              | LBRACKET superclauses_sum RBRACKET literal_list"""
    if len(p) == 4:
        p[0] = Theory(p[2])
    else:
        p[0] = Theory(p[2], literals=p[4])
def p_theory(self, p):
    """theory : LBRACKET superclauses_sum RBRACKET
              | LBRACKET superclauses_sum RBRACKET literal_list"""
    if len(p) == 4:
        p[0] = Theory(p[2])
    else:
        p[0] = Theory(p[2], literals=p[4])
[ "theory", ":", "LBRACKET", "superclauses_sum", "RBRACKET", "|", "LBRACKET", "superclauses_sum", "RBRACKET", "literal_list" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L186-L192
[ "def", "p_theory", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "4", ":", "p", "[", "0", "]", "=", "Theory", "(", "p", "[", "2", "]", ")", "else", ":", "p", "[", "0", "]", "=", "Theory", "(", "p", "[", "2", "]", ",", "literals", "=", "p", "[", "4", "]", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
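p_fact and p_theory above are the same action modulo the wrapper class (Fact over clauses_sum, Theory over superclauses_sum). The shared shape could be captured once; a hedged refactoring sketch, not code from the repo:

def _wrap(cls, p):
    # body : LBRACKET xs RBRACKET | LBRACKET xs RBRACKET literal_list
    return cls(p[2]) if len(p) == 4 else cls(p[2], literals=p[4])

# p_fact's action would become:   p[0] = _wrap(Fact, p)
# p_theory's action would become: p[0] = _wrap(Theory, p)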
test
IEMLParser.p_closed_proposition_list
closed_proposition_list : closed_proposition_list SLASH SLASH closed_proposition | closed_proposition
ieml/grammar/parser/parser.py
def p_closed_proposition_list(self, p):
    """ closed_proposition_list : closed_proposition_list SLASH SLASH closed_proposition
                                | closed_proposition"""
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[4]]
def p_closed_proposition_list(self, p):
    """ closed_proposition_list : closed_proposition_list SLASH SLASH closed_proposition
                                | closed_proposition"""
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[4]]
[ "closed_proposition_list", ":", "closed_proposition_list", "SLASH", "SLASH", "closed_proposition", "|", "closed_proposition" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/grammar/parser/parser.py#L200-L206
[ "def", "p_closed_proposition_list", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "]", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "p", "[", "4", "]", "]" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
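Because the separator here is two SLASH tokens (//), the recursive alternative spans five symbols and the new proposition sits at p[4], not p[3]. A stand-alone check of the indexing, with a list standing in for PLY's production:

p = [None, ['P1'], '/', '/', 'P2']   # result slot, list so far, SLASH, SLASH, new item
print(p[1] + [p[4]])                 # -> ['P1', 'P2']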
test
pack_factorisation
:param facto_list: list of parser or tuple of factorisation :return:
ieml/dictionary/script/tools.py
def pack_factorisation(facto_list):
    """
    :param facto_list: list of parser or tuple of factorisation
    :return:
    """
    _sum = []
    for f in facto_list:
        if isinstance(f, Script):
            _sum.append(f)
        else:
            # tuple of factorisation
            _sum.append(MultiplicativeScript(children=(pack_factorisation(l_f) for l_f in f)))

    if len(_sum) == 1:
        return _sum[0]
    else:
        return AdditiveScript(children=_sum)
def pack_factorisation(facto_list):
    """
    :param facto_list: list of parser or tuple of factorisation
    :return:
    """
    _sum = []
    for f in facto_list:
        if isinstance(f, Script):
            _sum.append(f)
        else:
            # tuple of factorisation
            _sum.append(MultiplicativeScript(children=(pack_factorisation(l_f) for l_f in f)))

    if len(_sum) == 1:
        return _sum[0]
    else:
        return AdditiveScript(children=_sum)
[ ":", "param", "facto_list", ":", "list", "of", "parser", "or", "tuple", "of", "factorisation", ":", "return", ":" ]
IEMLdev/ieml
python
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/dictionary/script/tools.py#L101-L117
[ "def", "pack_factorisation", "(", "facto_list", ")", ":", "_sum", "=", "[", "]", "for", "f", "in", "facto_list", ":", "if", "isinstance", "(", "f", ",", "Script", ")", ":", "_sum", ".", "append", "(", "f", ")", "else", ":", "# tuple of factorisation", "_sum", ".", "append", "(", "MultiplicativeScript", "(", "children", "=", "(", "pack_factorisation", "(", "l_f", ")", "for", "l_f", "in", "f", ")", ")", ")", "if", "len", "(", "_sum", ")", "==", "1", ":", "return", "_sum", "[", "0", "]", "else", ":", "return", "AdditiveScript", "(", "children", "=", "_sum", ")" ]
4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25
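pack_factorisation folds a nested factorisation back into one script: Script leaves pass through, tuples become MultiplicativeScripts of recursively packed parts, and a multi-term top level becomes an AdditiveScript. A hedged structural analogue over plain strings makes the recursion shape visible (strings stand in for Script leaves; the flat join loses the nesting that the real script classes preserve):

def pack(facto_list):
    parts = []
    for f in facto_list:
        if isinstance(f, str):                       # stands in for a Script leaf
            parts.append(f)
        else:                                        # tuple of factorisations
            parts.append('*'.join(pack(g) for g in f))
    return parts[0] if len(parts) == 1 else '+'.join(parts)

print(pack(['a']))                        # -> 'a'
print(pack(['a', (['b'], ['c', 'd'])]))   # -> 'a+b*c+d' (flat string; scripts keep nesting)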