Dataset fields (type and observed range of values):

    partition          stringclasses   3 values
    func_name          stringlengths   1 to 134
    docstring          stringlengths   1 to 46.9k
    path               stringlengths   4 to 223
    original_string    stringlengths   75 to 104k
    code               stringlengths   75 to 104k
    docstring_tokens   listlengths     1 to 1.97k
    repo               stringlengths   7 to 55
    language           stringclasses   1 value
    url                stringlengths   87 to 315
    code_tokens        listlengths     19 to 28.4k
    sha                stringlengths   40 to 40

Sample records (partition: valid):
partition: valid
func_name: Extension.get_unicodes
docstring: Return list of unicodes for <scanning-codepoints>
path: fontaine/ext/extensis.py
code:

    def get_unicodes(codepoint):
        """ Return list of unicodes for <scanning-codepoints> """
        result = re.sub('\s', '', codepoint.text)
        return Extension.convert_to_list_of_unicodes(result)

repo: davelab6/pyfontaine
language: python
url: https://github.com/davelab6/pyfontaine/blob/e9af7f2667e85803a7f5ea2b1f0d9a34931f3b95/fontaine/ext/extensis.py#L63-L66
sha: e9af7f2667e85803a7f5ea2b1f0d9a34931f3b95
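A small standalone illustration of the whitespace-stripping step in get_unicodes; the sample payload below is hypothetical, and convert_to_list_of_unicodes is assumed to parse the cleaned, comma-separated text.

    import re

    # Whitespace and newlines are legal inside the XML text node, so they are
    # stripped before the codepoint list is parsed any further.
    text = " 32-126,\n 160,\n 169 "
    cleaned = re.sub(r'\s', '', text)
    print(cleaned)  # -> 32-126,160,169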
partition: valid
func_name: BaseOAuth.handler
docstring:

    * get request token if OAuth1
    * Get user authorization
    * Get access token

path: yahoo_oauth/yahoo_oauth.py
code:

    def handler(self,):
        """* get request token if OAuth1
        * Get user authorization
        * Get access token
        """
        if self.oauth_version == 'oauth1':
            request_token, request_token_secret = self.oauth.get_request_token(params={'oauth_callback': self.callback_uri})
            logger.debug("REQUEST_TOKEN = {0}\n REQUEST_TOKEN_SECRET = {1}\n".format(request_token, request_token_secret))
            authorize_url = self.oauth.get_authorize_url(request_token)
        else:
            authorize_url = self.oauth.get_authorize_url(client_secret=self.consumer_secret, redirect_uri=self.callback_uri, response_type='code')

        logger.debug("AUTHORISATION URL : {0}".format(authorize_url))
        # Open authorize_url
        webbrowser.open(authorize_url)
        self.verifier = input("Enter verifier : ")
        self.token_time = time.time()
        credentials = {'token_time': self.token_time}

        if self.oauth_version == 'oauth1':
            raw_access = self.oauth.get_raw_access_token(request_token, request_token_secret, params={"oauth_verifier": self.verifier})
            parsed_access = parse_utf8_qsl(raw_access.content)
            self.access_token = parsed_access['oauth_token']
            self.access_token_secret = parsed_access['oauth_token_secret']
            self.session_handle = parsed_access['oauth_session_handle']
            self.guid = parsed_access['xoauth_yahoo_guid']
            # Updating credentials
            credentials.update({
                'access_token': self.access_token,
                'access_token_secret': self.access_token_secret,
                'session_handle': self.session_handle,
                'guid': self.guid
            })
        else:
            # Building headers
            headers = self.generate_oauth2_headers()
            # Getting access token
            raw_access = self.oauth.get_raw_access_token(data={"code": self.verifier, 'redirect_uri': self.callback_uri, 'grant_type': 'authorization_code'}, headers=headers)
            #parsed_access = parse_utf8_qsl(raw_access.content.decode('utf-8'))
            credentials.update(self.oauth2_access_parser(raw_access))

        return credentials

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/yahoo_oauth.py#L100-L145
sha: 40eff7809366850c46e1a3340469044f33cd1713
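handler() is interactive: it opens the authorization URL in a browser, reads the verifier from stdin, and returns a credentials dict whose keys depend on oauth_version. A driving sketch, not self-contained; `session` stands in for a hypothetical, already-configured BaseOAuth-style instance.

    # Sketch only: requires a browser and a human to paste the verifier.
    credentials = session.handler()
    if session.oauth_version == 'oauth1':
        print(credentials['access_token'], credentials['session_handle'])
    else:
        print(credentials['access_token'], credentials['refresh_token'])
    print(credentials['token_time'])  # recorded in both branches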
partition: valid
func_name: BaseOAuth.generate_oauth2_headers
docstring: Generates header for oauth2
path: yahoo_oauth/yahoo_oauth.py
code:

    def generate_oauth2_headers(self):
        """Generates header for oauth2
        """
        encoded_credentials = base64.b64encode(('{0}:{1}'.format(self.consumer_key, self.consumer_secret)).encode('utf-8'))
        headers = {
            'Authorization': 'Basic {0}'.format(encoded_credentials.decode('utf-8')),
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        return headers

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/yahoo_oauth.py#L147-L156
sha: 40eff7809366850c46e1a3340469044f33cd1713
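The header is a standard HTTP Basic credential built from the consumer key/secret pair. A standalone check of the same construction, with made-up key and secret values:

    import base64

    consumer_key, consumer_secret = 'my-key', 'my-secret'  # placeholder values
    encoded = base64.b64encode('{0}:{1}'.format(consumer_key, consumer_secret).encode('utf-8'))
    headers = {
        'Authorization': 'Basic {0}'.format(encoded.decode('utf-8')),
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    print(headers['Authorization'])  # Basic bXkta2V5Om15LXNlY3JldQ==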
partition: valid
func_name: BaseOAuth.oauth2_access_parser
docstring: Parse oauth2 access
path: yahoo_oauth/yahoo_oauth.py
code:

    def oauth2_access_parser(self, raw_access):
        """Parse oauth2 access
        """
        parsed_access = json.loads(raw_access.content.decode('utf-8'))
        self.access_token = parsed_access['access_token']
        self.token_type = parsed_access['token_type']
        self.refresh_token = parsed_access['refresh_token']
        self.guid = parsed_access['xoauth_yahoo_guid']

        credentials = {
            'access_token': self.access_token,
            'token_type': self.token_type,
            'refresh_token': self.refresh_token,
            'guid': self.guid
        }

        return credentials

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/yahoo_oauth.py#L158-L174
sha: 40eff7809366850c46e1a3340469044f33cd1713
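oauth2_access_parser expects the token endpoint's JSON body to carry exactly the four fields it reads. A self-contained stand-in showing that shape; all values are placeholders.

    import json

    class FakeResponse(object):
        # mimics the raw_access object: .content is UTF-8 encoded JSON
        content = json.dumps({
            'access_token': 'at-123',
            'token_type': 'bearer',
            'refresh_token': 'rt-456',
            'xoauth_yahoo_guid': 'GUID789',
        }).encode('utf-8')

    parsed = json.loads(FakeResponse.content.decode('utf-8'))
    assert parsed['token_type'] == 'bearer'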
partition: valid
func_name: BaseOAuth.refresh_access_token
docstring: Refresh access token
path: yahoo_oauth/yahoo_oauth.py
code:

    def refresh_access_token(self,):
        """Refresh access token
        """
        logger.debug("REFRESHING TOKEN")
        self.token_time = time.time()
        credentials = {
            'token_time': self.token_time
        }
        if self.oauth_version == 'oauth1':
            self.access_token, self.access_token_secret = self.oauth.get_access_token(self.access_token, self.access_token_secret, params={"oauth_session_handle": self.session_handle})
            credentials.update({
                'access_token': self.access_token,
                'access_token_secret': self.access_token_secret,
                'session_handle': self.session_handle,
                'token_time': self.token_time
            })
        else:
            headers = self.generate_oauth2_headers()
            raw_access = self.oauth.get_raw_access_token(data={"refresh_token": self.refresh_token, 'redirect_uri': self.callback_uri, 'grant_type': 'refresh_token'}, headers=headers)
            credentials.update(self.oauth2_access_parser(raw_access))

        return credentials

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/yahoo_oauth.py#L176-L199
sha: 40eff7809366850c46e1a3340469044f33cd1713
partition: valid
func_name: BaseOAuth.token_is_valid
docstring: Check the validity of the token :3600s
path: yahoo_oauth/yahoo_oauth.py
code:

    def token_is_valid(self,):
        """Check the validity of the token :3600s
        """
        elapsed_time = time.time() - self.token_time
        logger.debug("ELAPSED TIME : {0}".format(elapsed_time))
        if elapsed_time > 3540:  # 1 minute before it expires
            logger.debug("TOKEN HAS EXPIRED")
            return False

        logger.debug("TOKEN IS STILL VALID")
        return True

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/yahoo_oauth.py#L201-L211
sha: 40eff7809366850c46e1a3340469044f33cd1713
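token_is_valid treats the token as expired 60 seconds early (3540 s against the 3600 s lifetime), which pairs naturally with refresh_access_token. A typical guard, where `session` is again a hypothetical authenticated instance:

    # Refresh proactively rather than letting a request fail mid-flight.
    if not session.token_is_valid():
        session.refresh_access_token()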
partition: valid
func_name: get_data
docstring: Calls right function according to file extension
path: yahoo_oauth/utils.py
code:

    def get_data(filename):
        """Calls right function according to file extension
        """
        name, ext = get_file_extension(filename)
        func = json_get_data if ext == '.json' else yaml_get_data
        return func(filename)

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L29-L34
sha: 40eff7809366850c46e1a3340469044f33cd1713
partition: valid
func_name: write_data
docstring: Call right func to save data according to file extension
path: yahoo_oauth/utils.py
code:

    def write_data(data, filename):
        """Call right func to save data according to file extension
        """
        name, ext = get_file_extension(filename)
        func = json_write_data if ext == '.json' else yaml_write_data
        return func(data, filename)

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L36-L41
sha: 40eff7809366850c46e1a3340469044f33cd1713
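get_data and write_data dispatch on the file extension: '.json' goes to the json helpers, anything else to the yaml ones. A round-trip sketch; 'creds.json' is a hypothetical filename, and the helpers are assumed available from the module shown above.

    from yahoo_oauth.utils import get_data, write_data

    data = {'consumer_key': 'my-key', 'consumer_secret': 'my-secret'}
    write_data(data, 'creds.json')          # dispatched to json_write_data
    assert get_data('creds.json') == data   # dispatched to json_get_data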
partition: valid
func_name: json_write_data
docstring: Write json data into a file
path: yahoo_oauth/utils.py
code:

    def json_write_data(json_data, filename):
        """Write json data into a file
        """
        with open(filename, 'w') as fp:
            json.dump(json_data, fp, indent=4, sort_keys=True, ensure_ascii=False)
            return True
        return False

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L43-L49
sha: 40eff7809366850c46e1a3340469044f33cd1713
partition: valid
func_name: json_get_data
docstring: Get data from json file
path: yahoo_oauth/utils.py
code:

    def json_get_data(filename):
        """Get data from json file
        """
        with open(filename) as fp:
            json_data = json.load(fp)
            return json_data
        return False

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L51-L58
sha: 40eff7809366850c46e1a3340469044f33cd1713
partition: valid
func_name: yaml_get_data
docstring: Get data from .yml file
path: yahoo_oauth/utils.py
code:

    def yaml_get_data(filename):
        """Get data from .yml file
        """
        with open(filename, 'rb') as fd:
            yaml_data = yaml.load(fd)
            return yaml_data
        return False

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L60-L66
sha: 40eff7809366850c46e1a3340469044f33cd1713
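Note that yaml.load without an explicit Loader has been deprecated since PyYAML 5.1 because it can construct arbitrary Python objects. For plain data files like these credentials, safe_load is the usual replacement (a sketch; 'creds.yml' is a hypothetical file):

    import yaml

    with open('creds.yml', 'rb') as fd:
        yaml_data = yaml.safe_load(fd)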
partition: valid
func_name: yaml_write_data
docstring: Write data into a .yml file
path: yahoo_oauth/utils.py
code:

    def yaml_write_data(yaml_data, filename):
        """Write data into a .yml file
        """
        with open(filename, 'w') as fd:
            yaml.dump(yaml_data, fd, default_flow_style=False)
            return True
        return False

repo: josuebrunel/yahoo-oauth
language: python
url: https://github.com/josuebrunel/yahoo-oauth/blob/40eff7809366850c46e1a3340469044f33cd1713/yahoo_oauth/utils.py#L68-L75
sha: 40eff7809366850c46e1a3340469044f33cd1713
partition: valid
func_name: RBFize.fit
docstring:

    If scale_by_median, find :attr:`median_`; otherwise, do nothing.

    Parameters
    ----------
    X : array
        The raw pairwise distances.

path: skl_groups/kernels/transform.py
code:

    def fit(self, X, y=None):
        '''
        If scale_by_median, find :attr:`median_`; otherwise, do nothing.

        Parameters
        ----------
        X : array
            The raw pairwise distances.
        '''
        X = check_array(X)
        if self.scale_by_median:
            self.median_ = np.median(X[np.triu_indices_from(X, k=1)],
                                     overwrite_input=True)
        elif hasattr(self, 'median_'):
            del self.median_
        return self

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L156-L172
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
partition: valid
func_name: RBFize.transform
docstring:

    Turns distances into RBF values.

    Parameters
    ----------
    X : array
        The raw pairwise distances.

    Returns
    -------
    X_rbf : array of same shape as X
        The distances in X passed through the RBF kernel.

path: skl_groups/kernels/transform.py
code:

    def transform(self, X):
        '''
        Turns distances into RBF values.

        Parameters
        ----------
        X : array
            The raw pairwise distances.

        Returns
        -------
        X_rbf : array of same shape as X
            The distances in X passed through the RBF kernel.
        '''
        X = check_array(X)
        X_rbf = np.empty_like(X) if self.copy else X
        X_in = X
        if not self.squared:
            np.power(X_in, 2, out=X_rbf)
            X_in = X_rbf
        if self.scale_by_median:
            scale = self.median_ if self.squared else self.median_ ** 2
            gamma = self.gamma * scale
        else:
            gamma = self.gamma
        np.multiply(X_in, -gamma, out=X_rbf)
        np.exp(X_rbf, out=X_rbf)
        return X_rbf

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L174-L204
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
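With squared=False and scale_by_median=False, transform computes k(d) = exp(-gamma * d**2) elementwise. A plain-NumPy check of that arithmetic on a made-up distance matrix, not using skl-groups itself:

    import numpy as np

    D = np.array([[0.0, 1.0],
                  [1.0, 0.0]])     # hypothetical pairwise distances
    gamma = 0.5
    K = np.exp(-gamma * D ** 2)    # [[1.0, 0.6065...], [0.6065..., 1.0]]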
partition: valid
func_name: ProjectPSD.fit
docstring:

    Learn the linear transformation to clipped eigenvalues.

    Note that if min_eig isn't zero and any of the original eigenvalues
    were exactly zero, this will leave those eigenvalues as zero.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.

path: skl_groups/kernels/transform.py
code:

    def fit(self, X, y=None):
        '''
        Learn the linear transformation to clipped eigenvalues.

        Note that if min_eig isn't zero and any of the original eigenvalues
        were exactly zero, this will leave those eigenvalues as zero.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities. If X is asymmetric, it will be
            treated as if it were symmetric based on its lower-triangular part.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")

        # TODO: only get negative eigs somehow?
        memory = get_memory(self.memory)
        vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
            X, overwrite_a=not self.copy)
        vals = vals.reshape(-1, 1)

        if self.min_eig == 0:
            inner = vals > self.min_eig
        else:
            with np.errstate(divide='ignore'):
                inner = np.where(vals >= self.min_eig, 1,
                                 np.where(vals == 0, 0, self.min_eig / vals))

        self.clip_ = np.dot(vecs, inner * vecs.T)
        return self

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L259-L290
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
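For min_eig == 0, the learned clip_ operator projects onto the span of the nonnegative eigenvalues. The same clipping spelled out in plain NumPy, on a small indefinite matrix:

    import numpy as np

    X = np.array([[1.0, 2.0],
                  [2.0, 1.0]])    # symmetric, eigenvalues -1 and 3
    vals, vecs = np.linalg.eigh(X)
    clipped = vecs @ np.diag(np.maximum(vals, 0)) @ vecs.T
    # np.linalg.eigvalsh(clipped) -> approximately [0.0, 3.0]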
partition: valid
func_name: FlipPSD.fit
docstring:

    Learn the linear transformation to flipped eigenvalues.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.

path: skl_groups/kernels/transform.py
code:

    def fit(self, X, y=None):
        '''
        Learn the linear transformation to flipped eigenvalues.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities. If X is asymmetric, it will be
            treated as if it were symmetric based on its lower-triangular part.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")

        # TODO: only get negative eigs somehow?
        memory = get_memory(self.memory)
        vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
            X, overwrite_a=not self.copy)
        vals = vals[:, None]

        self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)
        return self

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L400-L421
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
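Flipping keeps the eigenvectors and takes the absolute values of the eigenvalues. In plain NumPy:

    import numpy as np

    X = np.array([[1.0, 2.0],
                  [2.0, 1.0]])    # eigenvalues -1 and 3
    vals, vecs = np.linalg.eigh(X)
    flipped = vecs @ np.diag(np.abs(vals)) @ vecs.T
    # flipped -> [[2.0, 1.0], [1.0, 2.0]], eigenvalues 1 and 3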
partition: valid
func_name: FlipPSD.transform
docstring:

    Transforms X according to the linear transformation corresponding to
    flipping the input eigenvalues.

    Parameters
    ----------
    X : array, shape [n_test, n]
        The test similarities to training points.

    Returns
    -------
    Xt : array, shape [n_test, n]
        The transformed test similarities to training points.

path: skl_groups/kernels/transform.py
code:

    def transform(self, X):
        '''
        Transforms X according to the linear transformation corresponding to
        flipping the input eigenvalues.

        Parameters
        ----------
        X : array, shape [n_test, n]
            The test similarities to training points.

        Returns
        -------
        Xt : array, shape [n_test, n]
            The transformed test similarities to training points.
        '''
        n = self.flip_.shape[0]
        if X.ndim != 2 or X.shape[1] != n:
            msg = "X should have {} columns, the number of samples at fit time"
            raise TypeError(msg.format(self.flip_.shape[0]))
        return np.dot(X, self.flip_)

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L423-L442
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
partition: valid
func_name: FlipPSD.fit_transform
docstring:

    Flips the negative eigenvalues of X.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.

    Returns
    -------
    Xt : array, shape [n, n]
        The transformed training similarities.

path: skl_groups/kernels/transform.py
code:

    def fit_transform(self, X, y=None):
        '''
        Flips the negative eigenvalues of X.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities. If X is asymmetric, it will be
            treated as if it were symmetric based on its lower-triangular part.

        Returns
        -------
        Xt : array, shape [n, n]
            The transformed training similarities.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")

        memory = get_memory(self.memory)
        discard_X = not self.copy and self.negatives_likely
        vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
            X, overwrite_a=discard_X)
        vals = vals[:, None]

        self.clip_ = np.dot(vecs, np.sign(vals) * vecs.T)

        if discard_X or vals[0, 0] < 0:
            del X
            np.abs(vals, out=vals)
            X = np.dot(vecs, vals * vecs.T)
            del vals, vecs

            # should be symmetric, but make sure because floats
            X = Symmetrize(copy=False).fit_transform(X)
        return X

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L444-L479
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
partition: valid
func_name: ShiftPSD.fit
docstring:

    Learn the transformation to shifted eigenvalues. Only depends on the
    input dimension.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities.

path: skl_groups/kernels/transform.py
code:

    def fit(self, X, y=None):
        '''
        Learn the transformation to shifted eigenvalues. Only depends on the
        input dimension.

        Parameters
        ----------
        X : array, shape [n, n]
            The *symmetric* input similarities.
        '''
        n = X.shape[0]
        if X.shape != (n, n):
            raise TypeError("Input must be a square matrix.")
        self.train_ = X

        memory = get_memory(self.memory)
        lo, = memory.cache(scipy.linalg.eigvalsh)(X, eigvals=(0, 0))
        self.shift_ = max(self.min_eig - lo, 0)
        return self

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L527-L547
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
partition: valid
func_name: ShiftPSD.transform
docstring:

    Transforms X according to the linear transformation corresponding to
    shifting the input eigenvalues to all be at least ``self.min_eig``.

    Parameters
    ----------
    X : array, shape [n_test, n]
        The test similarities to training points.

    Returns
    -------
    Xt : array, shape [n_test, n]
        The transformed test similarities to training points. Only different
        from X if X is the training data.

path: skl_groups/kernels/transform.py
code:

    def transform(self, X):
        '''
        Transforms X according to the linear transformation corresponding to
        shifting the input eigenvalues to all be at least ``self.min_eig``.

        Parameters
        ----------
        X : array, shape [n_test, n]
            The test similarities to training points.

        Returns
        -------
        Xt : array, shape [n_test, n]
            The transformed test similarities to training points. Only
            different from X if X is the training data.
        '''
        n = self.train_.shape[0]
        if X.ndim != 2 or X.shape[1] != n:
            msg = "X should have {} columns, the number of samples at fit time"
            raise TypeError(msg.format(n))

        if self.copy:
            X = X.copy()

        if self.shift_ != 0 and X is self.train_ or (
                X.shape == self.train_.shape and np.allclose(X, self.train_)):
            X[xrange(n), xrange(n)] += self.shift_
        return X

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/transform.py#L549-L576
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
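fit computes shift_ = max(min_eig - lambda_min, 0) and transform adds it along the diagonal of the training matrix, raising every eigenvalue by the same amount. The same arithmetic by hand:

    import numpy as np

    X = np.array([[1.0, 2.0],
                  [2.0, 1.0]])          # smallest eigenvalue is -1
    min_eig = 0.1
    lo = np.linalg.eigvalsh(X)[0]
    shift = max(min_eig - lo, 0)        # 1.1
    X_shifted = X + shift * np.eye(2)   # eigenvalues 0.1 and 4.1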
partition: valid
func_name: L2DensityTransformer.fit
docstring:

    Picks the elements of the basis to use for the given data.

    Only depends on the dimension of X. If it's more convenient, you can
    pass a single integer for X, which is the dimension to use.

    Parameters
    ----------
    X : an integer, a :class:`Features` instance, or a list of bag features
        The input data, or just its dimension, since only the dimension is
        needed here.

path: skl_groups/summaries/l2_density.py
code:

    def fit(self, X, y=None):
        '''
        Picks the elements of the basis to use for the given data.

        Only depends on the dimension of X. If it's more convenient, you can
        pass a single integer for X, which is the dimension to use.

        Parameters
        ----------
        X : an integer, a :class:`Features` instance, or a list of bag features
            The input data, or just its dimension, since only the dimension is
            needed here.
        '''
        if is_integer(X):
            dim = X
        else:
            X = as_features(X)
            dim = X.dim
        M = self.smoothness

        # figure out the smooth-enough elements of our basis
        inds = np.mgrid[(slice(M + 1),) * dim].reshape(dim, (M + 1) ** dim).T
        self.inds_ = inds[(inds ** 2).sum(axis=1) <= M ** 2]
        return self

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/summaries/l2_density.py#L116-L139
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
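fit keeps the multi-indices of the tensor-product basis whose Euclidean norm is at most the smoothness M. Evaluating the same two lines for dim=2 and M=2 shows which elements survive:

    import numpy as np

    M, dim = 2, 2
    inds = np.mgrid[(slice(M + 1),) * dim].reshape(dim, (M + 1) ** dim).T
    kept = inds[(inds ** 2).sum(axis=1) <= M ** 2]
    # kept -> [[0 0], [0 1], [0 2], [1 0], [1 1], [2 0]]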
partition: valid
func_name: L2DensityTransformer.transform
docstring:

    Transform a list of bag features into its projection series
    representation.

    Parameters
    ----------
    X : :class:`skl_groups.features.Features` or list of bag feature arrays
        New data to transform. The data should all lie in [0, 1];
        use :class:`skl_groups.preprocessing.BagMinMaxScaler` if not.

    Returns
    -------
    X_new : integer array, shape ``[len(X), dim_]``
        X transformed into the new space.

path: skl_groups/summaries/l2_density.py
code:

    def transform(self, X):
        '''
        Transform a list of bag features into its projection series
        representation.

        Parameters
        ----------
        X : :class:`skl_groups.features.Features` or list of bag feature arrays
            New data to transform. The data should all lie in [0, 1];
            use :class:`skl_groups.preprocessing.BagMinMaxScaler` if not.

        Returns
        -------
        X_new : integer array, shape ``[len(X), dim_]``
            X transformed into the new space.
        '''
        self._check_fitted()
        M = self.smoothness
        dim = self.dim_
        inds = self.inds_
        do_check = self.do_bounds_check

        X = as_features(X)
        if X.dim != dim:
            msg = "model fit for dimension {} but got dim {}"
            raise ValueError(msg.format(dim, X.dim))

        Xt = np.empty((len(X), self.inds_.shape[0]))
        Xt.fill(np.nan)

        if self.basis == 'cosine':
            # TODO: put this in a C extension?
            coefs = (np.pi * np.arange(M + 1))[..., :]
            for i, bag in enumerate(X):
                if do_check:
                    if np.min(bag) < 0 or np.max(bag) > 1:
                        raise ValueError("Bag {} not in [0, 1]".format(i))

                # apply each phi func to each dataset point: n x dim x M
                phi = coefs * bag[..., np.newaxis]
                np.cos(phi, out=phi)
                phi[:, :, 1:] *= np.sqrt(2)

                # B is the evaluation of each tensor-prodded basis func
                # at each point: n x inds.shape[0]
                B = reduce(op.mul, (phi[:, i, inds[:, i]] for i in xrange(dim)))
                Xt[i, :] = np.mean(B, axis=0)
        else:
            raise ValueError("unknown basis '{}'".format(self.basis))

        return Xt

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/summaries/l2_density.py#L141-L191
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
partition: valid
func_name: VersiontoolsEnchancedDistributionMetadata.get_version
docstring:

    Get distribution version.

    This method is enhanced compared to original distutils implementation.
    If the version string is set to a special value then instead of using
    the actual value the real version is obtained by querying versiontools.

    If versiontools package is not installed then the version is obtained
    from the standard section of the ``PKG-INFO`` file. This file is
    automatically created by any source distribution. This method is less
    useful as it cannot take advantage of version control information that
    is automatically loaded by versiontools. It has the advantage of not
    requiring versiontools installation and that it does not depend on
    ``setup_requires`` feature of ``setuptools``.

path: versiontools_support.py
code:

    def get_version(self):
        """
        Get distribution version.

        This method is enhanced compared to original distutils implementation.
        If the version string is set to a special value then instead of using
        the actual value the real version is obtained by querying versiontools.

        If versiontools package is not installed then the version is obtained
        from the standard section of the ``PKG-INFO`` file. This file is
        automatically created by any source distribution. This method is less
        useful as it cannot take advantage of version control information that
        is automatically loaded by versiontools. It has the advantage of not
        requiring versiontools installation and that it does not depend on
        ``setup_requires`` feature of ``setuptools``.
        """
        if (self.name is not None and self.version is not None
                and self.version.startswith(":versiontools:")):
            return (self.__get_live_version() or self.__get_frozen_version()
                    or self.__fail_to_get_any_version())
        else:
            return self.__base.get_version(self)

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/versiontools_support.py#L78-L99
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
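The versiontools path is taken only when the declared version string starts with ":versiontools:"; the live lookup then queries versiontools with the package name. A hypothetical setup.py fragment that would trigger it (the exact expression syntax is versiontools' own, and this fragment is illustrative, not taken from the repo):

    from distutils.core import setup

    setup(
        name='mypackage',
        version=':versiontools:mypackage:',  # placeholder, resolved at build time
    )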
partition: valid
func_name: VersiontoolsEnchancedDistributionMetadata.__get_live_version
docstring: Get a live version string using versiontools
path: versiontools_support.py
code:

    def __get_live_version(self):
        """
        Get a live version string using versiontools
        """
        try:
            import versiontools
        except ImportError:
            return None
        else:
            return str(versiontools.Version.from_expression(self.name))

repo: dougalsutherland/skl-groups
language: python
url: https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/versiontools_support.py#L101-L110
sha: 2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
BagPreprocesser.fit
Fit the transformer on the stacked points. Parameters ---------- X : :class:`Features` or list of arrays of shape ``[n_samples[i], n_features]`` Training set. If a Features object, it will be stacked. any other keyword argument : Passed on as keyword arguments to the transformer's ``fit()``.
skl_groups/preprocessing.py
def fit(self, X, y=None, **params): ''' Fit the transformer on the stacked points. Parameters ---------- X : :class:`Features` or list of arrays of shape ``[n_samples[i], n_features]`` Training set. If a Features object, it will be stacked. any other keyword argument : Passed on as keyword arguments to the transformer's ``fit()``. ''' X = as_features(X, stack=True) self.transformer.fit(X.stacked_features, y, **params) return self
def fit(self, X, y=None, **params): ''' Fit the transformer on the stacked points. Parameters ---------- X : :class:`Features` or list of arrays of shape ``[n_samples[i], n_features]`` Training set. If a Features object, it will be stacked. any other keyword argument : Passed on as keyword arguments to the transformer's ``fit()``. ''' X = as_features(X, stack=True) self.transformer.fit(X.stacked_features, y, **params) return self
[ "Fit", "the", "transformer", "on", "the", "stacked", "points", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/preprocessing.py#L41-L55
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "params", ")", ":", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ")", "self", ".", "transformer", ".", "fit", "(", "X", ".", "stacked_features", ",", "y", ",", "*", "*", "params", ")", "return", "self" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
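The stack-then-fit pattern that BagPreprocesser.fit implements can be reproduced with plain numpy and any scikit-learn transformer; in this sketch np.vstack stands in for as_features(X, stack=True), and the bag sizes are invented for illustration.

import numpy as np
from sklearn.preprocessing import StandardScaler

# three bags with different numbers of points but a shared dimensionality
bags = [np.random.randn(n, 5) for n in (10, 20, 15)]

# stacking concatenates every bag row-wise, so the wrapped transformer
# is fit on all points at once rather than bag by bag
stacked = np.vstack(bags)
scaler = StandardScaler().fit(stacked)
print(scaler.mean_.shape)  # (5,): one statistic per feature, shared by all bags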
valid
BagPreprocesser.transform
Transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays New data to transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features.
skl_groups/preprocessing.py
def transform(self, X, **params): ''' Transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays New data to transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features. ''' X = as_features(X, stack=True) X_new = self.transformer.transform(X.stacked_features, **params) return self._gather_outputs(X, X_new)
def transform(self, X, **params): ''' Transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays New data to transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features. ''' X = as_features(X, stack=True) X_new = self.transformer.transform(X.stacked_features, **params) return self._gather_outputs(X, X_new)
[ "Transform", "the", "stacked", "points", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/preprocessing.py#L57-L76
[ "def", "transform", "(", "self", ",", "X", ",", "*", "*", "params", ")", ":", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ")", "X_new", "=", "self", ".", "transformer", ".", "transform", "(", "X", ".", "stacked_features", ",", "*", "*", "params", ")", "return", "self", ".", "_gather_outputs", "(", "X", ",", "X_new", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
BagPreprocesser.fit_transform
Fit and transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features.
skl_groups/preprocessing.py
def fit_transform(self, X, y=None, **params): ''' Fit and transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features. ''' X = as_features(X, stack=True) X_new = self.transformer.fit_transform(X.stacked_features, y, **params) return self._gather_outputs(X, X_new)
def fit_transform(self, X, y=None, **params): ''' Fit and transform the stacked points. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``transform()``. Returns ------- X_new : :class:`Features` Transformed features. ''' X = as_features(X, stack=True) X_new = self.transformer.fit_transform(X.stacked_features, y, **params) return self._gather_outputs(X, X_new)
[ "Fit", "and", "transform", "the", "stacked", "points", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/preprocessing.py#L78-L97
[ "def", "fit_transform", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "params", ")", ":", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ")", "X_new", "=", "self", ".", "transformer", ".", "fit_transform", "(", "X", ".", "stacked_features", ",", "y", ",", "*", "*", "params", ")", "return", "self", ".", "_gather_outputs", "(", "X", ",", "X_new", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
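transform and fit_transform hand their output back per bag; here is a sketch of that regrouping step using np.split on the cumulative bag sizes, assuming (as the code above implies) that the wrapped transformer preserves the number of rows.

import numpy as np
from sklearn.preprocessing import StandardScaler

bags = [np.random.randn(n, 5) for n in (10, 20, 15)]
X_new = StandardScaler().fit_transform(np.vstack(bags))

# split the stacked result back into bags at the cumulative size boundaries;
# this mirrors what _gather_outputs presumably does with X.n_pts
boundaries = np.cumsum([len(b) for b in bags])[:-1]
new_bags = np.split(X_new, boundaries)
assert [len(b) for b in new_bags] == [10, 20, 15]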
valid
BagPreprocesser.inverse_transform
Transform data back to its original space, i.e., return an input X_original whose transform would (maybe approximately) be X. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``inverse_transform()``. Returns ------- X_original : :class:`Features`
skl_groups/preprocessing.py
def inverse_transform(self, X, **params): ''' Transform data back to its original space, i.e., return an input X_original whose transform would (maybe approximately) be X. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``inverse_transform()``. Returns ------- X_original : :class:`Features` ''' X = as_features(X, stack=True) Xo = self.transformer.inverse_transform(X.stacked_features, **params) return self._gather_outputs(X, Xo)
def inverse_transform(self, X, **params): ''' Transform data back to its original space, i.e., return an input X_original whose transform would (maybe approximately) be X. Parameters ---------- X : :class:`Features` or list of bag feature arrays Data to train on and transform. any other keyword argument : Passed on as keyword arguments to the transformer's ``inverse_transform()``. Returns ------- X_original : :class:`Features` ''' X = as_features(X, stack=True) Xo = self.transformer.inverse_transform(X.stacked_features, **params) return self._gather_outputs(X, Xo)
[ "Transform", "data", "back", "to", "its", "original", "space", "i", ".", "e", ".", "return", "an", "input", "X_original", "whose", "transform", "would", "(", "maybe", "approximately", ")", "be", "X", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/preprocessing.py#L99-L119
[ "def", "inverse_transform", "(", "self", ",", "X", ",", "*", "*", "params", ")", ":", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ")", "Xo", "=", "self", ".", "transformer", ".", "inverse_transform", "(", "X", ".", "stacked_features", ",", "*", "*", "params", ")", "return", "self", ".", "_gather_outputs", "(", "X", ",", "Xo", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
MinMaxScaler.fit
Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis.
skl_groups/preprocessing.py
def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32, np.float16, np.float128]) feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) if self.fit_feature_range is not None: fit_feature_range = self.fit_feature_range if fit_feature_range[0] >= fit_feature_range[1]: raise ValueError("Minimum of desired (fit) feature range must " "be smaller than maximum. Got %s." % str(feature_range)) if (fit_feature_range[0] < feature_range[0] or fit_feature_range[1] > feature_range[1]): raise ValueError("fit_feature_range must be a subset of " "feature_range. Got %s, fit %s." % (str(feature_range), str(fit_feature_range))) feature_range = fit_feature_range data_min = np.min(X, axis=0) data_range = np.max(X, axis=0) - data_min # Do not scale constant features data_range[data_range == 0.0] = 1.0 self.scale_ = (feature_range[1] - feature_range[0]) / data_range self.min_ = feature_range[0] - data_min * self.scale_ self.data_range = data_range self.data_min = data_min return self
def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ X = check_array(X, copy=self.copy, dtype=[np.float64, np.float32, np.float16, np.float128]) feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) if self.fit_feature_range is not None: fit_feature_range = self.fit_feature_range if fit_feature_range[0] >= fit_feature_range[1]: raise ValueError("Minimum of desired (fit) feature range must " "be smaller than maximum. Got %s." % str(feature_range)) if (fit_feature_range[0] < feature_range[0] or fit_feature_range[1] > feature_range[1]): raise ValueError("fit_feature_range must be a subset of " "feature_range. Got %s, fit %s." % (str(feature_range), str(fit_feature_range))) feature_range = fit_feature_range data_min = np.min(X, axis=0) data_range = np.max(X, axis=0) - data_min # Do not scale constant features data_range[data_range == 0.0] = 1.0 self.scale_ = (feature_range[1] - feature_range[0]) / data_range self.min_ = feature_range[0] - data_min * self.scale_ self.data_range = data_range self.data_min = data_min return self
[ "Compute", "the", "minimum", "and", "maximum", "to", "be", "used", "for", "later", "scaling", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/preprocessing.py#L196-L234
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "X", "=", "check_array", "(", "X", ",", "copy", "=", "self", ".", "copy", ",", "dtype", "=", "[", "np", ".", "float64", ",", "np", ".", "float32", ",", "np", ".", "float16", ",", "np", ".", "float128", "]", ")", "feature_range", "=", "self", ".", "feature_range", "if", "feature_range", "[", "0", "]", ">=", "feature_range", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Minimum of desired feature range must be smaller\"", "\" than maximum. Got %s.\"", "%", "str", "(", "feature_range", ")", ")", "if", "self", ".", "fit_feature_range", "is", "not", "None", ":", "fit_feature_range", "=", "self", ".", "fit_feature_range", "if", "fit_feature_range", "[", "0", "]", ">=", "fit_feature_range", "[", "1", "]", ":", "raise", "ValueError", "(", "\"Minimum of desired (fit) feature range must \"", "\"be smaller than maximum. Got %s.\"", "%", "str", "(", "feature_range", ")", ")", "if", "(", "fit_feature_range", "[", "0", "]", "<", "feature_range", "[", "0", "]", "or", "fit_feature_range", "[", "1", "]", ">", "feature_range", "[", "1", "]", ")", ":", "raise", "ValueError", "(", "\"fit_feature_range must be a subset of \"", "\"feature_range. Got %s, fit %s.\"", "%", "(", "str", "(", "feature_range", ")", ",", "str", "(", "fit_feature_range", ")", ")", ")", "feature_range", "=", "fit_feature_range", "data_min", "=", "np", ".", "min", "(", "X", ",", "axis", "=", "0", ")", "data_range", "=", "np", ".", "max", "(", "X", ",", "axis", "=", "0", ")", "-", "data_min", "# Do not scale constant features", "data_range", "[", "data_range", "==", "0.0", "]", "=", "1.0", "self", ".", "scale_", "=", "(", "feature_range", "[", "1", "]", "-", "feature_range", "[", "0", "]", ")", "/", "data_range", "self", ".", "min_", "=", "feature_range", "[", "0", "]", "-", "data_min", "*", "self", ".", "scale_", "self", ".", "data_range", "=", "data_range", "self", ".", "data_min", "=", "data_min", "return", "self" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
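The fitted state of this MinMaxScaler reduces to one affine map per feature; a worked numpy check of the scale_ and min_ formulas from fit, on a toy two-feature array (values are hypothetical).

import numpy as np

X = np.array([[1.0, 10.0],
              [3.0, 10.0]])             # second feature is constant
lo, hi = 0.0, 1.0                       # feature_range

data_min = X.min(axis=0)                # [ 1. 10.]
data_range = X.max(axis=0) - data_min   # [ 2.  0.]
data_range[data_range == 0.0] = 1.0     # constant features are left unscaled

scale = (hi - lo) / data_range          # [ 0.5  1. ]
min_ = lo - data_min * scale            # [ -0.5 -10. ]
print(X * scale + min_)                 # [[0. 0.]  [1. 0.]]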
valid
MinMaxScaler.transform
Scaling features of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed.
skl_groups/preprocessing.py
def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ X = check_array(X, copy=self.copy) X *= self.scale_ X += self.min_ if self.truncate: np.maximum(self.feature_range[0], X, out=X) np.minimum(self.feature_range[1], X, out=X) return X
def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ X = check_array(X, copy=self.copy) X *= self.scale_ X += self.min_ if self.truncate: np.maximum(self.feature_range[0], X, out=X) np.minimum(self.feature_range[1], X, out=X) return X
[ "Scaling", "features", "of", "X", "according", "to", "feature_range", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/preprocessing.py#L236-L250
[ "def", "transform", "(", "self", ",", "X", ")", ":", "X", "=", "check_array", "(", "X", ",", "copy", "=", "self", ".", "copy", ")", "X", "*=", "self", ".", "scale_", "X", "+=", "self", ".", "min_", "if", "self", ".", "truncate", ":", "np", ".", "maximum", "(", "self", ".", "feature_range", "[", "0", "]", ",", "X", ",", "out", "=", "X", ")", "np", ".", "minimum", "(", "self", ".", "feature_range", "[", "1", "]", ",", "X", ",", "out", "=", "X", ")", "return", "X" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
MinMaxScaler.inverse_transform
Undo the scaling of X according to feature_range. Note that if truncate is true, any truncated points will not be restored exactly. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed.
skl_groups/preprocessing.py
def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Note that if truncate is true, any truncated points will not be restored exactly. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ X = check_array(X, copy=self.copy) X -= self.min_ X /= self.scale_ return X
def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Note that if truncate is true, any truncated points will not be restored exactly. Parameters ---------- X : array-like with shape [n_samples, n_features] Input data that will be transformed. """ X = check_array(X, copy=self.copy) X -= self.min_ X /= self.scale_ return X
[ "Undo", "the", "scaling", "of", "X", "according", "to", "feature_range", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/preprocessing.py#L252-L266
[ "def", "inverse_transform", "(", "self", ",", "X", ")", ":", "X", "=", "check_array", "(", "X", ",", "copy", "=", "self", ".", "copy", ")", "X", "-=", "self", ".", "min_", "X", "/=", "self", ".", "scale_", "return", "X" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
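A round trip showing why inverse_transform cannot restore truncated points exactly, as the docstring above warns; pure numpy with hypothetical fitted parameters.

import numpy as np

scale = np.array([0.5])                 # as if fit on data spanning [1, 3]
min_ = np.array([-0.5])
lo, hi = 0.0, 1.0

x = np.array([[5.0]])                   # point outside the fit range
y = np.clip(x * scale + min_, lo, hi)   # transform with truncate=True -> [[1.]]
x_back = (y - min_) / scale             # inverse_transform -> [[3.]], not [[5.]]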
valid
BagOfWords.fit
Choose the codewords based on a training set. Parameters ---------- X : :class:`skl_groups.features.Features` or list of arrays of shape ``[n_samples[i], n_features]`` Training set. If a Features object, it will be stacked.
skl_groups/summaries/bag_of_words.py
def fit(self, X, y=None): ''' Choose the codewords based on a training set. Parameters ---------- X : :class:`skl_groups.features.Features` or list of arrays of shape ``[n_samples[i], n_features]`` Training set. If a Features object, it will be stacked. ''' self.kmeans_fit_ = copy(self.kmeans) X = as_features(X, stack=True) self.kmeans_fit_.fit(X.stacked_features) return self
def fit(self, X, y=None): ''' Choose the codewords based on a training set. Parameters ---------- X : :class:`skl_groups.features.Features` or list of arrays of shape ``[n_samples[i], n_features]`` Training set. If a Features object, it will be stacked. ''' self.kmeans_fit_ = copy(self.kmeans) X = as_features(X, stack=True) self.kmeans_fit_.fit(X.stacked_features) return self
[ "Choose", "the", "codewords", "based", "on", "a", "training", "set", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/summaries/bag_of_words.py#L82-L94
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "self", ".", "kmeans_fit_", "=", "copy", "(", "self", ".", "kmeans", ")", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ")", "self", ".", "kmeans_fit_", ".", "fit", "(", "X", ".", "stacked_features", ")", "return", "self" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
BagOfWords.transform
Transform a list of bag features into its bag-of-words representation. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. Returns ------- X_new : integer array, shape [len(X), kmeans.n_clusters] X transformed into the new space.
skl_groups/summaries/bag_of_words.py
def transform(self, X): ''' Transform a list of bag features into its bag-of-words representation. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. Returns ------- X_new : integer array, shape [len(X), kmeans.n_clusters] X transformed into the new space. ''' self._check_fitted() X = as_features(X, stack=True) assignments = self.kmeans_fit_.predict(X.stacked_features) return self._group_assignments(X, assignments)
def transform(self, X): ''' Transform a list of bag features into its bag-of-words representation. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. Returns ------- X_new : integer array, shape [len(X), kmeans.n_clusters] X transformed into the new space. ''' self._check_fitted() X = as_features(X, stack=True) assignments = self.kmeans_fit_.predict(X.stacked_features) return self._group_assignments(X, assignments)
[ "Transform", "a", "list", "of", "bag", "features", "into", "its", "bag", "-", "of", "-", "words", "representation", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/summaries/bag_of_words.py#L96-L113
[ "def", "transform", "(", "self", ",", "X", ")", ":", "self", ".", "_check_fitted", "(", ")", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ")", "assignments", "=", "self", ".", "kmeans_fit_", ".", "predict", "(", "X", ".", "stacked_features", ")", "return", "self", ".", "_group_assignments", "(", "X", ",", "assignments", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
BagOfWords.fit_transform
Compute clustering and transform a list of bag features into its bag-of-words representation. Like calling fit(X) and then transform(X), but more efficient. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. Returns ------- X_new : integer array, shape [len(X), kmeans.n_clusters] X transformed into the new space.
skl_groups/summaries/bag_of_words.py
def fit_transform(self, X): ''' Compute clustering and transform a list of bag features into its bag-of-words representation. Like calling fit(X) and then transform(X), but more efficient. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. Returns ------- X_new : integer array, shape [len(X), kmeans.n_clusters] X transformed into the new space. ''' X = as_features(X, stack=True) self.kmeans_fit_ = copy(self.kmeans) assignments = self.kmeans_fit_.fit_predict(X.stacked_features) return self._group_assignments(X, assignments)
def fit_transform(self, X): ''' Compute clustering and transform a list of bag features into its bag-of-words representation. Like calling fit(X) and then transform(X), but more efficient. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. Returns ------- X_new : integer array, shape [len(X), kmeans.n_clusters] X transformed into the new space. ''' X = as_features(X, stack=True) self.kmeans_fit_ = copy(self.kmeans) assignments = self.kmeans_fit_.fit_predict(X.stacked_features) return self._group_assignments(X, assignments)
[ "Compute", "clustering", "and", "transform", "a", "list", "of", "bag", "features", "into", "its", "bag", "-", "of", "-", "words", "representation", ".", "Like", "calling", "fit", "(", "X", ")", "and", "then", "transform", "(", "X", ")", "but", "more", "efficient", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/summaries/bag_of_words.py#L115-L134
[ "def", "fit_transform", "(", "self", ",", "X", ")", ":", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ")", "self", ".", "kmeans_fit_", "=", "copy", "(", "self", ".", "kmeans", ")", "assignments", "=", "self", ".", "kmeans_fit_", ".", "fit_predict", "(", "X", ".", "stacked_features", ")", "return", "self", ".", "_group_assignments", "(", "X", ",", "assignments", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
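The whole BagOfWords pipeline is: cluster the stacked points, predict a codeword per point, then histogram the assignments bag by bag. A sketch with scikit-learn's KMeans standing in for the kmeans attribute and np.bincount standing in for _group_assignments.

import numpy as np
from sklearn.cluster import KMeans

bags = [np.random.randn(n, 2) for n in (30, 50)]
stacked = np.vstack(bags)

km = KMeans(n_clusters=8, n_init=10).fit(stacked)
assignments = km.predict(stacked)

# histogram the codeword assignments bag by bag
out = np.empty((len(bags), 8), dtype=int)
start = 0
for i, bag in enumerate(bags):
    out[i] = np.bincount(assignments[start:start + len(bag)], minlength=8)
    start += len(bag)
assert out.sum() == 30 + 50  # every point lands in exactly one codeword bin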
valid
is_categorical_type
Checks whether the array is either integral or boolean.
skl_groups/utils.py
def is_categorical_type(ary): "Checks whether the array is either integral or boolean." ary = np.asanyarray(ary) return is_integer_type(ary) or ary.dtype.kind == 'b'
def is_categorical_type(ary): "Checks whether the array is either integral or boolean." ary = np.asanyarray(ary) return is_integer_type(ary) or ary.dtype.kind == 'b'
[ "Checks", "whether", "the", "array", "is", "either", "integral", "or", "boolean", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/utils.py#L23-L26
[ "def", "is_categorical_type", "(", "ary", ")", ":", "ary", "=", "np", ".", "asanyarray", "(", "ary", ")", "return", "is_integer_type", "(", "ary", ")", "or", "ary", ".", "dtype", ".", "kind", "==", "'b'" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
as_integer_type
Returns argument as an integer array, converting floats if convertible. Raises ValueError if it's a float array with nonintegral values.
skl_groups/utils.py
def as_integer_type(ary): ''' Returns argument as an integer array, converting floats if convertible. Raises ValueError if it's a float array with nonintegral values. ''' ary = np.asanyarray(ary) if is_integer_type(ary): return ary rounded = np.rint(ary) if np.any(rounded != ary): raise ValueError("argument array must contain only integers") return rounded.astype(int)
def as_integer_type(ary): ''' Returns argument as an integer array, converting floats if convertible. Raises ValueError if it's a float array with nonintegral values. ''' ary = np.asanyarray(ary) if is_integer_type(ary): return ary rounded = np.rint(ary) if np.any(rounded != ary): raise ValueError("argument array must contain only integers") return rounded.astype(int)
[ "Returns", "argument", "as", "an", "integer", "array", "converting", "floats", "if", "convertible", ".", "Raises", "ValueError", "if", "it", "s", "a", "float", "array", "with", "nonintegral", "values", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/utils.py#L39-L50
[ "def", "as_integer_type", "(", "ary", ")", ":", "ary", "=", "np", ".", "asanyarray", "(", "ary", ")", "if", "is_integer_type", "(", "ary", ")", ":", "return", "ary", "rounded", "=", "np", ".", "rint", "(", "ary", ")", "if", "np", ".", "any", "(", "rounded", "!=", "ary", ")", ":", "raise", "ValueError", "(", "\"argument array must contain only integers\"", ")", "return", "rounded", ".", "astype", "(", "int", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
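A quick check of the contract: floats that round cleanly are converted, anything else raises. The function is condensed inline here so the snippet runs on its own.

import numpy as np

def as_integer_type(ary):
    # condensed copy of the function above; 'iu' covers signed/unsigned ints
    ary = np.asanyarray(ary)
    if ary.dtype.kind in 'iu':
        return ary
    rounded = np.rint(ary)
    if np.any(rounded != ary):
        raise ValueError("argument array must contain only integers")
    return rounded.astype(int)

print(as_integer_type([1.0, 2.0]))      # [1 2]
try:
    as_integer_type([1.5])
except ValueError as e:
    print(e)                            # argument array must contain only integers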
valid
show_progress
Sets up a :class:`ProgressBarHandler` to handle progress logs for a given module. Parameters ---------- name : string The module name of the progress logger to use. For example, :class:`skl_groups.divergences.KNNDivergenceEstimator` uses ``'skl_groups.divergences.knn.progress'``. * : anything Other keyword arguments are passed to the :class:`ProgressBarHandler`.
skl_groups/utils.py
def show_progress(name, **kwargs): ''' Sets up a :class:`ProgressBarHandler` to handle progress logs for a given module. Parameters ---------- name : string The module name of the progress logger to use. For example, :class:`skl_groups.divergences.KNNDivergenceEstimator` uses ``'skl_groups.divergences.knn.progress'``. * : anything Other keyword arguments are passed to the :class:`ProgressBarHandler`. ''' logger = logging.getLogger(name) logger.setLevel(logging.INFO) logger.addHandler(ProgressBarHandler(**kwargs))
def show_progress(name, **kwargs): ''' Sets up a :class:`ProgressBarHandler` to handle progress logs for a given module. Parameters ---------- name : string The module name of the progress logger to use. For example, :class:`skl_groups.divergences.KNNDivergenceEstimator` uses ``'skl_groups.divergences.knn.progress'``. * : anything Other keyword arguments are passed to the :class:`ProgressBarHandler`. ''' logger = logging.getLogger(name) logger.setLevel(logging.INFO) logger.addHandler(ProgressBarHandler(**kwargs))
[ "Sets", "up", "a", ":", "class", ":", "ProgressBarHandler", "to", "handle", "progress", "logs", "for", "a", "given", "module", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/utils.py#L199-L216
[ "def", "show_progress", "(", "name", ",", "*", "*", "kwargs", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "logger", ".", "addHandler", "(", "ProgressBarHandler", "(", "*", "*", "kwargs", ")", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
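Typical usage of the function above, assuming skl_groups is importable; the progress-logger module name is taken verbatim from the docstring.

from skl_groups.utils import show_progress

# route progress records from the KNN divergence estimator to a progress bar
show_progress('skl_groups.divergences.knn.progress')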
valid
ProgressLogger.start
Signal the start of the process. Parameters ---------- total : int The total number of steps in the process, or None if unknown.
skl_groups/utils.py
def start(self, total): ''' Signal the start of the process. Parameters ---------- total : int The total number of steps in the process, or None if unknown. ''' self.logger.info(json.dumps(['START', self.name, total]))
def start(self, total): ''' Signal the start of the process. Parameters ---------- total : int The total number of steps in the process, or None if unknown. ''' self.logger.info(json.dumps(['START', self.name, total]))
[ "Signal", "the", "start", "of", "the", "process", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/utils.py#L114-L123
[ "def", "start", "(", "self", ",", "total", ")", ":", "self", ".", "logger", ".", "info", "(", "json", ".", "dumps", "(", "[", "'START'", ",", "self", ".", "name", ",", "total", "]", ")", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
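What start(total) actually emits is a JSON-encoded log record; a self-contained imitation with the stdlib logging module (logger name and step count are illustrative).

import json
import logging

logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger('demo.progress')

# the same payload ProgressLogger.start would log for a 120-step process
logger.info(json.dumps(['START', 'index building', 120]))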
valid
_build_indices
Builds FLANN indices for each bag.
skl_groups/divergences/knn.py
def _build_indices(X, flann_args): "Builds FLANN indices for each bag." # TODO: should probably multithread this logger.info("Building indices...") indices = [None] * len(X) for i, bag in enumerate(plog(X, name="index building")): indices[i] = idx = FLANNIndex(**flann_args) idx.build_index(bag) return indices
def _build_indices(X, flann_args): "Builds FLANN indices for each bag." # TODO: should probably multithread this logger.info("Building indices...") indices = [None] * len(X) for i, bag in enumerate(plog(X, name="index building")): indices[i] = idx = FLANNIndex(**flann_args) idx.build_index(bag) return indices
[ "Builds", "FLANN", "indices", "for", "each", "bag", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L403-L411
[ "def", "_build_indices", "(", "X", ",", "flann_args", ")", ":", "# TODO: should probably multithread this", "logger", ".", "info", "(", "\"Building indices...\"", ")", "indices", "=", "[", "None", "]", "*", "len", "(", "X", ")", "for", "i", ",", "bag", "in", "enumerate", "(", "plog", "(", "X", ",", "name", "=", "\"index building\"", ")", ")", ":", "indices", "[", "i", "]", "=", "idx", "=", "FLANNIndex", "(", "*", "*", "flann_args", ")", "idx", ".", "build_index", "(", "bag", ")", "return", "indices" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
_get_rhos
Gets within-bag distances for each bag.
skl_groups/divergences/knn.py
def _get_rhos(X, indices, Ks, max_K, save_all_Ks, min_dist): "Gets within-bag distances for each bag." logger.info("Getting within-bag distances...") if max_K >= X.n_pts.min(): msg = "asked for K = {}, but there's a bag with only {} points" raise ValueError(msg.format(max_K, X.n_pts.min())) # need to throw away the closest neighbor, which will always be self # thus K=1 corresponds to column 1 in the result array which_Ks = slice(1, None) if save_all_Ks else Ks indices = plog(indices, name="within-bag distances") rhos = [None] * len(X) for i, (idx, bag) in enumerate(zip(indices, X)): r = np.sqrt(idx.nn_index(bag, max_K + 1)[1][:, which_Ks]) np.maximum(min_dist, r, out=r) rhos[i] = r return rhos
def _get_rhos(X, indices, Ks, max_K, save_all_Ks, min_dist): "Gets within-bag distances for each bag." logger.info("Getting within-bag distances...") if max_K >= X.n_pts.min(): msg = "asked for K = {}, but there's a bag with only {} points" raise ValueError(msg.format(max_K, X.n_pts.min())) # need to throw away the closest neighbor, which will always be self # thus K=1 corresponds to column 1 in the result array which_Ks = slice(1, None) if save_all_Ks else Ks indices = plog(indices, name="within-bag distances") rhos = [None] * len(X) for i, (idx, bag) in enumerate(zip(indices, X)): r = np.sqrt(idx.nn_index(bag, max_K + 1)[1][:, which_Ks]) np.maximum(min_dist, r, out=r) rhos[i] = r return rhos
[ "Gets", "within", "-", "bag", "distances", "for", "each", "bag", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L414-L432
[ "def", "_get_rhos", "(", "X", ",", "indices", ",", "Ks", ",", "max_K", ",", "save_all_Ks", ",", "min_dist", ")", ":", "logger", ".", "info", "(", "\"Getting within-bag distances...\"", ")", "if", "max_K", ">=", "X", ".", "n_pts", ".", "min", "(", ")", ":", "msg", "=", "\"asked for K = {}, but there's a bag with only {} points\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "max_K", ",", "X", ".", "n_pts", ".", "min", "(", ")", ")", ")", "# need to throw away the closest neighbor, which will always be self", "# thus K=1 corresponds to column 1 in the result array", "which_Ks", "=", "slice", "(", "1", ",", "None", ")", "if", "save_all_Ks", "else", "Ks", "indices", "=", "plog", "(", "indices", ",", "name", "=", "\"within-bag distances\"", ")", "rhos", "=", "[", "None", "]", "*", "len", "(", "X", ")", "for", "i", ",", "(", "idx", ",", "bag", ")", "in", "enumerate", "(", "zip", "(", "indices", ",", "X", ")", ")", ":", "r", "=", "np", ".", "sqrt", "(", "idx", ".", "nn_index", "(", "bag", ",", "max_K", "+", "1", ")", "[", "1", "]", "[", ":", ",", "which_Ks", "]", ")", "np", ".", "maximum", "(", "min_dist", ",", "r", ",", "out", "=", "r", ")", "rhos", "[", "i", "]", "=", "r", "return", "rhos" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
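The same within-bag distances can be sketched with scikit-learn's NearestNeighbors in place of FLANN. Column 0 of the result is each point's zero distance to itself, which is why the code above starts at column 1; note that sklearn returns Euclidean distances directly, whereas the np.sqrt above suggests FLANN hands back squared ones.

import numpy as np
from sklearn.neighbors import NearestNeighbors

bag = np.random.randn(100, 3)
K, min_dist = 3, 1e-3

nn = NearestNeighbors(n_neighbors=K + 1).fit(bag)
dists, _ = nn.kneighbors(bag)            # column 0: each point's self-distance, 0
rho = np.maximum(min_dist, dists[:, K])  # distance to the K-th real neighbor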
valid
linear
r''' Estimates the linear inner product \int p q between two distributions, based on kNN distances.
skl_groups/divergences/knn.py
def linear(Ks, dim, num_q, rhos, nus): r''' Estimates the linear inner product \int p q between two distributions, based on kNN distances. ''' return _get_linear(Ks, dim)(num_q, rhos, nus)
def linear(Ks, dim, num_q, rhos, nus): r''' Estimates the linear inner product \int p q between two distributions, based on kNN distances. ''' return _get_linear(Ks, dim)(num_q, rhos, nus)
[ "r", "Estimates", "the", "linear", "inner", "product", "\\", "int", "p", "q", "between", "two", "distributions", "based", "on", "kNN", "distances", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L560-L565
[ "def", "linear", "(", "Ks", ",", "dim", ",", "num_q", ",", "rhos", ",", "nus", ")", ":", "return", "_get_linear", "(", "Ks", ",", "dim", ")", "(", "num_q", ",", "rhos", ",", "nus", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
alpha_div
r''' Estimate the alpha divergence between distributions: \int p^\alpha q^(1-\alpha) based on kNN distances. Used in Renyi, Hellinger, Bhattacharyya, Tsallis divergences. Enforces that estimates are >= 0. Returns divergence estimates with shape (num_alphas, num_Ks).
skl_groups/divergences/knn.py
def alpha_div(alphas, Ks, dim, num_q, rhos, nus): r''' Estimate the alpha divergence between distributions: \int p^\alpha q^(1-\alpha) based on kNN distances. Used in Renyi, Hellinger, Bhattacharyya, Tsallis divergences. Enforces that estimates are >= 0. Returns divergence estimates with shape (num_alphas, num_Ks). ''' return _get_alpha_div(alphas, Ks, dim)(num_q, rhos, nus)
def alpha_div(alphas, Ks, dim, num_q, rhos, nus): r''' Estimate the alpha divergence between distributions: \int p^\alpha q^(1-\alpha) based on kNN distances. Used in Renyi, Hellinger, Bhattacharyya, Tsallis divergences. Enforces that estimates are >= 0. Returns divergence estimates with shape (num_alphas, num_Ks). ''' return _get_alpha_div(alphas, Ks, dim)(num_q, rhos, nus)
[ "r", "Estimate", "the", "alpha", "divergence", "between", "distributions", ":", "\\", "int", "p^", "\\", "alpha", "q^", "(", "1", "-", "\\", "alpha", ")", "based", "on", "kNN", "distances", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L580-L592
[ "def", "alpha_div", "(", "alphas", ",", "Ks", ",", "dim", ",", "num_q", ",", "rhos", ",", "nus", ")", ":", "return", "_get_alpha_div", "(", "alphas", ",", "Ks", ",", "dim", ")", "(", "num_q", ",", "rhos", ",", "nus", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
jensen_shannon_core
r''' Estimates 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball)) This is the core pairwise component of the estimator of Jensen-Shannon divergence based on the Hino-Murata weighted information estimator. See the docstring for jensen_shannon for an explanation.
skl_groups/divergences/knn.py
def jensen_shannon_core(Ks, dim, num_q, rhos, nus): r''' Estimates 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball)) This is the core pairwise component of the estimator of Jensen-Shannon divergence based on the Hino-Murata weighted information estimator. See the docstring for jensen_shannon for an explanation. ''' ns = np.array([rhos.shape[0], num_q]) return _get_jensen_shannon_core(Ks, dim, ns)[0](num_q, rhos, nus)
def jensen_shannon_core(Ks, dim, num_q, rhos, nus): r''' Estimates 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball)) This is the core pairwise component of the estimator of Jensen-Shannon divergence based on the Hino-Murata weighted information estimator. See the docstring for jensen_shannon for an explanation. ''' ns = np.array([rhos.shape[0], num_q]) return _get_jensen_shannon_core(Ks, dim, ns)[0](num_q, rhos, nus)
[ "r", "Estimates", "1", "/", "2", "mean_X", "(", "d", "*", "log", "radius", "of", "largest", "ball", "in", "X", "+", "Y", "around", "X_i", "with", "no", "more", "than", "M", "/", "(", "n", "+", "m", "-", "1", ")", "weight", "where", "X", "points", "have", "weight", "1", "/", "(", "2", "n", "-", "1", ")", "and", "Y", "points", "have", "weight", "n", "/", "(", "m", "(", "2", "n", "-", "1", "))", "-", "digamma", "(", "#", "of", "neighbors", "in", "that", "ball", "))" ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L613-L627
[ "def", "jensen_shannon_core", "(", "Ks", ",", "dim", ",", "num_q", ",", "rhos", ",", "nus", ")", ":", "ns", "=", "np", ".", "array", "(", "[", "rhos", ".", "shape", "[", "0", "]", ",", "num_q", "]", ")", "return", "_get_jensen_shannon_core", "(", "Ks", ",", "dim", ",", "ns", ")", "[", "0", "]", "(", "num_q", ",", "rhos", ",", "nus", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
bhattacharyya
r''' Estimate the Bhattacharyya coefficient between distributions, based on kNN distances: \int \sqrt{p q} If clamp (the default), enforces 0 <= BC <= 1. Returns an array of shape (num_Ks,).
skl_groups/divergences/knn.py
def bhattacharyya(Ks, dim, required, clamp=True, to_self=False): r''' Estimate the Bhattacharyya coefficient between distributions, based on kNN distances: \int \sqrt{p q} If clamp (the default), enforces 0 <= BC <= 1. Returns an array of shape (num_Ks,). ''' est = required if clamp: est = np.minimum(est, 1) # BC <= 1 return est
def bhattacharyya(Ks, dim, required, clamp=True, to_self=False): r''' Estimate the Bhattacharyya coefficient between distributions, based on kNN distances: \int \sqrt{p q} If clamp (the default), enforces 0 <= BC <= 1. Returns an array of shape (num_Ks,). ''' est = required if clamp: est = np.minimum(est, 1) # BC <= 1 return est
[ "r", "Estimate", "the", "Bhattacharyya", "coefficient", "between", "distributions", "based", "on", "kNN", "distances", ":", "\\", "int", "\\", "sqrt", "{", "p", "q", "}" ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L734-L746
[ "def", "bhattacharyya", "(", "Ks", ",", "dim", ",", "required", ",", "clamp", "=", "True", ",", "to_self", "=", "False", ")", ":", "est", "=", "required", "if", "clamp", ":", "est", "=", "np", ".", "minimum", "(", "est", ",", "1", ")", "# BC <= 1", "return", "est" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
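One thing worth noticing in the record above: the docstring promises 0 <= BC <= 1, but the code only applies the upper cap. A sketch of the post-processing with both bounds, matching the docstring's stated contract; whether the lower clamp was omitted intentionally is not clear from the source.

import numpy as np

bc = np.array([1.2, 0.4, -0.1])   # raw estimates, possibly out of range
est = np.minimum(bc, 1)           # the cap actually applied above: BC <= 1
est = np.maximum(est, 0)          # the lower bound the docstring also claims
print(est)                        # [1.  0.4 0. ]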
valid
hellinger
r''' Estimate the Hellinger distance between distributions, based on kNN distances: \sqrt{1 - \int \sqrt{p q}} Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces H <= 1. Returns a vector: one element for each K.
skl_groups/divergences/knn.py
def hellinger(Ks, dim, required, clamp=True, to_self=False): r''' Estimate the Hellinger distance between distributions, based on kNN distances: \sqrt{1 - \int \sqrt{p q}} Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces H <= 1. Returns a vector: one element for each K. ''' bc = required est = 1 - bc np.maximum(est, 0, out=est) if clamp: np.minimum(est, 1, out=est) np.sqrt(est, out=est) return est
def hellinger(Ks, dim, required, clamp=True, to_self=False): r''' Estimate the Hellinger distance between distributions, based on kNN distances: \sqrt{1 - \int \sqrt{p q}} Always enforces 0 <= H, to be able to sqrt; if clamp, also enforces H <= 1. Returns a vector: one element for each K. ''' bc = required est = 1 - bc np.maximum(est, 0, out=est) if clamp: np.minimum(est, 1, out=est) np.sqrt(est, out=est) return est
[ "r", "Estimate", "the", "Hellinger", "distance", "between", "distributions", "based", "on", "kNN", "distances", ":", "\\", "sqrt", "{", "1", "-", "\\", "int", "\\", "sqrt", "{", "p", "q", "}}" ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L752-L768
[ "def", "hellinger", "(", "Ks", ",", "dim", ",", "required", ",", "clamp", "=", "True", ",", "to_self", "=", "False", ")", ":", "bc", "=", "required", "est", "=", "1", "-", "bc", "np", ".", "maximum", "(", "est", ",", "0", ",", "out", "=", "est", ")", "if", "clamp", ":", "np", ".", "minimum", "(", "est", ",", "1", ",", "out", "=", "est", ")", "np", ".", "sqrt", "(", "est", ",", "out", "=", "est", ")", "return", "est" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
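The hellinger post-processing is a few array operations on a precomputed Bhattacharyya estimate; a toy check that the clamping keeps the result in [0, 1] even for out-of-range inputs (values invented).

import numpy as np

bc = np.array([0.9, 1.3, -0.2])   # raw BC estimates, possibly out of range
est = 1 - bc
np.maximum(est, 0, out=est)       # ensures the sqrt below is defined
np.minimum(est, 1, out=est)       # clamp: H <= 1
print(np.sqrt(est))               # [0.31622777 0.         1.        ]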
valid
renyi
r''' Estimate the Renyi-alpha divergence between distributions, based on kNN distances: 1/(\alpha-1) \log \int p^alpha q^(1-\alpha) If the inner integral is less than min_val (default ``np.spacing(1)``), uses the log of min_val instead. If clamp (the default), enforces that the estimates are nonnegative by replacing any negative estimates with 0. Returns an array of shape (num_alphas, num_Ks).
skl_groups/divergences/knn.py
def renyi(alphas, Ks, dim, required, min_val=np.spacing(1), clamp=True, to_self=False): r''' Estimate the Renyi-alpha divergence between distributions, based on kNN distances: 1/(\alpha-1) \log \int p^alpha q^(1-\alpha) If the inner integral is less than min_val (default ``np.spacing(1)``), uses the log of min_val instead. If clamp (the default), enforces that the estimates are nonnegative by replacing any negative estimates with 0. Returns an array of shape (num_alphas, num_Ks). ''' alphas = np.reshape(alphas, (-1, 1)) est = required est = np.maximum(est, min_val) # TODO: can we modify in-place? np.log(est, out=est) est /= alphas - 1 if clamp: np.maximum(est, 0, out=est) return est
def renyi(alphas, Ks, dim, required, min_val=np.spacing(1), clamp=True, to_self=False): r''' Estimate the Renyi-alpha divergence between distributions, based on kNN distances: 1/(\alpha-1) \log \int p^alpha q^(1-\alpha) If the inner integral is less than min_val (default ``np.spacing(1)``), uses the log of min_val instead. If clamp (the default), enforces that the estimates are nonnegative by replacing any negative estimates with 0. Returns an array of shape (num_alphas, num_Ks). ''' alphas = np.reshape(alphas, (-1, 1)) est = required est = np.maximum(est, min_val) # TODO: can we modify in-place? np.log(est, out=est) est /= alphas - 1 if clamp: np.maximum(est, 0, out=est) return est
[ "r", "Estimate", "the", "Renyi", "-", "alpha", "divergence", "between", "distributions", "based", "on", "kNN", "distances", ":", "1", "/", "(", "\\", "alpha", "-", "1", ")", "\\", "log", "\\", "int", "p^alpha", "q^", "(", "1", "-", "\\", "alpha", ")" ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L774-L796
[ "def", "renyi", "(", "alphas", ",", "Ks", ",", "dim", ",", "required", ",", "min_val", "=", "np", ".", "spacing", "(", "1", ")", ",", "clamp", "=", "True", ",", "to_self", "=", "False", ")", ":", "alphas", "=", "np", ".", "reshape", "(", "alphas", ",", "(", "-", "1", ",", "1", ")", ")", "est", "=", "required", "est", "=", "np", ".", "maximum", "(", "est", ",", "min_val", ")", "# TODO: can we modify in-place?", "np", ".", "log", "(", "est", ",", "out", "=", "est", ")", "est", "/=", "alphas", "-", "1", "if", "clamp", ":", "np", ".", "maximum", "(", "est", ",", "0", ",", "out", "=", "est", ")", "return", "est" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
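Written out, the quantity renyi estimates is

\[
D_\alpha(p \,\|\, q) \;=\; \frac{1}{\alpha - 1}
  \log \int p(x)^{\alpha}\, q(x)^{1-\alpha}\, dx ,
\]

where the inner integral is the alpha_div estimate passed in as `required`; flooring it at min_val keeps the logarithm finite.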
valid
tsallis
r''' Estimate the Tsallis-alpha divergence between distributions, based on kNN distances: (\int p^alpha q^(1-\alpha) - 1) / (\alpha - 1) If clamp (the default), enforces the estimate is nonnegative. Returns an array of shape (num_alphas, num_Ks).
skl_groups/divergences/knn.py
def tsallis(alphas, Ks, dim, required, clamp=True, to_self=False): r''' Estimate the Tsallis-alpha divergence between distributions, based on kNN distances: (\int p^alpha q^(1-\alpha) - 1) / (\alpha - 1) If clamp (the default), enforces the estimate is nonnegative. Returns an array of shape (num_alphas, num_Ks). ''' alphas = np.reshape(alphas, (-1, 1)) alpha_est = required est = alpha_est - 1 est /= alphas - 1 if clamp: np.maximum(est, 0, out=est) return est
def tsallis(alphas, Ks, dim, required, clamp=True, to_self=False): r''' Estimate the Tsallis-alpha divergence between distributions, based on kNN distances: (\int p^alpha q^(1-\alpha) - 1) / (\alpha - 1) If clamp (the default), enforces the estimate is nonnegative. Returns an array of shape (num_alphas, num_Ks). ''' alphas = np.reshape(alphas, (-1, 1)) alpha_est = required est = alpha_est - 1 est /= alphas - 1 if clamp: np.maximum(est, 0, out=est) return est
[ "r", "Estimate", "the", "Tsallis", "-", "alpha", "divergence", "between", "distributions", "based", "on", "kNN", "distances", ":", "(", "\\", "int", "p^alpha", "q^", "(", "1", "-", "\\", "alpha", ")", "-", "1", ")", "/", "(", "\\", "alpha", "-", "1", ")" ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L802-L818
[ "def", "tsallis", "(", "alphas", ",", "Ks", ",", "dim", ",", "required", ",", "clamp", "=", "True", ",", "to_self", "=", "False", ")", ":", "alphas", "=", "np", ".", "reshape", "(", "alphas", ",", "(", "-", "1", ",", "1", ")", ")", "alpha_est", "=", "required", "est", "=", "alpha_est", "-", "1", "est", "/=", "alphas", "-", "1", "if", "clamp", ":", "np", ".", "maximum", "(", "est", ",", "0", ",", "out", "=", "est", ")", "return", "est" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
l2
r''' Estimates the L2 distance between distributions, via \int (p - q)^2 = \int p^2 - \int p q - \int q p + \int q^2. \int pq and \int qp are estimated with the linear function (in both directions), while \int p^2 and \int q^2 are estimated via the quadratic function below. Always clamps negative estimates of l2^2 to 0, because otherwise the sqrt would break.
skl_groups/divergences/knn.py
def l2(Ks, dim, X_rhos, Y_rhos, required, clamp=True, to_self=False): r''' Estimates the L2 distance between distributions, via \int (p - q)^2 = \int p^2 - \int p q - \int q p + \int q^2. \int pq and \int qp are estimated with the linear function (in both directions), while \int p^2 and \int q^2 are estimated via the quadratic function below. Always clamps negative estimates of l2^2 to 0, because otherwise the sqrt would break. ''' n_X = len(X_rhos) n_Y = len(Y_rhos) linears = required assert linears.shape == (1, Ks.size, n_X, n_Y, 2) X_quadratics = np.empty((Ks.size, n_X), dtype=np.float32) for i, rho in enumerate(X_rhos): X_quadratics[:, i] = quadratic(Ks, dim, rho) Y_quadratics = np.empty((Ks.size, n_Y), dtype=np.float32) for j, rho in enumerate(Y_rhos): Y_quadratics[:, j] = quadratic(Ks, dim, rho) est = -linears.sum(axis=4) est += X_quadratics[None, :, :, None] est += Y_quadratics[None, :, None, :] np.maximum(est, 0, out=est) np.sqrt(est, out=est) # diagonal is of course known to be zero if to_self: est[:, :, xrange(n_X), xrange(n_Y)] = 0 return est[:, :, :, :, None]
def l2(Ks, dim, X_rhos, Y_rhos, required, clamp=True, to_self=False): r''' Estimates the L2 distance between distributions, via \int (p - q)^2 = \int p^2 - \int p q - \int q p + \int q^2. \int pq and \int qp are estimated with the linear function (in both directions), while \int p^2 and \int q^2 are estimated via the quadratic function below. Always clamps negative estimates of l2^2 to 0, because otherwise the sqrt would break. ''' n_X = len(X_rhos) n_Y = len(Y_rhos) linears = required assert linears.shape == (1, Ks.size, n_X, n_Y, 2) X_quadratics = np.empty((Ks.size, n_X), dtype=np.float32) for i, rho in enumerate(X_rhos): X_quadratics[:, i] = quadratic(Ks, dim, rho) Y_quadratics = np.empty((Ks.size, n_Y), dtype=np.float32) for j, rho in enumerate(Y_rhos): Y_quadratics[:, j] = quadratic(Ks, dim, rho) est = -linears.sum(axis=4) est += X_quadratics[None, :, :, None] est += Y_quadratics[None, :, None, :] np.maximum(est, 0, out=est) np.sqrt(est, out=est) # diagonal is of course known to be zero if to_self: est[:, :, xrange(n_X), xrange(n_Y)] = 0 return est[:, :, :, :, None]
[ "r", "Estimates", "the", "L2", "distance", "between", "distributions", "via", "\\", "int", "(", "p", "-", "q", ")", "^2", "=", "\\", "int", "p^2", "-", "\\", "int", "p", "q", "-", "\\", "int", "q", "p", "+", "\\", "int", "q^2", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L824-L859
[ "def", "l2", "(", "Ks", ",", "dim", ",", "X_rhos", ",", "Y_rhos", ",", "required", ",", "clamp", "=", "True", ",", "to_self", "=", "False", ")", ":", "n_X", "=", "len", "(", "X_rhos", ")", "n_Y", "=", "len", "(", "Y_rhos", ")", "linears", "=", "required", "assert", "linears", ".", "shape", "==", "(", "1", ",", "Ks", ".", "size", ",", "n_X", ",", "n_Y", ",", "2", ")", "X_quadratics", "=", "np", ".", "empty", "(", "(", "Ks", ".", "size", ",", "n_X", ")", ",", "dtype", "=", "np", ".", "float32", ")", "for", "i", ",", "rho", "in", "enumerate", "(", "X_rhos", ")", ":", "X_quadratics", "[", ":", ",", "i", "]", "=", "quadratic", "(", "Ks", ",", "dim", ",", "rho", ")", "Y_quadratics", "=", "np", ".", "empty", "(", "(", "Ks", ".", "size", ",", "n_Y", ")", ",", "dtype", "=", "np", ".", "float32", ")", "for", "j", ",", "rho", "in", "enumerate", "(", "Y_rhos", ")", ":", "Y_quadratics", "[", ":", ",", "j", "]", "=", "quadratic", "(", "Ks", ",", "dim", ",", "rho", ")", "est", "=", "-", "linears", ".", "sum", "(", "axis", "=", "4", ")", "est", "+=", "X_quadratics", "[", "None", ",", ":", ",", ":", ",", "None", "]", "est", "+=", "Y_quadratics", "[", "None", ",", ":", ",", "None", ",", ":", "]", "np", ".", "maximum", "(", "est", ",", "0", ",", "out", "=", "est", ")", "np", ".", "sqrt", "(", "est", ",", "out", "=", "est", ")", "# diagonal is of course known to be zero", "if", "to_self", ":", "est", "[", ":", ",", ":", ",", "xrange", "(", "n_X", ")", ",", "xrange", "(", "n_Y", ")", "]", "=", "0", "return", "est", "[", ":", ",", ":", ",", ":", ",", ":", ",", "None", "]" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
quadratic
r''' Estimates \int p^2 based on kNN distances. In here because it's used in the l2 distance, above. Returns array of shape (num_Ks,).
skl_groups/divergences/knn.py
def quadratic(Ks, dim, rhos, required=None): r''' Estimates \int p^2 based on kNN distances. In here because it's used in the l2 distance, above. Returns array of shape (num_Ks,). ''' # Estimated with alpha=1, beta=0: # B_{k,d,1,0} is the same as B_{k,d,0,1} in linear() # and the full estimator is # B / (n - 1) * mean(rho ^ -dim) N = rhos.shape[0] Ks = np.asarray(Ks) Bs = (Ks - 1) / np.pi ** (dim / 2) * gamma(dim / 2 + 1) # shape (num_Ks,) est = Bs / (N - 1) * np.mean(rhos ** (-dim), axis=0) return est
def quadratic(Ks, dim, rhos, required=None): r''' Estimates \int p^2 based on kNN distances. In here because it's used in the l2 distance, above. Returns array of shape (num_Ks,). ''' # Estimated with alpha=1, beta=0: # B_{k,d,1,0} is the same as B_{k,d,0,1} in linear() # and the full estimator is # B / (n - 1) * mean(rho ^ -dim) N = rhos.shape[0] Ks = np.asarray(Ks) Bs = (Ks - 1) / np.pi ** (dim / 2) * gamma(dim / 2 + 1) # shape (num_Ks,) est = Bs / (N - 1) * np.mean(rhos ** (-dim), axis=0) return est
[ "r", "Estimates", "\\", "int", "p^2", "based", "on", "kNN", "distances", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L867-L883
[ "def", "quadratic", "(", "Ks", ",", "dim", ",", "rhos", ",", "required", "=", "None", ")", ":", "# Estimated with alpha=1, beta=0:", "# B_{k,d,1,0} is the same as B_{k,d,0,1} in linear()", "# and the full estimator is", "# B / (n - 1) * mean(rho ^ -dim)", "N", "=", "rhos", ".", "shape", "[", "0", "]", "Ks", "=", "np", ".", "asarray", "(", "Ks", ")", "Bs", "=", "(", "Ks", "-", "1", ")", "/", "np", ".", "pi", "**", "(", "dim", "/", "2", ")", "*", "gamma", "(", "dim", "/", "2", "+", "1", ")", "# shape (num_Ks,)", "est", "=", "Bs", "/", "(", "N", "-", "1", ")", "*", "np", ".", "mean", "(", "rhos", "**", "(", "-", "dim", ")", ",", "axis", "=", "0", ")", "return", "est" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
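In formula form, the estimator computed above is

\[
\widehat{\textstyle\int p^2}
  \;=\; \frac{K - 1}{(N - 1)\,\bar{c}_d}\,
        \frac{1}{N} \sum_{i=1}^{N} \rho_K(x_i)^{-d},
\qquad
\bar{c}_d \;=\; \frac{\pi^{d/2}}{\Gamma(d/2 + 1)},
\]

where \(\bar{c}_d\) is the volume of the unit ball in \(\mathbb{R}^d\) and \(\rho_K(x_i)\) the distance from \(x_i\) to its K-th neighbor; this matches Bs / (N - 1) * np.mean(rhos ** (-dim)) in the code.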
valid
jensen_shannon
r''' Estimate the difference between the Shannon entropy of an equally-weighted mixture between X and Y and the mixture of the Shannon entropies: JS(X, Y) = H[ (X + Y) / 2 ] - (H[X] + H[Y]) / 2 We use a special case of the Hino-Murata weighted information estimator with a fixed M = n \alpha, about equivalent to the K-nearest-neighbor approach used for the other estimators: Hideitsu Hino and Noboru Murata (2013). Information estimators for weighted observations. Neural Networks. http://linkinghub.elsevier.com/retrieve/pii/S0893608013001676 The estimator for JS(X, Y) is: log volume of the unit ball - log M + log(n + m - 1) + digamma(M) + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball) ) + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i with no more than M/(n+m-1) weight where X points have weight m / (n (2 m - 1)) and Y points have weight 1 / (2 m - 1) - digamma(# of neighbors in that ball) ) - 1/2 (log volume of the unit ball - log M + log(n - 1) + digamma(M)) - 1/2 mean_X( d * log radius of the largest ball in X around X_i with no more than M/(n-1) weight where X points have weight 1 / (n - 1)) - digamma(# of neighbors in that ball) ) - 1/2 (log volume of the unit ball - log M + log(m - 1) + digamma(M)) - 1/2 mean_Y( d * log radius of the largest ball in Y around Y_i with no more than M/(n-1) weight where X points have weight 1 / (m - 1)) - digamma(# of neighbors in that ball) ) = log(n + m - 1) + digamma(M) + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball) ) + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i with no more than M/(n+m-1) weight where X points have weight m / (n (2 m - 1)) and Y points have weight 1 / (2 m - 1) - digamma(# of neighbors in that ball) ) - 1/2 [log(n-1) + mean_X( d * log rho_M(X_i) )] - 1/2 [log(m-1) + mean_Y( d * log rho_M(Y_i) )]
skl_groups/divergences/knn.py
def jensen_shannon(Ks, dim, X_rhos, Y_rhos, required, clamp=True, to_self=False): r''' Estimate the difference between the Shannon entropy of an equally-weighted mixture between X and Y and the mixture of the Shannon entropies: JS(X, Y) = H[ (X + Y) / 2 ] - (H[X] + H[Y]) / 2 We use a special case of the Hino-Murata weighted information estimator with a fixed M = n \alpha, about equivalent to the K-nearest-neighbor approach used for the other estimators: Hideitsu Hino and Noboru Murata (2013). Information estimators for weighted observations. Neural Networks. http://linkinghub.elsevier.com/retrieve/pii/S0893608013001676 The estimator for JS(X, Y) is: log volume of the unit ball - log M + log(n + m - 1) + digamma(M) + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball) ) + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i with no more than M/(n+m-1) weight where X points have weight m / (n (2 m - 1)) and Y points have weight 1 / (2 m - 1) - digamma(# of neighbors in that ball) ) - 1/2 (log volume of the unit ball - log M + log(n - 1) + digamma(M)) - 1/2 mean_X( d * log radius of the largest ball in X around X_i with no more than M/(n-1) weight where X points have weight 1 / (n - 1) - digamma(# of neighbors in that ball) ) - 1/2 (log volume of the unit ball - log M + log(m - 1) + digamma(M)) - 1/2 mean_Y( d * log radius of the largest ball in Y around Y_i with no more than M/(m-1) weight where Y points have weight 1 / (m - 1) - digamma(# of neighbors in that ball) ) = log(n + m - 1) + digamma(M) + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball) ) + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i with no more than M/(n+m-1) weight where X points have weight m / (n (2 m - 1)) and Y points have weight 1 / (2 m - 1) - digamma(# of neighbors in that ball) ) - 1/2 [log(n-1) + mean_X( d * log rho_M(X_i) )] - 1/2 [log(m-1) + mean_Y( d * log rho_M(Y_i) )] ''' X_ns = np.array([rho.shape[0] for rho in X_rhos]) Y_ns = np.array([rho.shape[0] for rho in Y_rhos]) n_X = X_ns.size n_Y = Y_ns.size # cores[0, k, i, j, 0] is mean_X(d * ... - psi(...)) for X[i], Y[j], M=Ks[k] # cores[0, k, i, j, 1] is mean_Y(d * ... - psi(...)) for X[i], Y[j], M=Ks[k] cores = required assert cores.shape == (1, Ks.size, n_X, n_Y, 2) # X_bits[k, i] is log(n-1) + mean_X( d * log rho_M(X_i) ) for X[i], M=Ks[k] X_bits = np.empty((Ks.size, n_X), dtype=np.float32) for i, rho in enumerate(X_rhos): X_bits[:, i] = dim * np.mean(np.log(rho), axis=0) X_bits += np.log(X_ns - 1)[np.newaxis, :] # Y_bits[k, j] is log(n-1) + mean_Y( d * log rho_M(Y_i) ) for Y[j], M=Ks[k] Y_bits = np.empty((Ks.size, n_Y), dtype=np.float32) for j, rho in enumerate(Y_rhos): Y_bits[:, j] = dim * np.mean(np.log(rho), axis=0) Y_bits += np.log(Y_ns - 1)[np.newaxis, :] est = cores.sum(axis=4) est -= X_bits.reshape(1, Ks.size, n_X, 1) est -= Y_bits.reshape(1, Ks.size, 1, n_Y) est /= 2 est += np.log(-1 + X_ns[None, None, :, None] + Y_ns[None, None, None, :]) est += psi(Ks)[None, :, None, None] # diagonal is zero if to_self: est[:, :, xrange(n_X), xrange(n_Y)] = 0 if clamp: # know that 0 <= JS <= ln(2) np.maximum(0, est, out=est) np.minimum(np.log(2), est, out=est) return est[:, :, :, :, None]
def jensen_shannon(Ks, dim, X_rhos, Y_rhos, required, clamp=True, to_self=False): r''' Estimate the difference between the Shannon entropy of an equally-weighted mixture between X and Y and the mixture of the Shannon entropies: JS(X, Y) = H[ (X + Y) / 2 ] - (H[X] + H[Y]) / 2 We use a special case of the Hino-Murata weighted information estimator with a fixed M = n \alpha, about equivalent to the K-nearest-neighbor approach used for the other estimators: Hideitsu Hino and Noboru Murata (2013). Information estimators for weighted observations. Neural Networks. http://linkinghub.elsevier.com/retrieve/pii/S0893608013001676 The estimator for JS(X, Y) is: log volume of the unit ball - log M + log(n + m - 1) + digamma(M) + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball) ) + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i with no more than M/(n+m-1) weight where X points have weight m / (n (2 m - 1)) and Y points have weight 1 / (2 m - 1) - digamma(# of neighbors in that ball) ) - 1/2 (log volume of the unit ball - log M + log(n - 1) + digamma(M)) - 1/2 mean_X( d * log radius of the largest ball in X around X_i with no more than M/(n-1) weight where X points have weight 1 / (n - 1) - digamma(# of neighbors in that ball) ) - 1/2 (log volume of the unit ball - log M + log(m - 1) + digamma(M)) - 1/2 mean_Y( d * log radius of the largest ball in Y around Y_i with no more than M/(m-1) weight where Y points have weight 1 / (m - 1) - digamma(# of neighbors in that ball) ) = log(n + m - 1) + digamma(M) + 1/2 mean_X( d * log radius of largest ball in X+Y around X_i with no more than M/(n+m-1) weight where X points have weight 1 / (2 n - 1) and Y points have weight n / (m (2 n - 1)) - digamma(# of neighbors in that ball) ) + 1/2 mean_Y( d * log radius of largest ball in X+Y around Y_i with no more than M/(n+m-1) weight where X points have weight m / (n (2 m - 1)) and Y points have weight 1 / (2 m - 1) - digamma(# of neighbors in that ball) ) - 1/2 [log(n-1) + mean_X( d * log rho_M(X_i) )] - 1/2 [log(m-1) + mean_Y( d * log rho_M(Y_i) )] ''' X_ns = np.array([rho.shape[0] for rho in X_rhos]) Y_ns = np.array([rho.shape[0] for rho in Y_rhos]) n_X = X_ns.size n_Y = Y_ns.size # cores[0, k, i, j, 0] is mean_X(d * ... - psi(...)) for X[i], Y[j], M=Ks[k] # cores[0, k, i, j, 1] is mean_Y(d * ... - psi(...)) for X[i], Y[j], M=Ks[k] cores = required assert cores.shape == (1, Ks.size, n_X, n_Y, 2) # X_bits[k, i] is log(n-1) + mean_X( d * log rho_M(X_i) ) for X[i], M=Ks[k] X_bits = np.empty((Ks.size, n_X), dtype=np.float32) for i, rho in enumerate(X_rhos): X_bits[:, i] = dim * np.mean(np.log(rho), axis=0) X_bits += np.log(X_ns - 1)[np.newaxis, :] # Y_bits[k, j] is log(n-1) + mean_Y( d * log rho_M(Y_i) ) for Y[j], M=Ks[k] Y_bits = np.empty((Ks.size, n_Y), dtype=np.float32) for j, rho in enumerate(Y_rhos): Y_bits[:, j] = dim * np.mean(np.log(rho), axis=0) Y_bits += np.log(Y_ns - 1)[np.newaxis, :] est = cores.sum(axis=4) est -= X_bits.reshape(1, Ks.size, n_X, 1) est -= Y_bits.reshape(1, Ks.size, 1, n_Y) est /= 2 est += np.log(-1 + X_ns[None, None, :, None] + Y_ns[None, None, None, :]) est += psi(Ks)[None, :, None, None] # diagonal is zero if to_self: est[:, :, xrange(n_X), xrange(n_Y)] = 0 if clamp: # know that 0 <= JS <= ln(2) np.maximum(0, est, out=est) np.minimum(np.log(2), est, out=est) return est[:, :, :, :, None]
[ "r", "Estimate", "the", "difference", "between", "the", "Shannon", "entropy", "of", "an", "equally", "-", "weighted", "mixture", "between", "X", "and", "Y", "and", "the", "mixture", "of", "the", "Shannon", "entropies", ":" ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L886-L982
[ "def", "jensen_shannon", "(", "Ks", ",", "dim", ",", "X_rhos", ",", "Y_rhos", ",", "required", ",", "clamp", "=", "True", ",", "to_self", "=", "False", ")", ":", "X_ns", "=", "np", ".", "array", "(", "[", "rho", ".", "shape", "[", "0", "]", "for", "rho", "in", "X_rhos", "]", ")", "Y_ns", "=", "np", ".", "array", "(", "[", "rho", ".", "shape", "[", "0", "]", "for", "rho", "in", "Y_rhos", "]", ")", "n_X", "=", "X_ns", ".", "size", "n_Y", "=", "Y_ns", ".", "size", "# cores[0, k, i, j, 0] is mean_X(d * ... - psi(...)) for X[i], Y[j], M=Ks[k]", "# cores[0, k, i, j, 1] is mean_Y(d * ... - psi(...)) for X[i], Y[j], M=Ks[k]", "cores", "=", "required", "assert", "cores", ".", "shape", "==", "(", "1", ",", "Ks", ".", "size", ",", "n_X", ",", "n_Y", ",", "2", ")", "# X_bits[k, i] is log(n-1) + mean_X( d * log rho_M(X_i) ) for X[i], M=Ks[k]", "X_bits", "=", "np", ".", "empty", "(", "(", "Ks", ".", "size", ",", "n_X", ")", ",", "dtype", "=", "np", ".", "float32", ")", "for", "i", ",", "rho", "in", "enumerate", "(", "X_rhos", ")", ":", "X_bits", "[", ":", ",", "i", "]", "=", "dim", "*", "np", ".", "mean", "(", "np", ".", "log", "(", "rho", ")", ",", "axis", "=", "0", ")", "X_bits", "+=", "np", ".", "log", "(", "X_ns", "-", "1", ")", "[", "np", ".", "newaxis", ",", ":", "]", "# Y_bits[k, j] is log(n-1) + mean_Y( d * log rho_M(Y_i) ) for Y[j], M=Ks[k]", "Y_bits", "=", "np", ".", "empty", "(", "(", "Ks", ".", "size", ",", "n_Y", ")", ",", "dtype", "=", "np", ".", "float32", ")", "for", "j", ",", "rho", "in", "enumerate", "(", "Y_rhos", ")", ":", "Y_bits", "[", ":", ",", "j", "]", "=", "dim", "*", "np", ".", "mean", "(", "np", ".", "log", "(", "rho", ")", ",", "axis", "=", "0", ")", "Y_bits", "+=", "np", ".", "log", "(", "Y_ns", "-", "1", ")", "[", "np", ".", "newaxis", ",", ":", "]", "est", "=", "cores", ".", "sum", "(", "axis", "=", "4", ")", "est", "-=", "X_bits", ".", "reshape", "(", "1", ",", "Ks", ".", "size", ",", "n_X", ",", "1", ")", "est", "-=", "Y_bits", ".", "reshape", "(", "1", ",", "Ks", ".", "size", ",", "1", ",", "n_Y", ")", "est", "/=", "2", "est", "+=", "np", ".", "log", "(", "-", "1", "+", "X_ns", "[", "None", ",", "None", ",", ":", ",", "None", "]", "+", "Y_ns", "[", "None", ",", "None", ",", "None", ",", ":", "]", ")", "est", "+=", "psi", "(", "Ks", ")", "[", "None", ",", ":", ",", "None", ",", "None", "]", "# diagonal is zero", "if", "to_self", ":", "est", "[", ":", ",", ":", ",", "xrange", "(", "n_X", ")", ",", "xrange", "(", "n_Y", ")", "]", "=", "0", "if", "clamp", ":", "# know that 0 <= JS <= ln(2)", "np", ".", "maximum", "(", "0", ",", "est", ",", "out", "=", "est", ")", "np", ".", "minimum", "(", "np", ".", "log", "(", "2", ")", ",", "est", ",", "out", "=", "est", ")", "return", "est", "[", ":", ",", ":", ",", ":", ",", ":", ",", "None", "]" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
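A hedged sketch of the combination and clamping steps above, on fake placeholder inputs, just to make the array shapes and broadcasting concrete (none of these numbers are real k-NN statistics):

import numpy as np
from scipy.special import psi

rng = np.random.RandomState(0)
Ks = np.array([3, 5])                                       # the M values
dim, n_X, n_Y = 4, 2, 3                                     # toy sizes (assumed)
X_rhos = [rng.rand(10, Ks.size) + 0.5 for _ in range(n_X)]  # fake rho_M(X_i) distances
Y_rhos = [rng.rand(12, Ks.size) + 0.5 for _ in range(n_Y)]  # fake rho_M(Y_i) distances
cores = rng.rand(1, Ks.size, n_X, n_Y, 2)                   # fake mean_X / mean_Y core terms

X_ns = np.array([r.shape[0] for r in X_rhos])
Y_ns = np.array([r.shape[0] for r in Y_rhos])
# bits[k, i] = log(n - 1) + mean( d * log rho_M ), as in the function above
X_bits = np.stack([dim * np.log(r).mean(axis=0) for r in X_rhos], axis=1) + np.log(X_ns - 1)
Y_bits = np.stack([dim * np.log(r).mean(axis=0) for r in Y_rhos], axis=1) + np.log(Y_ns - 1)

est = cores.sum(axis=4)
est -= X_bits.reshape(1, Ks.size, n_X, 1)
est -= Y_bits.reshape(1, Ks.size, 1, n_Y)
est /= 2
est += np.log(X_ns[None, None, :, None] + Y_ns[None, None, None, :] - 1)
est += psi(Ks)[None, :, None, None]
est = np.clip(est, 0, np.log(2))   # 0 <= JS <= ln 2
print(est.shape)                   # (1, 2, 2, 3): [1, len(Ks), n_X, n_Y]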
valid
topological_sort
Topologically sort a DAG, represented by a dict of child => set of parents. The dependency dict is destroyed during operation. Uses the Kahn algorithm: http://en.wikipedia.org/wiki/Topological_sorting Not a particularly good implementation, but we're just running it on tiny graphs.
skl_groups/divergences/knn.py
def topological_sort(deps): ''' Topologically sort a DAG, represented by a dict of child => set of parents. The dependency dict is destroyed during operation. Uses the Kahn algorithm: http://en.wikipedia.org/wiki/Topological_sorting Not a particularly good implementation, but we're just running it on tiny graphs. ''' order = [] available = set() def _move_available(): to_delete = [] for n, parents in iteritems(deps): if not parents: available.add(n) to_delete.append(n) for n in to_delete: del deps[n] _move_available() while available: n = available.pop() order.append(n) for parents in itervalues(deps): parents.discard(n) _move_available() if available: raise ValueError("dependency cycle found") return order
def topological_sort(deps): ''' Topologically sort a DAG, represented by a dict of child => set of parents. The dependency dict is destroyed during operation. Uses the Kahn algorithm: http://en.wikipedia.org/wiki/Topological_sorting Not a particularly good implementation, but we're just running it on tiny graphs. ''' order = [] available = set() def _move_available(): to_delete = [] for n, parents in iteritems(deps): if not parents: available.add(n) to_delete.append(n) for n in to_delete: del deps[n] _move_available() while available: n = available.pop() order.append(n) for parents in itervalues(deps): parents.discard(n) _move_available() if available: raise ValueError("dependency cycle found") return order
[ "Topologically", "sort", "a", "DAG", "represented", "by", "a", "dict", "of", "child", "=", ">", "set", "of", "parents", ".", "The", "dependency", "dict", "is", "destroyed", "during", "operation", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L1008-L1039
[ "def", "topological_sort", "(", "deps", ")", ":", "order", "=", "[", "]", "available", "=", "set", "(", ")", "def", "_move_available", "(", ")", ":", "to_delete", "=", "[", "]", "for", "n", ",", "parents", "in", "iteritems", "(", "deps", ")", ":", "if", "not", "parents", ":", "available", ".", "add", "(", "n", ")", "to_delete", ".", "append", "(", "n", ")", "for", "n", "in", "to_delete", ":", "del", "deps", "[", "n", "]", "_move_available", "(", ")", "while", "available", ":", "n", "=", "available", ".", "pop", "(", ")", "order", ".", "append", "(", "n", ")", "for", "parents", "in", "itervalues", "(", "deps", ")", ":", "parents", ".", "discard", "(", "n", ")", "_move_available", "(", ")", "if", "available", ":", "raise", "ValueError", "(", "\"dependency cycle found\"", ")", "return", "order" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
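A Python-3 usage sketch of the same Kahn-style sort (iteritems/itervalues come from six in the source). One caveat: after the while loop above exits, available is necessarily empty, so its cycle check can never fire; testing whether deps still has entries, as here, does catch cycles:

def topo_sort(deps):
    # deps: child -> set of parents; consumed during the sort
    order, available = [], set()
    def move_available():
        ready = [n for n, parents in deps.items() if not parents]
        for n in ready:
            available.add(n)
            del deps[n]
    move_available()
    while available:
        n = available.pop()
        order.append(n)
        for parents in deps.values():
            parents.discard(n)
        move_available()
    if deps:  # nodes left with unsatisfiable parents
        raise ValueError("dependency cycle found")
    return order

print(topo_sort({'a': set(), 'b': {'a'}, 'c': {'a', 'b'}}))  # ['a', 'b', 'c'] (one valid order)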
valid
_parse_specs
Set up the different functions we need to call. Returns: - a dict mapping base estimator functions to _FuncInfo objects. If the function needs_alpha, then the alphas attribute is an array of alpha values and pos is a corresponding array of indices. Otherwise, alphas is None and pos is a list containing a single index. Indices are >= 0 if they correspond to something in a spec, and negative if they're just used for a meta estimator but not directly requested. - an OrderedDict mapping functions to _MetaFuncInfo objects. alphas and pos are like for _FuncInfo; deps is a list of indices which should be passed to the estimator. Note that these might be other meta functions; this list is guaranteed to be in an order such that all dependencies are resolved before calling that function. If no such order is possible, raise ValueError. - the number of meta-only results # TODO: update doctests for _parse_specs >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3])}, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[-4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[-4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 4) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2', 'linear']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3)
skl_groups/divergences/knn.py
def _parse_specs(specs, Ks): ''' Set up the different functions we need to call. Returns: - a dict mapping base estimator functions to _FuncInfo objects. If the function needs_alpha, then the alphas attribute is an array of alpha values and pos is a corresponding array of indices. Otherwise, alphas is None and pos is a list containing a single index. Indices are >= 0 if they correspond to something in a spec, and negative if they're just used for a meta estimator but not directly requested. - an OrderedDict mapping functions to _MetaFuncInfo objects. alphas and pos are like for _FuncInfo; deps is a list of indices which should be passed to the estimator. Note that these might be other meta functions; this list is guaranteed to be in an order such that all dependencies are resolved before calling that function. If no such order is possible, raise ValueError. - the number of meta-only results # TODO: update doctests for _parse_specs >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3])}, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[-4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[-4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 4) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2', 'linear']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3) ''' funcs = {} metas = {} meta_deps = defaultdict(set) def add_func(func, alpha=None, pos=None): needs_alpha = getattr(func, 'needs_alpha', False) is_meta = hasattr(func, 'needs_results') d = metas if is_meta else funcs if func not in d: if needs_alpha: args = {'alphas': [alpha], 'pos': [pos]} else: args = {'alphas': None, 'pos': [pos]} if not is_meta: d[func] = _FuncInfo(**args) else: d[func] = _MetaFuncInfo(deps=[], **args) for req in func.needs_results: if callable(req.alpha): req_alpha = req.alpha(alpha) else: req_alpha = req.alpha add_func(req.func, alpha=req_alpha) meta_deps[func].add(req.func) meta_deps[req.func] # make sure required func is in there else: # already have an entry for the func # need to give it this pos, if it's not None # and also make sure that the alpha is present info = d[func] if not needs_alpha: if pos is not None: if info.pos != [None]: msg = "{} passed more than once" raise ValueError(msg.format(func_name)) info.pos[0] = pos else: # needs alpha try: idx = info.alphas.index(alpha) except ValueError: # this is a new alpha value we haven't seen yet info.alphas.append(alpha) info.pos.append(pos) if is_meta: for req in func.needs_results: if callable(req.alpha): req_alpha = req.alpha(alpha) else: req_alpha = req.alpha add_func(req.func, alpha=req_alpha) else: # repeated alpha value if pos is not None: if info.pos[idx] is not None: msg = "{} with alpha {} passed more than once" raise ValueError(msg.format(func_name, alpha)) info.pos[idx] = pos # add functions for each spec for i, spec in enumerate(specs): func_name, alpha = (spec.split(':', 1) + [None])[:2] if alpha is not None: alpha = float(alpha) try: func = func_mapping[func_name] except KeyError: msg = "'{}' is not a known function type" raise ValueError(msg.format(func_name)) needs_alpha = getattr(func, 'needs_alpha', False) if needs_alpha and alpha is None: msg = "{} needs alpha but not passed in spec '{}'" raise ValueError(msg.format(func_name, spec)) elif not needs_alpha and alpha is not None: msg = "{} doesn't need alpha but is passed in spec '{}'" raise ValueError(msg.format(func_name, spec)) add_func(func, alpha, i) # number things that are dependencies only meta_counter = itertools.count(-1, step=-1) for info in itertools.chain(itervalues(funcs), itervalues(metas)): for i, pos in enumerate(info.pos): if pos is None: info.pos[i] = next(meta_counter) # fill in the dependencies for metas for func, info in iteritems(metas): deps = info.deps assert deps == [] for req in func.needs_results: f = req.func req_info = (metas if hasattr(f, 'needs_results') else funcs)[f] if req.alpha is not None: if callable(req.alpha): req_alpha = req.alpha(info.alphas) else: req_alpha = req.alpha find_alpha = np.vectorize(req_info.alphas.index, otypes=[int]) pos = np.asarray(req_info.pos)[find_alpha(req_alpha)] if np.isscalar(pos): deps.append(pos[()]) else: deps.extend(pos) else: pos, = req_info.pos deps.append(pos) # topological sort of metas meta_order = topological_sort(meta_deps) metas_ordered = OrderedDict( (f, metas[f]) for f in meta_order if hasattr(f, 'needs_results')) return funcs, metas_ordered, -next(meta_counter) - 1
def _parse_specs(specs, Ks): ''' Set up the different functions we need to call. Returns: - a dict mapping base estimator functions to _FuncInfo objects. If the function needs_alpha, then the alphas attribute is an array of alpha values and pos is a corresponding array of indices. Otherwise, alphas is None and pos is a list containing a single index. Indices are >= 0 if they correspond to something in a spec, and negative if they're just used for a meta estimator but not directly requested. - an OrderedDict mapping functions to _MetaFuncInfo objects. alphas and pos are like for _FuncInfo; deps is a list of indices which should be passed to the estimator. Note that these might be other meta functions; this list is guaranteed to be in an order such that all dependencies are resolved before calling that function. If no such order is possible, raise ValueError. - the number of meta-only results # TODO: update doctests for _parse_specs >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3])}, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[-4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[-4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 4) >>> _parse_specs(['renyi:.8', 'hellinger', 'renyi:.9', 'l2', 'linear']) ({<function alpha_div at 0x10954f848>: _FuncInfo(alphas=[0.8, 0.5, 0.9], pos=[-1, -2, -3]), <function linear at 0x10954f758>: _FuncInfo(alphas=None, pos=[4]) }, OrderedDict([ (<function hellinger at 0x10954fc80>, _MetaFuncInfo(alphas=None, pos=[1], deps=[array(-2)])), (<function l2 at 0x10954fde8>, _MetaFuncInfo(alphas=None, pos=[3], deps=[4])), (<function renyi at 0x10954fcf8>, _MetaFuncInfo(alphas=[0.8, 0.9], pos=[0, 2], deps=[-1, -3])) ]), 3) ''' funcs = {} metas = {} meta_deps = defaultdict(set) def add_func(func, alpha=None, pos=None): needs_alpha = getattr(func, 'needs_alpha', False) is_meta = hasattr(func, 'needs_results') d = metas if is_meta else funcs if func not in d: if needs_alpha: args = {'alphas': [alpha], 'pos': [pos]} else: args = {'alphas': None, 'pos': [pos]} if not is_meta: d[func] = _FuncInfo(**args) else: d[func] = _MetaFuncInfo(deps=[], **args) for req in func.needs_results: if callable(req.alpha): req_alpha = req.alpha(alpha) else: req_alpha = req.alpha add_func(req.func, alpha=req_alpha) meta_deps[func].add(req.func) meta_deps[req.func] # make sure required func is in there else: # already have an entry for the func # need to give it this pos, if it's not None # and also make sure that the alpha is present info = d[func] if not needs_alpha: if pos is not None: if info.pos != [None]: msg = "{} passed more than once" raise ValueError(msg.format(func_name)) info.pos[0] = pos else: # needs alpha try: idx = info.alphas.index(alpha) except ValueError: # this is a new alpha value we haven't seen yet info.alphas.append(alpha) info.pos.append(pos) if is_meta: for req in func.needs_results: if callable(req.alpha): req_alpha = req.alpha(alpha) else: req_alpha = req.alpha add_func(req.func, alpha=req_alpha) else: # repeated alpha value if pos is not None: if info.pos[idx] is not None: msg = "{} with alpha {} passed more than once" raise ValueError(msg.format(func_name, alpha)) info.pos[idx] = pos # add functions for each spec for i, spec in enumerate(specs): func_name, alpha = (spec.split(':', 1) + [None])[:2] if alpha is not None: alpha = float(alpha) try: func = func_mapping[func_name] except KeyError: msg = "'{}' is not a known function type" raise ValueError(msg.format(func_name)) needs_alpha = getattr(func, 'needs_alpha', False) if needs_alpha and alpha is None: msg = "{} needs alpha but not passed in spec '{}'" raise ValueError(msg.format(func_name, spec)) elif not needs_alpha and alpha is not None: msg = "{} doesn't need alpha but is passed in spec '{}'" raise ValueError(msg.format(func_name, spec)) add_func(func, alpha, i) # number things that are dependencies only meta_counter = itertools.count(-1, step=-1) for info in itertools.chain(itervalues(funcs), itervalues(metas)): for i, pos in enumerate(info.pos): if pos is None: info.pos[i] = next(meta_counter) # fill in the dependencies for metas for func, info in iteritems(metas): deps = info.deps assert deps == [] for req in func.needs_results: f = req.func req_info = (metas if hasattr(f, 'needs_results') else funcs)[f] if req.alpha is not None: if callable(req.alpha): req_alpha = req.alpha(info.alphas) else: req_alpha = req.alpha find_alpha = np.vectorize(req_info.alphas.index, otypes=[int]) pos = np.asarray(req_info.pos)[find_alpha(req_alpha)] if np.isscalar(pos): deps.append(pos[()]) else: deps.extend(pos) else: pos, = req_info.pos deps.append(pos) # topological sort of metas meta_order = topological_sort(meta_deps) metas_ordered = OrderedDict( (f, metas[f]) for f in meta_order if hasattr(f, 'needs_results')) return funcs, metas_ordered, -next(meta_counter) - 1
[ "Set", "up", "the", "different", "functions", "we", "need", "to", "call", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L1044-L1222
[ "def", "_parse_specs", "(", "specs", ",", "Ks", ")", ":", "funcs", "=", "{", "}", "metas", "=", "{", "}", "meta_deps", "=", "defaultdict", "(", "set", ")", "def", "add_func", "(", "func", ",", "alpha", "=", "None", ",", "pos", "=", "None", ")", ":", "needs_alpha", "=", "getattr", "(", "func", ",", "'needs_alpha'", ",", "False", ")", "is_meta", "=", "hasattr", "(", "func", ",", "'needs_results'", ")", "d", "=", "metas", "if", "is_meta", "else", "funcs", "if", "func", "not", "in", "d", ":", "if", "needs_alpha", ":", "args", "=", "{", "'alphas'", ":", "[", "alpha", "]", ",", "'pos'", ":", "[", "pos", "]", "}", "else", ":", "args", "=", "{", "'alphas'", ":", "None", ",", "'pos'", ":", "[", "pos", "]", "}", "if", "not", "is_meta", ":", "d", "[", "func", "]", "=", "_FuncInfo", "(", "*", "*", "args", ")", "else", ":", "d", "[", "func", "]", "=", "_MetaFuncInfo", "(", "deps", "=", "[", "]", ",", "*", "*", "args", ")", "for", "req", "in", "func", ".", "needs_results", ":", "if", "callable", "(", "req", ".", "alpha", ")", ":", "req_alpha", "=", "req", ".", "alpha", "(", "alpha", ")", "else", ":", "req_alpha", "=", "req", ".", "alpha", "add_func", "(", "req", ".", "func", ",", "alpha", "=", "req_alpha", ")", "meta_deps", "[", "func", "]", ".", "add", "(", "req", ".", "func", ")", "meta_deps", "[", "req", ".", "func", "]", "# make sure required func is in there", "else", ":", "# already have an entry for the func", "# need to give it this pos, if it's not None", "# and also make sure that the alpha is present", "info", "=", "d", "[", "func", "]", "if", "not", "needs_alpha", ":", "if", "pos", "is", "not", "None", ":", "if", "info", ".", "pos", "!=", "[", "None", "]", ":", "msg", "=", "\"{} passed more than once\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "func_name", ")", ")", "info", ".", "pos", "[", "0", "]", "=", "pos", "else", ":", "# needs alpha", "try", ":", "idx", "=", "info", ".", "alphas", ".", "index", "(", "alpha", ")", "except", "ValueError", ":", "# this is a new alpha value we haven't seen yet", "info", ".", "alphas", ".", "append", "(", "alpha", ")", "info", ".", "pos", ".", "append", "(", "pos", ")", "if", "is_meta", ":", "for", "req", "in", "func", ".", "needs_results", ":", "if", "callable", "(", "req", ".", "alpha", ")", ":", "req_alpha", "=", "req", ".", "alpha", "(", "alpha", ")", "else", ":", "req_alpha", "=", "req", ".", "alpha", "add_func", "(", "req", ".", "func", ",", "alpha", "=", "req_alpha", ")", "else", ":", "# repeated alpha value", "if", "pos", "is", "not", "None", ":", "if", "info", ".", "pos", "[", "idx", "]", "is", "not", "None", ":", "msg", "=", "\"{} with alpha {} passed more than once\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "func_name", ",", "alpha", ")", ")", "info", ".", "pos", "[", "idx", "]", "=", "pos", "# add functions for each spec", "for", "i", ",", "spec", "in", "enumerate", "(", "specs", ")", ":", "func_name", ",", "alpha", "=", "(", "spec", ".", "split", "(", "':'", ",", "1", ")", "+", "[", "None", "]", ")", "[", ":", "2", "]", "if", "alpha", "is", "not", "None", ":", "alpha", "=", "float", "(", "alpha", ")", "try", ":", "func", "=", "func_mapping", "[", "func_name", "]", "except", "KeyError", ":", "msg", "=", "\"'{}' is not a known function type\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "func_name", ")", ")", "needs_alpha", "=", "getattr", "(", "func", ",", "'needs_alpha'", ",", "False", ")", "if", "needs_alpha", "and", "alpha", "is", "None", ":", "msg", "=", "\"{} needs alpha but not passed in spec '{}'\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "func_name", ",", "spec", ")", ")", "elif", "not", "needs_alpha", "and", "alpha", "is", "not", "None", ":", "msg", "=", "\"{} doesn't need alpha but is passed in spec '{}'\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "func_name", ",", "spec", ")", ")", "add_func", "(", "func", ",", "alpha", ",", "i", ")", "# number things that are dependencies only", "meta_counter", "=", "itertools", ".", "count", "(", "-", "1", ",", "step", "=", "-", "1", ")", "for", "info", "in", "itertools", ".", "chain", "(", "itervalues", "(", "funcs", ")", ",", "itervalues", "(", "metas", ")", ")", ":", "for", "i", ",", "pos", "in", "enumerate", "(", "info", ".", "pos", ")", ":", "if", "pos", "is", "None", ":", "info", ".", "pos", "[", "i", "]", "=", "next", "(", "meta_counter", ")", "# fill in the dependencies for metas", "for", "func", ",", "info", "in", "iteritems", "(", "metas", ")", ":", "deps", "=", "info", ".", "deps", "assert", "deps", "==", "[", "]", "for", "req", "in", "func", ".", "needs_results", ":", "f", "=", "req", ".", "func", "req_info", "=", "(", "metas", "if", "hasattr", "(", "f", ",", "'needs_results'", ")", "else", "funcs", ")", "[", "f", "]", "if", "req", ".", "alpha", "is", "not", "None", ":", "if", "callable", "(", "req", ".", "alpha", ")", ":", "req_alpha", "=", "req", ".", "alpha", "(", "info", ".", "alphas", ")", "else", ":", "req_alpha", "=", "req", ".", "alpha", "find_alpha", "=", "np", ".", "vectorize", "(", "req_info", ".", "alphas", ".", "index", ",", "otypes", "=", "[", "int", "]", ")", "pos", "=", "np", ".", "asarray", "(", "req_info", ".", "pos", ")", "[", "find_alpha", "(", "req_alpha", ")", "]", "if", "np", ".", "isscalar", "(", "pos", ")", ":", "deps", ".", "append", "(", "pos", "[", "(", ")", "]", ")", "else", ":", "deps", ".", "extend", "(", "pos", ")", "else", ":", "pos", ",", "=", "req_info", ".", "pos", "deps", ".", "append", "(", "pos", ")", "# topological sort of metas", "meta_order", "=", "topological_sort", "(", "meta_deps", ")", "metas_ordered", "=", "OrderedDict", "(", "(", "f", ",", "metas", "[", "f", "]", ")", "for", "f", "in", "meta_order", "if", "hasattr", "(", "f", ",", "'needs_results'", ")", ")", "return", "funcs", ",", "metas_ordered", ",", "-", "next", "(", "meta_counter", ")", "-", "1" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
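The spec strings take the form 'name' or 'name:alpha'; a minimal sketch of the parsing idiom used above:

for spec in ['renyi:.8', 'hellinger', 'renyi:.9']:
    func_name, alpha = (spec.split(':', 1) + [None])[:2]
    if alpha is not None:
        alpha = float(alpha)
    print(func_name, alpha)
# renyi 0.8 / hellinger None / renyi 0.9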
valid
KNNDivergenceEstimator._get_Ks
Ks as an array and type-checked.
skl_groups/divergences/knn.py
def _get_Ks(self): "Ks as an array and type-checked." Ks = as_integer_type(self.Ks) if Ks.ndim != 1: raise TypeError("Ks should be 1-dim, got shape {}".format(Ks.shape)) if Ks.min() < 1: raise ValueError("Ks should be positive; got {}".format(Ks.min())) return Ks
def _get_Ks(self): "Ks as an array and type-checked." Ks = as_integer_type(self.Ks) if Ks.ndim != 1: raise TypeError("Ks should be 1-dim, got shape {}".format(Ks.shape)) if Ks.min() < 1: raise ValueError("Ks should be positive; got {}".format(Ks.min())) return Ks
[ "Ks", "as", "an", "array", "and", "type", "-", "checked", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L234-L241
[ "def", "_get_Ks", "(", "self", ")", ":", "Ks", "=", "as_integer_type", "(", "self", ".", "Ks", ")", "if", "Ks", ".", "ndim", "!=", "1", ":", "raise", "TypeError", "(", "\"Ks should be 1-dim, got shape {}\"", ".", "format", "(", "Ks", ".", "shape", ")", ")", "if", "Ks", ".", "min", "(", ")", "<", "1", ":", "raise", "ValueError", "(", "\"Ks should be positive; got {}\"", ".", "format", "(", "Ks", ".", "min", "(", ")", ")", ")", "return", "Ks" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
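A standalone sketch of the same checks (as_integer_type is a skl-groups helper that also enforces an integer dtype; np.asarray stands in for it here):

import numpy as np

def check_Ks(Ks):
    Ks = np.asarray(Ks)
    if Ks.ndim != 1:
        raise TypeError("Ks should be 1-dim, got shape {}".format(Ks.shape))
    if Ks.min() < 1:
        raise ValueError("Ks should be positive; got {}".format(Ks.min()))
    return Ks

check_Ks([1, 3, 5])    # ok
# check_Ks([[1, 2]])   # TypeError: not 1-dim
# check_Ks([0, 3])     # ValueError: not positive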
valid
KNNDivergenceEstimator._flann_args
The dictionary of arguments to give to FLANN.
skl_groups/divergences/knn.py
def _flann_args(self, X=None): "The dictionary of arguments to give to FLANN." args = {'cores': self._n_jobs} if self.flann_algorithm == 'auto': if X is None or X.dim > 5: args['algorithm'] = 'linear' else: args['algorithm'] = 'kdtree_single' else: args['algorithm'] = self.flann_algorithm if self.flann_args: args.update(self.flann_args) # check that arguments are correct try: FLANNParameters().update(args) except AttributeError as e: msg = "flann_args contains an invalid argument:\n {}" raise TypeError(msg.format(e)) return args
def _flann_args(self, X=None): "The dictionary of arguments to give to FLANN." args = {'cores': self._n_jobs} if self.flann_algorithm == 'auto': if X is None or X.dim > 5: args['algorithm'] = 'linear' else: args['algorithm'] = 'kdtree_single' else: args['algorithm'] = self.flann_algorithm if self.flann_args: args.update(self.flann_args) # check that arguments are correct try: FLANNParameters().update(args) except AttributeError as e: msg = "flann_args contains an invalid argument:\n {}" raise TypeError(msg.format(e)) return args
[ "The", "dictionary", "of", "arguments", "to", "give", "to", "FLANN", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L251-L271
[ "def", "_flann_args", "(", "self", ",", "X", "=", "None", ")", ":", "args", "=", "{", "'cores'", ":", "self", ".", "_n_jobs", "}", "if", "self", ".", "flann_algorithm", "==", "'auto'", ":", "if", "X", "is", "None", "or", "X", ".", "dim", ">", "5", ":", "args", "[", "'algorithm'", "]", "=", "'linear'", "else", ":", "args", "[", "'algorithm'", "]", "=", "'kdtree_single'", "else", ":", "args", "[", "'algorithm'", "]", "=", "self", ".", "flann_algorithm", "if", "self", ".", "flann_args", ":", "args", ".", "update", "(", "self", ".", "flann_args", ")", "# check that arguments are correct", "try", ":", "FLANNParameters", "(", ")", ".", "update", "(", "args", ")", "except", "AttributeError", "as", "e", ":", "msg", "=", "\"flann_args contains an invalid argument:\\n {}\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "e", ")", ")", "return", "args" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
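A tiny sketch of the 'auto' rule above: a brute-force linear scan when the dimension is unknown or above 5, a single kd-tree otherwise:

def pick_algorithm(dim=None):
    return 'linear' if dim is None or dim > 5 else 'kdtree_single'

print(pick_algorithm(3))    # kdtree_single
print(pick_algorithm(20))   # linear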
valid
KNNDivergenceEstimator.fit
Sets up for divergence estimation "from" new data "to" X. Builds FLANN indices for each bag, and maybe gets within-bag distances. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to search "to". get_rhos : boolean, optional, default False Compute within-bag distances :attr:`rhos_`. These are only needed for some divergence functions or if do_sym is passed, and they'll be computed (and saved) during :meth:`transform` if they're not computed here. If you're using Jensen-Shannon divergence, a higher max_K may be needed once it sees the number of points in the transformed bags, so the computation here might be wasted.
skl_groups/divergences/knn.py
def fit(self, X, y=None, get_rhos=False): ''' Sets up for divergence estimation "from" new data "to" X. Builds FLANN indices for each bag, and maybe gets within-bag distances. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to search "to". get_rhos : boolean, optional, default False Compute within-bag distances :attr:`rhos_`. These are only needed for some divergence functions or if do_sym is passed, and they'll be computed (and saved) during :meth:`transform` if they're not computed here. If you're using Jensen-Shannon divergence, a higher max_K may be needed once it sees the number of points in the transformed bags, so the computation here might be wasted. ''' self.features_ = X = as_features(X, stack=True, bare=True) # if we're using a function that needs to pick its K vals itself, # then we need to set max_K here. when we transform(), might have to # re-do this :| Ks = self._get_Ks() _, _, _, max_K, save_all_Ks, _ = _choose_funcs( self.div_funcs, Ks, X.dim, X.n_pts, None, self.version) if max_K >= X.n_pts.min(): msg = "asked for K = {}, but there's a bag with only {} points" raise ValueError(msg.format(max_K, X.n_pts.min())) memory = self.memory if isinstance(memory, string_types): memory = Memory(cachedir=memory, verbose=0) self.indices_ = id = memory.cache(_build_indices)(X, self._flann_args()) if get_rhos: self.rhos_ = _get_rhos(X, id, Ks, max_K, save_all_Ks, self.min_dist) elif hasattr(self, 'rhos_'): del self.rhos_ return self
def fit(self, X, y=None, get_rhos=False): ''' Sets up for divergence estimation "from" new data "to" X. Builds FLANN indices for each bag, and maybe gets within-bag distances. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to search "to". get_rhos : boolean, optional, default False Compute within-bag distances :attr:`rhos_`. These are only needed for some divergence functions or if do_sym is passed, and they'll be computed (and saved) during :meth:`transform` if they're not computed here. If you're using Jensen-Shannon divergence, a higher max_K may be needed once it sees the number of points in the transformed bags, so the computation here might be wasted. ''' self.features_ = X = as_features(X, stack=True, bare=True) # if we're using a function that needs to pick its K vals itself, # then we need to set max_K here. when we transform(), might have to # re-do this :| Ks = self._get_Ks() _, _, _, max_K, save_all_Ks, _ = _choose_funcs( self.div_funcs, Ks, X.dim, X.n_pts, None, self.version) if max_K >= X.n_pts.min(): msg = "asked for K = {}, but there's a bag with only {} points" raise ValueError(msg.format(max_K, X.n_pts.min())) memory = self.memory if isinstance(memory, string_types): memory = Memory(cachedir=memory, verbose=0) self.indices_ = id = memory.cache(_build_indices)(X, self._flann_args()) if get_rhos: self.rhos_ = _get_rhos(X, id, Ks, max_K, save_all_Ks, self.min_dist) elif hasattr(self, 'rhos_'): del self.rhos_ return self
[ "Sets", "up", "for", "divergence", "estimation", "from", "new", "data", "to", "X", ".", "Builds", "FLANN", "indices", "for", "each", "bag", "and", "maybe", "gets", "within", "-", "bag", "distances", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L273-L315
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ",", "get_rhos", "=", "False", ")", ":", "self", ".", "features_", "=", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ",", "bare", "=", "True", ")", "# if we're using a function that needs to pick its K vals itself,", "# then we need to set max_K here. when we transform(), might have to", "# re-do this :|", "Ks", "=", "self", ".", "_get_Ks", "(", ")", "_", ",", "_", ",", "_", ",", "max_K", ",", "save_all_Ks", ",", "_", "=", "_choose_funcs", "(", "self", ".", "div_funcs", ",", "Ks", ",", "X", ".", "dim", ",", "X", ".", "n_pts", ",", "None", ",", "self", ".", "version", ")", "if", "max_K", ">=", "X", ".", "n_pts", ".", "min", "(", ")", ":", "msg", "=", "\"asked for K = {}, but there's a bag with only {} points\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "max_K", ",", "X", ".", "n_pts", ".", "min", "(", ")", ")", ")", "memory", "=", "self", ".", "memory", "if", "isinstance", "(", "memory", ",", "string_types", ")", ":", "memory", "=", "Memory", "(", "cachedir", "=", "memory", ",", "verbose", "=", "0", ")", "self", ".", "indices_", "=", "id", "=", "memory", ".", "cache", "(", "_build_indices", ")", "(", "X", ",", "self", ".", "_flann_args", "(", ")", ")", "if", "get_rhos", ":", "self", ".", "rhos_", "=", "_get_rhos", "(", "X", ",", "id", ",", "Ks", ",", "max_K", ",", "save_all_Ks", ",", "self", ".", "min_dist", ")", "elif", "hasattr", "(", "self", ",", "'rhos_'", ")", ":", "del", "self", ".", "rhos_", "return", "self" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
KNNDivergenceEstimator.transform
r''' Computes the divergences from X to :attr:`features_`. Parameters ---------- X : list of bag feature arrays or :class:`skl_groups.features.Features` The bags to search "from". Returns ------- divs : array of shape ``[len(div_funcs), len(Ks), len(X), len(features_)] + ([2] if do_sym else [])`` The divergences from X to :attr:`features_`. ``divs[d, k, i, j]`` is the ``div_funcs[d]`` divergence from ``X[i]`` to ``features_[j]`` using a K of ``Ks[k]``. If ``do_sym``, ``divs[d, k, i, j, 0]`` is :math:`D_{d,k}( X_i \| \texttt{features_}_j)` and ``divs[d, k, i, j, 1]`` is :math:`D_{d,k}(\texttt{features_}_j \| X_i)`.
def transform(self, X): r''' Computes the divergences from X to :attr:`features_`. Parameters ---------- X : list of bag feature arrays or :class:`skl_groups.features.Features` The bags to search "from". Returns ------- divs : array of shape ``[len(div_funcs), len(Ks), len(X), len(features_)] + ([2] if do_sym else [])`` The divergences from X to :attr:`features_`. ``divs[d, k, i, j]`` is the ``div_funcs[d]`` divergence from ``X[i]`` to ``features_[j]`` using a K of ``Ks[k]``. If ``do_sym``, ``divs[d, k, i, j, 0]`` is :math:`D_{d,k}( X_i \| \texttt{features_}_j)` and ``divs[d, k, i, j, 1]`` is :math:`D_{d,k}(\texttt{features_}_j \| X_i)`. ''' X = as_features(X, stack=True, bare=True) Y = self.features_ Ks = np.asarray(self.Ks) if X.dim != Y.dim: msg = "incompatible dimensions: fit with {}, transform with {}" raise ValueError(msg.format(Y.dim, X.dim)) memory = self.memory if isinstance(memory, string_types): memory = Memory(cachedir=memory, verbose=0) # ignore Y_indices to avoid slow pickling of them # NOTE: if the indices are approximate, then might not get the same # results! est = memory.cache(_est_divs, ignore=['n_jobs', 'Y_indices', 'Y_rhos']) output, self.rhos_ = est( X, Y, self.indices_, getattr(self, 'rhos_', None), self.div_funcs, Ks, self.do_sym, self.clamp, self.version, self.min_dist, self._flann_args(), self._n_jobs) return output
def transform(self, X): r''' Computes the divergences from X to :attr:`features_`. Parameters ---------- X : list of bag feature arrays or :class:`skl_groups.features.Features` The bags to search "from". Returns ------- divs : array of shape ``[len(div_funcs), len(Ks), len(X), len(features_)] + ([2] if do_sym else [])`` The divergences from X to :attr:`features_`. ``divs[d, k, i, j]`` is the ``div_funcs[d]`` divergence from ``X[i]`` to ``features_[j]`` using a K of ``Ks[k]``. If ``do_sym``, ``divs[d, k, i, j, 0]`` is :math:`D_{d,k}( X_i \| \texttt{features_}_j)` and ``divs[d, k, i, j, 1]`` is :math:`D_{d,k}(\texttt{features_}_j \| X_i)`. ''' X = as_features(X, stack=True, bare=True) Y = self.features_ Ks = np.asarray(self.Ks) if X.dim != Y.dim: msg = "incompatible dimensions: fit with {}, transform with {}" raise ValueError(msg.format(Y.dim, X.dim)) memory = self.memory if isinstance(memory, string_types): memory = Memory(cachedir=memory, verbose=0) # ignore Y_indices to avoid slow pickling of them # NOTE: if the indices are approximate, then might not get the same # results! est = memory.cache(_est_divs, ignore=['n_jobs', 'Y_indices', 'Y_rhos']) output, self.rhos_ = est( X, Y, self.indices_, getattr(self, 'rhos_', None), self.div_funcs, Ks, self.do_sym, self.clamp, self.version, self.min_dist, self._flann_args(), self._n_jobs) return output
def transform(self, X): r''' Computes the divergences from X to :attr:`features_`. Parameters ---------- X : list of bag feature arrays or :class:`skl_groups.features.Features` The bags to search "from". Returns ------- divs : array of shape ``[len(div_funcs), len(Ks), len(X), len(features_)] + ([2] if do_sym else [])`` The divergences from X to :attr:`features_`. ``divs[d, k, i, j]`` is the ``div_funcs[d]`` divergence from ``X[i]`` to ``fetaures_[j]`` using a K of ``Ks[k]``. If ``do_sym``, ``divs[d, k, i, j, 0]`` is :math:`D_{d,k}( X_i \| \texttt{features_}_j)` and ``divs[d, k, i, j, 1]`` is :math:`D_{d,k}(\texttt{features_}_j \| X_i)`. ''' X = as_features(X, stack=True, bare=True) Y = self.features_ Ks = np.asarray(self.Ks) if X.dim != Y.dim: msg = "incompatible dimensions: fit with {}, transform with {}" raise ValueError(msg.format(Y.dim, X.dim)) memory = self.memory if isinstance(memory, string_types): memory = Memory(cachedir=memory, verbose=0) # ignore Y_indices to avoid slow pickling of them # NOTE: if the indices are approximate, then might not get the same # results! est = memory.cache(_est_divs, ignore=['n_jobs', 'Y_indices', 'Y_rhos']) output, self.rhos_ = est( X, Y, self.indices_, getattr(self, 'rhos_', None), self.div_funcs, Ks, self.do_sym, self.clamp, self.version, self.min_dist, self._flann_args(), self._n_jobs) return output
[ "r", "Computes", "the", "divergences", "from", "X", "to", ":", "attr", ":", "features_", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/knn.py#L317-L358
[ "def", "transform", "(", "self", ",", "X", ")", ":", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ",", "bare", "=", "True", ")", "Y", "=", "self", ".", "features_", "Ks", "=", "np", ".", "asarray", "(", "self", ".", "Ks", ")", "if", "X", ".", "dim", "!=", "Y", ".", "dim", ":", "msg", "=", "\"incompatible dimensions: fit with {}, transform with {}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "Y", ".", "dim", ",", "X", ".", "dim", ")", ")", "memory", "=", "self", ".", "memory", "if", "isinstance", "(", "memory", ",", "string_types", ")", ":", "memory", "=", "Memory", "(", "cachedir", "=", "memory", ",", "verbose", "=", "0", ")", "# ignore Y_indices to avoid slow pickling of them", "# NOTE: if the indices are approximate, then might not get the same", "# results!", "est", "=", "memory", ".", "cache", "(", "_est_divs", ",", "ignore", "=", "[", "'n_jobs'", ",", "'Y_indices'", ",", "'Y_rhos'", "]", ")", "output", ",", "self", ".", "rhos_", "=", "est", "(", "X", ",", "Y", ",", "self", ".", "indices_", ",", "getattr", "(", "self", ",", "'rhos_'", ",", "None", ")", ",", "self", ".", "div_funcs", ",", "Ks", ",", "self", ".", "do_sym", ",", "self", ".", "clamp", ",", "self", ".", "version", ",", "self", ".", "min_dist", ",", "self", ".", "_flann_args", "(", ")", ",", "self", ".", "_n_jobs", ")", "return", "output" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
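A hedged end-to-end sketch of fit/transform (the import path, constructor arguments, and an installed FLANN backend are all assumed; the output shape follows the docstring above):

import numpy as np
from skl_groups.divergences import KNNDivergenceEstimator

rng = np.random.RandomState(0)
bags_to = [rng.randn(30, 2) for _ in range(4)]        # fit "to" these bags
bags_from = [rng.randn(25, 2) + 1 for _ in range(3)]  # transform "from" these

est = KNNDivergenceEstimator(div_funcs=['kl'], Ks=[3])
divs = est.fit(bags_to).transform(bags_from)
print(divs.shape)   # (1, 1, 3, 4): [len(div_funcs), len(Ks), len(X), len(features_)]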
valid
as_features
Returns a version of X as a :class:`Features` object. Parameters ---------- stack : boolean, default False Make a stacked version of X. Note that if X is a features object, this will stack it in-place, since that's usually what you want. (If not, just use the :class:`Features` constructor instead.) bare : boolean, default False Return a bare version of X (no metadata). Returns ------- feats : :class:`Features` A version of X. If X is already a :class:`Features` object, the original X may be returned, depending on the arguments.
skl_groups/features.py
def as_features(X, stack=False, bare=False): ''' Returns a version of X as a :class:`Features` object. Parameters ---------- stack : boolean, default False Make a stacked version of X. Note that if X is a features object, this will stack it in-place, since that's usually what you want. (If not, just use the :class:`Features` constructor instead.) bare : boolean, default False Return a bare version of X (no metadata). Returns ------- feats : :class:`Features` A version of X. If X is already a :class:`Features` object, the original X may be returned, depending on the arguments. ''' if isinstance(X, Features): if stack: X.make_stacked() return X.bare() if bare else X return Features(X, stack=stack, bare=bare)
def as_features(X, stack=False, bare=False): ''' Returns a version of X as a :class:`Features` object. Parameters ---------- stack : boolean, default False Make a stacked version of X. Note that if X is a features object, this will stack it in-place, since that's usually what you want. (If not, just use the :class:`Features` constructor instead.) bare : boolean, default False Return a bare version of X (no metadata). Returns ------- feats : :class:`Features` A version of X. If X is already a :class:`Features` object, the original X may be returned, depending on the arguments. ''' if isinstance(X, Features): if stack: X.make_stacked() return X.bare() if bare else X return Features(X, stack=stack, bare=bare)
[ "Returns", "a", "version", "of", "X", "as", "a", ":", "class", ":", "Features", "object", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/features.py#L385-L409
[ "def", "as_features", "(", "X", ",", "stack", "=", "False", ",", "bare", "=", "False", ")", ":", "if", "isinstance", "(", "X", ",", "Features", ")", ":", "if", "stack", ":", "X", ".", "make_stacked", "(", ")", "return", "X", ".", "bare", "(", ")", "if", "bare", "else", "X", "return", "Features", "(", "X", ",", "stack", "=", "stack", ",", "bare", "=", "bare", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
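A small usage sketch (bags are plain 2-d arrays of points that share a dimension):

import numpy as np
from skl_groups.features import Features, as_features

bags = [np.random.randn(n, 3) for n in (5, 8)]
feats = as_features(bags, stack=True)
print(isinstance(feats, Features))   # True
print(as_features(feats) is feats)   # True: already a Features object, returned as-is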
valid
Features.make_stacked
If unstacked, convert to stacked. If stacked, do nothing.
skl_groups/features.py
def make_stacked(self): "If unstacked, convert to stacked. If stacked, do nothing." if self.stacked: return self._boundaries = bounds = np.r_[0, np.cumsum(self.n_pts)] self.stacked_features = stacked = np.vstack(self.features) self.features = np.array( [stacked[bounds[i-1]:bounds[i]] for i in xrange(1, len(bounds))], dtype=object) self.stacked = True
def make_stacked(self): "If unstacked, convert to stacked. If stacked, do nothing." if self.stacked: return self._boundaries = bounds = np.r_[0, np.cumsum(self.n_pts)] self.stacked_features = stacked = np.vstack(self.features) self.features = np.array( [stacked[bounds[i-1]:bounds[i]] for i in xrange(1, len(bounds))], dtype=object) self.stacked = True
[ "If", "unstacked", "convert", "to", "stacked", ".", "If", "stacked", "do", "nothing", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/features.py#L219-L229
[ "def", "make_stacked", "(", "self", ")", ":", "if", "self", ".", "stacked", ":", "return", "self", ".", "_boundaries", "=", "bounds", "=", "np", ".", "r_", "[", "0", ",", "np", ".", "cumsum", "(", "self", ".", "n_pts", ")", "]", "self", ".", "stacked_features", "=", "stacked", "=", "np", ".", "vstack", "(", "self", ".", "features", ")", "self", ".", "features", "=", "np", ".", "array", "(", "[", "stacked", "[", "bounds", "[", "i", "-", "1", "]", ":", "bounds", "[", "i", "]", "]", "for", "i", "in", "xrange", "(", "1", ",", "len", "(", "bounds", ")", ")", "]", ",", "dtype", "=", "object", ")", "self", ".", "stacked", "=", "True" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
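The bookkeeping above in miniature: after np.vstack, bag i lives at stacked[bounds[i]:bounds[i+1]]:

import numpy as np

n_pts = np.array([5, 8, 3])
bounds = np.r_[0, np.cumsum(n_pts)]
print(bounds)   # [ 0  5 13 16]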
valid
Features.copy
Copies the Feature object. Makes a copy of the features array. Parameters ---------- stack : boolean, optional, default False Whether to stack the copy if this one is unstacked. copy_meta : boolean, optional, default False Also copy the metadata. If False, metadata in both points to the same object.
skl_groups/features.py
def copy(self, stack=False, copy_meta=False, memo=None): ''' Copies the Feature object. Makes a copy of the features array. Parameters ---------- stack : boolean, optional, default False Whether to stack the copy if this one is unstacked. copy_meta : boolean, optional, default False Also copy the metadata. If False, metadata in both points to the same object. ''' if self.stacked: fs = deepcopy(self.stacked_features, memo) n_pts = self.n_pts.copy() elif stack: fs = np.vstack(self.features) n_pts = self.n_pts.copy() else: fs = deepcopy(self.features, memo) n_pts = None meta = deepcopy(self.meta, memo) if copy_meta else self.meta return Features(fs, n_pts, copy=False, **meta)
def copy(self, stack=False, copy_meta=False, memo=None): ''' Copies the Feature object. Makes a copy of the features array. Parameters ---------- stack : boolean, optional, default False Whether to stack the copy if this one is unstacked. copy_meta : boolean, optional, default False Also copy the metadata. If False, metadata in both points to the same object. ''' if self.stacked: fs = deepcopy(self.stacked_features, memo) n_pts = self.n_pts.copy() elif stack: fs = np.vstack(self.features) n_pts = self.n_pts.copy() else: fs = deepcopy(self.features, memo) n_pts = None meta = deepcopy(self.meta, memo) if copy_meta else self.meta return Features(fs, n_pts, copy=False, **meta)
[ "Copies", "the", "Feature", "object", ".", "Makes", "a", "copy", "of", "the", "features", "array", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/features.py#L252-L276
[ "def", "copy", "(", "self", ",", "stack", "=", "False", ",", "copy_meta", "=", "False", ",", "memo", "=", "None", ")", ":", "if", "self", ".", "stacked", ":", "fs", "=", "deepcopy", "(", "self", ".", "stacked_features", ",", "memo", ")", "n_pts", "=", "self", ".", "n_pts", ".", "copy", "(", ")", "elif", "stack", ":", "fs", "=", "np", ".", "vstack", "(", "self", ".", "features", ")", "n_pts", "=", "self", ".", "n_pts", ".", "copy", "(", ")", "else", ":", "fs", "=", "deepcopy", "(", "self", ".", "features", ",", "memo", ")", "n_pts", "=", "None", "meta", "=", "deepcopy", "(", "self", ".", "meta", ",", "memo", ")", "if", "copy_meta", "else", "self", ".", "meta", "return", "Features", "(", "fs", ",", "n_pts", ",", "copy", "=", "False", ",", "*", "*", "meta", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
Features.bare
Make a Features object with no metadata; points to the same features.
skl_groups/features.py
def bare(self): "Make a Features object with no metadata; points to the same features." if not self.meta: return self elif self.stacked: return Features(self.stacked_features, self.n_pts, copy=False) else: return Features(self.features, copy=False)
def bare(self): "Make a Features object with no metadata; points to the same features." if not self.meta: return self elif self.stacked: return Features(self.stacked_features, self.n_pts, copy=False) else: return Features(self.features, copy=False)
[ "Make", "a", "Features", "object", "with", "no", "metadata", ";", "points", "to", "the", "same", "features", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/features.py#L375-L382
[ "def", "bare", "(", "self", ")", ":", "if", "not", "self", ".", "meta", ":", "return", "self", "elif", "self", ".", "stacked", ":", "return", "Features", "(", "self", ".", "stacked_features", ",", "self", ".", "n_pts", ",", "copy", "=", "False", ")", "else", ":", "return", "Features", "(", "self", ".", "features", ",", "copy", "=", "False", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
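A small sketch of stripping metadata (the 'label' keyword is a hypothetical metadata field here; extra constructor kwargs become entries in .meta):

import numpy as np
from skl_groups.features import Features

feats = Features([np.random.randn(4, 2), np.random.randn(6, 2)], label=np.array([0, 1]))
print(sorted(feats.meta))   # ['label']
print(feats.bare().meta)    # {}: same feature arrays, no metadata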
valid
kl
r''' Estimate the KL divergence between distributions: \int p(x) \log (p(x) / q(x)) using the kNN-based estimator (5) of Qing Wang, Sanjeev R Kulkarni, and Sergio Verdu (2009). Divergence Estimation for Multidimensional Densities Via k-Nearest-Neighbor Distances. IEEE Transactions on Information Theory. http://www.ee.princeton.edu/~verdu/reprints/WanKulVer.May2009.pdf which is: d * 1/n \sum \log (nu_k(i) / rho_k(i)) + log(m / (n - 1)) If clamp, enforces KL >= 0. Returns an array of shape (num_Ks,).
skl_groups/divergences/_knn.py
def kl(Ks, dim, num_q, rhos, nus, clamp=True): r''' Estimate the KL divergence between distributions: \int p(x) \log (p(x) / q(x)) using the kNN-based estimator (5) of Qing Wang, Sanjeev R Kulkarni, and Sergio Verdu (2009). Divergence Estimation for Multidimensional Densities Via k-Nearest-Neighbor Distances. IEEE Transactions on Information Theory. http://www.ee.princeton.edu/~verdu/reprints/WanKulVer.May2009.pdf which is: d * 1/n \sum \log (nu_k(i) / rho_k(i)) + log(m / (n - 1)) If clamp, enforces KL >= 0. Returns an array of shape (num_Ks,). ''' est = dim * np.mean(np.log(nus) - np.log(rhos), axis=0) est += np.log(num_q / (rhos.shape[0] - 1)) if clamp: np.maximum(est, 0, out=est) return est
def kl(Ks, dim, num_q, rhos, nus, clamp=True): r''' Estimate the KL divergence between distributions: \int p(x) \log (p(x) / q(x)) using the kNN-based estimator (5) of Qing Wang, Sanjeev R Kulkarni, and Sergio Verdu (2009). Divergence Estimation for Multidimensional Densities Via k-Nearest-Neighbor Distances. IEEE Transactions on Information Theory. http://www.ee.princeton.edu/~verdu/reprints/WanKulVer.May2009.pdf which is: d * 1/n \sum \log (nu_k(i) / rho_k(i)) + log(m / (n - 1)) If clamp, enforces KL >= 0. Returns an array of shape (num_Ks,). ''' est = dim * np.mean(np.log(nus) - np.log(rhos), axis=0) est += np.log(num_q / (rhos.shape[0] - 1)) if clamp: np.maximum(est, 0, out=est) return est
[ "r", "Estimate", "the", "KL", "divergence", "between", "distributions", ":", "\\", "int", "p", "(", "x", ")", "\\", "log", "(", "p", "(", "x", ")", "/", "q", "(", "x", "))", "using", "the", "kNN", "-", "based", "estimator", "(", "5", ")", "of", "Qing", "Wang", "Sanjeev", "R", "Kulkarni", "and", "Sergio", "Verdu", "(", "2009", ")", ".", "Divergence", "Estimation", "for", "Multidimensional", "Densities", "Via", "k", "-", "Nearest", "-", "Neighbor", "Distances", ".", "IEEE", "Transactions", "on", "Information", "Theory", ".", "http", ":", "//", "www", ".", "ee", ".", "princeton", ".", "edu", "/", "~verdu", "/", "reprints", "/", "WanKulVer", ".", "May2009", ".", "pdf", "which", "is", ":", "d", "*", "1", "/", "n", "\\", "sum", "\\", "log", "(", "nu_k", "(", "i", ")", "/", "rho_k", "(", "i", "))", "+", "log", "(", "m", "/", "(", "n", "-", "1", "))" ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/divergences/_knn.py#L22-L43
[ "def", "kl", "(", "Ks", ",", "dim", ",", "num_q", ",", "rhos", ",", "nus", ",", "clamp", "=", "True", ")", ":", "est", "=", "dim", "*", "np", ".", "mean", "(", "np", ".", "log", "(", "nus", ")", "-", "np", ".", "log", "(", "rhos", ")", ",", "axis", "=", "0", ")", "est", "+=", "np", ".", "log", "(", "num_q", "/", "(", "rhos", ".", "shape", "[", "0", "]", "-", "1", ")", ")", "if", "clamp", ":", "np", ".", "maximum", "(", "est", ",", "0", ",", "out", "=", "est", ")", "return", "est" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
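A self-contained 1-d check of the formula above, with brute-force k-NN distances standing in for FLANN (the true KL between N(0,1) and N(0.5,1) is 0.125):

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(1000)          # n samples from p
y = rng.randn(1500) + 0.5    # m samples from q
k, dim = 3, 1

# k-th NN distance within x (index k skips the zero self-distance)
rhos = np.sort(np.abs(x[:, None] - x[None, :]), axis=1)[:, k]
# k-th NN distance from each point of x to y
nus = np.sort(np.abs(x[:, None] - y[None, :]), axis=1)[:, k - 1]

est = dim * np.mean(np.log(nus) - np.log(rhos)) + np.log(len(y) / (len(x) - 1))
print(max(est, 0.0))   # clamp at 0; should land roughly near 0.125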
valid
MeanMapKernel.fit
Specify the data to which kernel values should be computed. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to compute "to".
skl_groups/kernels/mmk.py
def fit(self, X, y=None): ''' Specify the data to which kernel values should be computed. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to compute "to". ''' self.features_ = as_features(X, stack=True, bare=True) # TODO: could precompute things like squared norms if kernel == "rbf". # Probably should add support to sklearn instead of hacking it here. return self
def fit(self, X, y=None): ''' Specify the data to which kernel values should be computed. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to compute "to". ''' self.features_ = as_features(X, stack=True, bare=True) # TODO: could precompute things like squared norms if kernel == "rbf". # Probably should add support to sklearn instead of hacking it here. return self
[ "Specify", "the", "data", "to", "which", "kernel", "values", "should", "be", "computed", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/mmk.py#L72-L84
[ "def", "fit", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "self", ".", "features_", "=", "as_features", "(", "X", ",", "stack", "=", "True", ",", "bare", "=", "True", ")", "# TODO: could precompute things like squared norms if kernel == \"rbf\".", "# Probably should add support to sklearn instead of hacking it here.", "return", "self" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
valid
MeanMapKernel.transform
Compute kernels from X to :attr:`features_`. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to compute "from". Must have same dimension as :attr:`features_`. Returns ------- K : array of shape ``[len(X), len(features_)]`` The kernel evaluations from X to :attr:`features_`.
skl_groups/kernels/mmk.py
def transform(self, X): ''' Compute kernels from X to :attr:`features_`. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to compute "from". Must have same dimension as :attr:`features_`. Returns ------- K : array of shape ``[len(X), len(features_)]`` The kernel evaluations from X to :attr:`features_`. ''' X = as_features(X, stack=True, bare=True) Y = self.features_ if X.dim != Y.dim: raise ValueError("MMK transform got dimension {} but had {} at fit" .format(X.dim, Y.dim)) pointwise = pairwise_kernels(X.stacked_features, Y.stacked_features, metric=self.kernel, filter_params=True, **self._get_kernel_params()) # TODO: is there a way to do this without a Python loop? K = np.empty((len(X), len(Y))) for i in range(len(X)): for j in range(len(Y)): K[i, j] = pointwise[X._boundaries[i]:X._boundaries[i+1], Y._boundaries[j]:Y._boundaries[j+1]].mean() return K
def transform(self, X): ''' Compute kernels from X to :attr:`features_`. Parameters ---------- X : list of arrays or :class:`skl_groups.features.Features` The bags to compute "from". Must have same dimension as :attr:`features_`. Returns ------- K : array of shape ``[len(X), len(features_)]`` The kernel evaluations from X to :attr:`features_`. ''' X = as_features(X, stack=True, bare=True) Y = self.features_ if X.dim != Y.dim: raise ValueError("MMK transform got dimension {} but had {} at fit" .format(X.dim, Y.dim)) pointwise = pairwise_kernels(X.stacked_features, Y.stacked_features, metric=self.kernel, filter_params=True, **self._get_kernel_params()) # TODO: is there a way to do this without a Python loop? K = np.empty((len(X), len(Y))) for i in range(len(X)): for j in range(len(Y)): K[i, j] = pointwise[X._boundaries[i]:X._boundaries[i+1], Y._boundaries[j]:Y._boundaries[j+1]].mean() return K
[ "Compute", "kernels", "from", "X", "to", ":", "attr", ":", "features_", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/kernels/mmk.py#L86-L121
[ "def", "transform", "(", "self", ",", "X", ")", ":", "X", "=", "as_features", "(", "X", ",", "stack", "=", "True", ",", "bare", "=", "True", ")", "Y", "=", "self", ".", "features_", "if", "X", ".", "dim", "!=", "Y", ".", "dim", ":", "raise", "ValueError", "(", "\"MMK transform got dimension {} but had {} at fit\"", ".", "format", "(", "X", ".", "dim", ",", "Y", ".", "dim", ")", ")", "pointwise", "=", "pairwise_kernels", "(", "X", ".", "stacked_features", ",", "Y", ".", "stacked_features", ",", "metric", "=", "self", ".", "kernel", ",", "filter_params", "=", "True", ",", "*", "*", "self", ".", "_get_kernel_params", "(", ")", ")", "# TODO: is there a way to do this without a Python loop?", "K", "=", "np", ".", "empty", "(", "(", "len", "(", "X", ")", ",", "len", "(", "Y", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "X", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "Y", ")", ")", ":", "K", "[", "i", ",", "j", "]", "=", "pointwise", "[", "X", ".", "_boundaries", "[", "i", "]", ":", "X", ".", "_boundaries", "[", "i", "+", "1", "]", ",", "Y", ".", "_boundaries", "[", "j", "]", ":", "Y", ".", "_boundaries", "[", "j", "+", "1", "]", "]", ".", "mean", "(", ")", "return", "K" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
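What the transform above computes, sketched without the skl_groups Features class (plain lists of arrays stand in for it); bag sizes and the gamma value are arbitrary.

import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels

rng = np.random.RandomState(0)
X_bags = [rng.rand(5, 2), rng.rand(3, 2)]  # two bags of 2-d points
Y_bags = [rng.rand(4, 2), rng.rand(6, 2)]

K = np.empty((len(X_bags), len(Y_bags)))
for i, x in enumerate(X_bags):
    for j, y in enumerate(Y_bags):
        # Mean of all pointwise kernel values between the two bags,
        # i.e. the mean map kernel entry K[i, j].
        K[i, j] = pairwise_kernels(x, y, metric='rbf', gamma=1.0).mean()
print(K.shape)  # (2, 2)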
valid
BagMean.transform
Transform a list of bag features into a matrix of its mean features. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays Data to transform. Returns ------- X_new : array, shape ``[len(X), X.dim]`` X transformed into its means.
skl_groups/summaries/mean.py
def transform(self, X): ''' Transform a list of bag features into a matrix of its mean features. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays Data to transform. Returns ------- X_new : array, shape ``[len(X), X.dim]`` X transformed into its means. ''' X = as_features(X) return np.vstack([np.mean(bag, axis=0) for bag in X])
def transform(self, X): ''' Transform a list of bag features into a matrix of its mean features. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays Data to transform. Returns ------- X_new : array, shape ``[len(X), X.dim]`` X transformed into its means. ''' X = as_features(X) return np.vstack([np.mean(bag, axis=0) for bag in X])
[ "Transform", "a", "list", "of", "bag", "features", "into", "a", "matrix", "of", "its", "mean", "features", "." ]
dougalsutherland/skl-groups
python
https://github.com/dougalsutherland/skl-groups/blob/2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b/skl_groups/summaries/mean.py#L32-L47
[ "def", "transform", "(", "self", ",", "X", ")", ":", "X", "=", "as_features", "(", "X", ")", "return", "np", ".", "vstack", "(", "[", "np", ".", "mean", "(", "bag", ",", "axis", "=", "0", ")", "for", "bag", "in", "X", "]", ")" ]
2584c10a413626c6d5f9078cdbf3dcc84e4e9a5b
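The same computation on two toy bags, without the as_features wrapper:

import numpy as np

bags = [np.arange(6).reshape(3, 2), np.ones((4, 2))]  # bags of 2-d points
means = np.vstack([np.mean(b, axis=0) for b in bags])
print(means.shape)  # (2, 2): one mean row per bag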
valid
Sentence.from_shypo
Constructor from xml element *SHYPO* :param xml.etree.ElementTree xml: the xml *SHYPO* element :param string encoding: encoding of the xml
pyjulius/models.py
def from_shypo(cls, xml, encoding='utf-8'): """Constructor from xml element *SHYPO* :param xml.etree.ElementTree xml: the xml *SHYPO* element :param string encoding: encoding of the xml """ score = float(xml.get('SCORE')) words = [Word.from_whypo(w_xml, encoding) for w_xml in xml.findall('WHYPO') if w_xml.get('WORD') not in ['<s>', '</s>']] return cls(words, score)
def from_shypo(cls, xml, encoding='utf-8'): """Constructor from xml element *SHYPO* :param xml.etree.ElementTree xml: the xml *SHYPO* element :param string encoding: encoding of the xml """ score = float(xml.get('SCORE')) words = [Word.from_whypo(w_xml, encoding) for w_xml in xml.findall('WHYPO') if w_xml.get('WORD') not in ['<s>', '</s>']] return cls(words, score)
[ "Constructor", "from", "xml", "element", "*", "SHYPO", "*" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/models.py#L42-L51
[ "def", "from_shypo", "(", "cls", ",", "xml", ",", "encoding", "=", "'utf-8'", ")", ":", "score", "=", "float", "(", "xml", ".", "get", "(", "'SCORE'", ")", ")", "words", "=", "[", "Word", ".", "from_whypo", "(", "w_xml", ",", "encoding", ")", "for", "w_xml", "in", "xml", ".", "findall", "(", "'WHYPO'", ")", "if", "w_xml", ".", "get", "(", "'WORD'", ")", "not", "in", "[", "'<s>'", ",", "'</s>'", "]", "]", "return", "cls", "(", "words", ",", "score", ")" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
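A sketch of the XML this constructor consumes, on an invented SHYPO fragment; note how the sentence-boundary pseudo-words are filtered out:

from xml.etree.ElementTree import fromstring

shypo = fromstring(
    '<SHYPO SCORE="-3.2">'
    '<WHYPO WORD="&lt;s&gt;" CM="1.00"/>'
    '<WHYPO WORD="hello" CM="0.87"/>'
    '<WHYPO WORD="&lt;/s&gt;" CM="1.00"/>'
    '</SHYPO>')
score = float(shypo.get('SCORE'))
words = [w.get('WORD') for w in shypo.findall('WHYPO')
         if w.get('WORD') not in ['<s>', '</s>']]
print(score, words)  # -3.2 ['hello']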
valid
Word.from_whypo
Constructor from xml element *WHYPO* :param xml.etree.ElementTree xml: the xml *WHYPO* element :param string encoding: encoding of the xml
pyjulius/models.py
def from_whypo(cls, xml, encoding='utf-8'): """Constructor from xml element *WHYPO* :param xml.etree.ElementTree xml: the xml *WHYPO* element :param string encoding: encoding of the xml """ word = unicode(xml.get('WORD'), encoding) confidence = float(xml.get('CM')) return cls(word, confidence)
def from_whypo(cls, xml, encoding='utf-8'): """Constructor from xml element *WHYPO* :param xml.etree.ElementTree xml: the xml *WHYPO* element :param string encoding: encoding of the xml """ word = unicode(xml.get('WORD'), encoding) confidence = float(xml.get('CM')) return cls(word, confidence)
[ "Constructor", "from", "xml", "element", "*", "WHYPO", "*" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/models.py#L86-L95
[ "def", "from_whypo", "(", "cls", ",", "xml", ",", "encoding", "=", "'utf-8'", ")", ":", "word", "=", "unicode", "(", "xml", ".", "get", "(", "'WORD'", ")", ",", "encoding", ")", "confidence", "=", "float", "(", "xml", ".", "get", "(", "'CM'", ")", ")", "return", "cls", "(", "word", ",", "confidence", ")" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
valid
Client.run
Start listening to the server
pyjulius/core.py
def run(self): """Start listening to the server""" logger.info(u'Started listening') while not self._stop: xml = self._readxml() # Exit on invalid XML if xml is None: break # Raw xml only if not self.modelize: logger.info(u'Raw xml: %s' % xml) self.results.put(xml) continue # Model objects + raw xml as fallback if xml.tag == 'RECOGOUT': sentence = Sentence.from_shypo(xml.find('SHYPO'), self.encoding) logger.info(u'Modelized recognition: %r' % sentence) self.results.put(sentence) else: logger.info(u'Unmodelized xml: %s' % xml) self.results.put(xml) logger.info(u'Stopped listening')
def run(self): """Start listening to the server""" logger.info(u'Started listening') while not self._stop: xml = self._readxml() # Exit on invalid XML if xml is None: break # Raw xml only if not self.modelize: logger.info(u'Raw xml: %s' % xml) self.results.put(xml) continue # Model objects + raw xml as fallback if xml.tag == 'RECOGOUT': sentence = Sentence.from_shypo(xml.find('SHYPO'), self.encoding) logger.info(u'Modelized recognition: %r' % sentence) self.results.put(sentence) else: logger.info(u'Unmodelized xml: %s' % xml) self.results.put(xml) logger.info(u'Stopped listening')
[ "Start", "listening", "to", "the", "server" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L97-L122
[ "def", "run", "(", "self", ")", ":", "logger", ".", "info", "(", "u'Started listening'", ")", "while", "not", "self", ".", "_stop", ":", "xml", "=", "self", ".", "_readxml", "(", ")", "# Exit on invalid XML", "if", "xml", "is", "None", ":", "break", "# Raw xml only", "if", "not", "self", ".", "modelize", ":", "logger", ".", "info", "(", "u'Raw xml: %s'", "%", "xml", ")", "self", ".", "results", ".", "put", "(", "xml", ")", "continue", "# Model objects + raw xml as fallback", "if", "xml", ".", "tag", "==", "'RECOGOUT'", ":", "sentence", "=", "Sentence", ".", "from_shypo", "(", "xml", ".", "find", "(", "'SHYPO'", ")", ",", "self", ".", "encoding", ")", "logger", ".", "info", "(", "u'Modelized recognition: %r'", "%", "sentence", ")", "self", ".", "results", ".", "put", "(", "sentence", ")", "else", ":", "logger", ".", "info", "(", "u'Unmodelized xml: %s'", "%", "xml", ")", "self", ".", "results", ".", "put", "(", "xml", ")", "logger", ".", "info", "(", "u'Stopped listening'", ")" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
valid
Client.connect
Connect to the server :raise ConnectionError: If socket cannot establish a connection
pyjulius/core.py
def connect(self): """Connect to the server :raise ConnectionError: If socket cannot establish a connection """ try: logger.info(u'Connecting %s:%d' % (self.host, self.port)) self.sock.connect((self.host, self.port)) except socket.error: raise ConnectionError() self.state = CONNECTED
def connect(self): """Connect to the server :raise ConnectionError: If socket cannot establish a connection """ try: logger.info(u'Connecting %s:%d' % (self.host, self.port)) self.sock.connect((self.host, self.port)) except socket.error: raise ConnectionError() self.state = CONNECTED
[ "Connect", "to", "the", "server" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L124-L135
[ "def", "connect", "(", "self", ")", ":", "try", ":", "logger", ".", "info", "(", "u'Connecting %s:%d'", "%", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "self", ".", "sock", ".", "connect", "(", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "except", "socket", ".", "error", ":", "raise", "ConnectionError", "(", ")", "self", ".", "state", "=", "CONNECTED" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
valid
Client.disconnect
Disconnect from the server
pyjulius/core.py
def disconnect(self): """Disconnect from the server""" logger.info(u'Disconnecting') self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() self.state = DISCONNECTED
def disconnect(self): """Disconnect from the server""" logger.info(u'Disconnecting') self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() self.state = DISCONNECTED
[ "Disconnect", "from", "the", "server" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L137-L142
[ "def", "disconnect", "(", "self", ")", ":", "logger", ".", "info", "(", "u'Disconnecting'", ")", "self", ".", "sock", ".", "shutdown", "(", "socket", ".", "SHUT_RDWR", ")", "self", ".", "sock", ".", "close", "(", ")", "self", ".", "state", "=", "DISCONNECTED" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
valid
Client.send
Send a command to the server :param string command: command to send
pyjulius/core.py
def send(self, command, timeout=5): """Send a command to the server :param string command: command to send """ logger.info(u'Sending %s' % command) _, writable, __ = select.select([], [self.sock], [], timeout) if not writable: raise SendTimeoutError() writable[0].sendall(command + '\n')
def send(self, command, timeout=5): """Send a command to the server :param string command: command to send """ logger.info(u'Sending %s' % command) _, writable, __ = select.select([], [self.sock], [], timeout) if not writable: raise SendTimeoutError() writable[0].sendall(command + '\n')
[ "Send", "a", "command", "to", "the", "server" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L144-L154
[ "def", "send", "(", "self", ",", "command", ",", "timeout", "=", "5", ")", ":", "logger", ".", "info", "(", "u'Sending %s'", "%", "command", ")", "_", ",", "writable", ",", "__", "=", "select", ".", "select", "(", "[", "]", ",", "[", "self", ".", "sock", "]", ",", "[", "]", ",", "timeout", ")", "if", "not", "writable", ":", "raise", "SendTimeoutError", "(", ")", "writable", "[", "0", "]", ".", "sendall", "(", "command", "+", "'\\n'", ")" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
valid
Client._readline
Read a line from the server. Data is read from the socket until a character ``\n`` is found :return: the read line :rtype: string
pyjulius/core.py
def _readline(self): """Read a line from the server. Data is read from the socket until a character ``\n`` is found :return: the read line :rtype: string """ line = '' while 1: readable, _, __ = select.select([self.sock], [], [], 0.5) if self._stop: break if not readable: continue data = readable[0].recv(1) if data == '\n': break line += unicode(data, self.encoding) return line
def _readline(self): """Read a line from the server. Data is read from the socket until a character ``\n`` is found :return: the read line :rtype: string """ line = '' while 1: readable, _, __ = select.select([self.sock], [], [], 0.5) if self._stop: break if not readable: continue data = readable[0].recv(1) if data == '\n': break line += unicode(data, self.encoding) return line
[ "Read", "a", "line", "from", "the", "server", ".", "Data", "is", "read", "from", "the", "socket", "until", "a", "character", "\\", "n", "is", "found" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L156-L174
[ "def", "_readline", "(", "self", ")", ":", "line", "=", "''", "while", "1", ":", "readable", ",", "_", ",", "__", "=", "select", ".", "select", "(", "[", "self", ".", "sock", "]", ",", "[", "]", ",", "[", "]", ",", "0.5", ")", "if", "self", ".", "_stop", ":", "break", "if", "not", "readable", ":", "continue", "data", "=", "readable", "[", "0", "]", ".", "recv", "(", "1", ")", "if", "data", "==", "'\\n'", ":", "break", "line", "+=", "unicode", "(", "data", ",", "self", ".", "encoding", ")", "return", "line" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
valid
Client._readblock
Read a block from the server. Lines are read until a character ``.`` is found :return: the read block :rtype: string
pyjulius/core.py
def _readblock(self): """Read a block from the server. Lines are read until a character ``.`` is found :return: the read block :rtype: string """ block = '' while not self._stop: line = self._readline() if line == '.': break block += line return block
def _readblock(self): """Read a block from the server. Lines are read until a character ``.`` is found :return: the read block :rtype: string """ block = '' while not self._stop: line = self._readline() if line == '.': break block += line return block
[ "Read", "a", "block", "from", "the", "server", ".", "Lines", "are", "read", "until", "a", "character", ".", "is", "found" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L176-L189
[ "def", "_readblock", "(", "self", ")", ":", "block", "=", "''", "while", "not", "self", ".", "_stop", ":", "line", "=", "self", ".", "_readline", "(", ")", "if", "line", "==", "'.'", ":", "break", "block", "+=", "line", "return", "block" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
valid
Client._readxml
Read a block and return the result as XML :return: block as xml :rtype: xml.etree.ElementTree
pyjulius/core.py
def _readxml(self): """Read a block and return the result as XML :return: block as xml :rtype: xml.etree.ElementTree """ block = re.sub(r'<(/?)s>', r'&lt;\1s&gt;', self._readblock()) try: xml = XML(block) except ParseError: xml = None return xml
def _readxml(self): """Read a block and return the result as XML :return: block as xml :rtype: xml.etree.ElementTree """ block = re.sub(r'<(/?)s>', r'&lt;\1s&gt;', self._readblock()) try: xml = XML(block) except ParseError: xml = None return xml
[ "Read", "a", "block", "and", "return", "the", "result", "as", "XML" ]
Diaoul/pyjulius
python
https://github.com/Diaoul/pyjulius/blob/48f2752ff4e0f3bd7b578754b1c583cabdc24b09/pyjulius/core.py#L191-L203
[ "def", "_readxml", "(", "self", ")", ":", "block", "=", "re", ".", "sub", "(", "r'<(/?)s>'", ",", "r'&lt;\\1s&gt;'", ",", "self", ".", "_readblock", "(", ")", ")", "try", ":", "xml", "=", "XML", "(", "block", ")", "except", "ParseError", ":", "xml", "=", "None", "return", "xml" ]
48f2752ff4e0f3bd7b578754b1c583cabdc24b09
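A sketch of why _readxml escapes the sentence markers before parsing: a raw <s> inside an attribute value is not well-formed XML, so the substitution is what makes the block parseable. The sample block is invented.

import re
from xml.etree.ElementTree import XML

block = '<RECOGOUT><SHYPO SCORE="1.0"><WHYPO WORD="<s>" CM="1.0"/></SHYPO></RECOGOUT>'
escaped = re.sub(r'<(/?)s>', r'&lt;\1s&gt;', block)
xml = XML(escaped)  # parses cleanly now; XML(block) would raise ParseError
print(xml.tag)      # RECOGOUT
print(xml.find('SHYPO').find('WHYPO').get('WORD'))  # <s>, decoded back by the parser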
valid
cli
Analyse an OpenStreetMap changeset.
osmcha/scripts/cli.py
def cli(id): """Analyse an OpenStreetMap changeset.""" ch = Analyse(id) ch.full_analysis() click.echo( 'Created: %s. Modified: %s. Deleted: %s' % (ch.create, ch.modify, ch.delete) ) if ch.is_suspect: click.echo('The changeset {} is suspect! Reasons: {}'.format( id, ', '.join(ch.suspicion_reasons) )) else: click.echo('The changeset %s is not suspect!' % id)
def cli(id): """Analyse an OpenStreetMap changeset.""" ch = Analyse(id) ch.full_analysis() click.echo( 'Created: %s. Modified: %s. Deleted: %s' % (ch.create, ch.modify, ch.delete) ) if ch.is_suspect: click.echo('The changeset {} is suspect! Reasons: {}'.format( id, ', '.join(ch.suspicion_reasons) )) else: click.echo('The changeset %s is not suspect!' % id)
[ "Analyse", "an", "OpenStreetMap", "changeset", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/scripts/cli.py#L9-L22
[ "def", "cli", "(", "id", ")", ":", "ch", "=", "Analyse", "(", "id", ")", "ch", ".", "full_analysis", "(", ")", "click", ".", "echo", "(", "'Created: %s. Modified: %s. Deleted: %s'", "%", "(", "ch", ".", "create", ",", "ch", ".", "modify", ",", "ch", ".", "delete", ")", ")", "if", "ch", ".", "is_suspect", ":", "click", ".", "echo", "(", "'The changeset {} is suspect! Reasons: {}'", ".", "format", "(", "id", ",", "', '", ".", "join", "(", "ch", ".", "suspicion_reasons", ")", ")", ")", "else", ":", "click", ".", "echo", "(", "'The changeset %s is not suspect!'", "%", "id", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
get_user_details
Get information about the number of changesets, blocks and mapping days of a user, using both the OSM API and the Mapbox comments API.
osmcha/changeset.py
def get_user_details(user_id): """Get information about number of changesets, blocks and mapping days of a user, using both the OSM API and the Mapbox comments APIself. """ reasons = [] try: url = OSM_USERS_API.format(user_id=requests.compat.quote(user_id)) user_request = requests.get(url) if user_request.status_code == 200: user_data = user_request.content xml_data = ET.fromstring(user_data).getchildren()[0].getchildren() changesets = [i for i in xml_data if i.tag == 'changesets'][0] blocks = [i for i in xml_data if i.tag == 'blocks'][0] if int(changesets.get('count')) <= 5: reasons.append('New mapper') elif int(changesets.get('count')) <= 30: url = MAPBOX_USERS_API.format( user_id=requests.compat.quote(user_id) ) user_request = requests.get(url) if user_request.status_code == 200: mapping_days = int( user_request.json().get('extra').get('mapping_days') ) if mapping_days <= 5: reasons.append('New mapper') if int(blocks.getchildren()[0].get('count')) > 1: reasons.append('User has multiple blocks') except Exception as e: message = 'Could not verify user of the changeset: {}, {}' print(message.format(user_id, str(e))) return reasons
def get_user_details(user_id): """Get information about number of changesets, blocks and mapping days of a user, using both the OSM API and the Mapbox comments APIself. """ reasons = [] try: url = OSM_USERS_API.format(user_id=requests.compat.quote(user_id)) user_request = requests.get(url) if user_request.status_code == 200: user_data = user_request.content xml_data = ET.fromstring(user_data).getchildren()[0].getchildren() changesets = [i for i in xml_data if i.tag == 'changesets'][0] blocks = [i for i in xml_data if i.tag == 'blocks'][0] if int(changesets.get('count')) <= 5: reasons.append('New mapper') elif int(changesets.get('count')) <= 30: url = MAPBOX_USERS_API.format( user_id=requests.compat.quote(user_id) ) user_request = requests.get(url) if user_request.status_code == 200: mapping_days = int( user_request.json().get('extra').get('mapping_days') ) if mapping_days <= 5: reasons.append('New mapper') if int(blocks.getchildren()[0].get('count')) > 1: reasons.append('User has multiple blocks') except Exception as e: message = 'Could not verify user of the changeset: {}, {}' print(message.format(user_id, str(e))) return reasons
[ "Get", "information", "about", "number", "of", "changesets", "blocks", "and", "mapping", "days", "of", "a", "user", "using", "both", "the", "OSM", "API", "and", "the", "Mapbox", "comments", "APIself", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L45-L76
[ "def", "get_user_details", "(", "user_id", ")", ":", "reasons", "=", "[", "]", "try", ":", "url", "=", "OSM_USERS_API", ".", "format", "(", "user_id", "=", "requests", ".", "compat", ".", "quote", "(", "user_id", ")", ")", "user_request", "=", "requests", ".", "get", "(", "url", ")", "if", "user_request", ".", "status_code", "==", "200", ":", "user_data", "=", "user_request", ".", "content", "xml_data", "=", "ET", ".", "fromstring", "(", "user_data", ")", ".", "getchildren", "(", ")", "[", "0", "]", ".", "getchildren", "(", ")", "changesets", "=", "[", "i", "for", "i", "in", "xml_data", "if", "i", ".", "tag", "==", "'changesets'", "]", "[", "0", "]", "blocks", "=", "[", "i", "for", "i", "in", "xml_data", "if", "i", ".", "tag", "==", "'blocks'", "]", "[", "0", "]", "if", "int", "(", "changesets", ".", "get", "(", "'count'", ")", ")", "<=", "5", ":", "reasons", ".", "append", "(", "'New mapper'", ")", "elif", "int", "(", "changesets", ".", "get", "(", "'count'", ")", ")", "<=", "30", ":", "url", "=", "MAPBOX_USERS_API", ".", "format", "(", "user_id", "=", "requests", ".", "compat", ".", "quote", "(", "user_id", ")", ")", "user_request", "=", "requests", ".", "get", "(", "url", ")", "if", "user_request", ".", "status_code", "==", "200", ":", "mapping_days", "=", "int", "(", "user_request", ".", "json", "(", ")", ".", "get", "(", "'extra'", ")", ".", "get", "(", "'mapping_days'", ")", ")", "if", "mapping_days", "<=", "5", ":", "reasons", ".", "append", "(", "'New mapper'", ")", "if", "int", "(", "blocks", ".", "getchildren", "(", ")", "[", "0", "]", ".", "get", "(", "'count'", ")", ")", ">", "1", ":", "reasons", ".", "append", "(", "'User has multiple blocks'", ")", "except", "Exception", "as", "e", ":", "message", "=", "'Could not verify user of the changeset: {}, {}'", "print", "(", "message", ".", "format", "(", "user_id", ",", "str", "(", "e", ")", ")", ")", "return", "reasons" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
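With the network call set aside, the parsing above reduces to the following; the response is an invented fragment shaped like the OSM users API, and list(...) stands in for the long-deprecated getchildren():

from xml.etree import ElementTree as ET

user_data = ('<osm><user id="1" display_name="mapper">'
             '<changesets count="3"/>'
             '<blocks><received count="0" active="0"/></blocks>'
             '</user></osm>')
xml_data = list(ET.fromstring(user_data)[0])  # children of <user>
changesets = [i for i in xml_data if i.tag == 'changesets'][0]
blocks = [i for i in xml_data if i.tag == 'blocks'][0]
print(int(changesets.get('count')) <= 5)      # True  -> 'New mapper'
print(int(list(blocks)[0].get('count')) > 1)  # False -> no multiple-blocks flag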
valid
changeset_info
Return a dictionary with id, user, user_id, bounds, date of creation and all the tags of the changeset. Args: changeset: the XML string of the changeset.
osmcha/changeset.py
def changeset_info(changeset): """Return a dictionary with id, user, user_id, bounds, date of creation and all the tags of the changeset. Args: changeset: the XML string of the changeset. """ keys = [tag.attrib.get('k') for tag in changeset.getchildren()] keys += ['id', 'user', 'uid', 'bbox', 'created_at'] values = [tag.attrib.get('v') for tag in changeset.getchildren()] values += [ changeset.get('id'), changeset.get('user'), changeset.get('uid'), get_bounds(changeset), changeset.get('created_at') ] return dict(zip(keys, values))
def changeset_info(changeset): """Return a dictionary with id, user, user_id, bounds, date of creation and all the tags of the changeset. Args: changeset: the XML string of the changeset. """ keys = [tag.attrib.get('k') for tag in changeset.getchildren()] keys += ['id', 'user', 'uid', 'bbox', 'created_at'] values = [tag.attrib.get('v') for tag in changeset.getchildren()] values += [ changeset.get('id'), changeset.get('user'), changeset.get('uid'), get_bounds(changeset), changeset.get('created_at') ] return dict(zip(keys, values))
[ "Return", "a", "dictionary", "with", "id", "user", "user_id", "bounds", "date", "of", "creation", "and", "all", "the", "tags", "of", "the", "changeset", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L79-L94
[ "def", "changeset_info", "(", "changeset", ")", ":", "keys", "=", "[", "tag", ".", "attrib", ".", "get", "(", "'k'", ")", "for", "tag", "in", "changeset", ".", "getchildren", "(", ")", "]", "keys", "+=", "[", "'id'", ",", "'user'", ",", "'uid'", ",", "'bbox'", ",", "'created_at'", "]", "values", "=", "[", "tag", ".", "attrib", ".", "get", "(", "'v'", ")", "for", "tag", "in", "changeset", ".", "getchildren", "(", ")", "]", "values", "+=", "[", "changeset", ".", "get", "(", "'id'", ")", ",", "changeset", ".", "get", "(", "'user'", ")", ",", "changeset", ".", "get", "(", "'uid'", ")", ",", "get_bounds", "(", "changeset", ")", ",", "changeset", ".", "get", "(", "'created_at'", ")", "]", "return", "dict", "(", "zip", "(", "keys", ",", "values", ")", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
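A runnable sketch of the key/value zipping above on an invented changeset element; the bbox entry built via get_bounds is left out to keep the snippet dependency-free:

from xml.etree.ElementTree import fromstring

changeset = fromstring(
    '<changeset id="1" user="mapper" uid="42" created_at="2017-01-01T00:00:00Z">'
    '<tag k="comment" v="fix roads"/>'
    '<tag k="created_by" v="iD 2.0"/>'
    '</changeset>')
tags = list(changeset)  # list() instead of the deprecated getchildren()
keys = [t.get('k') for t in tags] + ['id', 'user', 'uid', 'created_at']
values = [t.get('v') for t in tags] + [changeset.get(a) for a in ('id', 'user', 'uid', 'created_at')]
print(dict(zip(keys, values)))
# {'comment': 'fix roads', 'created_by': 'iD 2.0', 'id': '1', ...}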
valid
get_changeset
Get the changeset using the OSM API and return the content as an XML ElementTree. Args: changeset: the id of the changeset.
osmcha/changeset.py
def get_changeset(changeset): """Get the changeset using the OSM API and return the content as a XML ElementTree. Args: changeset: the id of the changeset. """ url = 'https://www.openstreetmap.org/api/0.6/changeset/{}/download'.format( changeset ) return ET.fromstring(requests.get(url).content)
def get_changeset(changeset): """Get the changeset using the OSM API and return the content as a XML ElementTree. Args: changeset: the id of the changeset. """ url = 'https://www.openstreetmap.org/api/0.6/changeset/{}/download'.format( changeset ) return ET.fromstring(requests.get(url).content)
[ "Get", "the", "changeset", "using", "the", "OSM", "API", "and", "return", "the", "content", "as", "a", "XML", "ElementTree", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L97-L107
[ "def", "get_changeset", "(", "changeset", ")", ":", "url", "=", "'https://www.openstreetmap.org/api/0.6/changeset/{}/download'", ".", "format", "(", "changeset", ")", "return", "ET", ".", "fromstring", "(", "requests", ".", "get", "(", "url", ")", ".", "content", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
get_metadata
Get the metadata of a changeset using the OSM API and return it as an XML ElementTree. Args: changeset: the id of the changeset.
osmcha/changeset.py
def get_metadata(changeset): """Get the metadata of a changeset using the OSM API and return it as a XML ElementTree. Args: changeset: the id of the changeset. """ url = 'https://www.openstreetmap.org/api/0.6/changeset/{}'.format(changeset) return ET.fromstring(requests.get(url).content).getchildren()[0]
def get_metadata(changeset): """Get the metadata of a changeset using the OSM API and return it as a XML ElementTree. Args: changeset: the id of the changeset. """ url = 'https://www.openstreetmap.org/api/0.6/changeset/{}'.format(changeset) return ET.fromstring(requests.get(url).content).getchildren()[0]
[ "Get", "the", "metadata", "of", "a", "changeset", "using", "the", "OSM", "API", "and", "return", "it", "as", "a", "XML", "ElementTree", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L110-L118
[ "def", "get_metadata", "(", "changeset", ")", ":", "url", "=", "'https://www.openstreetmap.org/api/0.6/changeset/{}'", ".", "format", "(", "changeset", ")", "return", "ET", ".", "fromstring", "(", "requests", ".", "get", "(", "url", ")", ".", "content", ")", ".", "getchildren", "(", ")", "[", "0", "]" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
get_bounds
Get the bounds of the changeset and return it as a Polygon object. If the changeset has no coordinates (the case of changesets that deal only with relations), it returns an empty Polygon. Args: changeset: the XML string of the changeset.
osmcha/changeset.py
def get_bounds(changeset): """Get the bounds of the changeset and return it as a Polygon object. If the changeset has not coordinates (case of the changesets that deal only with relations), it returns an empty Polygon. Args: changeset: the XML string of the changeset. """ try: return Polygon([ (float(changeset.get('min_lon')), float(changeset.get('min_lat'))), (float(changeset.get('max_lon')), float(changeset.get('min_lat'))), (float(changeset.get('max_lon')), float(changeset.get('max_lat'))), (float(changeset.get('min_lon')), float(changeset.get('max_lat'))), (float(changeset.get('min_lon')), float(changeset.get('min_lat'))), ]) except TypeError: return Polygon()
def get_bounds(changeset): """Get the bounds of the changeset and return it as a Polygon object. If the changeset has not coordinates (case of the changesets that deal only with relations), it returns an empty Polygon. Args: changeset: the XML string of the changeset. """ try: return Polygon([ (float(changeset.get('min_lon')), float(changeset.get('min_lat'))), (float(changeset.get('max_lon')), float(changeset.get('min_lat'))), (float(changeset.get('max_lon')), float(changeset.get('max_lat'))), (float(changeset.get('min_lon')), float(changeset.get('max_lat'))), (float(changeset.get('min_lon')), float(changeset.get('min_lat'))), ]) except TypeError: return Polygon()
[ "Get", "the", "bounds", "of", "the", "changeset", "and", "return", "it", "as", "a", "Polygon", "object", ".", "If", "the", "changeset", "has", "not", "coordinates", "(", "case", "of", "the", "changesets", "that", "deal", "only", "with", "relations", ")", "it", "returns", "an", "empty", "Polygon", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L121-L138
[ "def", "get_bounds", "(", "changeset", ")", ":", "try", ":", "return", "Polygon", "(", "[", "(", "float", "(", "changeset", ".", "get", "(", "'min_lon'", ")", ")", ",", "float", "(", "changeset", ".", "get", "(", "'min_lat'", ")", ")", ")", ",", "(", "float", "(", "changeset", ".", "get", "(", "'max_lon'", ")", ")", ",", "float", "(", "changeset", ".", "get", "(", "'min_lat'", ")", ")", ")", ",", "(", "float", "(", "changeset", ".", "get", "(", "'max_lon'", ")", ")", ",", "float", "(", "changeset", ".", "get", "(", "'max_lat'", ")", ")", ")", ",", "(", "float", "(", "changeset", ".", "get", "(", "'min_lon'", ")", ")", ",", "float", "(", "changeset", ".", "get", "(", "'max_lat'", ")", ")", ")", ",", "(", "float", "(", "changeset", ".", "get", "(", "'min_lon'", ")", ")", ",", "float", "(", "changeset", ".", "get", "(", "'min_lat'", ")", ")", ")", ",", "]", ")", "except", "TypeError", ":", "return", "Polygon", "(", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
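The same bounding-box construction with the changeset attributes pulled into a plain dict (invented coordinates); shapely is the record's own dependency:

from shapely.geometry import Polygon

attrs = {'min_lon': '0.0', 'min_lat': '0.0', 'max_lon': '1.0', 'max_lat': '2.0'}
bbox = Polygon([
    (float(attrs['min_lon']), float(attrs['min_lat'])),
    (float(attrs['max_lon']), float(attrs['min_lat'])),
    (float(attrs['max_lon']), float(attrs['max_lat'])),
    (float(attrs['min_lon']), float(attrs['max_lat'])),
    (float(attrs['min_lon']), float(attrs['min_lat'])),
])
print(bbox.area)  # 2.0
# A relation-only changeset has no such attributes; float(None) raises
# TypeError, which the record above catches to return an empty Polygon().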
valid
find_words
Check if a text contains any of the suspect words (or words that start with one of the suspect words). You can set some words to be excluded from the search, so you can remove false positives like 'important' being detected when you search for 'import'. It returns True if the number of suspect words found is greater than the number of excluded words found. Otherwise, it returns False. Args: text (str): a string with the text to be analysed. It will be converted to lowercase. suspect_words: a list of strings whose presence in the text you want to check. excluded_words: a list of strings to be whitelisted.
osmcha/changeset.py
def find_words(text, suspect_words, excluded_words=[]): """Check if a text has some of the suspect words (or words that starts with one of the suspect words). You can set some words to be excluded of the search, so you can remove false positives like 'important' be detected when you search by 'import'. It will return True if the number of suspect words found is greater than the number of excluded words. Otherwise, it will return False. Args: text (str): a string with the text to be analysed. It will be converted to lowercase. suspect_words: a list of strings that you want to check the presence in the text. excluded_words: a list of strings to be whitelisted. """ text = text.lower() suspect_found = [i for i in re.finditer(make_regex(suspect_words), text)] if len(excluded_words) > 0: excluded_found = [i for i in re.finditer(make_regex(excluded_words), text)] if len(suspect_found) > len(excluded_found): return True else: return False else: if len(suspect_found) > 0: return True else: return False
def find_words(text, suspect_words, excluded_words=[]): """Check if a text has some of the suspect words (or words that starts with one of the suspect words). You can set some words to be excluded of the search, so you can remove false positives like 'important' be detected when you search by 'import'. It will return True if the number of suspect words found is greater than the number of excluded words. Otherwise, it will return False. Args: text (str): a string with the text to be analysed. It will be converted to lowercase. suspect_words: a list of strings that you want to check the presence in the text. excluded_words: a list of strings to be whitelisted. """ text = text.lower() suspect_found = [i for i in re.finditer(make_regex(suspect_words), text)] if len(excluded_words) > 0: excluded_found = [i for i in re.finditer(make_regex(excluded_words), text)] if len(suspect_found) > len(excluded_found): return True else: return False else: if len(suspect_found) > 0: return True else: return False
[ "Check", "if", "a", "text", "has", "some", "of", "the", "suspect", "words", "(", "or", "words", "that", "starts", "with", "one", "of", "the", "suspect", "words", ")", ".", "You", "can", "set", "some", "words", "to", "be", "excluded", "of", "the", "search", "so", "you", "can", "remove", "false", "positives", "like", "important", "be", "detected", "when", "you", "search", "by", "import", ".", "It", "will", "return", "True", "if", "the", "number", "of", "suspect", "words", "found", "is", "greater", "than", "the", "number", "of", "excluded", "words", ".", "Otherwise", "it", "will", "return", "False", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L153-L180
[ "def", "find_words", "(", "text", ",", "suspect_words", ",", "excluded_words", "=", "[", "]", ")", ":", "text", "=", "text", ".", "lower", "(", ")", "suspect_found", "=", "[", "i", "for", "i", "in", "re", ".", "finditer", "(", "make_regex", "(", "suspect_words", ")", ",", "text", ")", "]", "if", "len", "(", "excluded_words", ")", ">", "0", ":", "excluded_found", "=", "[", "i", "for", "i", "in", "re", ".", "finditer", "(", "make_regex", "(", "excluded_words", ")", ",", "text", ")", "]", "if", "len", "(", "suspect_found", ")", ">", "len", "(", "excluded_found", ")", ":", "return", "True", "else", ":", "return", "False", "else", ":", "if", "len", "(", "suspect_found", ")", ">", "0", ":", "return", "True", "else", ":", "return", "False" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
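make_regex is defined elsewhere in osmcha and is not shown in this excerpt, so the stand-in below only assumes it builds an alternation that also matches longer words starting with a suspect word; the sample text is invented:

import re

def make_regex(words):
    # Stand-in (assumption) for osmcha's make_regex: match each word,
    # including longer words that start with it.
    return r'|'.join(r'\b{}'.format(w) for w in words)

text = 'imported data from google maps'.lower()
suspect_found = list(re.finditer(make_regex(['import', 'google']), text))
excluded_found = list(re.finditer(make_regex(['important']), text))
print(len(suspect_found) > len(excluded_found))  # True -> 'suspect_word'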
valid
ChangesetList.read_file
Download the replication changeset file or read it directly from the filesystem (for testing purposes).
osmcha/changeset.py
def read_file(self, changeset_file): """Download the replication changeset file or read it directly from the filesystem (to test purposes). """ if isfile(changeset_file): self.filename = changeset_file else: self.path = mkdtemp() self.filename = join(self.path, basename(changeset_file)) download(changeset_file, self.path) self.xml = ET.fromstring(gzip.open(self.filename).read()) # delete folder created to download the file if not isfile(changeset_file): rmtree(self.path)
def read_file(self, changeset_file): """Download the replication changeset file or read it directly from the filesystem (to test purposes). """ if isfile(changeset_file): self.filename = changeset_file else: self.path = mkdtemp() self.filename = join(self.path, basename(changeset_file)) download(changeset_file, self.path) self.xml = ET.fromstring(gzip.open(self.filename).read()) # delete folder created to download the file if not isfile(changeset_file): rmtree(self.path)
[ "Download", "the", "replication", "changeset", "file", "or", "read", "it", "directly", "from", "the", "filesystem", "(", "to", "test", "purposes", ")", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L210-L225
[ "def", "read_file", "(", "self", ",", "changeset_file", ")", ":", "if", "isfile", "(", "changeset_file", ")", ":", "self", ".", "filename", "=", "changeset_file", "else", ":", "self", ".", "path", "=", "mkdtemp", "(", ")", "self", ".", "filename", "=", "join", "(", "self", ".", "path", ",", "basename", "(", "changeset_file", ")", ")", "download", "(", "changeset_file", ",", "self", ".", "path", ")", "self", ".", "xml", "=", "ET", ".", "fromstring", "(", "gzip", ".", "open", "(", "self", ".", "filename", ")", ".", "read", "(", ")", ")", "# delete folder created to download the file", "if", "not", "isfile", "(", "changeset_file", ")", ":", "rmtree", "(", "self", ".", "path", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
ChangesetList.get_area
Read the first feature from the geojson and return it as a Polygon object.
osmcha/changeset.py
def get_area(self, geojson): """Read the first feature from the geojson and return it as a Polygon object. """ geojson = json.load(open(geojson, 'r')) self.area = Polygon(geojson['features'][0]['geometry']['coordinates'][0])
def get_area(self, geojson): """Read the first feature from the geojson and return it as a Polygon object. """ geojson = json.load(open(geojson, 'r')) self.area = Polygon(geojson['features'][0]['geometry']['coordinates'][0])
[ "Read", "the", "first", "feature", "from", "the", "geojson", "and", "return", "it", "as", "a", "Polygon", "object", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L227-L232
[ "def", "get_area", "(", "self", ",", "geojson", ")", ":", "geojson", "=", "json", ".", "load", "(", "open", "(", "geojson", ",", "'r'", ")", ")", "self", ".", "area", "=", "Polygon", "(", "geojson", "[", "'features'", "]", "[", "0", "]", "[", "'geometry'", "]", "[", "'coordinates'", "]", "[", "0", "]", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
ChangesetList.filter
Filter the changesets that intersect with the geojson geometry.
osmcha/changeset.py
def filter(self): """Filter the changesets that intersects with the geojson geometry.""" self.content = [ ch for ch in self.xml.getchildren() if get_bounds(ch).intersects(self.area) ]
def filter(self): """Filter the changesets that intersects with the geojson geometry.""" self.content = [ ch for ch in self.xml.getchildren() if get_bounds(ch).intersects(self.area) ]
[ "Filter", "the", "changesets", "that", "intersects", "with", "the", "geojson", "geometry", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L234-L240
[ "def", "filter", "(", "self", ")", ":", "self", ".", "content", "=", "[", "ch", "for", "ch", "in", "self", ".", "xml", ".", "getchildren", "(", ")", "if", "get_bounds", "(", "ch", ")", ".", "intersects", "(", "self", ".", "area", ")", "]" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
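The geometry test behind filter, on two invented changeset bounding boxes against a toy area:

from shapely.geometry import Polygon

area = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
changeset_bounds = [
    Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),  # overlaps the area
    Polygon([(5, 5), (6, 5), (6, 6), (5, 6)]),  # disjoint from it
]
kept = [b for b in changeset_bounds if b.intersects(area)]
print(len(kept))  # 1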
valid
Analyse.set_fields
Set the fields of this class with the metadata of the analysed changeset.
osmcha/changeset.py
def set_fields(self, changeset): """Set the fields of this class with the metadata of the analysed changeset. """ self.id = int(changeset.get('id')) self.user = changeset.get('user') self.uid = changeset.get('uid') self.editor = changeset.get('created_by', None) self.review_requested = changeset.get('review_requested', False) self.host = changeset.get('host', 'Not reported') self.bbox = changeset.get('bbox').wkt self.comment = changeset.get('comment', 'Not reported') self.source = changeset.get('source', 'Not reported') self.imagery_used = changeset.get('imagery_used', 'Not reported') self.date = datetime.strptime( changeset.get('created_at'), '%Y-%m-%dT%H:%M:%SZ' ) self.suspicion_reasons = [] self.is_suspect = False self.powerfull_editor = False
def set_fields(self, changeset): """Set the fields of this class with the metadata of the analysed changeset. """ self.id = int(changeset.get('id')) self.user = changeset.get('user') self.uid = changeset.get('uid') self.editor = changeset.get('created_by', None) self.review_requested = changeset.get('review_requested', False) self.host = changeset.get('host', 'Not reported') self.bbox = changeset.get('bbox').wkt self.comment = changeset.get('comment', 'Not reported') self.source = changeset.get('source', 'Not reported') self.imagery_used = changeset.get('imagery_used', 'Not reported') self.date = datetime.strptime( changeset.get('created_at'), '%Y-%m-%dT%H:%M:%SZ' ) self.suspicion_reasons = [] self.is_suspect = False self.powerfull_editor = False
[ "Set", "the", "fields", "of", "this", "class", "with", "the", "metadata", "of", "the", "analysed", "changeset", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L268-L288
[ "def", "set_fields", "(", "self", ",", "changeset", ")", ":", "self", ".", "id", "=", "int", "(", "changeset", ".", "get", "(", "'id'", ")", ")", "self", ".", "user", "=", "changeset", ".", "get", "(", "'user'", ")", "self", ".", "uid", "=", "changeset", ".", "get", "(", "'uid'", ")", "self", ".", "editor", "=", "changeset", ".", "get", "(", "'created_by'", ",", "None", ")", "self", ".", "review_requested", "=", "changeset", ".", "get", "(", "'review_requested'", ",", "False", ")", "self", ".", "host", "=", "changeset", ".", "get", "(", "'host'", ",", "'Not reported'", ")", "self", ".", "bbox", "=", "changeset", ".", "get", "(", "'bbox'", ")", ".", "wkt", "self", ".", "comment", "=", "changeset", ".", "get", "(", "'comment'", ",", "'Not reported'", ")", "self", ".", "source", "=", "changeset", ".", "get", "(", "'source'", ",", "'Not reported'", ")", "self", ".", "imagery_used", "=", "changeset", ".", "get", "(", "'imagery_used'", ",", "'Not reported'", ")", "self", ".", "date", "=", "datetime", ".", "strptime", "(", "changeset", ".", "get", "(", "'created_at'", ")", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", "self", ".", "suspicion_reasons", "=", "[", "]", "self", ".", "is_suspect", "=", "False", "self", ".", "powerfull_editor", "=", "False" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
Analyse.label_suspicious
Add suspicion reason and set the suspicious flag.
osmcha/changeset.py
def label_suspicious(self, reason): """Add suspicion reason and set the suspicious flag.""" self.suspicion_reasons.append(reason) self.is_suspect = True
def label_suspicious(self, reason): """Add suspicion reason and set the suspicious flag.""" self.suspicion_reasons.append(reason) self.is_suspect = True
[ "Add", "suspicion", "reason", "and", "set", "the", "suspicious", "flag", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L290-L293
[ "def", "label_suspicious", "(", "self", ",", "reason", ")", ":", "self", ".", "suspicion_reasons", ".", "append", "(", "reason", ")", "self", ".", "is_suspect", "=", "True" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
Analyse.full_analysis
Execute the count and verify_words methods.
osmcha/changeset.py
def full_analysis(self): """Execute the count and verify_words methods.""" self.count() self.verify_words() self.verify_user() if self.review_requested == 'yes': self.label_suspicious('Review requested')
def full_analysis(self): """Execute the count and verify_words methods.""" self.count() self.verify_words() self.verify_user() if self.review_requested == 'yes': self.label_suspicious('Review requested')
[ "Execute", "the", "count", "and", "verify_words", "methods", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L295-L302
[ "def", "full_analysis", "(", "self", ")", ":", "self", ".", "count", "(", ")", "self", ".", "verify_words", "(", ")", "self", ".", "verify_user", "(", ")", "if", "self", ".", "review_requested", "==", "'yes'", ":", "self", ".", "label_suspicious", "(", "'Review requested'", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
Analyse.verify_user
Verify if the changeset was made by an inexperienced mapper (anyone with fewer than 5 edits) or by a user that was blocked more than once.
osmcha/changeset.py
def verify_user(self): """Verify if the changeset was made by a inexperienced mapper (anyone with less than 5 edits) or by a user that was blocked more than once. """ user_reasons = get_user_details(self.uid) [self.label_suspicious(reason) for reason in user_reasons]
def verify_user(self): """Verify if the changeset was made by a inexperienced mapper (anyone with less than 5 edits) or by a user that was blocked more than once. """ user_reasons = get_user_details(self.uid) [self.label_suspicious(reason) for reason in user_reasons]
[ "Verify", "if", "the", "changeset", "was", "made", "by", "a", "inexperienced", "mapper", "(", "anyone", "with", "less", "than", "5", "edits", ")", "or", "by", "a", "user", "that", "was", "blocked", "more", "than", "once", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L304-L309
[ "def", "verify_user", "(", "self", ")", ":", "user_reasons", "=", "get_user_details", "(", "self", ".", "uid", ")", "[", "self", ".", "label_suspicious", "(", "reason", ")", "for", "reason", "in", "user_reasons", "]" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
Analyse.verify_words
Verify the fields source, imagery_used and comment of the changeset for some suspect words.
osmcha/changeset.py
def verify_words(self): """Verify the fields source, imagery_used and comment of the changeset for some suspect words. """ if self.comment: if find_words(self.comment, self.suspect_words, self.excluded_words): self.label_suspicious('suspect_word') if self.source: for word in self.illegal_sources: if word in self.source.lower(): self.label_suspicious('suspect_word') break if self.imagery_used: for word in self.illegal_sources: if word in self.imagery_used.lower(): self.label_suspicious('suspect_word') break self.suspicion_reasons = list(set(self.suspicion_reasons))
def verify_words(self): """Verify the fields source, imagery_used and comment of the changeset for some suspect words. """ if self.comment: if find_words(self.comment, self.suspect_words, self.excluded_words): self.label_suspicious('suspect_word') if self.source: for word in self.illegal_sources: if word in self.source.lower(): self.label_suspicious('suspect_word') break if self.imagery_used: for word in self.illegal_sources: if word in self.imagery_used.lower(): self.label_suspicious('suspect_word') break self.suspicion_reasons = list(set(self.suspicion_reasons))
[ "Verify", "the", "fields", "source", "imagery_used", "and", "comment", "of", "the", "changeset", "for", "some", "suspect", "words", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L311-L331
[ "def", "verify_words", "(", "self", ")", ":", "if", "self", ".", "comment", ":", "if", "find_words", "(", "self", ".", "comment", ",", "self", ".", "suspect_words", ",", "self", ".", "excluded_words", ")", ":", "self", ".", "label_suspicious", "(", "'suspect_word'", ")", "if", "self", ".", "source", ":", "for", "word", "in", "self", ".", "illegal_sources", ":", "if", "word", "in", "self", ".", "source", ".", "lower", "(", ")", ":", "self", ".", "label_suspicious", "(", "'suspect_word'", ")", "break", "if", "self", ".", "imagery_used", ":", "for", "word", "in", "self", ".", "illegal_sources", ":", "if", "word", "in", "self", ".", "imagery_used", ".", "lower", "(", ")", ":", "self", ".", "label_suspicious", "(", "'suspect_word'", ")", "break", "self", ".", "suspicion_reasons", "=", "list", "(", "set", "(", "self", ".", "suspicion_reasons", ")", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
valid
Analyse.verify_editor
Verify if the software used in the changeset is a powerfull_editor.
osmcha/changeset.py
def verify_editor(self): """Verify if the software used in the changeset is a powerfull_editor. """ powerful_editors = [ 'josm', 'level0', 'merkaartor', 'qgis', 'arcgis', 'upload.py', 'osmapi', 'Services_OpenStreetMap' ] if self.editor is not None: for editor in powerful_editors: if editor in self.editor.lower(): self.powerfull_editor = True break if 'iD' in self.editor: trusted_hosts = [ 'www.openstreetmap.org/id', 'www.openstreetmap.org/edit', 'improveosm.org', 'strava.github.io/iD', 'preview.ideditor.com/release', 'preview.ideditor.com/master', 'hey.mapbox.com/iD-internal', 'projets.pavie.info/id-indoor', 'maps.mapcat.com/edit', 'id.softek.ir' ] if self.host.split('://')[-1].strip('/') not in trusted_hosts: self.label_suspicious('Unknown iD instance') else: self.powerfull_editor = True self.label_suspicious('Software editor was not declared')
def verify_editor(self): """Verify if the software used in the changeset is a powerfull_editor. """ powerful_editors = [ 'josm', 'level0', 'merkaartor', 'qgis', 'arcgis', 'upload.py', 'osmapi', 'Services_OpenStreetMap' ] if self.editor is not None: for editor in powerful_editors: if editor in self.editor.lower(): self.powerfull_editor = True break if 'iD' in self.editor: trusted_hosts = [ 'www.openstreetmap.org/id', 'www.openstreetmap.org/edit', 'improveosm.org', 'strava.github.io/iD', 'preview.ideditor.com/release', 'preview.ideditor.com/master', 'hey.mapbox.com/iD-internal', 'projets.pavie.info/id-indoor', 'maps.mapcat.com/edit', 'id.softek.ir' ] if self.host.split('://')[-1].strip('/') not in trusted_hosts: self.label_suspicious('Unknown iD instance') else: self.powerfull_editor = True self.label_suspicious('Software editor was not declared')
[ "Verify", "if", "the", "software", "used", "in", "the", "changeset", "is", "a", "powerfull_editor", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L333-L363
[ "def", "verify_editor", "(", "self", ")", ":", "powerful_editors", "=", "[", "'josm'", ",", "'level0'", ",", "'merkaartor'", ",", "'qgis'", ",", "'arcgis'", ",", "'upload.py'", ",", "'osmapi'", ",", "'Services_OpenStreetMap'", "]", "if", "self", ".", "editor", "is", "not", "None", ":", "for", "editor", "in", "powerful_editors", ":", "if", "editor", "in", "self", ".", "editor", ".", "lower", "(", ")", ":", "self", ".", "powerfull_editor", "=", "True", "break", "if", "'iD'", "in", "self", ".", "editor", ":", "trusted_hosts", "=", "[", "'www.openstreetmap.org/id'", ",", "'www.openstreetmap.org/edit'", ",", "'improveosm.org'", ",", "'strava.github.io/iD'", ",", "'preview.ideditor.com/release'", ",", "'preview.ideditor.com/master'", ",", "'hey.mapbox.com/iD-internal'", ",", "'projets.pavie.info/id-indoor'", ",", "'maps.mapcat.com/edit'", ",", "'id.softek.ir'", "]", "if", "self", ".", "host", ".", "split", "(", "'://'", ")", "[", "-", "1", "]", ".", "strip", "(", "'/'", ")", "not", "in", "trusted_hosts", ":", "self", ".", "label_suspicious", "(", "'Unknown iD instance'", ")", "else", ":", "self", ".", "powerfull_editor", "=", "True", "self", ".", "label_suspicious", "(", "'Software editor was not declared'", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
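The host normalization that decides whether an iD instance is trusted, shown on two invented hosts:

trusted_hosts = ['www.openstreetmap.org/id', 'www.openstreetmap.org/edit']
for host in ('https://www.openstreetmap.org/edit/', 'http://example.com/iD/'):
    normalized = host.split('://')[-1].strip('/')  # drop scheme and slashes
    print(normalized, normalized in trusted_hosts)
# www.openstreetmap.org/edit True
# example.com/iD False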
valid
Analyse.count
Count the number of elements created, modified and deleted by the changeset and analyses if it is a possible import, mass modification or a mass deletion.
osmcha/changeset.py
def count(self): """Count the number of elements created, modified and deleted by the changeset and analyses if it is a possible import, mass modification or a mass deletion. """ xml = get_changeset(self.id) actions = [action.tag for action in xml.getchildren()] self.create = actions.count('create') self.modify = actions.count('modify') self.delete = actions.count('delete') self.verify_editor() try: if (self.create / len(actions) > self.percentage and self.create > self.create_threshold and (self.powerfull_editor or self.create > self.top_threshold)): self.label_suspicious('possible import') elif (self.modify / len(actions) > self.percentage and self.modify > self.modify_threshold): self.label_suspicious('mass modification') elif ((self.delete / len(actions) > self.percentage and self.delete > self.delete_threshold) or self.delete > self.top_threshold): self.label_suspicious('mass deletion') except ZeroDivisionError: print('It seems this changeset was redacted')
def count(self): """Count the number of elements created, modified and deleted by the changeset and analyses if it is a possible import, mass modification or a mass deletion. """ xml = get_changeset(self.id) actions = [action.tag for action in xml.getchildren()] self.create = actions.count('create') self.modify = actions.count('modify') self.delete = actions.count('delete') self.verify_editor() try: if (self.create / len(actions) > self.percentage and self.create > self.create_threshold and (self.powerfull_editor or self.create > self.top_threshold)): self.label_suspicious('possible import') elif (self.modify / len(actions) > self.percentage and self.modify > self.modify_threshold): self.label_suspicious('mass modification') elif ((self.delete / len(actions) > self.percentage and self.delete > self.delete_threshold) or self.delete > self.top_threshold): self.label_suspicious('mass deletion') except ZeroDivisionError: print('It seems this changeset was redacted')
[ "Count", "the", "number", "of", "elements", "created", "modified", "and", "deleted", "by", "the", "changeset", "and", "analyses", "if", "it", "is", "a", "possible", "import", "mass", "modification", "or", "a", "mass", "deletion", "." ]
willemarcel/osmcha
python
https://github.com/willemarcel/osmcha/blob/9a22ed11834ed20c6b91e7b5685f66880ea09350/osmcha/changeset.py#L365-L390
[ "def", "count", "(", "self", ")", ":", "xml", "=", "get_changeset", "(", "self", ".", "id", ")", "actions", "=", "[", "action", ".", "tag", "for", "action", "in", "xml", ".", "getchildren", "(", ")", "]", "self", ".", "create", "=", "actions", ".", "count", "(", "'create'", ")", "self", ".", "modify", "=", "actions", ".", "count", "(", "'modify'", ")", "self", ".", "delete", "=", "actions", ".", "count", "(", "'delete'", ")", "self", ".", "verify_editor", "(", ")", "try", ":", "if", "(", "self", ".", "create", "/", "len", "(", "actions", ")", ">", "self", ".", "percentage", "and", "self", ".", "create", ">", "self", ".", "create_threshold", "and", "(", "self", ".", "powerfull_editor", "or", "self", ".", "create", ">", "self", ".", "top_threshold", ")", ")", ":", "self", ".", "label_suspicious", "(", "'possible import'", ")", "elif", "(", "self", ".", "modify", "/", "len", "(", "actions", ")", ">", "self", ".", "percentage", "and", "self", ".", "modify", ">", "self", ".", "modify_threshold", ")", ":", "self", ".", "label_suspicious", "(", "'mass modification'", ")", "elif", "(", "(", "self", ".", "delete", "/", "len", "(", "actions", ")", ">", "self", ".", "percentage", "and", "self", ".", "delete", ">", "self", ".", "delete_threshold", ")", "or", "self", ".", "delete", ">", "self", ".", "top_threshold", ")", ":", "self", ".", "label_suspicious", "(", "'mass deletion'", ")", "except", "ZeroDivisionError", ":", "print", "(", "'It seems this changeset was redacted'", ")" ]
9a22ed11834ed20c6b91e7b5685f66880ea09350
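Editor's note: the classification in count() above is a ratio-plus-threshold heuristic. Below is a minimal standalone sketch of the same decision logic; the function name classify and the default threshold values are assumptions chosen for illustration, not osmcha's real defaults.

def classify(create, modify, delete, percentage=0.7,
             create_threshold=200, modify_threshold=200,
             delete_threshold=30, top_threshold=1000,
             powerfull_editor=False):  # spelling mirrors the source attribute
    # Mirrors count(): compare each action type's share of the changeset
    # against a fractional cutoff and an absolute threshold.
    total = create + modify + delete
    if total == 0:
        return 'redacted'  # the branch where count() hits ZeroDivisionError
    if (create / total > percentage and create > create_threshold
            and (powerfull_editor or create > top_threshold)):
        return 'possible import'
    if modify / total > percentage and modify > modify_threshold:
        return 'mass modification'
    if ((delete / total > percentage and delete > delete_threshold)
            or delete > top_threshold):
        return 'mass deletion'
    return 'not suspicious'

print(classify(create=1500, modify=10, delete=5))  # -> 'possible import'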
valid
_unwrap_stream
Get a stream URI from a playlist URI, ``uri``. Unwraps nested playlists until something that's not a playlist is found or the ``timeout`` is reached.
mopidy_audioaddict/actor.py
def _unwrap_stream(uri, timeout, scanner, requests_session): """ Get a stream URI from a playlist URI, ``uri``. Unwraps nested playlists until something that's not a playlist is found or the ``timeout`` is reached. """ original_uri = uri seen_uris = set() deadline = time.time() + timeout while time.time() < deadline: if uri in seen_uris: logger.info( 'Unwrapping stream from URI (%s) failed: ' 'playlist referenced itself', uri) return None else: seen_uris.add(uri) logger.debug('Unwrapping stream from URI: %s', uri) try: scan_timeout = deadline - time.time() if scan_timeout < 0: logger.info( 'Unwrapping stream from URI (%s) failed: ' 'timed out in %sms', uri, timeout) return None scan_result = scanner.scan(uri, timeout=scan_timeout) except exceptions.ScannerError as exc: logger.debug('GStreamer failed scanning URI (%s): %s', uri, exc) scan_result = None if scan_result is not None and not ( scan_result.mime.startswith('text/') or scan_result.mime.startswith('application/')): logger.debug( 'Unwrapped potential %s stream: %s', scan_result.mime, uri) return uri download_timeout = deadline - time.time() if download_timeout < 0: logger.info( 'Unwrapping stream from URI (%s) failed: timed out in %sms', uri, timeout) return None content = http.download( requests_session, uri, timeout=download_timeout) if content is None: logger.info( 'Unwrapping stream from URI (%s) failed: ' 'error downloading URI %s', original_uri, uri) return None uris = playlists.parse(content) if not uris: logger.debug( 'Failed parsing URI (%s) as playlist; found potential stream.', uri) return uri # TODO Test streams and return first that seems to be playable logger.debug( 'Parsed playlist (%s) and found new URI: %s', uri, uris[0]) uri = uris[0]
def _unwrap_stream(uri, timeout, scanner, requests_session): """ Get a stream URI from a playlist URI, ``uri``. Unwraps nested playlists until something that's not a playlist is found or the ``timeout`` is reached. """ original_uri = uri seen_uris = set() deadline = time.time() + timeout while time.time() < deadline: if uri in seen_uris: logger.info( 'Unwrapping stream from URI (%s) failed: ' 'playlist referenced itself', uri) return None else: seen_uris.add(uri) logger.debug('Unwrapping stream from URI: %s', uri) try: scan_timeout = deadline - time.time() if scan_timeout < 0: logger.info( 'Unwrapping stream from URI (%s) failed: ' 'timed out in %sms', uri, timeout) return None scan_result = scanner.scan(uri, timeout=scan_timeout) except exceptions.ScannerError as exc: logger.debug('GStreamer failed scanning URI (%s): %s', uri, exc) scan_result = None if scan_result is not None and not ( scan_result.mime.startswith('text/') or scan_result.mime.startswith('application/')): logger.debug( 'Unwrapped potential %s stream: %s', scan_result.mime, uri) return uri download_timeout = deadline - time.time() if download_timeout < 0: logger.info( 'Unwrapping stream from URI (%s) failed: timed out in %sms', uri, timeout) return None content = http.download( requests_session, uri, timeout=download_timeout) if content is None: logger.info( 'Unwrapping stream from URI (%s) failed: ' 'error downloading URI %s', original_uri, uri) return None uris = playlists.parse(content) if not uris: logger.debug( 'Failed parsing URI (%s) as playlist; found potential stream.', uri) return uri # TODO Test streams and return first that seems to be playable logger.debug( 'Parsed playlist (%s) and found new URI: %s', uri, uris[0]) uri = uris[0]
[ "Get", "a", "stream", "URI", "from", "a", "playlist", "URI", "uri", ".", "Unwraps", "nested", "playlists", "until", "something", "that", "s", "not", "a", "playlist", "is", "found", "or", "the", "timeout", "is", "reached", "." ]
nilicule/mopidy-audioaddict
python
https://github.com/nilicule/mopidy-audioaddict/blob/2fb2909859b1f31682160692051e15df5705f22f/mopidy_audioaddict/actor.py#L126-L192
[ "def", "_unwrap_stream", "(", "uri", ",", "timeout", ",", "scanner", ",", "requests_session", ")", ":", "original_uri", "=", "uri", "seen_uris", "=", "set", "(", ")", "deadline", "=", "time", ".", "time", "(", ")", "+", "timeout", "while", "time", ".", "time", "(", ")", "<", "deadline", ":", "if", "uri", "in", "seen_uris", ":", "logger", ".", "info", "(", "'Unwrapping stream from URI (%s) failed: '", "'playlist referenced itself'", ",", "uri", ")", "return", "None", "else", ":", "seen_uris", ".", "add", "(", "uri", ")", "logger", ".", "debug", "(", "'Unwrapping stream from URI: %s'", ",", "uri", ")", "try", ":", "scan_timeout", "=", "deadline", "-", "time", ".", "time", "(", ")", "if", "scan_timeout", "<", "0", ":", "logger", ".", "info", "(", "'Unwrapping stream from URI (%s) failed: '", "'timed out in %sms'", ",", "uri", ",", "timeout", ")", "return", "None", "scan_result", "=", "scanner", ".", "scan", "(", "uri", ",", "timeout", "=", "scan_timeout", ")", "except", "exceptions", ".", "ScannerError", "as", "exc", ":", "logger", ".", "debug", "(", "'GStreamer failed scanning URI (%s): %s'", ",", "uri", ",", "exc", ")", "scan_result", "=", "None", "if", "scan_result", "is", "not", "None", "and", "not", "(", "scan_result", ".", "mime", ".", "startswith", "(", "'text/'", ")", "or", "scan_result", ".", "mime", ".", "startswith", "(", "'application/'", ")", ")", ":", "logger", ".", "debug", "(", "'Unwrapped potential %s stream: %s'", ",", "scan_result", ".", "mime", ",", "uri", ")", "return", "uri", "download_timeout", "=", "deadline", "-", "time", ".", "time", "(", ")", "if", "download_timeout", "<", "0", ":", "logger", ".", "info", "(", "'Unwrapping stream from URI (%s) failed: timed out in %sms'", ",", "uri", ",", "timeout", ")", "return", "None", "content", "=", "http", ".", "download", "(", "requests_session", ",", "uri", ",", "timeout", "=", "download_timeout", ")", "if", "content", "is", "None", ":", "logger", ".", "info", "(", "'Unwrapping stream from URI (%s) failed: '", "'error downloading URI %s'", ",", "original_uri", ",", "uri", ")", "return", "None", "uris", "=", "playlists", ".", "parse", "(", "content", ")", "if", "not", "uris", ":", "logger", ".", "debug", "(", "'Failed parsing URI (%s) as playlist; found potential stream.'", ",", "uri", ")", "return", "uri", "# TODO Test streams and return first that seems to be playable", "logger", ".", "debug", "(", "'Parsed playlist (%s) and found new URI: %s'", ",", "uri", ",", "uris", "[", "0", "]", ")", "uri", "=", "uris", "[", "0", "]" ]
2fb2909859b1f31682160692051e15df5705f22f
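Editor's note: _unwrap_stream's time handling follows one pattern throughout: a single deadline is fixed up front and every step (scan, download) draws its timeout from whatever budget remains. A toy illustration of that pattern, with hypothetical step callables:

import time

def run_with_budget(steps, timeout):
    # One overall deadline; each step gets only the time that is left.
    deadline = time.time() + timeout
    for step in steps:
        remaining = deadline - time.time()
        if remaining < 0:
            return None  # budget exhausted, as _unwrap_stream returns None
        step(timeout=remaining)
    return True

# Two quick steps easily fit inside a one-second budget.
print(run_with_budget([lambda timeout: time.sleep(0.01)] * 2, timeout=1.0))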
valid
Worker.serve
Start asynchronous HTTP Server on an individual process. :param request_handler: Sanic request handler with middleware :param error_handler: Sanic error handler with middleware :param debug: enables debug output (slows server) :param request_timeout: time in seconds :param ssl: SSLContext :param sock: Socket for the server to accept connections from :param request_max_size: size in bytes, `None` for no limit :param reuse_port: `True` for multiple workers :param loop: asyncio compatible event loop :param protocol: subclass of asyncio protocol class :return: Nothing
sanic_gunicorn.py
def serve(self, sock, request_handler, error_handler, debug=False, request_timeout=60, ssl=None, request_max_size=None, reuse_port=False, loop=None, protocol=HttpProtocol, backlog=100, **kwargs): """Start asynchronous HTTP Server on an individual process. :param request_handler: Sanic request handler with middleware :param error_handler: Sanic error handler with middleware :param debug: enables debug output (slows server) :param request_timeout: time in seconds :param ssl: SSLContext :param sock: Socket for the server to accept connections from :param request_max_size: size in bytes, `None` for no limit :param reuse_port: `True` for multiple workers :param loop: asyncio compatible event loop :param protocol: subclass of asyncio protocol class :return: Nothing """ if debug: loop.set_debug(debug) server = partial( protocol, loop=loop, connections=self.connections, signal=self.signal, request_handler=request_handler, error_handler=error_handler, request_timeout=request_timeout, request_max_size=request_max_size, ) server_coroutine = loop.create_server( server, host=None, port=None, ssl=ssl, reuse_port=reuse_port, sock=sock, backlog=backlog ) # Instead of pulling time at the end of every request, # pull it once per minute loop.call_soon(partial(update_current_time, loop)) return server_coroutine
def serve(self, sock, request_handler, error_handler, debug=False, request_timeout=60, ssl=None, request_max_size=None, reuse_port=False, loop=None, protocol=HttpProtocol, backlog=100, **kwargs): """Start asynchronous HTTP Server on an individual process. :param request_handler: Sanic request handler with middleware :param error_handler: Sanic error handler with middleware :param debug: enables debug output (slows server) :param request_timeout: time in seconds :param ssl: SSLContext :param sock: Socket for the server to accept connections from :param request_max_size: size in bytes, `None` for no limit :param reuse_port: `True` for multiple workers :param loop: asyncio compatible event loop :param protocol: subclass of asyncio protocol class :return: Nothing """ if debug: loop.set_debug(debug) server = partial( protocol, loop=loop, connections=self.connections, signal=self.signal, request_handler=request_handler, error_handler=error_handler, request_timeout=request_timeout, request_max_size=request_max_size, ) server_coroutine = loop.create_server( server, host=None, port=None, ssl=ssl, reuse_port=reuse_port, sock=sock, backlog=backlog ) # Instead of pulling time at the end of every request, # pull it once per minute loop.call_soon(partial(update_current_time, loop)) return server_coroutine
[ "Start", "asynchronous", "HTTP", "Server", "on", "an", "individual", "process", "." ]
messense/sanic-gunicorn
python
https://github.com/messense/sanic-gunicorn/blob/da1e738d9ff4bb064ca477f9aeb37e12f31be243/sanic_gunicorn.py#L108-L152
[ "def", "serve", "(", "self", ",", "sock", ",", "request_handler", ",", "error_handler", ",", "debug", "=", "False", ",", "request_timeout", "=", "60", ",", "ssl", "=", "None", ",", "request_max_size", "=", "None", ",", "reuse_port", "=", "False", ",", "loop", "=", "None", ",", "protocol", "=", "HttpProtocol", ",", "backlog", "=", "100", ",", "*", "*", "kwargs", ")", ":", "if", "debug", ":", "loop", ".", "set_debug", "(", "debug", ")", "server", "=", "partial", "(", "protocol", ",", "loop", "=", "loop", ",", "connections", "=", "self", ".", "connections", ",", "signal", "=", "self", ".", "signal", ",", "request_handler", "=", "request_handler", ",", "error_handler", "=", "error_handler", ",", "request_timeout", "=", "request_timeout", ",", "request_max_size", "=", "request_max_size", ",", ")", "server_coroutine", "=", "loop", ".", "create_server", "(", "server", ",", "host", "=", "None", ",", "port", "=", "None", ",", "ssl", "=", "ssl", ",", "reuse_port", "=", "reuse_port", ",", "sock", "=", "sock", ",", "backlog", "=", "backlog", ")", "# Instead of pulling time at the end of every request,", "# pull it once per minute", "loop", ".", "call_soon", "(", "partial", "(", "update_current_time", ",", "loop", ")", ")", "return", "server_coroutine" ]
da1e738d9ff4bb064ca477f9aeb37e12f31be243
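Editor's note: serve() hands functools.partial(protocol, ...) to loop.create_server(), which treats it as a zero-argument factory invoked once per accepted connection. A self-contained example of the same pattern; the Greeter class and its greeting are hypothetical:

import asyncio
from functools import partial

class Greeter(asyncio.Protocol):
    def __init__(self, greeting):
        self.greeting = greeting

    def connection_made(self, transport):
        # Called with a fresh Greeter instance for every connection.
        transport.write(self.greeting)
        transport.close()

async def main():
    loop = asyncio.get_running_loop()
    server = await loop.create_server(
        partial(Greeter, greeting=b'hello\n'), host='127.0.0.1', port=0)
    server.close()
    await server.wait_closed()

asyncio.run(main())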
valid
Pantheon.spawn
Grow this Pantheon by multiplying Gods.
pantheon/pantheons.py
def spawn(self, generations): """Grow this Pantheon by multiplying Gods.""" egg_donors = [god for god in self.gods.values() if god.chromosomes == 'XX'] sperm_donors = [god for god in self.gods.values() if god.chromosomes == 'XY'] for i in range(generations): print("\nGENERATION %d\n" % (i+1)) gen_xx = [] gen_xy = [] for egg_donor in egg_donors: sperm_donor = random.choice(sperm_donors) brood = self.breed(egg_donor, sperm_donor) for child in brood: if child.divinity > human: # divine offspring join the Pantheon self.add_god(child) if child.chromosomes == 'XX': gen_xx.append(child) else: gen_xy.append(child) # elder gods leave the breeding pool egg_donors = [ed for ed in egg_donors if ed.generation > (i-2)] sperm_donors = [sd for sd in sperm_donors if sd.generation > (i-3)] # mature offspring join the breeding pool egg_donors += gen_xx sperm_donors += gen_xy
def spawn(self, generations): """Grow this Pantheon by multiplying Gods.""" egg_donors = [god for god in self.gods.values() if god.chromosomes == 'XX'] sperm_donors = [god for god in self.gods.values() if god.chromosomes == 'XY'] for i in range(generations): print("\nGENERATION %d\n" % (i+1)) gen_xx = [] gen_xy = [] for egg_donor in egg_donors: sperm_donor = random.choice(sperm_donors) brood = self.breed(egg_donor, sperm_donor) for child in brood: if child.divinity > human: # divine offspring join the Pantheon self.add_god(child) if child.chromosomes == 'XX': gen_xx.append(child) else: gen_xy.append(child) # elder gods leave the breeding pool egg_donors = [ed for ed in egg_donors if ed.generation > (i-2)] sperm_donors = [sd for sd in sperm_donors if sd.generation > (i-3)] # mature offspring join the breeding pool egg_donors += gen_xx sperm_donors += gen_xy
[ "Grow", "this", "Pantheon", "by", "multiplying", "Gods", "." ]
carawarner/pantheon
python
https://github.com/carawarner/pantheon/blob/7e8718f4397eaa389fb3d5dc04fa01c7cb556512/pantheon/pantheons.py#L30-L59
[ "def", "spawn", "(", "self", ",", "generations", ")", ":", "egg_donors", "=", "[", "god", "for", "god", "in", "self", ".", "gods", ".", "values", "(", ")", "if", "god", ".", "chromosomes", "==", "'XX'", "]", "sperm_donors", "=", "[", "god", "for", "god", "in", "self", ".", "gods", ".", "values", "(", ")", "if", "god", ".", "chromosomes", "==", "'XY'", "]", "for", "i", "in", "range", "(", "generations", ")", ":", "print", "(", "\"\\nGENERATION %d\\n\"", "%", "(", "i", "+", "1", ")", ")", "gen_xx", "=", "[", "]", "gen_xy", "=", "[", "]", "for", "egg_donor", "in", "egg_donors", ":", "sperm_donor", "=", "random", ".", "choice", "(", "sperm_donors", ")", "brood", "=", "self", ".", "breed", "(", "egg_donor", ",", "sperm_donor", ")", "for", "child", "in", "brood", ":", "if", "child", ".", "divinity", ">", "human", ":", "# divine offspring join the Pantheon", "self", ".", "add_god", "(", "child", ")", "if", "child", ".", "chromosomes", "==", "'XX'", ":", "gen_xx", ".", "append", "(", "child", ")", "else", ":", "gen_xy", ".", "append", "(", "child", ")", "# elder gods leave the breeding pool", "egg_donors", "=", "[", "ed", "for", "ed", "in", "egg_donors", "if", "ed", ".", "generation", ">", "(", "i", "-", "2", ")", "]", "sperm_donors", "=", "[", "sd", "for", "sd", "in", "sperm_donors", "if", "sd", ".", "generation", ">", "(", "i", "-", "3", ")", "]", "# mature offspring join the breeding pool", "egg_donors", "+=", "gen_xx", "sperm_donors", "+=", "gen_xy" ]
7e8718f4397eaa389fb3d5dc04fa01c7cb556512
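Editor's note: the generation arithmetic in spawn() is easy to misread. After iteration i, egg donors must satisfy generation > i - 2 and sperm donors generation > i - 3, so eggs leave the breeding pool one generation earlier. A small check with hypothetical donors:

class Donor:
    def __init__(self, generation):
        self.generation = generation

donors = [Donor(g) for g in range(6)]
i = 4  # pretend the fifth (0-based) iteration just finished
eggs = [d.generation for d in donors if d.generation > (i - 2)]
sperm = [d.generation for d in donors if d.generation > (i - 3)]
print(eggs)   # -> [3, 4, 5]
print(sperm)  # -> [2, 3, 4, 5]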
valid
Pantheon.breed
Get it on.
pantheon/pantheons.py
def breed(self, egg_donor, sperm_donor): """Get it on.""" offspring = [] try: num_children = npchoice([1,2], 1, p=[0.8, 0.2])[0] # 20% chance of twins for _ in range(num_children): child = God(egg_donor, sperm_donor) offspring.append(child) send_birth_announcement(egg_donor, sperm_donor, child) except ValueError: print("Breeding error occurred. Likely the generator ran out of names.") return offspring
def breed(self, egg_donor, sperm_donor): """Get it on.""" offspring = [] try: num_children = npchoice([1,2], 1, p=[0.8, 0.2])[0] # 20% chance of twins for _ in range(num_children): child = God(egg_donor, sperm_donor) offspring.append(child) send_birth_announcement(egg_donor, sperm_donor, child) except ValueError: print("Breeding error occurred. Likely the generator ran out of names.") return offspring
[ "Get", "it", "on", "." ]
carawarner/pantheon
python
https://github.com/carawarner/pantheon/blob/7e8718f4397eaa389fb3d5dc04fa01c7cb556512/pantheon/pantheons.py#L62-L74
[ "def", "breed", "(", "self", ",", "egg_donor", ",", "sperm_donor", ")", ":", "offspring", "=", "[", "]", "try", ":", "num_children", "=", "npchoice", "(", "[", "1", ",", "2", "]", ",", "1", ",", "p", "=", "[", "0.8", ",", "0.2", "]", ")", "[", "0", "]", "# 20% chance of twins", "for", "_", "in", "range", "(", "num_children", ")", ":", "child", "=", "God", "(", "egg_donor", ",", "sperm_donor", ")", "offspring", ".", "append", "(", "child", ")", "send_birth_announcement", "(", "egg_donor", ",", "sperm_donor", ",", "child", ")", "except", "ValueError", ":", "print", "(", "\"Breeding error occurred. Likely the generator ran out of names.\"", ")", "return", "offspring" ]
7e8718f4397eaa389fb3d5dc04fa01c7cb556512
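Editor's note: the twin roll in breed() is a single weighted draw with numpy. Isolated, it looks like this (requires numpy):

from numpy.random import choice as npchoice

# 80% chance of one child, 20% chance of twins, exactly as in breed().
num_children = npchoice([1, 2], 1, p=[0.8, 0.2])[0]
print(num_children)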
valid
get_matches
Return words from <tokens> that are most closely related to <word>.
pantheon/process.py
def get_matches(word, tokens, limit, offset=0): """Return words from <tokens> that are most closely related to <word>.""" return closest(tokens, word_vec(word), limit, offset)
def get_matches(word, tokens, limit, offset=0): """Return words from <tokens> that are most closely related to <word>.""" return closest(tokens, word_vec(word), limit, offset)
[ "Return", "words", "from", "<tokens", ">", "that", "are", "most", "closely", "related", "to", "<word", ">", "." ]
carawarner/pantheon
python
https://github.com/carawarner/pantheon/blob/7e8718f4397eaa389fb3d5dc04fa01c7cb556512/pantheon/process.py#L12-L14
[ "def", "get_matches", "(", "word", ",", "tokens", ",", "limit", ",", "offset", "=", "0", ")", ":", "return", "closest", "(", "tokens", ",", "word_vec", "(", "word", ")", ",", "limit", ",", "offset", ")" ]
7e8718f4397eaa389fb3d5dc04fa01c7cb556512
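Editor's note: get_matches simply forwards to closest(), so limit and offset behave like a slice over the similarity-ranked list. A toy demonstration with a hypothetical pre-ranked list standing in for real vector output:

ranked = ['sea', 'ocean', 'wave', 'tide', 'storm']  # pretend closest() output
limit, offset = 2, 1
print(ranked[offset:offset + limit])  # -> ['ocean', 'wave']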
valid
cosine
Compare vectors. Borrowed from A. Parish.
pantheon/process.py
def cosine(vec1, vec2): """Compare vectors. Borrowed from A. Parish.""" if norm(vec1) > 0 and norm(vec2) > 0: return dot(vec1, vec2) / (norm(vec1) * norm(vec2)) else: return 0.0
def cosine(vec1, vec2): """Compare vectors. Borrowed from A. Parish.""" if norm(vec1) > 0 and norm(vec2) > 0: return dot(vec1, vec2) / (norm(vec1) * norm(vec2)) else: return 0.0
[ "Compare", "vectors", ".", "Borrowed", "from", "A", ".", "Parish", "." ]
carawarner/pantheon
python
https://github.com/carawarner/pantheon/blob/7e8718f4397eaa389fb3d5dc04fa01c7cb556512/pantheon/process.py#L22-L27
[ "def", "cosine", "(", "vec1", ",", "vec2", ")", ":", "if", "norm", "(", "vec1", ")", ">", "0", "and", "norm", "(", "vec2", ")", ">", "0", ":", "return", "dot", "(", "vec1", ",", "vec2", ")", "/", "(", "norm", "(", "vec1", ")", "*", "norm", "(", "vec2", ")", ")", "else", ":", "return", "0.0" ]
7e8718f4397eaa389fb3d5dc04fa01c7cb556512
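Editor's note: the guard in cosine() exists because a zero vector makes the denominator zero. A quick worked check (requires numpy):

from numpy import dot
from numpy.linalg import norm

v1, v2 = [1.0, 0.0], [1.0, 1.0]
print(dot(v1, v2) / (norm(v1) * norm(v2)))  # ~0.7071, the unguarded formula

zero = [0.0, 0.0]
print(norm(zero) > 0)  # False, so cosine() returns 0.0 instead of dividing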
valid
closest
Return the <limit> words from <tokens> whose vectors most closely resemble the search_vec. Skip the first <offset> results.
pantheon/process.py
def closest(tokens, search_vec, limit, offset=0): """Return the <limit> words from <tokens> whose vectors most closely resemble the search_vec. Skip the first <offset> results. """ return sorted(tokens, key=lambda x: cosine(search_vec, word_vec(x)), reverse=True)[offset:offset+limit]
def closest(tokens, search_vec, limit, offset=0): """Return the <limit> words from <tokens> whose vectors most closely resemble the search_vec. Skip the first <offset> results. """ return sorted(tokens, key=lambda x: cosine(search_vec, word_vec(x)), reverse=True)[offset:offset+limit]
[ "Return", "the", "<limit", ">", "words", "from", "<tokens", ">", "whose", "vectors", "most", "closely", "resemble", "the", "search_vec", ".", "Skip", "the", "first", "<offset", ">", "results", "." ]
carawarner/pantheon
python
https://github.com/carawarner/pantheon/blob/7e8718f4397eaa389fb3d5dc04fa01c7cb556512/pantheon/process.py#L30-L36
[ "def", "closest", "(", "tokens", ",", "search_vec", ",", "limit", ",", "offset", "=", "0", ")", ":", "return", "sorted", "(", "tokens", ",", "key", "=", "lambda", "x", ":", "cosine", "(", "search_vec", ",", "word_vec", "(", "x", ")", ")", ",", "reverse", "=", "True", ")", "[", "offset", ":", "offset", "+", "limit", "]" ]
7e8718f4397eaa389fb3d5dc04fa01c7cb556512
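Editor's note: closest() is a sort-by-similarity followed by a slice. A self-contained sketch with stub two-dimensional vectors standing in for the real embedding lookup (requires numpy):

from numpy import dot
from numpy.linalg import norm

def cosine(v1, v2):
    if norm(v1) > 0 and norm(v2) > 0:
        return dot(v1, v2) / (norm(v1) * norm(v2))
    return 0.0

# Stub "embeddings"; the real word_vec would return model vectors.
vecs = {'sea': [1.0, 0.1], 'ocean': [0.9, 0.2], 'cat': [0.0, 1.0]}
search = vecs['sea']
ranked = sorted(vecs, key=lambda t: cosine(search, vecs[t]), reverse=True)
print(ranked[0:2])  # -> ['sea', 'ocean']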
valid
tokenize_texts
Generate a json file for each txt file in the /data/corpora directory.
pantheon/tokens.py
def tokenize_texts(): """Generate a json file for each txt file in the /data/corpora directory.""" text_files = [fname for fname in os.listdir(corpora_dir) \ if fname.split('.')[1] == 'txt'] for text_fname in text_files: json_fname = text_fname.split('.')[0] + '.json' if os.path.isfile(corpora_dir + json_fname): continue # already tokenized print("Tokenizing " + text_fname) text = open(corpora_dir + text_fname).read() words = nltk.word_tokenize(text) with open(corpora_dir + json_fname, 'w') as outjson: json.dump(words, outjson)
def tokenize_texts(): """Generate a json file for each txt file in the /data/corpora directory.""" text_files = [fname for fname in os.listdir(corpora_dir) \ if fname.split('.')[1] == 'txt'] for text_fname in text_files: json_fname = text_fname.split('.')[0] + '.json' if os.path.isfile(corpora_dir + json_fname): continue # already tokenized print("Tokenizing " + text_fname) text = open(corpora_dir + text_fname).read() words = nltk.word_tokenize(text) with open(corpora_dir + json_fname, 'w') as outjson: json.dump(words, outjson)
[ "Generate", "a", "json", "file", "for", "each", "txt", "file", "in", "the", "/", "data", "/", "corpora", "directory", "." ]
carawarner/pantheon
python
https://github.com/carawarner/pantheon/blob/7e8718f4397eaa389fb3d5dc04fa01c7cb556512/pantheon/tokens.py#L40-L54
[ "def", "tokenize_texts", "(", ")", ":", "text_files", "=", "[", "fname", "for", "fname", "in", "os", ".", "listdir", "(", "corpora_dir", ")", "if", "fname", ".", "split", "(", "'.'", ")", "[", "1", "]", "==", "'txt'", "]", "for", "text_fname", "in", "text_files", ":", "json_fname", "=", "text_fname", ".", "split", "(", "'.'", ")", "[", "0", "]", "+", "'.json'", "if", "os", ".", "path", ".", "isfile", "(", "corpora_dir", "+", "json_fname", ")", ":", "continue", "# already tokenized", "print", "(", "\"Tokenizing \"", "+", "text_fname", ")", "text", "=", "open", "(", "corpora_dir", "+", "text_fname", ")", ".", "read", "(", ")", "words", "=", "nltk", ".", "word_tokenize", "(", "text", ")", "with", "open", "(", "corpora_dir", "+", "json_fname", ",", "'w'", ")", "as", "outjson", ":", "json", ".", "dump", "(", "words", ",", "outjson", ")" ]
7e8718f4397eaa389fb3d5dc04fa01c7cb556512
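Editor's note: the heavy lifting in tokenize_texts() is nltk.word_tokenize, which needs NLTK's punkt model downloaded once. A minimal run; the sentence is arbitrary, and 'punkt' is NLTK's long-standing model name (very recent releases repackage it, e.g. as punkt_tab):

import nltk

nltk.download('punkt', quiet=True)  # one-time tokenizer model download
print(nltk.word_tokenize('The gods were angry.'))
# -> ['The', 'gods', 'were', 'angry', '.']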
valid
make_tokens_dir
Create a new directory named <dir_>. Create a new file within it called sources.json. The input <sources> is a list of names of tokenized texts. Write <sources> into sources.json.
pantheon/tokens.py
def make_tokens_dir(dir_, sources): """Create a new directory named <dir_>. Create a new file within it called sources.json. The input <sources> is a list of names of tokenized texts. Write <sources> into sources.json. """ os.mkdir(tokens_dir + dir_) for source in sources: if not os.path.isfile(corpora_dir + source): print('Invalid source: ' + source) return with open(tokens_dir + dir_ + '/sources.json', 'w') as outjson: json.dump(sources, outjson)
def make_tokens_dir(dir_, sources): """Create a new directory named <dir_>. Create a new file within it called sources.json. The input <sources> is a list of names of tokenized texts. Write <sources> into sources.json. """ os.mkdir(tokens_dir + dir_) for source in sources: if not os.path.isfile(corpora_dir + source): print('Invalid source: ' + source) return with open(tokens_dir + dir_ + '/sources.json', 'w') as outjson: json.dump(sources, outjson)
[ "Create", "a", "new", "directory", "named", "<dir_", ">", ".", "Create", "a", "new", "file", "within", "it", "called", "sources", ".", "json", ".", "The", "input", "<sources", ">", "is", "a", "list", "of", "names", "of", "tokenized", "texts", ".", "Write", "<sources", ">", "into", "sources", ".", "json", "." ]
carawarner/pantheon
python
https://github.com/carawarner/pantheon/blob/7e8718f4397eaa389fb3d5dc04fa01c7cb556512/pantheon/tokens.py#L64-L76
[ "def", "make_tokens_dir", "(", "dir_", ",", "sources", ")", ":", "os", ".", "mkdir", "(", "tokens_dir", "+", "dir_", ")", "for", "source", "in", "sources", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "corpora_dir", "+", "source", ")", ":", "print", "(", "'Invalid source: '", "+", "source", ")", "return", "with", "open", "(", "tokens_dir", "+", "dir_", "+", "'/sources.json'", ",", "'w'", ")", "as", "outjson", ":", "json", ".", "dump", "(", "sources", ",", "outjson", ")" ]
7e8718f4397eaa389fb3d5dc04fa01c7cb556512
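Editor's note: make_tokens_dir() writes a manifest before any token list exists, and os.mkdir raises FileExistsError if the directory is already there. The same shape, run inside a temp directory so it is safe to execute anywhere (names are hypothetical):

import json
import os
import tempfile

tokens_dir = tempfile.mkdtemp() + '/'
os.mkdir(tokens_dir + 'demo')  # would raise FileExistsError on a rerun
with open(tokens_dir + 'demo/sources.json', 'w') as outjson:
    json.dump(['ovid.json'], outjson)
with open(tokens_dir + 'demo/sources.json') as injson:
    print(injson.read())  # -> ["ovid.json"]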
valid
make_tokens_list
Find sources.json in <dir_>. It contains a list of tokenized texts. For each tokenized text listed in sources.json, read its tokens, filter them, and add them to an aggregated list. Write the aggregated list to disk using a filename based on the <filters> given.
pantheon/tokens.py
def make_tokens_list(dir_, filters): """Find sources.json in <dir_>. It contains a list of tokenized texts. For each tokenized text listed in sources.json, read its tokens, filter them, and add them to an aggregated list. Write the aggregated list to disk using a filename based on the <filters> given. """ with open(tokens_dir + dir_ + '/sources.json', 'r') as injson: data = json.load(injson) sources = [corpora_dir + fname for fname in data] with open('data/skipwords.txt', 'r') as f: skipwords = [line.rstrip() for line in f] tokens_list = [] for fname in sources: print("Incorporating tokens from " + fname) with open(fname, 'r') as injson: data = json.load(injson) words = [w.lower() for w in data if not w == ''] filtered = [w for w,p in nltk.pos_tag(words) if p in filters] sanitized = [w for w in filtered if not w in skipwords] tokens_list += sanitized tokens_list = list(set(tokens_list)) # unique target = tokens_dir + dir_ + '/' + '-'.join(filters) + '.json' with open(target, 'w') as outjson: json.dump(tokens_list, outjson)
def make_tokens_list(dir_, filters): """Find sources.json in <dir_>. It contains a list of tokenized texts. For each tokenized text listed in sources.json, read its tokens, filter them, and add them to an aggregated list. Write the aggregated list to disk using a filename based on the <filters> given. """ with open(tokens_dir + dir_ + '/sources.json', 'r') as injson: data = json.load(injson) sources = [corpora_dir + fname for fname in data] with open('data/skipwords.txt', 'r') as f: skipwords = [line.rstrip() for line in f] tokens_list = [] for fname in sources: print("Incorporating tokens from " + fname) with open(fname, 'r') as injson: data = json.load(injson) words = [w.lower() for w in data if not w == ''] filtered = [w for w,p in nltk.pos_tag(words) if p in filters] sanitized = [w for w in filtered if not w in skipwords] tokens_list += sanitized tokens_list = list(set(tokens_list)) # unique target = tokens_dir + dir_ + '/' + '-'.join(filters) + '.json' with open(target, 'w') as outjson: json.dump(tokens_list, outjson)
[ "Find", "sources", ".", "json", "in", "<dir_", ">", ".", "It", "contains", "a", "list", "of", "tokenized", "texts", ".", "For", "each", "tokenized", "text", "listed", "in", "sources", ".", "json", "read", "its", "tokens", "filter", "them", "and", "add", "them", "to", "an", "aggregated", "list", ".", "Write", "the", "aggregated", "list", "to", "disk", "using", "a", "filename", "based", "on", "the", "<filters", ">", "given", "." ]
carawarner/pantheon
python
https://github.com/carawarner/pantheon/blob/7e8718f4397eaa389fb3d5dc04fa01c7cb556512/pantheon/tokens.py#L79-L105
[ "def", "make_tokens_list", "(", "dir_", ",", "filters", ")", ":", "with", "open", "(", "tokens_dir", "+", "dir_", "+", "'/sources.json'", ",", "'r'", ")", "as", "injson", ":", "data", "=", "json", ".", "load", "(", "injson", ")", "sources", "=", "[", "corpora_dir", "+", "fname", "for", "fname", "in", "data", "]", "with", "open", "(", "'data/skipwords.txt'", ",", "'r'", ")", "as", "f", ":", "skipwords", "=", "[", "line", ".", "rstrip", "(", ")", "for", "line", "in", "f", "]", "tokens_list", "=", "[", "]", "for", "fname", "in", "sources", ":", "print", "(", "\"Incorporating tokens from \"", "+", "fname", ")", "with", "open", "(", "fname", ",", "'r'", ")", "as", "injson", ":", "data", "=", "json", ".", "load", "(", "injson", ")", "words", "=", "[", "w", ".", "lower", "(", ")", "for", "w", "in", "data", "if", "not", "w", "==", "''", "]", "filtered", "=", "[", "w", "for", "w", ",", "p", "in", "nltk", ".", "pos_tag", "(", "words", ")", "if", "p", "in", "filters", "]", "sanitized", "=", "[", "w", "for", "w", "in", "filtered", "if", "not", "w", "in", "skipwords", "]", "tokens_list", "+=", "sanitized", "tokens_list", "=", "list", "(", "set", "(", "tokens_list", ")", ")", "# unique", "target", "=", "tokens_dir", "+", "dir_", "+", "'/'", "+", "'-'", ".", "join", "(", "filters", ")", "+", "'.json'", "with", "open", "(", "target", ",", "'w'", ")", "as", "outjson", ":", "json", ".", "dump", "(", "tokens_list", ",", "outjson", ")" ]
7e8718f4397eaa389fb3d5dc04fa01c7cb556512
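Editor's note: the core filter in make_tokens_list() is a POS-tag whitelist over nltk.pos_tag output. Isolated below; 'averaged_perceptron_tagger' is NLTK's long-standing tagger model name (recent releases repackage it with an _eng suffix), and the filters value is a typical example, not a fixed default:

import nltk

nltk.download('averaged_perceptron_tagger', quiet=True)  # POS tagger model
words = ['the', 'storm', 'ran', 'quickly', 'sea']
filters = ['NN']  # keep singular nouns, matching a typical filters argument
print([w for w, p in nltk.pos_tag(words) if p in filters])
# -> ['storm', 'sea']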