column            type           range / values
partition         stringclasses  3 values
func_name         stringlengths  1 - 134
docstring         stringlengths  1 - 46.9k
path              stringlengths  4 - 223
original_string   stringlengths  75 - 104k
code              stringlengths  75 - 104k
docstring_tokens  listlengths    1 - 1.97k
repo              stringlengths  7 - 55
language          stringclasses  1 value
url               stringlengths  87 - 315
code_tokens       listlengths    19 - 28.4k
sha               stringlengths  40 - 40
valid
execute_reliabledictionary
Execute create, update, delete operations on existing reliable dictionaries. Carry out create, update and delete operations on existing reliable dictionaries for a given application and service. :param application_name: Name of the application. :type application_name: str :param service_name: Name of the service. :type service_name: str :param input_file: Input file with a list of JSON objects describing the operations to run on the reliable dictionaries.
rcctl/rcctl/custom_reliablecollections.py
def execute_reliabledictionary(client, application_name, service_name, input_file):
    """Execute create, update, delete operations on existing reliable dictionaries.

    Carry out create, update and delete operations on existing reliable
    dictionaries for a given application and service.

    :param application_name: Name of the application.
    :type application_name: str
    :param service_name: Name of the service.
    :type service_name: str
    :param input_file: Input file with a list of JSON objects describing the
        operations to run on the reliable dictionaries.
    """
    cluster = Cluster.from_sfclient(client)
    service = cluster.get_application(application_name).get_service(service_name)

    # call get service with headers and params
    with open(input_file) as json_file:
        json_data = json.load(json_file)

    service.execute(json_data)
    return
[ "Execute", "create", "update", "delete", "operations", "on", "existing", "reliable", "dictionaries", "." ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/custom_reliablecollections.py#L126-L145
[ "def", "execute_reliabledictionary", "(", "client", ",", "application_name", ",", "service_name", ",", "input_file", ")", ":", "cluster", "=", "Cluster", ".", "from_sfclient", "(", "client", ")", "service", "=", "cluster", ".", "get_application", "(", "application_name", ")", ".", "get_service", "(", "service_name", ")", "# call get service with headers and params", "with", "open", "(", "input_file", ")", "as", "json_file", ":", "json_data", "=", "json", ".", "load", "(", "json_file", ")", "service", ".", "execute", "(", "json_data", ")", "return" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
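The function above just loads whatever JSON the input file contains and forwards it to service.execute, so the operation schema is defined elsewhere in rcctl. A minimal sketch of preparing such a file — the operation and field names here are illustrative assumptions, not the documented schema:

import json

# Hypothetical operation list; the real field names are defined by rcctl
# and are not shown in this record.
operations = [
    {"Operation": "Add", "Collection": "myDict", "Key": "key1", "Value": "value1"},
    {"Operation": "Delete", "Collection": "myDict", "Key": "key2"},
]

with open("operations.json", "w") as json_file:
    json.dump(operations, json_file, indent=2)

# execute_reliabledictionary(client, "fabric:/MyApp", "MyService", "operations.json")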
valid
select_arg_verify
Verify arguments for select command
rcctl/rcctl/custom_cluster.py
def select_arg_verify(endpoint, cert, key, pem, ca, aad, no_verify):  #pylint: disable=invalid-name,too-many-arguments
    """Verify arguments for select command"""

    if not (endpoint.lower().startswith('http')
            or endpoint.lower().startswith('https')):
        raise CLIError('Endpoint must be HTTP or HTTPS')

    usage = ('Valid syntax : --endpoint [ [ --key --cert | --pem | --aad] '
             '[ --ca | --no-verify ] ]')

    if ca and not (pem or all([key, cert])):
        raise CLIError(usage)

    if no_verify and not (pem or all([key, cert]) or aad):
        raise CLIError(usage)

    if no_verify and ca:
        raise CLIError(usage)

    if any([cert, key]) and not all([cert, key]):
        raise CLIError(usage)

    if aad and any([pem, cert, key]):
        raise CLIError(usage)

    if pem and any([cert, key]):
        raise CLIError(usage)
[ "Verify", "arguments", "for", "select", "command" ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/custom_cluster.py#L13-L39
[ "def", "select_arg_verify", "(", "endpoint", ",", "cert", ",", "key", ",", "pem", ",", "ca", ",", "aad", ",", "no_verify", ")", ":", "#pylint: disable=invalid-name,too-many-arguments", "if", "not", "(", "endpoint", ".", "lower", "(", ")", ".", "startswith", "(", "'http'", ")", "or", "endpoint", ".", "lower", "(", ")", ".", "startswith", "(", "'https'", ")", ")", ":", "raise", "CLIError", "(", "'Endpoint must be HTTP or HTTPS'", ")", "usage", "=", "(", "'Valid syntax : --endpoint [ [ --key --cert | --pem | --aad] '", "'[ --ca | --no-verify ] ]'", ")", "if", "ca", "and", "not", "(", "pem", "or", "all", "(", "[", "key", ",", "cert", "]", ")", ")", ":", "raise", "CLIError", "(", "usage", ")", "if", "no_verify", "and", "not", "(", "pem", "or", "all", "(", "[", "key", ",", "cert", "]", ")", "or", "aad", ")", ":", "raise", "CLIError", "(", "usage", ")", "if", "no_verify", "and", "ca", ":", "raise", "CLIError", "(", "usage", ")", "if", "any", "(", "[", "cert", ",", "key", "]", ")", "and", "not", "all", "(", "[", "cert", ",", "key", "]", ")", ":", "raise", "CLIError", "(", "usage", ")", "if", "aad", "and", "any", "(", "[", "pem", ",", "cert", ",", "key", "]", ")", ":", "raise", "CLIError", "(", "usage", ")", "if", "pem", "and", "any", "(", "[", "cert", ",", "key", "]", ")", ":", "raise", "CLIError", "(", "usage", ")" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
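A quick sketch of how the validation matrix above plays out, assuming select_arg_verify is importable as rcctl.custom_cluster.select_arg_verify (an assumption based on the path in this record):

from rcctl.custom_cluster import select_arg_verify

# Accepted: a single .pem plus a CA bundle.
select_arg_verify('https://localhost:19080', cert=None, key=None,
                  pem='/tmp/client.pem', ca='/tmp/ca.crt',
                  aad=False, no_verify=False)

# Rejected: --cert without --key trips the any/all check and raises CLIError.
select_arg_verify('https://localhost:19080', cert='/tmp/client.crt', key=None,
                  pem=None, ca=None, aad=False, no_verify=False)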
valid
select
Connects to a Service Fabric cluster endpoint. If connecting to a secure cluster, specify an absolute path to a cert (.crt) and key file (.key) or a single file with both (.pem). Do not specify both. Optionally, if connecting to a secure cluster, also specify an absolute path to a CA bundle file or directory of trusted CA certs. :param str endpoint: Cluster endpoint URL, including port and HTTP or HTTPS prefix :param str cert: Absolute path to a client certificate file :param str key: Absolute path to client certificate key file :param str pem: Absolute path to client certificate, as a .pem file :param str ca: Absolute path to CA certs directory to treat as valid or CA bundle file :param bool aad: Use Azure Active Directory for authentication :param bool no_verify: Disable verification for certificates when using HTTPS. Note: this is an insecure option and should not be used for production environments
rcctl/rcctl/custom_cluster.py
def select(endpoint, cert=None, key=None, pem=None, ca=None,  #pylint: disable=invalid-name, too-many-arguments
           aad=False, no_verify=False):  #pylint: disable-msg=too-many-locals
    """
    Connects to a Service Fabric cluster endpoint.
    If connecting to a secure cluster, specify an absolute path to a cert (.crt)
    and key file (.key) or a single file with both (.pem). Do not specify both.
    Optionally, if connecting to a secure cluster, also specify an absolute path
    to a CA bundle file or directory of trusted CA certs.
    :param str endpoint: Cluster endpoint URL, including port and HTTP or HTTPS prefix
    :param str cert: Absolute path to a client certificate file
    :param str key: Absolute path to client certificate key file
    :param str pem: Absolute path to client certificate, as a .pem file
    :param str ca: Absolute path to CA certs directory to treat as valid or CA bundle file
    :param bool aad: Use Azure Active Directory for authentication
    :param bool no_verify: Disable verification for certificates when using HTTPS.
    Note: this is an insecure option and should not be used for production environments
    """
    from sfctl.config import (set_ca_cert, set_auth, set_aad_cache,
                              set_cluster_endpoint, set_no_verify)
    from msrest import ServiceClient, Configuration
    from sfctl.auth import ClientCertAuthentication, AdalAuthentication

    select_arg_verify(endpoint, cert, key, pem, ca, aad, no_verify)

    if aad:
        new_token, new_cache = get_aad_token(endpoint, no_verify)
        set_aad_cache(new_token, new_cache)
        rest_client = ServiceClient(
            AdalAuthentication(no_verify),
            Configuration(endpoint)
        )

        # Make sure basic GET request succeeds
        rest_client.send(rest_client.get('/')).raise_for_status()
    else:
        client_cert = None
        if pem:
            client_cert = pem
        elif cert:
            client_cert = (cert, key)

        rest_client = ServiceClient(
            ClientCertAuthentication(client_cert, ca, no_verify),
            Configuration(endpoint)
        )

        # Make sure basic GET request succeeds
        rest_client.send(rest_client.get('/')).raise_for_status()

    set_cluster_endpoint(endpoint)
    set_no_verify(no_verify)
    set_ca_cert(ca)
    set_auth(pem, cert, key, aad)
[ "Connects", "to", "a", "Service", "Fabric", "cluster", "endpoint", ".", "If", "connecting", "to", "secure", "cluster", "specify", "an", "absolute", "path", "to", "a", "cert", "(", ".", "crt", ")", "and", "key", "file", "(", ".", "key", ")", "or", "a", "single", "file", "with", "both", "(", ".", "pem", ")", ".", "Do", "not", "specify", "both", ".", "Optionally", "if", "connecting", "to", "a", "secure", "cluster", "specify", "also", "an", "absolute", "path", "to", "a", "CA", "bundle", "file", "or", "directory", "of", "trusted", "CA", "certs", ".", ":", "param", "str", "endpoint", ":", "Cluster", "endpoint", "URL", "including", "port", "and", "HTTP", "or", "HTTPS", "prefix", ":", "param", "str", "cert", ":", "Absolute", "path", "to", "a", "client", "certificate", "file", ":", "param", "str", "key", ":", "Absolute", "path", "to", "client", "certificate", "key", "file", ":", "param", "str", "pem", ":", "Absolute", "path", "to", "client", "certificate", "as", "a", ".", "pem", "file", ":", "param", "str", "ca", ":", "Absolute", "path", "to", "CA", "certs", "directory", "to", "treat", "as", "valid", "or", "CA", "bundle", "file", ":", "param", "bool", "aad", ":", "Use", "Azure", "Active", "Directory", "for", "authentication", ":", "param", "bool", "no_verify", ":", "Disable", "verification", "for", "certificates", "when", "using", "HTTPS", "note", ":", "this", "is", "an", "insecure", "option", "and", "should", "not", "be", "used", "for", "production", "environments" ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/custom_cluster.py#L41-L99
[ "def", "select", "(", "endpoint", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "pem", "=", "None", ",", "ca", "=", "None", ",", "#pylint: disable=invalid-name, too-many-arguments", "aad", "=", "False", ",", "no_verify", "=", "False", ")", ":", "#pylint: disable-msg=too-many-locals", "from", "sfctl", ".", "config", "import", "(", "set_ca_cert", ",", "set_auth", ",", "set_aad_cache", ",", "set_cluster_endpoint", ",", "set_no_verify", ")", "from", "msrest", "import", "ServiceClient", ",", "Configuration", "from", "sfctl", ".", "auth", "import", "ClientCertAuthentication", ",", "AdalAuthentication", "select_arg_verify", "(", "endpoint", ",", "cert", ",", "key", ",", "pem", ",", "ca", ",", "aad", ",", "no_verify", ")", "if", "aad", ":", "new_token", ",", "new_cache", "=", "get_aad_token", "(", "endpoint", ",", "no_verify", ")", "set_aad_cache", "(", "new_token", ",", "new_cache", ")", "rest_client", "=", "ServiceClient", "(", "AdalAuthentication", "(", "no_verify", ")", ",", "Configuration", "(", "endpoint", ")", ")", "# Make sure basic GET request succeeds", "rest_client", ".", "send", "(", "rest_client", ".", "get", "(", "'/'", ")", ")", ".", "raise_for_status", "(", ")", "else", ":", "client_cert", "=", "None", "if", "pem", ":", "client_cert", "=", "pem", "elif", "cert", ":", "client_cert", "=", "(", "cert", ",", "key", ")", "rest_client", "=", "ServiceClient", "(", "ClientCertAuthentication", "(", "client_cert", ",", "ca", ",", "no_verify", ")", ",", "Configuration", "(", "endpoint", ")", ")", "# Make sure basic GET request succeeds", "rest_client", ".", "send", "(", "rest_client", ".", "get", "(", "'/'", ")", ")", ".", "raise_for_status", "(", ")", "set_cluster_endpoint", "(", "endpoint", ")", "set_no_verify", "(", "no_verify", ")", "set_ca_cert", "(", "ca", ")", "set_auth", "(", "pem", ",", "cert", ",", "key", ",", "aad", ")" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
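Hypothetical calls covering the three connection modes the docstring describes (import path assumed from this record):

from rcctl.custom_cluster import select

select('http://localhost:19080')                           # unsecured cluster
select('https://cluster:19080', pem='/abs/client.pem',     # client cert as a single .pem
       ca='/abs/ca-bundle.crt')
select('https://cluster:19080', aad=True, no_verify=True)  # AAD; skipping verification is insecure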
valid
get_aad_token
Get AAD token
rcctl/rcctl/custom_cluster.py
def get_aad_token(endpoint, no_verify):  #pylint: disable-msg=too-many-locals
    """Get AAD token"""
    from azure.servicefabric.service_fabric_client_ap_is import (
        ServiceFabricClientAPIs
    )
    from sfctl.auth import ClientCertAuthentication
    from sfctl.config import set_aad_metadata

    auth = ClientCertAuthentication(None, None, no_verify)
    client = ServiceFabricClientAPIs(auth, base_url=endpoint)
    aad_metadata = client.get_aad_metadata()

    if aad_metadata.type != "aad":
        raise CLIError("Not AAD cluster")

    aad_resource = aad_metadata.metadata
    tenant_id = aad_resource.tenant
    authority_uri = aad_resource.login + '/' + tenant_id
    context = adal.AuthenticationContext(authority_uri, api_version=None)
    cluster_id = aad_resource.cluster
    client_id = aad_resource.client

    set_aad_metadata(authority_uri, cluster_id, client_id)

    code = context.acquire_user_code(cluster_id, client_id)
    print(code['message'])
    token = context.acquire_token_with_device_code(
        cluster_id, code, client_id)
    print("Succeed!")
    return token, context.cache
[ "Get", "AAD", "token" ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/custom_cluster.py#L101-L134
[ "def", "get_aad_token", "(", "endpoint", ",", "no_verify", ")", ":", "#pylint: disable-msg=too-many-locals", "from", "azure", ".", "servicefabric", ".", "service_fabric_client_ap_is", "import", "(", "ServiceFabricClientAPIs", ")", "from", "sfctl", ".", "auth", "import", "ClientCertAuthentication", "from", "sfctl", ".", "config", "import", "set_aad_metadata", "auth", "=", "ClientCertAuthentication", "(", "None", ",", "None", ",", "no_verify", ")", "client", "=", "ServiceFabricClientAPIs", "(", "auth", ",", "base_url", "=", "endpoint", ")", "aad_metadata", "=", "client", ".", "get_aad_metadata", "(", ")", "if", "aad_metadata", ".", "type", "!=", "\"aad\"", ":", "raise", "CLIError", "(", "\"Not AAD cluster\"", ")", "aad_resource", "=", "aad_metadata", ".", "metadata", "tenant_id", "=", "aad_resource", ".", "tenant", "authority_uri", "=", "aad_resource", ".", "login", "+", "'/'", "+", "tenant_id", "context", "=", "adal", ".", "AuthenticationContext", "(", "authority_uri", ",", "api_version", "=", "None", ")", "cluster_id", "=", "aad_resource", ".", "cluster", "client_id", "=", "aad_resource", ".", "client", "set_aad_metadata", "(", "authority_uri", ",", "cluster_id", ",", "client_id", ")", "code", "=", "context", ".", "acquire_user_code", "(", "cluster_id", ",", "client_id", ")", "print", "(", "code", "[", "'message'", "]", ")", "token", "=", "context", ".", "acquire_token_with_device_code", "(", "cluster_id", ",", "code", ",", "client_id", ")", "print", "(", "\"Succeed!\"", ")", "return", "token", ",", "context", ".", "cache" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
valid
_openpyxl_read_xl
Use openpyxl to read an Excel file.
boyle/excel_utils.py
def _openpyxl_read_xl(xl_path: str):
    """ Use openpyxl to read an Excel file. """
    try:
        wb = load_workbook(filename=xl_path, read_only=True)
    except:
        raise
    else:
        return wb
[ "Use", "openpyxl", "to", "read", "an", "Excel", "file", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L14-L21
[ "def", "_openpyxl_read_xl", "(", "xl_path", ":", "str", ")", ":", "try", ":", "wb", "=", "load_workbook", "(", "filename", "=", "xl_path", ",", "read_only", "=", "True", ")", "except", ":", "raise", "else", ":", "return", "wb" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
_check_xl_path
Return the expanded absolute path of `xl_path` if it exists, and 'xlrd' or 'openpyxl' depending on which module should be used for the Excel file in `xl_path`. Parameters ---------- xl_path: str Path to an Excel file Returns ------- xl_path: str User expanded and absolute path to `xl_path` module: str The name of the module you should use to process the Excel file. Choices: 'xlrd', 'openpyxl' Raises ------ IOError If the file does not exist RuntimeError If a suitable reader for xl_path is not found
boyle/excel_utils.py
def _check_xl_path(xl_path: str):
    """ Return the expanded absolute path of `xl_path` if it exists,
    and 'xlrd' or 'openpyxl' depending on which module should be used
    for the Excel file in `xl_path`.

    Parameters
    ----------
    xl_path: str
        Path to an Excel file

    Returns
    -------
    xl_path: str
        User expanded and absolute path to `xl_path`

    module: str
        The name of the module you should use to process the Excel file.
        Choices: 'xlrd', 'openpyxl'

    Raises
    ------
    IOError
        If the file does not exist

    RuntimeError
        If a suitable reader for xl_path is not found
    """
    xl_path = op.abspath(op.expanduser(xl_path))

    if not op.isfile(xl_path):
        raise IOError("Could not find file in {}.".format(xl_path))

    return xl_path, _use_openpyxl_or_xlrf(xl_path)
[ "Return", "the", "expanded", "absolute", "path", "of", "xl_path", "if", "if", "exists", "and", "xlrd", "or", "openpyxl", "depending", "on", "which", "module", "should", "be", "used", "for", "the", "Excel", "file", "in", "xl_path", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L57-L90
[ "def", "_check_xl_path", "(", "xl_path", ":", "str", ")", ":", "xl_path", "=", "op", ".", "abspath", "(", "op", ".", "expanduser", "(", "xl_path", ")", ")", "if", "not", "op", ".", "isfile", "(", "xl_path", ")", ":", "raise", "IOError", "(", "\"Could not find file in {}.\"", ".", "format", "(", "xl_path", ")", ")", "return", "xl_path", ",", "_use_openpyxl_or_xlrf", "(", "xl_path", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
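A sketch of the two values _check_xl_path returns, assuming the boyle package is installed; the module-selection logic lives in _use_openpyxl_or_xlrf, which is not shown in this record:

from boyle.excel_utils import _check_xl_path

xl_path, module = _check_xl_path('~/data/subjects.xlsx')  # hypothetical file
print(xl_path)  # expanded absolute path, e.g. /home/me/data/subjects.xlsx
print(module)   # reader module name chosen for this file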
valid
read_xl
Return the workbook from the Excel file in `xl_path`.
boyle/excel_utils.py
def read_xl(xl_path: str):
    """ Return the workbook from the Excel file in `xl_path`."""
    xl_path, choice = _check_xl_path(xl_path)
    reader = XL_READERS[choice]

    return reader(xl_path)
[ "Return", "the", "workbook", "from", "the", "Excel", "file", "in", "xl_path", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L93-L98
[ "def", "read_xl", "(", "xl_path", ":", "str", ")", ":", "xl_path", ",", "choice", "=", "_check_xl_path", "(", "xl_path", ")", "reader", "=", "XL_READERS", "[", "choice", "]", "return", "reader", "(", "xl_path", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
get_sheet_list
Return a list with the name of the sheets in the Excel file in `xl_path`.
boyle/excel_utils.py
def get_sheet_list(xl_path: str) -> List:
    """Return a list with the name of the sheets in
    the Excel file in `xl_path`.
    """
    wb = read_xl(xl_path)

    if hasattr(wb, 'sheetnames'):
        return wb.sheetnames
    else:
        return wb.sheet_names()
[ "Return", "a", "list", "with", "the", "name", "of", "the", "sheets", "in", "the", "Excel", "file", "in", "xl_path", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L101-L110
[ "def", "get_sheet_list", "(", "xl_path", ":", "str", ")", "->", "List", ":", "wb", "=", "read_xl", "(", "xl_path", ")", "if", "hasattr", "(", "wb", ",", "'sheetnames'", ")", ":", "return", "wb", ".", "sheetnames", "else", ":", "return", "wb", ".", "sheet_names", "(", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
concat_sheets
Return a pandas DataFrame with the concat'ed content of the `sheetnames` from the Excel file in `xl_path`. Parameters ---------- xl_path: str Path to the Excel file sheetnames: list of str List of existing sheet names of `xl_path`. If None, will use all sheets from `xl_path`. add_tab_names: bool If True, will add a 'Tab' column which says which tab the row comes from. Returns ------- df: pandas.DataFrame
boyle/excel_utils.py
def concat_sheets(xl_path: str, sheetnames=None, add_tab_names=False):
    """ Return a pandas DataFrame with the concat'ed
    content of the `sheetnames` from the Excel file in `xl_path`.

    Parameters
    ----------
    xl_path: str
        Path to the Excel file

    sheetnames: list of str
        List of existing sheet names of `xl_path`.
        If None, will use all sheets from `xl_path`.

    add_tab_names: bool
        If True, will add a 'Tab' column which says which tab
        the row comes from.

    Returns
    -------
    df: pandas.DataFrame
    """
    xl_path, choice = _check_xl_path(xl_path)

    if sheetnames is None:
        sheetnames = get_sheet_list(xl_path)

    sheets = pd.read_excel(xl_path, sheetname=sheetnames)

    if add_tab_names:
        for tab in sheets:
            sheets[tab]['Tab'] = [tab] * len(sheets[tab])

    return pd.concat([sheets[tab] for tab in sheets])
[ "Return", "a", "pandas", "DataFrame", "with", "the", "concat", "ed", "content", "of", "the", "sheetnames", "from", "the", "Excel", "file", "in", "xl_path", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L113-L146
[ "def", "concat_sheets", "(", "xl_path", ":", "str", ",", "sheetnames", "=", "None", ",", "add_tab_names", "=", "False", ")", ":", "xl_path", ",", "choice", "=", "_check_xl_path", "(", "xl_path", ")", "if", "sheetnames", "is", "None", ":", "sheetnames", "=", "get_sheet_list", "(", "xl_path", ")", "sheets", "=", "pd", ".", "read_excel", "(", "xl_path", ",", "sheetname", "=", "sheetnames", ")", "if", "add_tab_names", ":", "for", "tab", "in", "sheets", ":", "sheets", "[", "tab", "]", "[", "'Tab'", "]", "=", "[", "tab", "]", "*", "len", "(", "sheets", "[", "tab", "]", ")", "return", "pd", ".", "concat", "(", "[", "sheets", "[", "tab", "]", "for", "tab", "in", "sheets", "]", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
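Assuming boyle is installed and a multi-sheet workbook exists at the given (hypothetical) path, concat_sheets stacks every sheet into one frame; add_tab_names=True keeps the provenance of each row:

from boyle.excel_utils import concat_sheets

df = concat_sheets('~/data/subjects.xlsx', sheetnames=None, add_tab_names=True)
print(df['Tab'].unique())  # one entry per sheet name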
valid
_check_cols
Raise an AttributeError if `df` does not have a column named as an item of the list of strings `col_names`.
boyle/excel_utils.py
def _check_cols(df, col_names):
    """ Raise an AttributeError if `df` does not have a column named as an item
    of the list of strings `col_names`.
    """
    for col in col_names:
        if not hasattr(df, col):
            raise AttributeError("DataFrame does not have a '{}' column, "
                                 "got {}.".format(col, df.columns))
[ "Raise", "an", "AttributeError", "if", "df", "does", "not", "have", "a", "column", "named", "as", "an", "item", "of", "the", "list", "of", "strings", "col_names", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L149-L156
[ "def", "_check_cols", "(", "df", ",", "col_names", ")", ":", "for", "col", "in", "col_names", ":", "if", "not", "hasattr", "(", "df", ",", "col", ")", ":", "raise", "AttributeError", "(", "\"DataFrame does not have a '{}' column, got {}.\"", ".", "format", "(", "col", ",", "df", ".", "columns", ")", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
col_values
Return a list of not null values from the `col_name` column of `df`.
boyle/excel_utils.py
def col_values(df, col_name):
    """ Return a list of not null values from the `col_name` column of `df`."""
    _check_cols(df, [col_name])

    if 'O' in df[col_name] or pd.np.issubdtype(df[col_name].dtype, str):  # if the column is of strings
        return [nom.lower() for nom in df[pd.notnull(df)][col_name] if not pd.isnull(nom)]
    else:
        return [nom for nom in df[pd.notnull(df)][col_name] if not pd.isnull(nom)]
[ "Return", "a", "list", "of", "not", "null", "values", "from", "the", "col_name", "column", "of", "df", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L159-L166
[ "def", "col_values", "(", "df", ",", "col_name", ")", ":", "_check_cols", "(", "df", ",", "[", "col_name", "]", ")", "if", "'O'", "in", "df", "[", "col_name", "]", "or", "pd", ".", "np", ".", "issubdtype", "(", "df", "[", "col_name", "]", ".", "dtype", ",", "str", ")", ":", "# if the column is of strings", "return", "[", "nom", ".", "lower", "(", ")", "for", "nom", "in", "df", "[", "pd", ".", "notnull", "(", "df", ")", "]", "[", "col_name", "]", "if", "not", "pd", ".", "isnull", "(", "nom", ")", "]", "else", ":", "return", "[", "nom", "for", "nom", "in", "df", "[", "pd", ".", "notnull", "(", "df", ")", "]", "[", "col_name", "]", "if", "not", "pd", ".", "isnull", "(", "nom", ")", "]" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
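A small usage sketch for col_values; note the function relies on pd.np, which only exists in older pandas releases (pre-2.0), so this is hedged to that environment:

import pandas as pd
from boyle.excel_utils import col_values

df = pd.DataFrame({'age': [30, None, 25]})
print(col_values(df, 'age'))  # [30.0, 25.0] -- nulls are dropped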
valid
duplicated_rows
Return a DataFrame with the duplicated values of the column `col_name` in `df`.
boyle/excel_utils.py
def duplicated_rows(df, col_name):
    """ Return a DataFrame with the duplicated values of the column `col_name`
    in `df`."""
    _check_cols(df, [col_name])

    dups = df[pd.notnull(df[col_name]) & df.duplicated(subset=[col_name])]

    return dups
[ "Return", "a", "DataFrame", "with", "the", "duplicated", "values", "of", "the", "column", "col_name", "in", "df", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L169-L175
[ "def", "duplicated_rows", "(", "df", ",", "col_name", ")", ":", "_check_cols", "(", "df", ",", "[", "col_name", "]", ")", "dups", "=", "df", "[", "pd", ".", "notnull", "(", "df", "[", "col_name", "]", ")", "&", "df", ".", "duplicated", "(", "subset", "=", "[", "col_name", "]", ")", "]", "return", "dups" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
duplicated
Return the duplicated items in `values`
boyle/excel_utils.py
def duplicated(values: Sequence):
    """ Return the duplicated items in `values`"""
    vals = pd.Series(values)
    return vals[vals.duplicated()]
[ "Return", "the", "duplicated", "items", "in", "values" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/excel_utils.py#L178-L181
[ "def", "duplicated", "(", "values", ":", "Sequence", ")", ":", "vals", "=", "pd", ".", "Series", "(", "values", ")", "return", "vals", "[", "vals", ".", "duplicated", "(", ")", "]" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
timestamp_with_tzinfo
Serialize a date/time value into an ISO8601 text representation adjusted (if needed) to UTC timezone. For instance: >>> timestamp_with_tzinfo(datetime(2012, 4, 10, 22, 38, 20, 604391)) '2012-04-10T22:38:20.604391Z'
boyle/petitdb.py
def timestamp_with_tzinfo(dt):
    """
    Serialize a date/time value into an ISO8601 text representation
    adjusted (if needed) to UTC timezone.

    For instance:
    >>> timestamp_with_tzinfo(datetime(2012, 4, 10, 22, 38, 20, 604391))
    '2012-04-10T22:38:20.604391Z'
    """
    utc = tzutc()

    if dt.tzinfo:
        dt = dt.astimezone(utc).replace(tzinfo=None)
    return dt.isoformat() + 'Z'
[ "Serialize", "a", "date", "/", "time", "value", "into", "an", "ISO8601", "text", "representation", "adjusted", "(", "if", "needed", ")", "to", "UTC", "timezone", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L38-L51
[ "def", "timestamp_with_tzinfo", "(", "dt", ")", ":", "utc", "=", "tzutc", "(", ")", "if", "dt", ".", "tzinfo", ":", "dt", "=", "dt", ".", "astimezone", "(", "utc", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "return", "dt", ".", "isoformat", "(", ")", "+", "'Z'" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
_to_string
Convert to string all values in `data`. Parameters ---------- data: dict[str]->object Returns ------- string_data: dict[str]->str
boyle/petitdb.py
def _to_string(data):
    """ Convert to string all values in `data`.

    Parameters
    ----------
    data: dict[str]->object

    Returns
    -------
    string_data: dict[str]->str
    """
    sdata = data.copy()
    for k, v in data.items():
        if isinstance(v, datetime):
            sdata[k] = timestamp_to_date_str(v)
        elif not isinstance(v, (string_types, float, int)):
            sdata[k] = str(v)

    return sdata
[ "Convert", "to", "string", "all", "values", "in", "data", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L59-L78
[ "def", "_to_string", "(", "data", ")", ":", "sdata", "=", "data", ".", "copy", "(", ")", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "datetime", ")", ":", "sdata", "[", "k", "]", "=", "timestamp_to_date_str", "(", "v", ")", "elif", "not", "isinstance", "(", "v", ",", "(", "string_types", ",", "float", ",", "int", ")", ")", ":", "sdata", "[", "k", "]", "=", "str", "(", "v", ")", "return", "sdata" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
insert_unique
Insert `data` into `table` ensuring that data has unique values in `table` for the fields listed in `unique_fields`. If `raise_if_found` is True, will raise a NotUniqueItemError if another item with the same `unique_fields` values is found previously in `table`. If False, will return the `eid` from the item found. Parameters ---------- table: tinydb.Table data: dict unique_fields: list of str Name of fields (keys) from `data` which are going to be used to build a sample to look for exactly the same values in the database. If None, will use every key in `data`. raise_if_found: bool Returns ------- eid: int Id of the object inserted or the one found with same `unique_fields`. Raises ------ MoreThanOneItemError Raised even with `raise_if_found` == False if it finds more than one item with the same values as the sample. NotUniqueItemError If `raise_if_found` is True and an item with the same `unique_fields` values from `data` is found in `table`.
boyle/petitdb.py
def insert_unique(table, data, unique_fields=None, *, raise_if_found=False):
    """Insert `data` into `table` ensuring that data has unique values
    in `table` for the fields listed in `unique_fields`.

    If `raise_if_found` is True, will raise a NotUniqueItemError if
    another item with the same `unique_fields` values is found
    previously in `table`.
    If False, will return the `eid` from the item found.

    Parameters
    ----------
    table: tinydb.Table

    data: dict

    unique_fields: list of str
        Name of fields (keys) from `data` which are going to be used to build
        a sample to look for exactly the same values in the database.
        If None, will use every key in `data`.

    raise_if_found: bool

    Returns
    -------
    eid: int
        Id of the object inserted or the one found with same `unique_fields`.

    Raises
    ------
    MoreThanOneItemError
        Raised even with `raise_if_found` == False if it finds more than one
        item with the same values as the sample.

    NotUniqueItemError
        If `raise_if_found` is True and an item with the same `unique_fields`
        values from `data` is found in `table`.
    """
    item = find_unique(table, data, unique_fields)
    if item is not None:
        if raise_if_found:
            raise NotUniqueItemError('Not expected to find an item with the same '
                                     'values for {}. Inserting {} got {} in '
                                     'eid {}.'.format(unique_fields, data,
                                                      table.get(eid=item), item))
        else:
            return item

    return table.insert(data)
[ "Insert", "data", "into", "table", "ensuring", "that", "data", "has", "unique", "values", "in", "table", "for", "the", "fields", "listed", "in", "unique_fields", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L81-L129
[ "def", "insert_unique", "(", "table", ",", "data", ",", "unique_fields", "=", "None", ",", "*", ",", "raise_if_found", "=", "False", ")", ":", "item", "=", "find_unique", "(", "table", ",", "data", ",", "unique_fields", ")", "if", "item", "is", "not", "None", ":", "if", "raise_if_found", ":", "raise", "NotUniqueItemError", "(", "'Not expected to find an item with the same '", "'values for {}. Inserting {} got {} in eid {}.'", ".", "format", "(", "unique_fields", ",", "data", ",", "table", ".", "get", "(", "eid", "=", "item", ")", ",", "item", ")", ")", "else", ":", "return", "item", "return", "table", ".", "insert", "(", "data", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
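A sketch of the insert_unique contract with TinyDB, assuming boyle.petitdb is importable; the second call finds the first record and returns its eid instead of inserting a duplicate:

from tinydb import TinyDB
from boyle.petitdb import insert_unique

table = TinyDB('pet.json').table('subjects')
eid1 = insert_unique(table, {'name': 'ana', 'age': 30})
eid2 = insert_unique(table, {'name': 'ana', 'age': 30})  # found: no new row
assert eid1 == eid2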
valid
search_sample
Search for items in `table` that have the same field sub-set values as in `sample`. Parameters ---------- table: tinydb.table sample: dict Sample data Returns ------- search_result: list of dict List of the items found. The list is empty if no item is found.
boyle/petitdb.py
def search_sample(table, sample):
    """Search for items in `table` that have the same field sub-set values as in `sample`.

    Parameters
    ----------
    table: tinydb.table

    sample: dict
        Sample data

    Returns
    -------
    search_result: list of dict
        List of the items found. The list is empty if no item is found.
    """
    query = _query_sample(sample=sample, operators='__eq__')

    return table.search(query)
[ "Search", "for", "items", "in", "table", "that", "have", "the", "same", "field", "sub", "-", "set", "values", "as", "in", "sample", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L132-L149
[ "def", "search_sample", "(", "table", ",", "sample", ")", ":", "query", "=", "_query_sample", "(", "sample", "=", "sample", ",", "operators", "=", "'__eq__'", ")", "return", "table", ".", "search", "(", "query", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
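Usage sketch with the same assumed setup: search_sample matches on a field subset, ignoring fields the sample does not mention:

from tinydb import TinyDB
from boyle.petitdb import search_sample

table = TinyDB('pet.json').table('subjects')
matches = search_sample(table, {'name': 'ana'})  # 'age' is not constrained
print(matches)  # list of matching records, [] if none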
valid
search_unique
Search for items in `table` that have the same field sub-set values as in `sample`. Expecting it to be unique, otherwise will raise an exception. Parameters ---------- table: tinydb.table sample: dict Sample data Returns ------- search_result: tinydb.database.Element Unique item result of the search, or None if nothing matches. Raises ------ MoreThanOneItemError If the search returns more than one entry.
boyle/petitdb.py
def search_unique(table, sample, unique_fields=None):
    """ Search for items in `table` that have the same field sub-set values as in `sample`.
    Expecting it to be unique, otherwise will raise an exception.

    Parameters
    ----------
    table: tinydb.table

    sample: dict
        Sample data

    Returns
    -------
    search_result: tinydb.database.Element
        Unique item result of the search, or None if nothing matches.

    Raises
    ------
    MoreThanOneItemError
        If the search returns more than one entry.
    """
    if unique_fields is None:
        unique_fields = list(sample.keys())

    query = _query_data(sample, field_names=unique_fields, operators='__eq__')
    items = table.search(query)

    if len(items) == 1:
        return items[0]

    if len(items) == 0:
        return None

    raise MoreThanOneItemError('Expected to find zero or one items, but found '
                               '{} items.'.format(len(items)))
[ "Search", "for", "items", "in", "table", "that", "have", "the", "same", "field", "sub", "-", "set", "values", "as", "in", "sample", ".", "Expecting", "it", "to", "be", "unique", "otherwise", "will", "raise", "an", "exception", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L152-L185
[ "def", "search_unique", "(", "table", ",", "sample", ",", "unique_fields", "=", "None", ")", ":", "if", "unique_fields", "is", "None", ":", "unique_fields", "=", "list", "(", "sample", ".", "keys", "(", ")", ")", "query", "=", "_query_data", "(", "sample", ",", "field_names", "=", "unique_fields", ",", "operators", "=", "'__eq__'", ")", "items", "=", "table", ".", "search", "(", "query", ")", "if", "len", "(", "items", ")", "==", "1", ":", "return", "items", "[", "0", "]", "if", "len", "(", "items", ")", "==", "0", ":", "return", "None", "raise", "MoreThanOneItemError", "(", "'Expected to find zero or one items, but found '", "'{} items.'", ".", "format", "(", "len", "(", "items", ")", ")", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
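Sketch of the three outcomes, assuming MoreThanOneItemError is defined in the same boyle.petitdb module:

from tinydb import TinyDB
from boyle.petitdb import search_unique, MoreThanOneItemError

table = TinyDB('pet.json').table('subjects')
try:
    item = search_unique(table, {'name': 'ana'}, unique_fields=['name'])
except MoreThanOneItemError:
    print('more than one match')
else:
    print(item)  # the single matching record, or None when nothing matches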
valid
find_unique
Search in `table` for an item with the values of the `unique_fields` in the `sample` data. Check that the obtained result is unique. If nothing is found, will return None; if more than one item is found, will raise a MoreThanOneItemError. Parameters ---------- table: tinydb.table sample: dict Sample data unique_fields: list of str Name of fields (keys) from `data` which are going to be used to build a sample to look for exactly the same values in the database. If None, will use every key in `data`. Returns ------- eid: int Id of the object found with same `unique_fields`. None if none is found. Raises ------ MoreThanOneItemError If more than one example is found.
boyle/petitdb.py
def find_unique(table, sample, unique_fields=None):
    """Search in `table` for an item with the values of the `unique_fields`
    in the `sample` data.
    Check that the obtained result is unique. If nothing is found, will return
    None; if more than one item is found, will raise a MoreThanOneItemError.

    Parameters
    ----------
    table: tinydb.table

    sample: dict
        Sample data

    unique_fields: list of str
        Name of fields (keys) from `data` which are going to be used to build
        a sample to look for exactly the same values in the database.
        If None, will use every key in `data`.

    Returns
    -------
    eid: int
        Id of the object found with same `unique_fields`.
        None if none is found.

    Raises
    ------
    MoreThanOneItemError
        If more than one example is found.
    """
    res = search_unique(table, sample, unique_fields)
    if res is not None:
        return res.eid
    else:
        return res
[ "Search", "in", "table", "an", "item", "with", "the", "value", "of", "the", "unique_fields", "in", "the", "sample", "sample", ".", "Check", "if", "the", "the", "obtained", "result", "is", "unique", ".", "If", "nothing", "is", "found", "will", "return", "an", "empty", "list", "if", "there", "is", "more", "than", "one", "item", "found", "will", "raise", "an", "IndexError", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L188-L220
[ "def", "find_unique", "(", "table", ",", "sample", ",", "unique_fields", "=", "None", ")", ":", "res", "=", "search_unique", "(", "table", ",", "sample", ",", "unique_fields", ")", "if", "res", "is", "not", "None", ":", "return", "res", ".", "eid", "else", ":", "return", "res" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
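find_unique is a thin wrapper over search_unique that unwraps the element id; a hedged sketch with the same assumed setup:

from tinydb import TinyDB
from boyle.petitdb import find_unique

table = TinyDB('pet.json').table('subjects')
eid = find_unique(table, {'name': 'ana', 'age': 30}, unique_fields=['name'])
print(eid)  # int eid of the match, or None when no item has name == 'ana'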
valid
_query_sample
Create a TinyDB query that looks for items that have each field in `sample` with a value compared with the correspondent operation in `operators`. Parameters ---------- sample: dict The sample data operators: str or list of str A list of comparison operations for each field value in `sample`. If this is a str, will use the same operator for all `sample` fields. If you want different operators for each field, remember to use an OrderedDict for `sample`. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query
boyle/petitdb.py
def _query_sample(sample, operators='__eq__'):
    """Create a TinyDB query that looks for items that have each field in `sample`
    with a value compared with the correspondent operation in `operators`.

    Parameters
    ----------
    sample: dict
        The sample data

    operators: str or list of str
        A list of comparison operations for each field value in `sample`.
        If this is a str, will use the same operator for all `sample` fields.
        If you want different operators for each field, remember to use an
        OrderedDict for `sample`.
        Check TinyDB.Query class for possible choices.

    Returns
    -------
    query: tinydb.database.Query
    """
    if isinstance(operators, str):
        operators = [operators] * len(sample)

    if len(sample) != len(operators):
        raise ValueError('Expected `operators` to be a string or a list with the same'
                         ' length as `field_names` ({}), got {}.'.format(len(sample),
                                                                         operators))

    queries = []
    for i, fn in enumerate(sample):
        fv = sample[fn]
        op = operators[i]
        queries.append(_build_query(field_name=fn,
                                    field_value=fv,
                                    operator=op))

    return _concat_queries(queries, operators='__and__')
[ "Create", "a", "TinyDB", "query", "that", "looks", "for", "items", "that", "have", "each", "field", "in", "sample", "with", "a", "value", "compared", "with", "the", "correspondent", "operation", "in", "operators", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L223-L258
[ "def", "_query_sample", "(", "sample", ",", "operators", "=", "'__eq__'", ")", ":", "if", "isinstance", "(", "operators", ",", "str", ")", ":", "operators", "=", "[", "operators", "]", "*", "len", "(", "sample", ")", "if", "len", "(", "sample", ")", "!=", "len", "(", "operators", ")", ":", "raise", "ValueError", "(", "'Expected `operators` to be a string or a list with the same'", "' length as `field_names` ({}), got {}.'", ".", "format", "(", "len", "(", "sample", ")", ",", "operators", ")", ")", "queries", "=", "[", "]", "for", "i", ",", "fn", "in", "enumerate", "(", "sample", ")", ":", "fv", "=", "sample", "[", "fn", "]", "op", "=", "operators", "[", "i", "]", "queries", ".", "append", "(", "_build_query", "(", "field_name", "=", "fn", ",", "field_value", "=", "fv", ",", "operator", "=", "op", ")", ")", "return", "_concat_queries", "(", "queries", ",", "operators", "=", "'__and__'", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
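Sketch of per-field operators; the underlying _build_query helper is not shown in this record, so its behavior is assumed from the call site. An OrderedDict keeps each operator aligned with its field:

from collections import OrderedDict
from boyle.petitdb import _query_sample

sample = OrderedDict([('name', 'ana'), ('age', 30)])
query = _query_sample(sample, operators=['__eq__', '__ne__'])  # name == 'ana' AND age != 30
# query is a tinydb Query usable as table.search(query)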
valid
_query_data
Create a tinyDB Query object that looks for items that satisfy the correspondent operator from `operators` for each field in `field_names`, using the values from `data`. Parameters ---------- data: dict The data sample field_names: str or list of str The name of the fields in `data` that will be used for the query. operators: str or list of str A list of comparison operations for each field value in `field_names`. If this is a str, will use the same operator for all `field_names`. If you want different operators for each field, remember to use an OrderedDict for `data`. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query
boyle/petitdb.py
def _query_data(data, field_names=None, operators='__eq__'):
    """Create a tinyDB Query object that looks for items that satisfy the
    correspondent operator from `operators` for each field in `field_names`,
    using the values from `data`.

    Parameters
    ----------
    data: dict
        The data sample

    field_names: str or list of str
        The name of the fields in `data` that will be used for the query.

    operators: str or list of str
        A list of comparison operations for each field value in `field_names`.
        If this is a str, will use the same operator for all `field_names`.
        If you want different operators for each field, remember to use an
        OrderedDict for `data`.
        Check TinyDB.Query class for possible choices.

    Returns
    -------
    query: tinydb.database.Query
    """
    if field_names is None:
        field_names = list(data.keys())

    if isinstance(field_names, str):
        field_names = [field_names]

    # using OrderedDict by default, in case operators has different operators for each field.
    sample = OrderedDict([(fn, data[fn]) for fn in field_names])

    return _query_sample(sample, operators=operators)
[ "Create", "a", "tinyDB", "Query", "object", "that", "looks", "for", "items", "that", "confirms", "the", "correspondent", "operator", "from", "operators", "for", "each", "field_names", "field", "values", "from", "data", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L261-L291
[ "def", "_query_data", "(", "data", ",", "field_names", "=", "None", ",", "operators", "=", "'__eq__'", ")", ":", "if", "field_names", "is", "None", ":", "field_names", "=", "list", "(", "data", ".", "keys", "(", ")", ")", "if", "isinstance", "(", "field_names", ",", "str", ")", ":", "field_names", "=", "[", "field_names", "]", "# using OrderedDict by default, in case operators has different operators for each field.", "sample", "=", "OrderedDict", "(", "[", "(", "fn", ",", "data", "[", "fn", "]", ")", "for", "fn", "in", "field_names", "]", ")", "return", "_query_sample", "(", "sample", ",", "operators", "=", "operators", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
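Sketch: _query_data restricts the query to the listed fields before delegating to _query_sample:

from boyle.petitdb import _query_data

data = {'name': 'ana', 'age': 30, 'site': 'madrid'}
query = _query_data(data, field_names=['name', 'site'])  # 'age' is ignored
# equality is the default operator for every selected field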
valid
_concat_queries
Create a tinyDB Query object that is the concatenation of each query in `queries`. The concatenation operator is taken from `operators`. Parameters ---------- queries: list of tinydb.Query The list of tinydb.Query to be joined. operators: str or list of str List of binary operators to join `queries` into one query. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query
boyle/petitdb.py
def _concat_queries(queries, operators='__and__'): """Create a tinyDB Query object that is the concatenation of each query in `queries`. The concatenation operator is taken from `operators`. Parameters ---------- queries: list of tinydb.Query The list of tinydb.Query to be joined. operators: str or list of str List of binary operators to join `queries` into one query. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query """ # checks first if not queries: raise ValueError('Expected some `queries`, got {}.'.format(queries)) if len(queries) == 1: return queries[0] if isinstance(operators, str): operators = [operators] * (len(queries) - 1) if len(queries) - 1 != len(operators): raise ValueError('Expected `operators` to be a string or a list with the same' ' length as `field_names` ({}), got {}.'.format(len(queries), operators)) # recursively build the query first, rest, end = queries[0], queries[1:-1], queries[-1:][0] bigop = getattr(first, operators[0]) for i, q in enumerate(rest): bigop = getattr(bigop(q), operators[i]) return bigop(end)
def _concat_queries(queries, operators='__and__'): """Create a tinyDB Query object that is the concatenation of each query in `queries`. The concatenation operator is taken from `operators`. Parameters ---------- queries: list of tinydb.Query The list of tinydb.Query to be joined. operators: str or list of str List of binary operators to join `queries` into one query. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query """ # checks first if not queries: raise ValueError('Expected some `queries`, got {}.'.format(queries)) if len(queries) == 1: return queries[0] if isinstance(operators, str): operators = [operators] * (len(queries) - 1) if len(queries) - 1 != len(operators): raise ValueError('Expected `operators` to be a string or a list with the same' ' length as `field_names` ({}), got {}.'.format(len(queries), operators)) # recursively build the query first, rest, end = queries[0], queries[1:-1], queries[-1:][0] bigop = getattr(first, operators[0]) for i, q in enumerate(rest): bigop = getattr(bigop(q), operators[i]) return bigop(end)
[ "Create", "a", "tinyDB", "Query", "object", "that", "is", "the", "concatenation", "of", "each", "query", "in", "queries", ".", "The", "concatenation", "operator", "is", "taken", "from", "operators", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L294-L332
[ "def", "_concat_queries", "(", "queries", ",", "operators", "=", "'__and__'", ")", ":", "# checks first", "if", "not", "queries", ":", "raise", "ValueError", "(", "'Expected some `queries`, got {}.'", ".", "format", "(", "queries", ")", ")", "if", "len", "(", "queries", ")", "==", "1", ":", "return", "queries", "[", "0", "]", "if", "isinstance", "(", "operators", ",", "str", ")", ":", "operators", "=", "[", "operators", "]", "*", "(", "len", "(", "queries", ")", "-", "1", ")", "if", "len", "(", "queries", ")", "-", "1", "!=", "len", "(", "operators", ")", ":", "raise", "ValueError", "(", "'Expected `operators` to be a string or a list with the same'", "' length as `field_names` ({}), got {}.'", ".", "format", "(", "len", "(", "queries", ")", ",", "operators", ")", ")", "# recursively build the query", "first", ",", "rest", ",", "end", "=", "queries", "[", "0", "]", ",", "queries", "[", "1", ":", "-", "1", "]", ",", "queries", "[", "-", "1", ":", "]", "[", "0", "]", "bigop", "=", "getattr", "(", "first", ",", "operators", "[", "0", "]", ")", "for", "i", ",", "q", "in", "enumerate", "(", "rest", ")", ":", "bigop", "=", "getattr", "(", "bigop", "(", "q", ")", ",", "operators", "[", "i", "]", ")", "return", "bigop", "(", "end", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
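A minimal sketch of joining elementary queries with _concat_queries, assuming tinydb is installed; '__and__' chains the queries exactly as (q1 & q2) would:

from tinydb import where

q1 = where('age') >= 18          # elementary query on 'age'
q2 = where('site') == 'madrid'   # elementary query on 'site'
combined = _concat_queries([q1, q2], operators='__and__')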
valid
_build_query
Create a tinyDB Query object with the format: (where(`field_name`) `operator` `field_value`) Parameters ---------- field_name: str The name of the field to be queried. field_value: The value of the field operator: str The comparison operator. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query
boyle/petitdb.py
def _build_query(field_name, field_value, operator='__eq__'): """Create a tinyDB Query object with the format: (where(`field_name`) `operator` `field_value`) Parameters ---------- field_name: str The name of the field to be queried. field_value: The value of the field operator: str The comparison operator. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query """ qelem = where(field_name) if not hasattr(qelem, operator): raise NotImplementedError('Operator `{}` not found in query object.'.format(operator)) else: query = getattr(qelem, operator) return query(field_value)
def _build_query(field_name, field_value, operator='__eq__'): """Create a tinyDB Query object with the format: (where(`field_name`) `operator` `field_value`) Parameters ---------- field_name: str The name of the field to be queried. field_value: The value of the field operator: str The comparison operator. Check TinyDB.Query class for possible choices. Returns ------- query: tinydb.database.Query """ qelem = where(field_name) if not hasattr(qelem, operator): raise NotImplementedError('Operator `{}` not found in query object.'.format(operator)) else: query = getattr(qelem, operator) return query(field_value)
[ "Create", "a", "tinyDB", "Query", "object", "with", "the", "format", ":", "(", "where", "(", "field_name", ")", "operator", "field_value", ")" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L335-L362
[ "def", "_build_query", "(", "field_name", ",", "field_value", ",", "operator", "=", "'__eq__'", ")", ":", "qelem", "=", "where", "(", "field_name", ")", "if", "not", "hasattr", "(", "qelem", ",", "operator", ")", ":", "raise", "NotImplementedError", "(", "'Operator `{}` not found in query object.'", ".", "format", "(", "operator", ")", ")", "else", ":", "query", "=", "getattr", "(", "qelem", ",", "operator", ")", "return", "query", "(", "field_value", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
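A short sketch for _build_query, assuming `db` is a tinydb.TinyDB instance; the field name and threshold are hypothetical:

# Equivalent to searching with the query (where('age') < 30).
young = db.search(_build_query('age', 30, operator='__lt__'))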
valid
PetitDB.search_by_eid
Return the element in `table_name` with Object ID `eid`. If no element is found, a KeyError exception is raised. Parameters ---------- table_name: str The name of the table to look in. eid: int The Object ID of the element to look for. Returns ------- elem: tinydb.database.Element Raises ------ KeyError If the element with ID `eid` is not found.
boyle/petitdb.py
def search_by_eid(self, table_name, eid): """Return the element in `table_name` with Object ID `eid`. If no element is found, a KeyError exception is raised. Parameters ---------- table_name: str The name of the table to look in. eid: int The Object ID of the element to look for. Returns ------- elem: tinydb.database.Element Raises ------ KeyError If the element with ID `eid` is not found. """ elem = self.table(table_name).get(eid=eid) if elem is None: raise KeyError('Could not find {} with eid {}.'.format(table_name, eid)) return elem
def search_by_eid(self, table_name, eid): """Return the element in `table_name` with Object ID `eid`. If no element is found, a KeyError exception is raised. Parameters ---------- table_name: str The name of the table to look in. eid: int The Object ID of the element to look for. Returns ------- elem: tinydb.database.Element Raises ------ KeyError If the element with ID `eid` is not found. """ elem = self.table(table_name).get(eid=eid) if elem is None: raise KeyError('Could not find {} with eid {}.'.format(table_name, eid)) return elem
[ "Return", "the", "element", "in", "table_name", "with", "Object", "ID", "eid", ".", "If", "None", "is", "found", "will", "raise", "a", "KeyError", "exception", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L373-L398
[ "def", "search_by_eid", "(", "self", ",", "table_name", ",", "eid", ")", ":", "elem", "=", "self", ".", "table", "(", "table_name", ")", ".", "get", "(", "eid", "=", "eid", ")", "if", "elem", "is", "None", ":", "raise", "KeyError", "(", "'Could not find {} with eid {}.'", ".", "format", "(", "table_name", ",", "eid", ")", ")", "return", "elem" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
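A usage sketch for search_by_eid, assuming `db` is a PetitDB instance; the table name and eid are hypothetical:

try:
    elem = db.search_by_eid('subjects', eid=3)
except KeyError:
    elem = None  # no element with that Object ID in the table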
valid
PetitDB.insert_unique
Insert `data` into `table` ensuring that data has unique values in `table` for the fields listed in `unique_fields`. If `raise_if_found` is True, will raise a NotUniqueItemError if another item with the same `unique_fields` values is found in `table`. If False, will return the `eid` from the item found. Parameters ---------- table_name: str data: dict unique_fields: list of str Name of fields (keys) from `data` which are going to be used to build a sample to look for exactly the same values in the database. If None, will use every key in `data`. raise_if_found: bool Returns ------- eid: int Id of the object inserted or the one found with same `unique_fields`. Raises ------ MoreThanOneItemError Raised even with `raise_if_found` == False if it finds more than one item with the same values as the sample. NotUniqueItemError If `raise_if_found` is True and an item with the same `unique_fields` values from `data` is found in `table`.
boyle/petitdb.py
def insert_unique(self, table_name, data, unique_fields=None, *, raise_if_found=False): """Insert `data` into `table` ensuring that data has unique values in `table` for the fields listed in `unique_fields`. If `raise_if_found` is True, will raise a NotUniqueItemError if another item with the same `unique_fields` values is found in `table`. If False, will return the `eid` from the item found. Parameters ---------- table_name: str data: dict unique_fields: list of str Name of fields (keys) from `data` which are going to be used to build a sample to look for exactly the same values in the database. If None, will use every key in `data`. raise_if_found: bool Returns ------- eid: int Id of the object inserted or the one found with same `unique_fields`. Raises ------ MoreThanOneItemError Raised even with `raise_if_found` == False if it finds more than one item with the same values as the sample. NotUniqueItemError If `raise_if_found` is True and an item with the same `unique_fields` values from `data` is found in `table`. """ return insert_unique(table=self.table(table_name), data=_to_string(data), unique_fields=unique_fields, raise_if_found=raise_if_found)
def insert_unique(self, table_name, data, unique_fields=None, *, raise_if_found=False): """Insert `data` into `table` ensuring that data has unique values in `table` for the fields listed in `unique_fields`. If `raise_if_found` is True, will raise a NotUniqueItemError if another item with the same `unique_fields` values is found in `table`. If False, will return the `eid` from the item found. Parameters ---------- table_name: str data: dict unique_fields: list of str Name of fields (keys) from `data` which are going to be used to build a sample to look for exactly the same values in the database. If None, will use every key in `data`. raise_if_found: bool Returns ------- eid: int Id of the object inserted or the one found with same `unique_fields`. Raises ------ MoreThanOneItemError Raised even with `raise_if_found` == False if it finds more than one item with the same values as the sample. NotUniqueItemError If `raise_if_found` is True and an item with the same `unique_fields` values from `data` is found in `table`. """ return insert_unique(table=self.table(table_name), data=_to_string(data), unique_fields=unique_fields, raise_if_found=raise_if_found)
[ "Insert", "data", "into", "table", "ensuring", "that", "data", "has", "unique", "values", "in", "table", "for", "the", "fields", "listed", "in", "unique_fields", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L400-L440
[ "def", "insert_unique", "(", "self", ",", "table_name", ",", "data", ",", "unique_fields", "=", "None", ",", "*", ",", "raise_if_found", "=", "False", ")", ":", "return", "insert_unique", "(", "table", "=", "self", ".", "table", "(", "table_name", ")", ",", "data", "=", "_to_string", "(", "data", ")", ",", "unique_fields", "=", "unique_fields", ",", "raise_if_found", "=", "raise_if_found", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
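A usage sketch for insert_unique, assuming `db` is a PetitDB instance; table and field names are hypothetical:

data = {'name': 'bob', 'age': 40, 'scan': 'anat_t1'}
# ('name', 'scan') acts as the uniqueness key: repeating this call with
# the same values returns the existing eid instead of inserting twice.
eid = db.insert_unique('subjects', data, unique_fields=['name', 'scan'])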
valid
PetitDB.search_unique
Search in `table` an item with the value of the `unique_fields` in the `data` sample. Check if the obtained result is unique. If nothing is found will return an empty list, if there is more than one item found, will raise a MoreThanOneItemError. Parameters ---------- table_name: str sample: dict Sample data unique_fields: list of str Name of fields (keys) from `data` which are going to be used to build a sample to look for exactly the same values in the database. If None, will use every key in `data`. Returns ------- eid: int Id of the object found with same `unique_fields`. None if none is found. Raises ------ MoreThanOneItemError If more than one example is found.
boyle/petitdb.py
def search_unique(self, table_name, sample, unique_fields=None): """ Search in `table` an item with the value of the `unique_fields` in the `data` sample. Check if the obtained result is unique. If nothing is found will return an empty list, if there is more than one item found, will raise a MoreThanOneItemError. Parameters ---------- table_name: str sample: dict Sample data unique_fields: list of str Name of fields (keys) from `data` which are going to be used to build a sample to look for exactly the same values in the database. If None, will use every key in `data`. Returns ------- eid: int Id of the object found with same `unique_fields`. None if none is found. Raises ------ MoreThanOneItemError If more than one example is found. """ return search_unique(table=self.table(table_name), sample=sample, unique_fields=unique_fields)
def search_unique(self, table_name, sample, unique_fields=None): """ Search in `table` an item with the value of the `unique_fields` in the `data` sample. Check if the obtained result is unique. If nothing is found will return an empty list, if there is more than one item found, will raise a MoreThanOneItemError. Parameters ---------- table_name: str sample: dict Sample data unique_fields: list of str Name of fields (keys) from `data` which are going to be used to build a sample to look for exactly the same values in the database. If None, will use every key in `data`. Returns ------- eid: int Id of the object found with same `unique_fields`. None if none is found. Raises ------ MoreThanOneItemError If more than one example is found. """ return search_unique(table=self.table(table_name), sample=sample, unique_fields=unique_fields)
[ "Search", "in", "table", "an", "item", "with", "the", "value", "of", "the", "unique_fields", "in", "the", "data", "sample", ".", "Check", "if", "the", "the", "obtained", "result", "is", "unique", ".", "If", "nothing", "is", "found", "will", "return", "an", "empty", "list", "if", "there", "is", "more", "than", "one", "item", "found", "will", "raise", "an", "IndexError", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L442-L472
[ "def", "search_unique", "(", "self", ",", "table_name", ",", "sample", ",", "unique_fields", "=", "None", ")", ":", "return", "search_unique", "(", "table", "=", "self", ".", "table", "(", "table_name", ")", ",", "sample", "=", "sample", ",", "unique_fields", "=", "unique_fields", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
PetitDB.search_sample
Search for items in `table` that have the same field sub-set values as in `sample`. Parameters ---------- table_name: str sample: dict Sample data Returns ------- search_result: list of dict List of the items found. The list is empty if no item is found.
boyle/petitdb.py
def search_sample(self, table_name, sample): """Search for items in `table` that have the same field sub-set values as in `sample`. Parameters ---------- table_name: str sample: dict Sample data Returns ------- search_result: list of dict List of the items found. The list is empty if no item is found. """ return search_sample(table=self.table(table_name), sample=sample)
def search_sample(self, table_name, sample): """Search for items in `table` that have the same field sub-set values as in `sample`. Parameters ---------- table_name: str sample: dict Sample data Returns ------- search_result: list of dict List of the items found. The list is empty if no item is found. """ return search_sample(table=self.table(table_name), sample=sample)
[ "Search", "for", "items", "in", "table", "that", "have", "the", "same", "field", "sub", "-", "set", "values", "as", "in", "sample", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L474-L490
[ "def", "search_sample", "(", "self", ",", "table_name", ",", "sample", ")", ":", "return", "search_sample", "(", "table", "=", "self", ".", "table", "(", "table_name", ")", ",", "sample", "=", "sample", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
PetitDB.is_unique
Return True if an item with the value of `unique_fields` from `data` is unique in the table with `table_name`. False if no sample is found or more than one is found. See function `find_unique` for more details. Parameters ---------- table_name: str sample: dict Sample data for query unique_fields: str or list of str Returns ------- is_unique: bool
boyle/petitdb.py
def is_unique(self, table_name, sample, unique_fields=None): """Return True if an item with the value of `unique_fields` from `data` is unique in the table with `table_name`. False if no sample is found or more than one is found. See function `find_unique` for more details. Parameters ---------- table_name: str sample: dict Sample data for query unique_fields: str or list of str Returns ------- is_unique: bool """ try: eid = find_unique(self.table(table_name), sample=sample, unique_fields=unique_fields) except: return False else: return eid is not None
def is_unique(self, table_name, sample, unique_fields=None): """Return True if an item with the value of `unique_fields` from `data` is unique in the table with `table_name`. False if no sample is found or more than one is found. See function `find_unique` for more details. Parameters ---------- table_name: str sample: dict Sample data for query unique_fields: str or list of str Returns ------- is_unique: bool """ try: eid = find_unique(self.table(table_name), sample=sample, unique_fields=unique_fields) except: return False else: return eid is not None
[ "Return", "True", "if", "an", "item", "with", "the", "value", "of", "unique_fields", "from", "data", "is", "unique", "in", "the", "table", "with", "table_name", ".", "False", "if", "no", "sample", "is", "found", "or", "more", "than", "one", "is", "found", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L492-L519
[ "def", "is_unique", "(", "self", ",", "table_name", ",", "sample", ",", "unique_fields", "=", "None", ")", ":", "try", ":", "eid", "=", "find_unique", "(", "self", ".", "table", "(", "table_name", ")", ",", "sample", "=", "sample", ",", "unique_fields", "=", "unique_fields", ")", "except", ":", "return", "False", "else", ":", "return", "eid", "is", "not", "None" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
PetitDB.update_unique
Update the unique matching element to have a given set of fields. Parameters ---------- table_name: str fields: dict or function[dict -> None] new data/values to insert into the unique element or a method that will update the elements. data: dict Sample data for query cond: tinydb.Query which elements to update unique_fields: list of str raise_if_not_found: bool Will raise an exception if the element is not found for update. Returns ------- eid: int The eid of the updated element if found, None otherwise.
boyle/petitdb.py
def update_unique(self, table_name, fields, data, cond=None, unique_fields=None, *, raise_if_not_found=False): """Update the unique matching element to have a given set of fields. Parameters ---------- table_name: str fields: dict or function[dict -> None] new data/values to insert into the unique element or a method that will update the elements. data: dict Sample data for query cond: tinydb.Query which elements to update unique_fields: list of str raise_if_not_found: bool Will raise an exception if the element is not found for update. Returns ------- eid: int The eid of the updated element if found, None otherwise. """ eid = find_unique(self.table(table_name), data, unique_fields) if eid is None: if raise_if_not_found: msg = 'Could not find {} with {}'.format(table_name, data) if cond is not None: msg += ' where {}.'.format(cond) raise IndexError(msg) else: self.table(table_name).update(_to_string(fields), cond=cond, eids=[eid]) return eid
def update_unique(self, table_name, fields, data, cond=None, unique_fields=None, *, raise_if_not_found=False): """Update the unique matching element to have a given set of fields. Parameters ---------- table_name: str fields: dict or function[dict -> None] new data/values to insert into the unique element or a method that will update the elements. data: dict Sample data for query cond: tinydb.Query which elements to update unique_fields: list of str raise_if_not_found: bool Will raise an exception if the element is not found for update. Returns ------- eid: int The eid of the updated element if found, None otherwise. """ eid = find_unique(self.table(table_name), data, unique_fields) if eid is None: if raise_if_not_found: msg = 'Could not find {} with {}'.format(table_name, data) if cond is not None: msg += ' where {}.'.format(cond) raise IndexError(msg) else: self.table(table_name).update(_to_string(fields), cond=cond, eids=[eid]) return eid
[ "Update", "the", "unique", "matching", "element", "to", "have", "a", "given", "set", "of", "fields", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L521-L561
[ "def", "update_unique", "(", "self", ",", "table_name", ",", "fields", ",", "data", ",", "cond", "=", "None", ",", "unique_fields", "=", "None", ",", "*", ",", "raise_if_not_found", "=", "False", ")", ":", "eid", "=", "find_unique", "(", "self", ".", "table", "(", "table_name", ")", ",", "data", ",", "unique_fields", ")", "if", "eid", "is", "None", ":", "if", "raise_if_not_found", ":", "msg", "=", "'Could not find {} with {}'", ".", "format", "(", "table_name", ",", "data", ")", "if", "cond", "is", "not", "None", ":", "msg", "+=", "' where {}.'", ".", "format", "(", "cond", ")", "raise", "IndexError", "(", "msg", ")", "else", ":", "self", ".", "table", "(", "table_name", ")", ".", "update", "(", "_to_string", "(", "fields", ")", ",", "cond", "=", "cond", ",", "eids", "=", "[", "eid", "]", ")", "return", "eid" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
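A usage sketch for update_unique, assuming `db` is a PetitDB instance; table and field names are hypothetical:

# Find the unique element matching the ('name', 'scan') sample and
# overwrite its 'age' field; raise if no such element exists.
eid = db.update_unique('subjects',
                       fields={'age': 41},
                       data={'name': 'bob', 'scan': 'anat_t1'},
                       unique_fields=['name', 'scan'],
                       raise_if_not_found=True)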
valid
PetitDB.count
Return the number of items that match the `sample` field values in table `table_name`. Check function search_sample for more details.
boyle/petitdb.py
def count(self, table_name, sample): """Return the number of items that match the `sample` field values in table `table_name`. Check function search_sample for more details. """ return len(list(search_sample(table=self.table(table_name), sample=sample)))
def count(self, table_name, sample): """Return the number of items that match the `sample` field values in table `table_name`. Check function search_sample for more details. """ return len(list(search_sample(table=self.table(table_name), sample=sample)))
[ "Return", "the", "number", "of", "items", "that", "match", "the", "sample", "field", "values", "in", "table", "table_name", ".", "Check", "function", "search_sample", "for", "more", "details", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/petitdb.py#L563-L569
[ "def", "count", "(", "self", ",", "table_name", ",", "sample", ")", ":", "return", "len", "(", "list", "(", "search_sample", "(", "table", "=", "self", ".", "table", "(", "table_name", ")", ",", "sample", "=", "sample", ")", ")", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
is_img
Check for get_data and get_affine method in an object Parameters ---------- obj: any object Tested object Returns ------- is_img: boolean True if get_data and get_affine methods are present and callable, False otherwise.
boyle/nifti/check.py
def is_img(obj): """ Check for get_data and get_affine method in an object Parameters ---------- obj: any object Tested object Returns ------- is_img: boolean True if get_data and get_affine methods are present and callable, False otherwise. """ try: get_data = getattr(obj, 'get_data') get_affine = getattr(obj, 'get_affine') return isinstance(get_data, collections.Callable) and \ isinstance(get_affine, collections.Callable) except AttributeError: return False
def is_img(obj): """ Check for get_data and get_affine method in an object Parameters ---------- obj: any object Tested object Returns ------- is_img: boolean True if get_data and get_affine methods are present and callable, False otherwise. """ try: get_data = getattr(obj, 'get_data') get_affine = getattr(obj, 'get_affine') return isinstance(get_data, collections.Callable) and \ isinstance(get_affine, collections.Callable) except AttributeError: return False
[ "Check", "for", "get_data", "and", "get_affine", "method", "in", "an", "object" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L32-L53
[ "def", "is_img", "(", "obj", ")", ":", "try", ":", "get_data", "=", "getattr", "(", "obj", ",", "'get_data'", ")", "get_affine", "=", "getattr", "(", "obj", ",", "'get_affine'", ")", "return", "isinstance", "(", "get_data", ",", "collections", ".", "Callable", ")", "and", "isinstance", "(", "get_affine", ",", "collections", ".", "Callable", ")", "except", "AttributeError", ":", "return", "False" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
get_data
Get the data in the image without having a side effect on the Nifti1Image object Parameters ---------- img: Nifti1Image Returns ------- np.ndarray
boyle/nifti/check.py
def get_data(img): """Get the data in the image without having a side effect on the Nifti1Image object Parameters ---------- img: Nifti1Image Returns ------- np.ndarray """ if hasattr(img, '_data_cache') and img._data_cache is None: # Copy locally the nifti_image to avoid the side effect of data # loading img = copy.deepcopy(img) # force garbage collector gc.collect() return img.get_data()
def get_data(img): """Get the data in the image without having a side effect on the Nifti1Image object Parameters ---------- img: Nifti1Image Returns ------- np.ndarray """ if hasattr(img, '_data_cache') and img._data_cache is None: # Copy locally the nifti_image to avoid the side effect of data # loading img = copy.deepcopy(img) # force garbage collector gc.collect() return img.get_data()
[ "Get", "the", "data", "in", "the", "image", "without", "having", "a", "side", "effect", "on", "the", "Nifti1Image", "object" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L56-L73
[ "def", "get_data", "(", "img", ")", ":", "if", "hasattr", "(", "img", ",", "'_data_cache'", ")", "and", "img", ".", "_data_cache", "is", "None", ":", "# Copy locally the nifti_image to avoid the side effect of data", "# loading", "img", "=", "copy", ".", "deepcopy", "(", "img", ")", "# force garbage collector", "gc", ".", "collect", "(", ")", "return", "img", ".", "get_data", "(", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
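A short sketch for get_data, assuming nibabel is installed; the file name is hypothetical:

import nibabel as nib

img = nib.load('subj01_t1.nii.gz')
data = get_data(img)  # voxel array, read without caching the data on `img`
print(data.shape, data.dtype)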
valid
get_shape
Return the shape of img. Parameters ---------- img: Returns ------- shape: tuple
boyle/nifti/check.py
def get_shape(img): """Return the shape of img. Parameters ---------- img: Returns ------- shape: tuple """ if hasattr(img, 'shape'): shape = img.shape else: shape = img.get_data().shape return shape
def get_shape(img): """Return the shape of img. Parameters ---------- img: Returns ------- shape: tuple """ if hasattr(img, 'shape'): shape = img.shape else: shape = img.get_data().shape return shape
[ "Return", "the", "shape", "of", "img", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L76-L91
[ "def", "get_shape", "(", "img", ")", ":", "if", "hasattr", "(", "img", ",", "'shape'", ")", ":", "shape", "=", "img", ".", "shape", "else", ":", "shape", "=", "img", ".", "get_data", "(", ")", ".", "shape", "return", "shape" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
is_valid_coordinate
Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries. Parameters ---------- @param img: @param i: @param j: @param k: Returns ------- bool
boyle/nifti/check.py
def is_valid_coordinate(img, i, j, k): """Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries. Parameters ---------- @param img: @param i: @param j: @param k: Returns ------- bool """ imgx, imgy, imgz = get_shape(img) return (i >= 0 and i < imgx) and \ (j >= 0 and j < imgy) and \ (k >= 0 and k < imgz)
def is_valid_coordinate(img, i, j, k): """Return True if the given (i, j, k) voxel grid coordinate values are within the img boundaries. Parameters ---------- @param img: @param i: @param j: @param k: Returns ------- bool """ imgx, imgy, imgz = get_shape(img) return (i >= 0 and i < imgx) and \ (j >= 0 and j < imgy) and \ (k >= 0 and k < imgz)
[ "Return", "True", "if", "the", "given", "(", "i", "j", "k", ")", "voxel", "grid", "coordinate", "values", "are", "within", "the", "img", "boundaries", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L94-L111
[ "def", "is_valid_coordinate", "(", "img", ",", "i", ",", "j", ",", "k", ")", ":", "imgx", ",", "imgy", ",", "imgz", "=", "get_shape", "(", "img", ")", "return", "(", "i", ">=", "0", "and", "i", "<", "imgx", ")", "and", "(", "j", ">=", "0", "and", "j", "<", "imgy", ")", "and", "(", "k", ">=", "0", "and", "k", "<", "imgz", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
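A short sketch for is_valid_coordinate, assuming nibabel is installed; the file name and coordinates are hypothetical:

import nibabel as nib

img = nib.load('subj01_t1.nii.gz')
# Bounds-check the voxel grid coordinate before indexing into the data.
if is_valid_coordinate(img, 10, 20, 30):
    value = get_data(img)[10, 20, 30]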
valid
check_img_compatibility
Raise NiftiFilesNotCompatible if one_img and another_img do not have the same shape. If both are nibabel.Nifti1Image, the affine matrices are also checked. Parameters ---------- one_img: nibabel.Nifti1Image or np.ndarray another_img: nibabel.Nifti1Image or np.ndarray only_check_3d: bool If True will check only the 3D part of the affine matrices when they have more dimensions. Raises ------ NiftiFilesNotCompatible
boyle/nifti/check.py
def check_img_compatibility(one_img, another_img, only_check_3d=False): """Raise NiftiFilesNotCompatible if one_img and another_img do not have the same shape. If both are nibabel.Nifti1Image, the affine matrices are also checked. Parameters ---------- one_img: nibabel.Nifti1Image or np.ndarray another_img: nibabel.Nifti1Image or np.ndarray only_check_3d: bool If True will check only the 3D part of the affine matrices when they have more dimensions. Raises ------ NiftiFilesNotCompatible """ nd_to_check = None if only_check_3d: nd_to_check = 3 if hasattr(one_img, 'shape') and hasattr(another_img, 'shape'): if not have_same_shape(one_img, another_img, nd_to_check=nd_to_check): msg = 'Shape of the first image: \n{}\n is different from second one: \n{}'.format(one_img.shape, another_img.shape) raise NiftiFilesNotCompatible(repr_imgs(one_img), repr_imgs(another_img), message=msg) if hasattr(one_img, 'get_affine') and hasattr(another_img, 'get_affine'): if not have_same_affine(one_img, another_img, only_check_3d=only_check_3d): msg = 'Affine matrix of the first image: \n{}\n is different ' \ 'from second one:\n{}'.format(one_img.get_affine(), another_img.get_affine()) raise NiftiFilesNotCompatible(repr_imgs(one_img), repr_imgs(another_img), message=msg)
def check_img_compatibility(one_img, another_img, only_check_3d=False): """Raise NiftiFilesNotCompatible if one_img and another_img do not have the same shape. If both are nibabel.Nifti1Image, the affine matrices are also checked. Parameters ---------- one_img: nibabel.Nifti1Image or np.ndarray another_img: nibabel.Nifti1Image or np.ndarray only_check_3d: bool If True will check only the 3D part of the affine matrices when they have more dimensions. Raises ------ NiftiFilesNotCompatible """ nd_to_check = None if only_check_3d: nd_to_check = 3 if hasattr(one_img, 'shape') and hasattr(another_img, 'shape'): if not have_same_shape(one_img, another_img, nd_to_check=nd_to_check): msg = 'Shape of the first image: \n{}\n is different from second one: \n{}'.format(one_img.shape, another_img.shape) raise NiftiFilesNotCompatible(repr_imgs(one_img), repr_imgs(another_img), message=msg) if hasattr(one_img, 'get_affine') and hasattr(another_img, 'get_affine'): if not have_same_affine(one_img, another_img, only_check_3d=only_check_3d): msg = 'Affine matrix of the first image: \n{}\n is different ' \ 'from second one:\n{}'.format(one_img.get_affine(), another_img.get_affine()) raise NiftiFilesNotCompatible(repr_imgs(one_img), repr_imgs(another_img), message=msg)
[ "Return", "true", "if", "one_img", "and", "another_img", "have", "the", "same", "shape", ".", "False", "otherwise", ".", "If", "both", "are", "nibabel", ".", "Nifti1Image", "will", "also", "check", "for", "affine", "matrices", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L137-L169
[ "def", "check_img_compatibility", "(", "one_img", ",", "another_img", ",", "only_check_3d", "=", "False", ")", ":", "nd_to_check", "=", "None", "if", "only_check_3d", ":", "nd_to_check", "=", "3", "if", "hasattr", "(", "one_img", ",", "'shape'", ")", "and", "hasattr", "(", "another_img", ",", "'shape'", ")", ":", "if", "not", "have_same_shape", "(", "one_img", ",", "another_img", ",", "nd_to_check", "=", "nd_to_check", ")", ":", "msg", "=", "'Shape of the first image: \\n{}\\n is different from second one: \\n{}'", ".", "format", "(", "one_img", ".", "shape", ",", "another_img", ".", "shape", ")", "raise", "NiftiFilesNotCompatible", "(", "repr_imgs", "(", "one_img", ")", ",", "repr_imgs", "(", "another_img", ")", ",", "message", "=", "msg", ")", "if", "hasattr", "(", "one_img", ",", "'get_affine'", ")", "and", "hasattr", "(", "another_img", ",", "'get_affine'", ")", ":", "if", "not", "have_same_affine", "(", "one_img", ",", "another_img", ",", "only_check_3d", "=", "only_check_3d", ")", ":", "msg", "=", "'Affine matrix of the first image: \\n{}\\n is different '", "'from second one:\\n{}'", ".", "format", "(", "one_img", ".", "get_affine", "(", ")", ",", "another_img", ".", "get_affine", "(", ")", ")", "raise", "NiftiFilesNotCompatible", "(", "repr_imgs", "(", "one_img", ")", ",", "repr_imgs", "(", "another_img", ")", ",", "message", "=", "msg", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
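A usage sketch for check_img_compatibility; the file names are hypothetical, and the images are loaded first (with check_img, defined below in this module) so that the shape attributes exist:

img1 = check_img('subj01_t1.nii.gz')
img2 = check_img('subj01_mask.nii.gz')
try:
    check_img_compatibility(img1, img2, only_check_3d=True)
except NiftiFilesNotCompatible:
    print('images differ in shape or affine')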
valid
have_same_affine
Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError
boyle/nifti/check.py
def have_same_affine(one_img, another_img, only_check_3d=False): """Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError """ img1 = check_img(one_img) img2 = check_img(another_img) ndim1 = len(img1.shape) ndim2 = len(img2.shape) if ndim1 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img1), ndim1)) if ndim2 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img2), ndim2)) affine1 = img1.get_affine() affine2 = img2.get_affine() if only_check_3d: affine1 = affine1[:3, :3] affine2 = affine2[:3, :3] try: return np.allclose(affine1, affine2) except ValueError: return False except: raise
def have_same_affine(one_img, another_img, only_check_3d=False): """Return True if the affine matrix of one_img is close to the affine matrix of another_img. False otherwise. Parameters ---------- one_img: nibabel.Nifti1Image another_img: nibabel.Nifti1Image only_check_3d: bool If True will extract only the 3D part of the affine matrices when they have more dimensions. Returns ------- bool Raises ------ ValueError """ img1 = check_img(one_img) img2 = check_img(another_img) ndim1 = len(img1.shape) ndim2 = len(img2.shape) if ndim1 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img1), ndim1)) if ndim2 < 3: raise ValueError('Image {} has only {} dimensions, at least 3 dimensions are expected.'.format(repr_imgs(img2), ndim2)) affine1 = img1.get_affine() affine2 = img2.get_affine() if only_check_3d: affine1 = affine1[:3, :3] affine2 = affine2[:3, :3] try: return np.allclose(affine1, affine2) except ValueError: return False except: raise
[ "Return", "True", "if", "the", "affine", "matrix", "of", "one_img", "is", "close", "to", "the", "affine", "matrix", "of", "another_img", ".", "False", "otherwise", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L172-L217
[ "def", "have_same_affine", "(", "one_img", ",", "another_img", ",", "only_check_3d", "=", "False", ")", ":", "img1", "=", "check_img", "(", "one_img", ")", "img2", "=", "check_img", "(", "another_img", ")", "ndim1", "=", "len", "(", "img1", ".", "shape", ")", "ndim2", "=", "len", "(", "img2", ".", "shape", ")", "if", "ndim1", "<", "3", ":", "raise", "ValueError", "(", "'Image {} has only {} dimensions, at least 3 dimensions is expected.'", ".", "format", "(", "repr_imgs", "(", "img1", ")", ",", "ndim1", ")", ")", "if", "ndim2", "<", "3", ":", "raise", "ValueError", "(", "'Image {} has only {} dimensions, at least 3 dimensions is expected.'", ".", "format", "(", "repr_imgs", "(", "img2", ")", ",", "ndim1", ")", ")", "affine1", "=", "img1", ".", "get_affine", "(", ")", "affine2", "=", "img2", ".", "get_affine", "(", ")", "if", "only_check_3d", ":", "affine1", "=", "affine1", "[", ":", "3", ",", ":", "3", "]", "affine2", "=", "affine2", "[", ":", "3", ",", ":", "3", "]", "try", ":", "return", "np", ".", "allclose", "(", "affine1", ",", "affine2", ")", "except", "ValueError", ":", "return", "False", "except", ":", "raise" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
_make_it_3d
Enforce that img is a 3D img-like object, if it is not, raise a TypeError. i.e., remove dimensions of size 1. Parameters ---------- img: img-like object Returns ------- 3D img-like object
boyle/nifti/check.py
def _make_it_3d(img): """Enforce that img is a 3D img-like object, if it is not, raise a TypeError. i.e., remove dimensions of size 1. Parameters ---------- img: img-like object Returns ------- 3D img-like object """ shape = get_shape(img) if len(shape) == 3: return img elif (len(shape) == 4 and shape[3] == 1): # "squeeze" the image. try: data = get_data(img) affine = img.get_affine() img = nib.Nifti1Image(data[:, :, :, 0], affine) except Exception as exc: raise Exception("Error making image '{}' a 3D volume file.".format(img)) from exc else: return img else: raise TypeError("A 3D image is expected, but an image with a shape of {} was given.".format(shape))
def _make_it_3d(img): """Enforce that img is a 3D img-like object, if it is not, raise a TypeError. i.e., remove dimensions of size 1. Parameters ---------- img: img-like object Returns ------- 3D img-like object """ shape = get_shape(img) if len(shape) == 3: return img elif (len(shape) == 4 and shape[3] == 1): # "squeeze" the image. try: data = get_data(img) affine = img.get_affine() img = nib.Nifti1Image(data[:, :, :, 0], affine) except Exception as exc: raise Exception("Error making image '{}' a 3D volume file.".format(img)) from exc else: return img else: raise TypeError("A 3D image is expected, but an image with a shape of {} was given.".format(shape))
[ "Enforce", "that", "img", "is", "a", "3D", "img", "-", "like", "object", "if", "it", "is", "not", "raise", "a", "TypeError", ".", "i", ".", "e", ".", "remove", "dimensions", "of", "size", "1", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L220-L247
[ "def", "_make_it_3d", "(", "img", ")", ":", "shape", "=", "get_shape", "(", "img", ")", "if", "len", "(", "shape", ")", "==", "3", ":", "return", "img", "elif", "(", "len", "(", "shape", ")", "==", "4", "and", "shape", "[", "3", "]", "==", "1", ")", ":", "# \"squeeze\" the image.", "try", ":", "data", "=", "get_data", "(", "img", ")", "affine", "=", "img", ".", "get_affine", "(", ")", "img", "=", "nib", ".", "Nifti1Image", "(", "data", "[", ":", ",", ":", ",", ":", ",", "0", "]", ",", "affine", ")", "except", "Exception", "as", "exc", ":", "raise", "Exception", "(", "\"Error making image '{}' a 3D volume file.\"", ".", "format", "(", "img", ")", ")", "from", "exc", "else", ":", "return", "img", "else", ":", "raise", "TypeError", "(", "\"A 3D image is expected, but an image with a shape of {} was given.\"", ".", "format", "(", "shape", ")", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
check_img
Check that image is a proper img. Turn filenames into objects. Parameters ---------- image: img-like object or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. make_it_3d: boolean, optional If True, check if the image is a 3D image and raise an error if not. Returns ------- result: nifti-like result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed that the returned object has get_data() and get_affine() methods.
boyle/nifti/check.py
def check_img(image, make_it_3d=False): """Check that image is a proper img. Turn filenames into objects. Parameters ---------- image: img-like object or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. make_it_3d: boolean, optional If True, check if the image is a 3D image and raise an error if not. Returns ------- result: nifti-like result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed that the returned object has get_data() and get_affine() methods. """ if isinstance(image, string_types): # a filename, load it if not op.exists(image): raise FileNotFound(image) try: img = nib.load(image) if make_it_3d: img = _make_it_3d(img) except Exception as exc: raise Exception('Error loading image file {}.'.format(image)) from exc else: return img elif isinstance(image, nib.Nifti1Image) or is_img(image): return image else: raise TypeError('Data given cannot be converted to a nifti' ' image: this object -"{}"- does not have' ' get_data or get_affine methods'.format(type(image)))
def check_img(image, make_it_3d=False): """Check that image is a proper img. Turn filenames into objects. Parameters ---------- image: img-like object or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. make_it_3d: boolean, optional If True, check if the image is a 3D image and raise an error if not. Returns ------- result: nifti-like result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed that the returned object has get_data() and get_affine() methods. """ if isinstance(image, string_types): # a filename, load it if not op.exists(image): raise FileNotFound(image) try: img = nib.load(image) if make_it_3d: img = _make_it_3d(img) except Exception as exc: raise Exception('Error loading image file {}.'.format(image)) from exc else: return img elif isinstance(image, nib.Nifti1Image) or is_img(image): return image else: raise TypeError('Data given cannot be converted to a nifti' ' image: this object -"{}"- does not have' ' get_data or get_affine methods'.format(type(image)))
[ "Check", "that", "image", "is", "a", "proper", "img", ".", "Turn", "filenames", "into", "objects", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L250-L292
[ "def", "check_img", "(", "image", ",", "make_it_3d", "=", "False", ")", ":", "if", "isinstance", "(", "image", ",", "string_types", ")", ":", "# a filename, load it", "if", "not", "op", ".", "exists", "(", "image", ")", ":", "raise", "FileNotFound", "(", "image", ")", "try", ":", "img", "=", "nib", ".", "load", "(", "image", ")", "if", "make_it_3d", ":", "img", "=", "_make_it_3d", "(", "img", ")", "except", "Exception", "as", "exc", ":", "raise", "Exception", "(", "'Error loading image file {}.'", ".", "format", "(", "image", ")", ")", "from", "exc", "else", ":", "return", "img", "elif", "isinstance", "(", "image", ",", "nib", ".", "Nifti1Image", ")", "or", "is_img", "(", "image", ")", ":", "return", "image", "else", ":", "raise", "TypeError", "(", "'Data given cannot be converted to a nifti'", "' image: this object -\"{}\"- does not have'", "' get_data or get_affine methods'", ".", "format", "(", "type", "(", "image", ")", ")", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
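A short sketch for check_img; the file names are hypothetical:

img = check_img('subj01_rest.nii.gz')   # a path is loaded with nibabel
same = check_img(img)                   # img-like objects pass through as-is
vol = check_img('subj01_t1.nii.gz', make_it_3d=True)  # squeezes a trailing singleton dimension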
valid
repr_imgs
Printing of img or imgs
boyle/nifti/check.py
def repr_imgs(imgs): """Printing of img or imgs""" if isinstance(imgs, string_types): return imgs if isinstance(imgs, collections.Iterable): return '[{}]'.format(', '.join(repr_imgs(img) for img in imgs)) # try get_filename try: filename = imgs.get_filename() if filename is not None: img_str = "{}('{}')".format(imgs.__class__.__name__, filename) else: img_str = "{}(shape={}, affine={})".format(imgs.__class__.__name__, repr(get_shape(imgs)), repr(imgs.get_affine())) except Exception as exc: log.error('Error reading attributes from img.get_filename()') return repr(imgs) else: return img_str
def repr_imgs(imgs): """Printing of img or imgs""" if isinstance(imgs, string_types): return imgs if isinstance(imgs, collections.Iterable): return '[{}]'.format(', '.join(repr_imgs(img) for img in imgs)) # try get_filename try: filename = imgs.get_filename() if filename is not None: img_str = "{}('{}')".format(imgs.__class__.__name__, filename) else: img_str = "{}(shape={}, affine={})".format(imgs.__class__.__name__, repr(get_shape(imgs)), repr(imgs.get_affine())) except Exception as exc: log.error('Error reading attributes from img.get_filename()') return repr(imgs) else: return img_str
[ "Printing", "of", "img", "or", "imgs" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L295-L316
[ "def", "repr_imgs", "(", "imgs", ")", ":", "if", "isinstance", "(", "imgs", ",", "string_types", ")", ":", "return", "imgs", "if", "isinstance", "(", "imgs", ",", "collections", ".", "Iterable", ")", ":", "return", "'[{}]'", ".", "format", "(", "', '", ".", "join", "(", "repr_imgs", "(", "img", ")", "for", "img", "in", "imgs", ")", ")", "# try get_filename", "try", ":", "filename", "=", "imgs", ".", "get_filename", "(", ")", "if", "filename", "is", "not", "None", ":", "img_str", "=", "\"{}('{}')\"", ".", "format", "(", "imgs", ".", "__class__", ".", "__name__", ",", "filename", ")", "else", ":", "img_str", "=", "\"{}(shape={}, affine={})\"", ".", "format", "(", "imgs", ".", "__class__", ".", "__name__", ",", "repr", "(", "get_shape", "(", "imgs", ")", ")", ",", "repr", "(", "imgs", ".", "get_affine", "(", ")", ")", ")", "except", "Exception", "as", "exc", ":", "log", ".", "error", "(", "'Error reading attributes from img.get_filename()'", ")", "return", "repr", "(", "imgs", ")", "else", ":", "return", "img_str" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
have_same_shape
Returns true if array1 and array2 have the same shapes, false otherwise. Parameters ---------- array1: numpy.ndarray array2: numpy.ndarray nd_to_check: int Number of the dimensions to check, i.e., if == 3 then will check only the 3 first numbers of array.shape. Returns ------- bool
boyle/nifti/check.py
def have_same_shape(array1, array2, nd_to_check=None): """ Returns true if array1 and array2 have the same shapes, false otherwise. Parameters ---------- array1: numpy.ndarray array2: numpy.ndarray nd_to_check: int Number of the dimensions to check, i.e., if == 3 then will check only the 3 first numbers of array.shape. Returns ------- bool """ shape1 = array1.shape shape2 = array2.shape if nd_to_check is not None: if len(shape1) < nd_to_check: msg = 'Number of dimensions to check {} is out of bounds for the shape of the first image: \n{}\n.'.format(nd_to_check, shape1) raise ValueError(msg) elif len(shape2) < nd_to_check: msg = 'Number of dimensions to check {} is out of bounds for the shape of the second image: \n{}\n.'.format(nd_to_check, shape2) raise ValueError(msg) shape1 = shape1[:nd_to_check] shape2 = shape2[:nd_to_check] return shape1 == shape2
def have_same_shape(array1, array2, nd_to_check=None): """ Returns true if array1 and array2 have the same shapes, false otherwise. Parameters ---------- array1: numpy.ndarray array2: numpy.ndarray nd_to_check: int Number of the dimensions to check, i.e., if == 3 then will check only the 3 first numbers of array.shape. Returns ------- bool """ shape1 = array1.shape shape2 = array2.shape if nd_to_check is not None: if len(shape1) < nd_to_check: msg = 'Number of dimensions to check {} is out of bounds for the shape of the first image: \n{}\n.'.format(nd_to_check, shape1) raise ValueError(msg) elif len(shape2) < nd_to_check: msg = 'Number of dimensions to check {} is out of bounds for the shape of the second image: \n{}\n.'.format(nd_to_check, shape2) raise ValueError(msg) shape1 = shape1[:nd_to_check] shape2 = shape2[:nd_to_check] return shape1 == shape2
[ "Returns", "true", "if", "array1", "and", "array2", "have", "the", "same", "shapes", "false", "otherwise", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L324-L354
[ "def", "have_same_shape", "(", "array1", ",", "array2", ",", "nd_to_check", "=", "None", ")", ":", "shape1", "=", "array1", ".", "shape", "shape2", "=", "array2", ".", "shape", "if", "nd_to_check", "is", "not", "None", ":", "if", "len", "(", "shape1", ")", "<", "nd_to_check", ":", "msg", "=", "'Number of dimensions to check {} is out of bounds for the shape of the first image: \\n{}\\n.'", ".", "format", "(", "shape1", ")", "raise", "ValueError", "(", "msg", ")", "elif", "len", "(", "shape2", ")", "<", "nd_to_check", ":", "msg", "=", "'Number of dimensions to check {} is out of bounds for the shape of the second image: \\n{}\\n.'", ".", "format", "(", "shape2", ")", "raise", "ValueError", "(", "msg", ")", "shape1", "=", "shape1", "[", ":", "nd_to_check", "]", "shape2", "=", "shape2", "[", ":", "nd_to_check", "]", "return", "shape1", "==", "shape2" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
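A short sketch for have_same_shape with numpy arrays; the sizes are illustrative:

import numpy as np

a = np.zeros((91, 109, 91, 120))  # 4D, e.g. an fMRI run
b = np.zeros((91, 109, 91))       # 3D, e.g. a brain mask
have_same_shape(a, b)                 # False: the shapes differ in length
have_same_shape(a, b, nd_to_check=3)  # True: the first three dims match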
valid
have_same_geometry
@param fname1: string File path of an image @param fname2: string File path of an image @return: bool True if both have the same geometry
boyle/nifti/check.py
def have_same_geometry(fname1, fname2): """ @param fname1: string File path of an image @param fname2: string File path of an image @return: bool True if both have the same geometry """ img1shape = nib.load(fname1).get_shape() img2shape = nib.load(fname2).get_shape() return img1shape == img2shape
def have_same_geometry(fname1, fname2): """ @param fname1: string File path of an image @param fname2: string File path of an image @return: bool True if both have the same geometry """ img1shape = nib.load(fname1).get_shape() img2shape = nib.load(fname2).get_shape() return img1shape == img2shape
[ "@param", "fname1", ":", "string", "File", "path", "of", "an", "image" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L357-L370
[ "def", "have_same_geometry", "(", "fname1", ",", "fname2", ")", ":", "img1shape", "=", "nib", ".", "load", "(", "fname1", ")", ".", "get_shape", "(", ")", "img2shape", "=", "nib", ".", "load", "(", "fname2", ")", ".", "get_shape", "(", ")", "return", "have_same_shape", "(", "img1shape", ",", "img2shape", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
have_same_spatial_geometry
@param fname1: string File path of an image @param fname2: string File path of an image @return: bool True if both have the same spatial geometry (the first three dimensions)
boyle/nifti/check.py
def have_same_spatial_geometry(fname1, fname2): """ @param fname1: string File path of an image @param fname2: string File path of an image @return: bool True if both have the same spatial geometry (the first three dimensions) """ img1shape = nib.load(fname1).get_shape() img2shape = nib.load(fname2).get_shape() return img1shape[:3] == img2shape[:3]
def have_same_spatial_geometry(fname1, fname2): """ @param fname1: string File path of an image @param fname2: string File path of an image @return: bool True if both have the same spatial geometry (the first three dimensions) """ img1shape = nib.load(fname1).get_shape() img2shape = nib.load(fname2).get_shape() return img1shape[:3] == img2shape[:3]
[ "@param", "fname1", ":", "string", "File", "path", "of", "an", "image" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/check.py#L373-L386
[ "def", "have_same_spatial_geometry", "(", "fname1", ",", "fname2", ")", ":", "img1shape", "=", "nib", ".", "load", "(", "fname1", ")", ".", "get_shape", "(", ")", "img2shape", "=", "nib", ".", "load", "(", "fname2", ")", ".", "get_shape", "(", ")", "return", "img1shape", "[", ":", "3", "]", "==", "img2shape", "[", ":", "3", "]" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
dir_match
Create a list of the file names within wd that match regex (using re.match). The returned names are relative to wd, without a path prefix. @param regex: string @param wd: string working directory @return:
boyle/files/search.py
def dir_match(regex, wd=os.curdir): """Create a list of the file names within wd that match regex (using re.match). The returned names are relative to wd, without a path prefix. @param regex: string @param wd: string working directory @return: """ ls = os.listdir(wd) filt = re.compile(regex).match return filter_list(ls, filt)
def dir_match(regex, wd=os.curdir): """Create a list of the file names within wd that match regex (using re.match). The returned names are relative to wd, without a path prefix. @param regex: string @param wd: string working directory @return: """ ls = os.listdir(wd) filt = re.compile(regex).match return filter_list(ls, filt)
[ "Create", "a", "list", "of", "regex", "matches", "that", "result", "from", "the", "match_regex", "of", "all", "file", "names", "within", "wd", ".", "The", "list", "of", "files", "will", "have", "wd", "as", "path", "prefix", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L42-L55
[ "def", "dir_match", "(", "regex", ",", "wd", "=", "os", ".", "curdir", ")", ":", "ls", "=", "os", ".", "listdir", "(", "wd", ")", "filt", "=", "re", ".", "compile", "(", "regex", ")", ".", "match", "return", "filter_list", "(", "ls", ",", "filt", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
recursive_dir_match
Returns absolute paths of folders that match the regex within folder_path and all its children folders. Note: The regex matching is done using the match function of the re module. Parameters ---------- folder_path: string regex: string Returns ------- A list of strings.
boyle/files/search.py
def recursive_dir_match(folder_path, regex=''): """ Returns absolute paths of folders that match the regex within folder_path and all its children folders. Note: The regex matching is done using the match function of the re module. Parameters ---------- folder_path: string regex: string Returns ------- A list of strings. """ outlist = [] for root, dirs, files in os.walk(folder_path): outlist.extend([op.join(root, f) for f in dirs if re.match(regex, f)]) return outlist
def recursive_dir_match(folder_path, regex=''): """ Returns absolute paths of folders that match the regex within folder_path and all its children folders. Note: The regex matching is done using the match function of the re module. Parameters ---------- folder_path: string regex: string Returns ------- A list of strings. """ outlist = [] for root, dirs, files in os.walk(folder_path): outlist.extend([op.join(root, f) for f in dirs if re.match(regex, f)]) return outlist
[ "Returns", "absolute", "paths", "of", "folders", "that", "match", "the", "regex", "within", "folder_path", "and", "all", "its", "children", "folders", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L58-L81
[ "def", "recursive_dir_match", "(", "folder_path", ",", "regex", "=", "''", ")", ":", "outlist", "=", "[", "]", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder_path", ")", ":", "outlist", ".", "extend", "(", "[", "op", ".", "join", "(", "root", ",", "f", ")", "for", "f", "in", "dirs", "if", "re", ".", "match", "(", "regex", ",", "f", ")", "]", ")", "return", "outlist" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
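A usage sketch for recursive_dir_match; the study folder and the session naming convention are assumptions:

# Paths of every folder whose name starts with 'ses-' at any depth
# below the (hypothetical) root '/data/study'.
session_dirs = recursive_dir_match('/data/study', regex=r'ses-\d+')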
valid
get_file_list
Creates a list of the files within file_dir whose names match regex. The list of files will have file_dir as path prefix. Parameters ---------- @param file_dir: @param regex: Returns -------- List of paths to files that match the regex
boyle/files/search.py
def get_file_list(file_dir, regex=''):
    """
    Creates a list of the files within file_dir whose names match regex.
    The list of files will have file_dir as path prefix.

    Parameters
    ----------
    @param file_dir:

    @param regex:

    Returns
    --------
    List of paths to files that match the regex
    """
    file_list = os.listdir(file_dir)
    file_list.sort()

    if regex:
        file_list = search_list(file_list, regex)

    file_list = [op.join(file_dir, fname) for fname in file_list]

    return file_list
def get_file_list(file_dir, regex=''):
    """
    Creates a list of the files within file_dir whose names match regex.
    The list of files will have file_dir as path prefix.

    Parameters
    ----------
    @param file_dir:

    @param regex:

    Returns
    --------
    List of paths to files that match the regex
    """
    file_list = os.listdir(file_dir)
    file_list.sort()

    if regex:
        file_list = search_list(file_list, regex)

    file_list = [op.join(file_dir, fname) for fname in file_list]

    return file_list
[ "Creates", "a", "list", "of", "files", "that", "match", "the", "search_regex", "within", "file_dir", ".", "The", "list", "of", "files", "will", "have", "file_dir", "as", "path", "prefix", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L84-L107
[ "def", "get_file_list", "(", "file_dir", ",", "regex", "=", "''", ")", ":", "file_list", "=", "os", ".", "listdir", "(", "file_dir", ")", "file_list", ".", "sort", "(", ")", "if", "regex", ":", "file_list", "=", "search_list", "(", "file_list", ",", "regex", ")", "file_list", "=", "[", "op", ".", "join", "(", "file_dir", ",", "fname", ")", "for", "fname", "in", "file_list", "]", "return", "file_list" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
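A usage sketch for get_file_list, assuming search_list keeps the names for which the regex is found (re.search semantics); the folder is hypothetical:

# Sorted full paths of the NIfTI files directly inside one folder.
nii_files = get_file_list('/data/study/sub-01/anat', regex=r'\.nii(\.gz)?$')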
valid
recursive_find_search
Returns absolute paths of files that match the regex within folder_path and all its children folders. Note: The regex matching is done using the search function of the re module. Parameters ---------- folder_path: string regex: string Returns ------- A list of strings.
boyle/files/search.py
def recursive_find_search(folder_path, regex=''):
    """
    Returns absolute paths of files that match the regex within
    folder_path and all its children folders.

    Note: The regex matching is done using the search function
    of the re module.

    Parameters
    ----------
    folder_path: string

    regex: string

    Returns
    -------
    A list of strings.
    """
    outlist = []
    for root, dirs, files in os.walk(folder_path):
        outlist.extend([op.join(root, f) for f in files
                        if re.search(regex, f)])

    return outlist
def recursive_find_search(folder_path, regex=''):
    """
    Returns absolute paths of files that match the regex within
    folder_path and all its children folders.

    Note: The regex matching is done using the search function
    of the re module.

    Parameters
    ----------
    folder_path: string

    regex: string

    Returns
    -------
    A list of strings.
    """
    outlist = []
    for root, dirs, files in os.walk(folder_path):
        outlist.extend([op.join(root, f) for f in files
                        if re.search(regex, f)])

    return outlist
[ "Returns", "absolute", "paths", "of", "files", "that", "match", "the", "regex", "within", "file_dir", "and", "all", "its", "children", "folders", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L159-L183
[ "def", "recursive_find_search", "(", "folder_path", ",", "regex", "=", "''", ")", ":", "outlist", "=", "[", "]", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder_path", ")", ":", "outlist", ".", "extend", "(", "[", "op", ".", "join", "(", "root", ",", "f", ")", "for", "f", "in", "files", "if", "re", ".", "search", "(", "regex", ",", "f", ")", "]", ")", "return", "outlist" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
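A usage sketch for recursive_find_search with a hypothetical tree; because re.search is used, the pattern may occur anywhere in the file name, so an end anchor is useful for extensions:

# Paths of every plain or compressed NIfTI file below the root.
hits = recursive_find_search('/data/study', regex=r'\.nii(\.gz)?$')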
valid
iter_recursive_find
Returns absolute paths of files that match the regexes within folder_path and all its children folders. This is an iterator function that will use yield to return each set of file_paths in one iteration. Will only yield a value if all of the regexes match a file name. Note: The regex matching is done using the search function of the re module. Parameters ---------- folder_path: string regex: strings Returns ------- A list of strings.
boyle/files/search.py
def iter_recursive_find(folder_path, *regex):
    """
    Returns absolute paths of files that match the regexes within
    folder_path and all its children folders.

    This is an iterator function that will use yield to return each set
    of file_paths in one iteration.

    Will only yield a value if all of the regexes match a file name.

    Note: The regex matching is done using the search function
    of the re module.

    Parameters
    ----------
    folder_path: string

    regex: strings

    Returns
    -------
    A list of strings.
    """
    for root, dirs, files in os.walk(folder_path):
        if len(files) > 0:
            outlist = []
            for f in files:
                for reg in regex:
                    if re.search(reg, f):
                        outlist.append(op.join(root, f))
            if len(outlist) == len(regex):
                yield outlist
def iter_recursive_find(folder_path, *regex):
    """
    Returns absolute paths of files that match the regexes within
    folder_path and all its children folders.

    This is an iterator function that will use yield to return each set
    of file_paths in one iteration.

    Will only yield a value if all of the regexes match a file name.

    Note: The regex matching is done using the search function
    of the re module.

    Parameters
    ----------
    folder_path: string

    regex: strings

    Returns
    -------
    A list of strings.
    """
    for root, dirs, files in os.walk(folder_path):
        if len(files) > 0:
            outlist = []
            for f in files:
                for reg in regex:
                    if re.search(reg, f):
                        outlist.append(op.join(root, f))
            if len(outlist) == len(regex):
                yield outlist
[ "Returns", "absolute", "paths", "of", "files", "that", "match", "the", "regexs", "within", "folder_path", "and", "all", "its", "children", "folders", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L186-L217
[ "def", "iter_recursive_find", "(", "folder_path", ",", "*", "regex", ")", ":", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "folder_path", ")", ":", "if", "len", "(", "files", ")", ">", "0", ":", "outlist", "=", "[", "]", "for", "f", "in", "files", ":", "for", "reg", "in", "regex", ":", "if", "re", ".", "search", "(", "reg", ",", "f", ")", ":", "outlist", ".", "append", "(", "op", ".", "join", "(", "root", ",", "f", ")", ")", "if", "len", "(", "outlist", ")", "==", "len", "(", "regex", ")", ":", "yield", "outlist" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
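A usage sketch for iter_recursive_find with hypothetical patterns. The generator yields once per folder in which the number of matches equals the number of patterns, and the yielded paths follow directory listing order, not the order of the patterns:

# One ['.../T1.nii', '.../mask.nii'] pair per folder containing both.
for pair in iter_recursive_find('/data/study', r'T1\.nii$', r'mask\.nii$'):
    print(pair)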
valid
get_all_files
Generator that loops through all absolute paths of the files within folder Parameters ---------- folder: str Root folder start point for recursive search. Yields ------ fpath: str Absolute path of one file in the folders
boyle/files/search.py
def get_all_files(folder): """ Generator that loops through all absolute paths of the files within folder Parameters ---------- folder: str Root folder start point for recursive search. Yields ------ fpath: str Absolute path of one file in the folders """ for path, dirlist, filelist in os.walk(folder): for fn in filelist: yield op.join(path, fn)
def get_all_files(folder): """ Generator that loops through all absolute paths of the files within folder Parameters ---------- folder: str Root folder start point for recursive search. Yields ------ fpath: str Absolute path of one file in the folders """ for path, dirlist, filelist in os.walk(folder): for fn in filelist: yield op.join(path, fn)
[ "Generator", "that", "loops", "through", "all", "absolute", "paths", "of", "the", "files", "within", "folder" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L220-L236
[ "def", "get_all_files", "(", "folder", ")", ":", "for", "path", ",", "dirlist", ",", "filelist", "in", "os", ".", "walk", "(", "folder", ")", ":", "for", "fn", "in", "filelist", ":", "yield", "op", ".", "join", "(", "path", ",", "fn", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
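A usage sketch for get_all_files; since it is a generator, the tree is traversed lazily and nothing is held in memory beyond the current path:

import os

# Total size in bytes of everything below the (hypothetical) root.
total_bytes = sum(os.path.getsize(f) for f in get_all_files('/data/study'))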
valid
recursive_glob
Uses glob to find all files or folders that match the regex starting from the base_directory. Parameters ---------- base_directory: str regex: str Returns ------- files: list
boyle/files/search.py
def recursive_glob(base_directory, regex=''): """ Uses glob to find all files or folders that match the regex starting from the base_directory. Parameters ---------- base_directory: str regex: str Returns ------- files: list """ files = glob(op.join(base_directory, regex)) for path, dirlist, filelist in os.walk(base_directory): for dir_name in dirlist: files.extend(glob(op.join(path, dir_name, regex))) return files
def recursive_glob(base_directory, regex=''): """ Uses glob to find all files or folders that match the regex starting from the base_directory. Parameters ---------- base_directory: str regex: str Returns ------- files: list """ files = glob(op.join(base_directory, regex)) for path, dirlist, filelist in os.walk(base_directory): for dir_name in dirlist: files.extend(glob(op.join(path, dir_name, regex))) return files
[ "Uses", "glob", "to", "find", "all", "files", "or", "folders", "that", "match", "the", "regex", "starting", "from", "the", "base_directory", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L253-L274
[ "def", "recursive_glob", "(", "base_directory", ",", "regex", "=", "''", ")", ":", "files", "=", "glob", "(", "op", ".", "join", "(", "base_directory", ",", "regex", ")", ")", "for", "path", ",", "dirlist", ",", "filelist", "in", "os", ".", "walk", "(", "base_directory", ")", ":", "for", "dir_name", "in", "dirlist", ":", "files", ".", "extend", "(", "glob", "(", "op", ".", "join", "(", "path", ",", "dir_name", ",", "regex", ")", ")", ")", "return", "files" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
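A usage sketch for recursive_glob. Despite the parameter name, regex here is a glob pattern, not a regular expression. Since Python 3.5 the stdlib offers a comparable one-liner via the '**' wildcard, shown for comparison:

import os
from glob import glob

# Both calls gather *.nii.gz files at any depth below the root.
files = recursive_glob('/data/study', regex='*.nii.gz')
files_stdlib = glob(os.path.join('/data/study', '**', '*.nii.gz'),
                    recursive=True)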
valid
get_last_file
Return the path to the latest file in `input_dir`. The `key` argument defines which information to use for sorting the list of files, could be: - creation date: os.path.getctime, - modification date: os.path.getmtime, etc. Parameters ---------- input_dir: str Path to the folder where to perform the `glob`. glob_pattern: str `glob` Pattern to filter the files in `input_dir`. key: callable Sorting key function reverse: bool Set to True if you want the sorting to be in decreasing order, False otherwise. Returns ------- latest_filepath: str Path to the latest modified file in `input_dir`.
boyle/files/search.py
def get_last_file(input_dir, glob_pattern='*', key=op.getctime, reverse=True):
    """ Return the path to the latest file in `input_dir`.
    The `key` argument defines which information to use for sorting
    the list of files, could be:
        - creation date: os.path.getctime,
        - modification date: os.path.getmtime, etc.

    Parameters
    ----------
    input_dir: str
        Path to the folder where to perform the `glob`.

    glob_pattern: str
        `glob` Pattern to filter the files in `input_dir`.

    key: callable
        Sorting key function

    reverse: bool
        Set to True if you want the sorting to be in decreasing order,
        False otherwise.

    Returns
    -------
    latest_filepath: str
        Path to the latest modified file in `input_dir`.
    """
    files = glob(op.join(input_dir, glob_pattern))
    files.sort(key=key, reverse=reverse)
    return files[0]
def get_last_file(input_dir, glob_pattern='*', key=op.getctime, reverse=True):
    """ Return the path to the latest file in `input_dir`.
    The `key` argument defines which information to use for sorting
    the list of files, could be:
        - creation date: os.path.getctime,
        - modification date: os.path.getmtime, etc.

    Parameters
    ----------
    input_dir: str
        Path to the folder where to perform the `glob`.

    glob_pattern: str
        `glob` Pattern to filter the files in `input_dir`.

    key: callable
        Sorting key function

    reverse: bool
        Set to True if you want the sorting to be in decreasing order,
        False otherwise.

    Returns
    -------
    latest_filepath: str
        Path to the latest modified file in `input_dir`.
    """
    files = glob(op.join(input_dir, glob_pattern))
    files.sort(key=key, reverse=reverse)
    return files[0]
[ "Return", "the", "path", "to", "the", "latest", "file", "in", "input_dir", ".", "The", "key", "argument", "defines", "which", "information", "to", "use", "for", "sorting", "the", "list", "of", "files", "could", "be", ":", "-", "creation", "date", ":", "os", ".", "path", ".", "getctime", "-", "modification", "date", ":", "os", ".", "path", ".", "getmtime", "etc", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/files/search.py#L285-L315
[ "def", "get_last_file", "(", "input_dir", ",", "glob_pattern", "=", "'*'", ",", "key", "=", "op", ".", "getctime", ",", "reverse", "=", "True", ")", ":", "files", "=", "glob", "(", "op", ".", "join", "(", "input_dir", ",", "glob_pattern", ")", ")", "files", ".", "sort", "(", "key", "=", "key", ",", "reverse", "=", "reverse", ")", "return", "files", "[", "0", "]" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
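A usage sketch for get_last_file; the directory is hypothetical. Passing os.path.getmtime sorts by modification time instead of the default creation time, and reverse=True puts the newest file first:

import os.path as op

# Most recently modified .log file in the folder.
newest = get_last_file('/var/log/myapp', glob_pattern='*.log',
                       key=op.getmtime)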
valid
compose_err_msg
Append key-value pairs to msg, for display. Parameters ---------- msg: string arbitrary message kwargs: dict arbitrary dictionary Returns ------- updated_msg: string msg, with "key: value" appended. Only string values are appended. Example ------- >>> compose_err_msg('Error message with arguments...', arg_num=123, \ arg_str='filename.nii', arg_bool=True) 'Error message with arguments...\\narg_str: filename.nii' >>>
boyle/exceptions.py
def compose_err_msg(msg, **kwargs): """Append key-value pairs to msg, for display. Parameters ---------- msg: string arbitrary message kwargs: dict arbitrary dictionary Returns ------- updated_msg: string msg, with "key: value" appended. Only string values are appended. Example ------- >>> compose_err_msg('Error message with arguments...', arg_num=123, \ arg_str='filename.nii', arg_bool=True) 'Error message with arguments...\\narg_str: filename.nii' >>> """ updated_msg = msg for k, v in sorted(kwargs.items()): if isinstance(v, _basestring): # print only str-like arguments updated_msg += "\n" + k + ": " + v return updated_msg
def compose_err_msg(msg, **kwargs): """Append key-value pairs to msg, for display. Parameters ---------- msg: string arbitrary message kwargs: dict arbitrary dictionary Returns ------- updated_msg: string msg, with "key: value" appended. Only string values are appended. Example ------- >>> compose_err_msg('Error message with arguments...', arg_num=123, \ arg_str='filename.nii', arg_bool=True) 'Error message with arguments...\\narg_str: filename.nii' >>> """ updated_msg = msg for k, v in sorted(kwargs.items()): if isinstance(v, _basestring): # print only str-like arguments updated_msg += "\n" + k + ": " + v return updated_msg
[ "Append", "key", "-", "value", "pairs", "to", "msg", "for", "display", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/exceptions.py#L9-L36
[ "def", "compose_err_msg", "(", "msg", ",", "*", "*", "kwargs", ")", ":", "updated_msg", "=", "msg", "for", "k", ",", "v", "in", "sorted", "(", "kwargs", ".", "items", "(", ")", ")", ":", "if", "isinstance", "(", "v", ",", "_basestring", ")", ":", "# print only str-like arguments", "updated_msg", "+=", "\"\\n\"", "+", "k", "+", "\": \"", "+", "v", "return", "updated_msg" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
group_dicom_files
Gets a list of DICOM file absolute paths and returns a dict of groups of DICOM file paths. Each group contains a set of DICOM files that have exactly the same values for the given header fields. Parameters ---------- dicom_file_paths: list of str List or set of DICOM file paths header_fields: list of str List of header field names to check on the comparisons of the DICOM files. Returns ------- dict of DicomFileSets The key is one filepath representing the group (the first found).
boyle/dicom/comparison.py
def group_dicom_files(dicom_file_paths, header_fields):
    """
    Gets a list of DICOM file absolute paths and returns a dict of groups of
    DICOM file paths. Each group contains a set of DICOM files that have
    exactly the same values for the given header fields.

    Parameters
    ----------
    dicom_file_paths: list of str
        List or set of DICOM file paths

    header_fields: list of str
        List of header field names to check on the comparisons of
        the DICOM files.

    Returns
    -------
    dict of DicomFileSets
        The key is one filepath representing the group (the first found).
    """
    dist = SimpleDicomFileDistance(field_weights=header_fields)

    path_list = dicom_file_paths.copy()

    path_groups = DefaultOrderedDict(DicomFileSet)

    while len(path_list) > 0:
        file_path1 = path_list.pop()
        file_subgroup = [file_path1]

        dist.set_dicom_file1(file_path1)
        j = len(path_list)-1
        while j >= 0:
            file_path2 = path_list[j]
            dist.set_dicom_file2(file_path2)

            if dist.transform():
                file_subgroup.append(file_path2)
                path_list.pop(j)

            j -= 1
        path_groups[file_path1].from_set(file_subgroup, check_if_dicoms=False)

    return path_groups
def group_dicom_files(dicom_file_paths, header_fields):
    """
    Gets a list of DICOM file absolute paths and returns a dict of groups of
    DICOM file paths. Each group contains a set of DICOM files that have
    exactly the same values for the given header fields.

    Parameters
    ----------
    dicom_file_paths: list of str
        List or set of DICOM file paths

    header_fields: list of str
        List of header field names to check on the comparisons of
        the DICOM files.

    Returns
    -------
    dict of DicomFileSets
        The key is one filepath representing the group (the first found).
    """
    dist = SimpleDicomFileDistance(field_weights=header_fields)

    path_list = dicom_file_paths.copy()

    path_groups = DefaultOrderedDict(DicomFileSet)

    while len(path_list) > 0:
        file_path1 = path_list.pop()
        file_subgroup = [file_path1]

        dist.set_dicom_file1(file_path1)
        j = len(path_list)-1
        while j >= 0:
            file_path2 = path_list[j]
            dist.set_dicom_file2(file_path2)

            if dist.transform():
                file_subgroup.append(file_path2)
                path_list.pop(j)

            j -= 1
        path_groups[file_path1].from_set(file_subgroup, check_if_dicoms=False)

    return path_groups
[ "Gets", "a", "list", "of", "DICOM", "file", "absolute", "paths", "and", "returns", "a", "list", "of", "lists", "of", "DICOM", "file", "paths", ".", "Each", "group", "contains", "a", "set", "of", "DICOM", "files", "that", "have", "exactly", "the", "same", "headers", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L209-L251
[ "def", "group_dicom_files", "(", "dicom_file_paths", ",", "header_fields", ")", ":", "dist", "=", "SimpleDicomFileDistance", "(", "field_weights", "=", "header_fields", ")", "path_list", "=", "dicom_file_paths", ".", "copy", "(", ")", "path_groups", "=", "DefaultOrderedDict", "(", "DicomFileSet", ")", "while", "len", "(", "path_list", ")", ">", "0", ":", "file_path1", "=", "path_list", ".", "pop", "(", ")", "file_subgroup", "=", "[", "file_path1", "]", "dist", ".", "set_dicom_file1", "(", "file_path1", ")", "j", "=", "len", "(", "path_list", ")", "-", "1", "while", "j", ">=", "0", ":", "file_path2", "=", "path_list", "[", "j", "]", "dist", ".", "set_dicom_file2", "(", "file_path2", ")", "if", "dist", ".", "transform", "(", ")", ":", "file_subgroup", ".", "append", "(", "file_path2", ")", "path_list", ".", "pop", "(", "j", ")", "j", "-=", "1", "path_groups", "[", "file_path1", "]", ".", "from_set", "(", "file_subgroup", ",", "check_if_dicoms", "=", "False", ")", "return", "path_groups" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
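A usage sketch for group_dicom_files; the paths and the choice of header fields are assumptions. Each key of the returned dict is the first file found for its group:

# Group a flat list of DICOM paths by patient and series.
paths = ['/incoming/img0001.dcm', '/incoming/img0002.dcm']  # hypothetical
groups = group_dicom_files(paths, header_fields=['PatientID',
                                                 'SeriesInstanceUID'])
for key_file in groups:
    print(key_file)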
valid
copy_groups_to_folder
Copy the DICOM file groups to folder_path. Each group will be copied into a subfolder named after the value of groupby_field_name. Parameters ---------- dicom_groups: boyle.dicom.sets.DicomFileSet folder_path: str Path where the DICOM files will be copied to. groupby_field_name: list of str DICOM field names. The values of these fields will name the nested group folders.
boyle/dicom/comparison.py
def copy_groups_to_folder(dicom_groups, folder_path, groupby_field_name):
    """Copy the DICOM file groups to folder_path. Each group will be copied
    into a subfolder named after the value of groupby_field_name.

    Parameters
    ----------
    dicom_groups: boyle.dicom.sets.DicomFileSet

    folder_path: str
        Path where the DICOM files will be copied to.

    groupby_field_name: list of str
        DICOM field names. The values of these fields will name the
        nested group folders.
    """
    if dicom_groups is None or not dicom_groups:
        raise ValueError('Expected a boyle.dicom.sets.DicomFileSet.')

    if not os.path.exists(folder_path):
        os.makedirs(folder_path, exist_ok=False)

    for dcmg in dicom_groups:
        if groupby_field_name is not None and len(groupby_field_name) > 0:
            dfile = DicomFile(dcmg)
            dir_name = ''
            for att in groupby_field_name:
                dir_name = os.path.join(dir_name, dfile.get_attributes(att))
            dir_name = str(dir_name)
        else:
            dir_name = os.path.basename(dcmg)

        group_folder = os.path.join(folder_path, dir_name)
        os.makedirs(group_folder, exist_ok=False)

        log.debug('Copying files to {}.'.format(group_folder))

        import shutil
        dcm_files = dicom_groups[dcmg]

        for srcf in dcm_files:
            destf = os.path.join(group_folder, os.path.basename(srcf))
            while os.path.exists(destf):
                destf += '+'

            shutil.copy2(srcf, destf)
def copy_groups_to_folder(dicom_groups, folder_path, groupby_field_name):
    """Copy the DICOM file groups to folder_path. Each group will be copied
    into a subfolder named after the value of groupby_field_name.

    Parameters
    ----------
    dicom_groups: boyle.dicom.sets.DicomFileSet

    folder_path: str
        Path where the DICOM files will be copied to.

    groupby_field_name: list of str
        DICOM field names. The values of these fields will name the
        nested group folders.
    """
    if dicom_groups is None or not dicom_groups:
        raise ValueError('Expected a boyle.dicom.sets.DicomFileSet.')

    if not os.path.exists(folder_path):
        os.makedirs(folder_path, exist_ok=False)

    for dcmg in dicom_groups:
        if groupby_field_name is not None and len(groupby_field_name) > 0:
            dfile = DicomFile(dcmg)
            dir_name = ''
            for att in groupby_field_name:
                dir_name = os.path.join(dir_name, dfile.get_attributes(att))
            dir_name = str(dir_name)
        else:
            dir_name = os.path.basename(dcmg)

        group_folder = os.path.join(folder_path, dir_name)
        os.makedirs(group_folder, exist_ok=False)

        log.debug('Copying files to {}.'.format(group_folder))

        import shutil
        dcm_files = dicom_groups[dcmg]

        for srcf in dcm_files:
            destf = os.path.join(group_folder, os.path.basename(srcf))
            while os.path.exists(destf):
                destf += '+'

            shutil.copy2(srcf, destf)
[ "Copy", "the", "DICOM", "file", "groups", "to", "folder_path", ".", "Each", "group", "will", "be", "copied", "into", "a", "subfolder", "with", "named", "given", "by", "groupby_field", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L254-L296
[ "def", "copy_groups_to_folder", "(", "dicom_groups", ",", "folder_path", ",", "groupby_field_name", ")", ":", "if", "dicom_groups", "is", "None", "or", "not", "dicom_groups", ":", "raise", "ValueError", "(", "'Expected a boyle.dicom.sets.DicomFileSet.'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "folder_path", ")", ":", "os", ".", "makedirs", "(", "folder_path", ",", "exist_ok", "=", "False", ")", "for", "dcmg", "in", "dicom_groups", ":", "if", "groupby_field_name", "is", "not", "None", "and", "len", "(", "groupby_field_name", ")", ">", "0", ":", "dfile", "=", "DicomFile", "(", "dcmg", ")", "dir_name", "=", "''", "for", "att", "in", "groupby_field_name", ":", "dir_name", "=", "os", ".", "path", ".", "join", "(", "dir_name", ",", "dfile", ".", "get_attributes", "(", "att", ")", ")", "dir_name", "=", "str", "(", "dir_name", ")", "else", ":", "dir_name", "=", "os", ".", "path", ".", "basename", "(", "dcmg", ")", "group_folder", "=", "os", ".", "path", ".", "join", "(", "folder_path", ",", "dir_name", ")", "os", ".", "makedirs", "(", "group_folder", ",", "exist_ok", "=", "False", ")", "log", ".", "debug", "(", "'Copying files to {}.'", ".", "format", "(", "group_folder", ")", ")", "import", "shutil", "dcm_files", "=", "dicom_groups", "[", "dcmg", "]", "for", "srcf", "in", "dcm_files", ":", "destf", "=", "os", ".", "path", ".", "join", "(", "group_folder", ",", "os", ".", "path", ".", "basename", "(", "srcf", ")", ")", "while", "os", ".", "path", ".", "exists", "(", "destf", ")", ":", "destf", "+=", "'+'", "shutil", ".", "copy2", "(", "srcf", ",", "destf", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
calculate_file_distances
Calculates the DicomFileDistance between all files in dicom_files, using a weighted Levenshtein measure between all field names in field_weights and their corresponding weights. Parameters ---------- dicom_files: iterable of str DICOM file paths field_weights: dict of str to float A dict with header field names to float scalar values, that indicate a distance measure ratio for the Levenshtein distance averaging of all the header field names in it. e.g., {'PatientID': 1} dist_method_cls: DicomFileDistance class Distance method object to compare the files. If None, the default DicomFileDistance method using Levenshtein distance between the field_weights will be used. kwargs: DicomFileDistance instantiation named arguments Apart from the field_weights argument. Returns ------- file_dists: np.ndarray or scipy.sparse.lil_matrix of shape NxN Levenshtein distances between each of the N items in dicom_files.
boyle/dicom/comparison.py
def calculate_file_distances(dicom_files, field_weights=None,
                             dist_method_cls=None, **kwargs):
    """
    Calculates the DicomFileDistance between all files in dicom_files, using a
    weighted Levenshtein measure between all field names in field_weights and
    their corresponding weights.

    Parameters
    ----------
    dicom_files: iterable of str
        DICOM file paths

    field_weights: dict of str to float
        A dict with header field names to float scalar values, that
        indicate a distance measure ratio for the Levenshtein distance
        averaging of all the header field names in it. e.g., {'PatientID': 1}

    dist_method_cls: DicomFileDistance class
        Distance method object to compare the files.
        If None, the default DicomFileDistance method using Levenshtein
        distance between the field_weights will be used.

    kwargs: DicomFileDistance instantiation named arguments
        Apart from the field_weights argument.

    Returns
    -------
    file_dists: np.ndarray or scipy.sparse.lil_matrix of shape NxN
        Levenshtein distances between each of the N items in dicom_files.
    """
    if dist_method_cls is None:
        dist_method = LevenshteinDicomFileDistance(field_weights)
    else:
        try:
            dist_method = dist_method_cls(field_weights=field_weights, **kwargs)
        except Exception:
            log.exception('Could not instantiate {} object with field_weights '
                          'and {}'.format(dist_method_cls, kwargs))
            # re-raise: without a dist_method there is nothing to compute
            raise

    dist_dtype = np.float16
    n_files = len(dicom_files)

    try:
        file_dists = np.zeros((n_files, n_files), dtype=dist_dtype)
    except MemoryError as mee:
        import scipy.sparse
        file_dists = scipy.sparse.lil_matrix((n_files, n_files),
                                             dtype=dist_dtype)

    for idxi in range(n_files):
        dist_method.set_dicom_file1(dicom_files[idxi])

        for idxj in range(idxi+1, n_files):
            dist_method.set_dicom_file2(dicom_files[idxj])

            if idxi != idxj:
                file_dists[idxi, idxj] = dist_method.transform()

    return file_dists
def calculate_file_distances(dicom_files, field_weights=None,
                             dist_method_cls=None, **kwargs):
    """
    Calculates the DicomFileDistance between all files in dicom_files, using a
    weighted Levenshtein measure between all field names in field_weights and
    their corresponding weights.

    Parameters
    ----------
    dicom_files: iterable of str
        DICOM file paths

    field_weights: dict of str to float
        A dict with header field names to float scalar values, that
        indicate a distance measure ratio for the Levenshtein distance
        averaging of all the header field names in it. e.g., {'PatientID': 1}

    dist_method_cls: DicomFileDistance class
        Distance method object to compare the files.
        If None, the default DicomFileDistance method using Levenshtein
        distance between the field_weights will be used.

    kwargs: DicomFileDistance instantiation named arguments
        Apart from the field_weights argument.

    Returns
    -------
    file_dists: np.ndarray or scipy.sparse.lil_matrix of shape NxN
        Levenshtein distances between each of the N items in dicom_files.
    """
    if dist_method_cls is None:
        dist_method = LevenshteinDicomFileDistance(field_weights)
    else:
        try:
            dist_method = dist_method_cls(field_weights=field_weights, **kwargs)
        except Exception:
            log.exception('Could not instantiate {} object with field_weights '
                          'and {}'.format(dist_method_cls, kwargs))
            # re-raise: without a dist_method there is nothing to compute
            raise

    dist_dtype = np.float16
    n_files = len(dicom_files)

    try:
        file_dists = np.zeros((n_files, n_files), dtype=dist_dtype)
    except MemoryError as mee:
        import scipy.sparse
        file_dists = scipy.sparse.lil_matrix((n_files, n_files),
                                             dtype=dist_dtype)

    for idxi in range(n_files):
        dist_method.set_dicom_file1(dicom_files[idxi])

        for idxj in range(idxi+1, n_files):
            dist_method.set_dicom_file2(dicom_files[idxj])

            if idxi != idxj:
                file_dists[idxi, idxj] = dist_method.transform()

    return file_dists
[ "Calculates", "the", "DicomFileDistance", "between", "all", "files", "in", "dicom_files", "using", "an", "weighted", "Levenshtein", "measure", "between", "all", "field", "names", "in", "field_weights", "and", "their", "corresponding", "weights", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L299-L357
[ "def", "calculate_file_distances", "(", "dicom_files", ",", "field_weights", "=", "None", ",", "dist_method_cls", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "dist_method_cls", "is", "None", ":", "dist_method", "=", "LevenshteinDicomFileDistance", "(", "field_weights", ")", "else", ":", "try", ":", "dist_method", "=", "dist_method_cls", "(", "field_weights", "=", "field_weights", ",", "*", "*", "kwargs", ")", "except", ":", "log", ".", "exception", "(", "'Could not instantiate {} object with field_weights '", "'and {}'", ".", "format", "(", "dist_method_cls", ",", "kwargs", ")", ")", "dist_dtype", "=", "np", ".", "float16", "n_files", "=", "len", "(", "dicom_files", ")", "try", ":", "file_dists", "=", "np", ".", "zeros", "(", "(", "n_files", ",", "n_files", ")", ",", "dtype", "=", "dist_dtype", ")", "except", "MemoryError", "as", "mee", ":", "import", "scipy", ".", "sparse", "file_dists", "=", "scipy", ".", "sparse", ".", "lil_matrix", "(", "(", "n_files", ",", "n_files", ")", ",", "dtype", "=", "dist_dtype", ")", "for", "idxi", "in", "range", "(", "n_files", ")", ":", "dist_method", ".", "set_dicom_file1", "(", "dicom_files", "[", "idxi", "]", ")", "for", "idxj", "in", "range", "(", "idxi", "+", "1", ",", "n_files", ")", ":", "dist_method", ".", "set_dicom_file2", "(", "dicom_files", "[", "idxj", "]", ")", "if", "idxi", "!=", "idxj", ":", "file_dists", "[", "idxi", ",", "idxj", "]", "=", "dist_method", ".", "transform", "(", ")", "return", "file_dists" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
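A usage sketch for calculate_file_distances; the file list and weights are assumptions. The result is an NxN float16 matrix (sparse if memory runs out) filled only above the diagonal:

dicom_files = ['/incoming/a.dcm', '/incoming/b.dcm', '/incoming/c.dcm']
weights = {'PatientID': 1.0, 'StudyDate': 0.5}  # hypothetical weighting
dists = calculate_file_distances(dicom_files, field_weights=weights)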
valid
SimpleDicomFileDistance.fit
Parameters ---------- dcm_file1: str (path to file) or DicomFile or namedtuple dcm_file2: str (path to file) or DicomFile or namedtuple
boyle/dicom/comparison.py
def fit(self, dcm_file1, dcm_file2): """ Parameters ---------- dcm_file1: str (path to file) or DicomFile or namedtuple dcm_file2: str (path to file) or DicomFile or namedtuple """ self.set_dicom_file1(dcm_file1) self.set_dicom_file2(dcm_file2)
def fit(self, dcm_file1, dcm_file2): """ Parameters ---------- dcm_file1: str (path to file) or DicomFile or namedtuple dcm_file2: str (path to file) or DicomFile or namedtuple """ self.set_dicom_file1(dcm_file1) self.set_dicom_file2(dcm_file2)
[ "Parameters", "----------", "dcm_file1", ":", "str", "(", "path", "to", "file", ")", "or", "DicomFile", "or", "namedtuple" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L84-L93
[ "def", "fit", "(", "self", ",", "dcm_file1", ",", "dcm_file2", ")", ":", "self", ".", "set_dicom_file1", "(", "dcm_file1", ")", "self", ".", "set_dicom_file2", "(", "dcm_file2", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
SimpleDicomFileDistance.transform
Check the field values in self.dcmf1 and self.dcmf2 and returns True if all the field values are the same, False otherwise. Returns ------- bool
boyle/dicom/comparison.py
def transform(self): """Check the field values in self.dcmf1 and self.dcmf2 and returns True if all the field values are the same, False otherwise. Returns ------- bool """ if self.dcmf1 is None or self.dcmf2 is None: return np.inf for field_name in self.field_weights: if (str(getattr(self.dcmf1, field_name, '')) != str(getattr(self.dcmf2, field_name, ''))): return False return True
def transform(self): """Check the field values in self.dcmf1 and self.dcmf2 and returns True if all the field values are the same, False otherwise. Returns ------- bool """ if self.dcmf1 is None or self.dcmf2 is None: return np.inf for field_name in self.field_weights: if (str(getattr(self.dcmf1, field_name, '')) != str(getattr(self.dcmf2, field_name, ''))): return False return True
[ "Check", "the", "field", "values", "in", "self", ".", "dcmf1", "and", "self", ".", "dcmf2", "and", "returns", "True", "if", "all", "the", "field", "values", "are", "the", "same", "False", "otherwise", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L132-L148
[ "def", "transform", "(", "self", ")", ":", "if", "self", ".", "dcmf1", "is", "None", "or", "self", ".", "dcmf2", "is", "None", ":", "return", "np", ".", "inf", "for", "field_name", "in", "self", ".", "field_weights", ":", "if", "(", "str", "(", "getattr", "(", "self", ".", "dcmf1", ",", "field_name", ",", "''", ")", ")", "!=", "str", "(", "getattr", "(", "self", ".", "dcmf2", ",", "field_name", ",", "''", ")", ")", ")", ":", "return", "False", "return", "True" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
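A sketch tying together the fit and transform records above; the file names are hypothetical. field_weights is used here as a plain list of field names, which matches how transform iterates over it:

dist = SimpleDicomFileDistance(field_weights=['PatientID',
                                              'SeriesInstanceUID'])
dist.fit('a.dcm', 'b.dcm')
if dist.transform():  # True only when every compared field value matches
    print('same group')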
valid
DicomFilesClustering.levenshtein_analysis
Updates the status of the file clusters comparing the cluster key files with a weighted Levenshtein measure, using either the given field_weights or self.field_weights. Parameters ---------- field_weights: dict of strings with floats A dict with header field names to float scalar values, that indicate a distance measure ratio for the Levenshtein distance averaging of all the header field names in it. e.g., {'PatientID': 1} Returns ------- file_dists: np.ndarray or scipy.sparse.lil_matrix Distance matrix between the group key files.
boyle/dicom/comparison.py
def levenshtein_analysis(self, field_weights=None):
    """
    Updates the status of the file clusters comparing the cluster
    key files with a weighted Levenshtein measure, using either the
    given field_weights or self.field_weights.

    Parameters
    ----------
    field_weights: dict of strings with floats
        A dict with header field names to float scalar values, that
        indicate a distance measure ratio for the Levenshtein distance
        averaging of all the header field names in it.
        e.g., {'PatientID': 1}

    Returns
    -------
    file_dists: np.ndarray or scipy.sparse.lil_matrix
        Distance matrix between the group key files.
    """
    if field_weights is None:
        if not isinstance(self.field_weights, dict):
            raise ValueError('Expected a dict for `field_weights` parameter, '
                             'got {}'.format(type(self.field_weights)))
        # fall back to the weights stored on the instance
        field_weights = self.field_weights

    key_dicoms = list(self.dicom_groups.keys())
    file_dists = calculate_file_distances(key_dicoms, field_weights,
                                          self._dist_method_cls)
    return file_dists
def levenshtein_analysis(self, field_weights=None):
    """
    Updates the status of the file clusters comparing the cluster
    key files with a weighted Levenshtein measure, using either the
    given field_weights or self.field_weights.

    Parameters
    ----------
    field_weights: dict of strings with floats
        A dict with header field names to float scalar values, that
        indicate a distance measure ratio for the Levenshtein distance
        averaging of all the header field names in it.
        e.g., {'PatientID': 1}

    Returns
    -------
    file_dists: np.ndarray or scipy.sparse.lil_matrix
        Distance matrix between the group key files.
    """
    if field_weights is None:
        if not isinstance(self.field_weights, dict):
            raise ValueError('Expected a dict for `field_weights` parameter, '
                             'got {}'.format(type(self.field_weights)))
        # fall back to the weights stored on the instance
        field_weights = self.field_weights

    key_dicoms = list(self.dicom_groups.keys())
    file_dists = calculate_file_distances(key_dicoms, field_weights,
                                          self._dist_method_cls)
    return file_dists
[ "Updates", "the", "status", "of", "the", "file", "clusters", "comparing", "the", "cluster", "key", "files", "with", "a", "levenshtein", "weighted", "measure", "using", "either", "the", "header_fields", "or", "self", ".", "header_fields", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L414-L434
[ "def", "levenshtein_analysis", "(", "self", ",", "field_weights", "=", "None", ")", ":", "if", "field_weights", "is", "None", ":", "if", "not", "isinstance", "(", "self", ".", "field_weights", ",", "dict", ")", ":", "raise", "ValueError", "(", "'Expected a dict for `field_weights` parameter, '", "'got {}'", ".", "format", "(", "type", "(", "self", ".", "field_weights", ")", ")", ")", "key_dicoms", "=", "list", "(", "self", ".", "dicom_groups", ".", "keys", "(", ")", ")", "file_dists", "=", "calculate_file_distances", "(", "key_dicoms", ",", "field_weights", ",", "self", ".", "_dist_method_cls", ")", "return", "file_dists" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
DicomFilesClustering.dist_percentile_threshold
Thresholds a distance matrix and returns the result. Parameters ---------- dist_matrix: array_like Input array or object that can be converted to an array. perc_thr: float in range of [0,100] Percentile to compute which must be between 0 and 100 inclusive. k: int, optional Diagonal above which to zero elements. k = 0 (the default) is the main diagonal, k < 0 is below it and k > 0 is above. Returns ------- array_like
boyle/dicom/comparison.py
def dist_percentile_threshold(dist_matrix, perc_thr=0.05, k=1): """Thresholds a distance matrix and returns the result. Parameters ---------- dist_matrix: array_like Input array or object that can be converted to an array. perc_thr: float in range of [0,100] Percentile to compute which must be between 0 and 100 inclusive. k: int, optional Diagonal above which to zero elements. k = 0 (the default) is the main diagonal, k < 0 is below it and k > 0 is above. Returns ------- array_like """ triu_idx = np.triu_indices(dist_matrix.shape[0], k=k) upper = np.zeros_like(dist_matrix) upper[triu_idx] = dist_matrix[triu_idx] < np.percentile(dist_matrix[triu_idx], perc_thr) return upper
def dist_percentile_threshold(dist_matrix, perc_thr=0.05, k=1): """Thresholds a distance matrix and returns the result. Parameters ---------- dist_matrix: array_like Input array or object that can be converted to an array. perc_thr: float in range of [0,100] Percentile to compute which must be between 0 and 100 inclusive. k: int, optional Diagonal above which to zero elements. k = 0 (the default) is the main diagonal, k < 0 is below it and k > 0 is above. Returns ------- array_like """ triu_idx = np.triu_indices(dist_matrix.shape[0], k=k) upper = np.zeros_like(dist_matrix) upper[triu_idx] = dist_matrix[triu_idx] < np.percentile(dist_matrix[triu_idx], perc_thr) return upper
[ "Thresholds", "a", "distance", "matrix", "and", "returns", "the", "result", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L437-L462
[ "def", "dist_percentile_threshold", "(", "dist_matrix", ",", "perc_thr", "=", "0.05", ",", "k", "=", "1", ")", ":", "triu_idx", "=", "np", ".", "triu_indices", "(", "dist_matrix", ".", "shape", "[", "0", "]", ",", "k", "=", "k", ")", "upper", "=", "np", ".", "zeros_like", "(", "dist_matrix", ")", "upper", "[", "triu_idx", "]", "=", "dist_matrix", "[", "triu_idx", "]", "<", "np", ".", "percentile", "(", "dist_matrix", "[", "triu_idx", "]", ",", "perc_thr", ")", "return", "upper" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
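A small worked example for dist_percentile_threshold. With k=1 the upper-triangle values are 1, 9 and 5, whose 50th percentile is 5; only entries strictly below that are flagged:

import numpy as np

d = np.array([[0., 1., 9.],
              [1., 0., 5.],
              [9., 5., 0.]])
mask = dist_percentile_threshold(d, perc_thr=50)
# mask == [[0., 1., 0.],
#          [0., 0., 0.],
#          [0., 0., 0.]]   (only d[0, 1] = 1 is below the threshold)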
valid
DicomFilesClustering.get_groups_in_same_folder
Returns a list of 2-tuples with pairs of dicom groups that are in the same folder within given depth. Parameters ---------- folder_depth: int Path depth to check for folder equality. Returns ------- list of tuples of str
boyle/dicom/comparison.py
def get_groups_in_same_folder(self, folder_depth=3): """ Returns a list of 2-tuples with pairs of dicom groups that are in the same folder within given depth. Parameters ---------- folder_depth: int Path depth to check for folder equality. Returns ------- list of tuples of str """ group_pairs = [] key_dicoms = list(self.dicom_groups.keys()) idx = len(key_dicoms) while idx > 0: group1 = key_dicoms.pop() dir_group1 = get_folder_subpath(group1, folder_depth) for group in key_dicoms: if group.startswith(dir_group1): group_pairs.append((group1, group)) idx -= 1 return group_pairs
def get_groups_in_same_folder(self, folder_depth=3): """ Returns a list of 2-tuples with pairs of dicom groups that are in the same folder within given depth. Parameters ---------- folder_depth: int Path depth to check for folder equality. Returns ------- list of tuples of str """ group_pairs = [] key_dicoms = list(self.dicom_groups.keys()) idx = len(key_dicoms) while idx > 0: group1 = key_dicoms.pop() dir_group1 = get_folder_subpath(group1, folder_depth) for group in key_dicoms: if group.startswith(dir_group1): group_pairs.append((group1, group)) idx -= 1 return group_pairs
[ "Returns", "a", "list", "of", "2", "-", "tuples", "with", "pairs", "of", "dicom", "groups", "that", "are", "in", "the", "same", "folder", "within", "given", "depth", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L464-L489
[ "def", "get_groups_in_same_folder", "(", "self", ",", "folder_depth", "=", "3", ")", ":", "group_pairs", "=", "[", "]", "key_dicoms", "=", "list", "(", "self", ".", "dicom_groups", ".", "keys", "(", ")", ")", "idx", "=", "len", "(", "key_dicoms", ")", "while", "idx", ">", "0", ":", "group1", "=", "key_dicoms", ".", "pop", "(", ")", "dir_group1", "=", "get_folder_subpath", "(", "group1", ",", "folder_depth", ")", "for", "group", "in", "key_dicoms", ":", "if", "group", ".", "startswith", "(", "dir_group1", ")", ":", "group_pairs", ".", "append", "(", "(", "group1", ",", "group", ")", ")", "idx", "-=", "1", "return", "group_pairs" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
DicomFilesClustering.plot_file_distances
Plots dist_matrix Parameters ---------- dist_matrix: np.ndarray
boyle/dicom/comparison.py
def plot_file_distances(dist_matrix): """ Plots dist_matrix Parameters ---------- dist_matrix: np.ndarray """ import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.matshow(dist_matrix, interpolation='nearest', cmap=plt.cm.get_cmap('PuBu'))
def plot_file_distances(dist_matrix): """ Plots dist_matrix Parameters ---------- dist_matrix: np.ndarray """ import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) ax.matshow(dist_matrix, interpolation='nearest', cmap=plt.cm.get_cmap('PuBu'))
[ "Plots", "dist_matrix" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L492-L505
[ "def", "plot_file_distances", "(", "dist_matrix", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "ax", ".", "matshow", "(", "dist_matrix", ",", "interpolation", "=", "'nearest'", ",", "cmap", "=", "plt", ".", "cm", ".", "get_cmap", "(", "'PuBu'", ")", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
DicomFilesClustering.merge_groups
Extend the lists within the DICOM groups dictionary. The indices indicate which lists have to be extended by which other lists. Parameters ---------- indices: list or tuple of 2 iterables of int, both having the same length The indices of the lists that have to be merged; both iterables' items will be read pair by pair, the first being the index of the list that will be extended with the list at the second index. The indices can be constructed with Numpy e.g., indices = np.where(square_matrix)
boyle/dicom/comparison.py
def merge_groups(self, indices):
    """Extend the lists within the DICOM groups dictionary.
    The indices indicate which lists have to be extended by which
    other lists.

    Parameters
    ----------
    indices: list or tuple of 2 iterables of int, both having the same length
        The indices of the lists that have to be merged; both iterables'
        items will be read pair by pair, the first being the index of the
        list that will be extended with the list at the second index.
        The indices can be constructed with Numpy e.g.,
        indices = np.where(square_matrix)
    """
    try:
        merged = merge_dict_of_lists(self.dicom_groups, indices,
                                     pop_later=True, copy=True)
        self.dicom_groups = merged
    except IndexError:
        raise IndexError('Index out of range to merge DICOM groups.')
def merge_groups(self, indices):
    """Extend the lists within the DICOM groups dictionary.
    The indices indicate which lists have to be extended by which
    other lists.

    Parameters
    ----------
    indices: list or tuple of 2 iterables of int, both having the same length
        The indices of the lists that have to be merged; both iterables'
        items will be read pair by pair, the first being the index of the
        list that will be extended with the list at the second index.
        The indices can be constructed with Numpy e.g.,
        indices = np.where(square_matrix)
    """
    try:
        merged = merge_dict_of_lists(self.dicom_groups, indices,
                                     pop_later=True, copy=True)
        self.dicom_groups = merged
    except IndexError:
        raise IndexError('Index out of range to merge DICOM groups.')
[ "Extend", "the", "lists", "within", "the", "DICOM", "groups", "dictionary", ".", "The", "indices", "will", "indicate", "which", "list", "have", "to", "be", "extended", "by", "which", "other", "list", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L521-L540
[ "def", "merge_groups", "(", "self", ",", "indices", ")", ":", "try", ":", "merged", "=", "merge_dict_of_lists", "(", "self", ".", "dicom_groups", ",", "indices", ",", "pop_later", "=", "True", ",", "copy", "=", "True", ")", "self", ".", "dicom_groups", "=", "merged", "except", "IndexError", ":", "raise", "IndexError", "(", "'Index out of range to merge DICOM groups.'", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
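A sketch of how merge_groups plausibly combines with the distance tooling above; clustering stands for a hypothetical DicomFilesClustering instance:

import numpy as np

dists = clustering.levenshtein_analysis({'PatientID': 1})
# np.where on a thresholded square matrix returns exactly the
# "2 iterables of int" that merge_groups expects.
close_pairs = np.where(dist_percentile_threshold(dists, perc_thr=5))
clustering.merge_groups(close_pairs)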
valid
DicomFilesClustering.move_to_folder
Copy the file groups to folder_path. Each group will be copied into a subfolder named after the value of groupby_field_name. Parameters ---------- folder_path: str Path where the DICOM files will be copied to. groupby_field_name: list of str DICOM field names. The values of these fields will name the group folder. If empty or None the basename of the group key file is used.
boyle/dicom/comparison.py
def move_to_folder(self, folder_path, groupby_field_name=None):
    """Copy the file groups to folder_path. Each group will be copied
    into a subfolder named after the value of groupby_field_name.

    Parameters
    ----------
    folder_path: str
        Path where the DICOM files will be copied to.

    groupby_field_name: list of str
        DICOM field names. The values of these fields will name the
        group folder. If empty or None the basename of the group key
        file is used.
    """
    try:
        copy_groups_to_folder(self.dicom_groups, folder_path, groupby_field_name)
    except IOError as ioe:
        raise IOError('Error moving dicom groups to {}.'.format(folder_path)) from ioe
def move_to_folder(self, folder_path, groupby_field_name=None):
    """Copy the file groups to folder_path. Each group will be copied
    into a subfolder named after the value of groupby_field_name.

    Parameters
    ----------
    folder_path: str
        Path where the DICOM files will be copied to.

    groupby_field_name: list of str
        DICOM field names. The values of these fields will name the
        group folder. If empty or None the basename of the group key
        file is used.
    """
    try:
        copy_groups_to_folder(self.dicom_groups, folder_path, groupby_field_name)
    except IOError as ioe:
        raise IOError('Error moving dicom groups to {}.'.format(folder_path)) from ioe
[ "Copy", "the", "file", "groups", "to", "folder_path", ".", "Each", "group", "will", "be", "copied", "into", "a", "subfolder", "with", "named", "given", "by", "groupby_field", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L542-L558
[ "def", "move_to_folder", "(", "self", ",", "folder_path", ",", "groupby_field_name", "=", "None", ")", ":", "try", ":", "copy_groups_to_folder", "(", "self", ".", "dicom_groups", ",", "folder_path", ",", "groupby_field_name", ")", "except", "IOError", "as", "ioe", ":", "raise", "IOError", "(", "'Error moving dicom groups to {}.'", ".", "format", "(", "folder_path", ")", ")", "from", "ioe" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
DicomFilesClustering.get_unique_field_values_per_group
Return a dictionary where the key is the group key file path and the values are sets of unique values of the field name of all DICOM files in the group. Parameters ---------- field_name: str Name of the field to read from all files field_to_use_as_key: str Name of the field to get the value and use as key. If None, will use the same key as the dicom_groups. Returns ------- Dict of sets
boyle/dicom/comparison.py
def get_unique_field_values_per_group(self, field_name, field_to_use_as_key=None): """Return a dictionary where the key is the group key file path and the values are sets of unique values of the field name of all DICOM files in the group. Parameters ---------- field_name: str Name of the field to read from all files field_to_use_as_key: str Name of the field to get the value and use as key. If None, will use the same key as the dicom_groups. Returns ------- Dict of sets """ unique_vals = DefaultOrderedDict(set) for dcmg in self.dicom_groups: for f in self.dicom_groups[dcmg]: field_val = DicomFile(f).get_attributes(field_name) key_val = dcmg if field_to_use_as_key is not None: try: key_val = str(DicomFile(dcmg).get_attributes(field_to_use_as_key)) except KeyError as ke: raise KeyError('Error getting field {} from ' 'file {}'.format(field_to_use_as_key, dcmg)) from ke unique_vals[key_val].add(field_val) return unique_vals
def get_unique_field_values_per_group(self, field_name, field_to_use_as_key=None): """Return a dictionary where the key is the group key file path and the values are sets of unique values of the field name of all DICOM files in the group. Parameters ---------- field_name: str Name of the field to read from all files field_to_use_as_key: str Name of the field to get the value and use as key. If None, will use the same key as the dicom_groups. Returns ------- Dict of sets """ unique_vals = DefaultOrderedDict(set) for dcmg in self.dicom_groups: for f in self.dicom_groups[dcmg]: field_val = DicomFile(f).get_attributes(field_name) key_val = dcmg if field_to_use_as_key is not None: try: key_val = str(DicomFile(dcmg).get_attributes(field_to_use_as_key)) except KeyError as ke: raise KeyError('Error getting field {} from ' 'file {}'.format(field_to_use_as_key, dcmg)) from ke unique_vals[key_val].add(field_val) return unique_vals
[ "Return", "a", "dictionary", "where", "the", "key", "is", "the", "group", "key", "file", "path", "and", "the", "values", "are", "sets", "of", "unique", "values", "of", "the", "field", "name", "of", "all", "DICOM", "files", "in", "the", "group", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/comparison.py#L560-L593
[ "def", "get_unique_field_values_per_group", "(", "self", ",", "field_name", ",", "field_to_use_as_key", "=", "None", ")", ":", "unique_vals", "=", "DefaultOrderedDict", "(", "set", ")", "for", "dcmg", "in", "self", ".", "dicom_groups", ":", "for", "f", "in", "self", ".", "dicom_groups", "[", "dcmg", "]", ":", "field_val", "=", "DicomFile", "(", "f", ")", ".", "get_attributes", "(", "field_name", ")", "key_val", "=", "dcmg", "if", "field_to_use_as_key", "is", "not", "None", ":", "try", ":", "key_val", "=", "str", "(", "DicomFile", "(", "dcmg", ")", ".", "get_attributes", "(", "field_to_use_as_key", ")", ")", "except", "KeyError", "as", "ke", ":", "raise", "KeyError", "(", "'Error getting field {} from '", "'file {}'", ".", "format", "(", "field_to_use_as_key", ",", "dcmg", ")", ")", "from", "ke", "unique_vals", "[", "key_val", "]", ".", "add", "(", "field_val", ")", "return", "unique_vals" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
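A usage sketch for get_unique_field_values_per_group; clustering is again a hypothetical DicomFilesClustering instance and the field choices are assumptions:

# One set of series descriptions per patient, keyed by PatientID.
series_per_patient = clustering.get_unique_field_values_per_group(
    'SeriesDescription', field_to_use_as_key='PatientID')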
valid
get_config_value
Gets a config by name. In the case where the config name is not found, will use fallback value.
rcctl/rcctl/config.py
def get_config_value(name, fallback=None): """Gets a config by name. In the case where the config name is not found, will use fallback value.""" cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) return cli_config.get('servicefabric', name, fallback)
def get_config_value(name, fallback=None): """Gets a config by name. In the case where the config name is not found, will use fallback value.""" cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) return cli_config.get('servicefabric', name, fallback)
[ "Gets", "a", "config", "by", "name", "." ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/config.py#L18-L25
[ "def", "get_config_value", "(", "name", ",", "fallback", "=", "None", ")", ":", "cli_config", "=", "CLIConfig", "(", "SF_CLI_CONFIG_DIR", ",", "SF_CLI_ENV_VAR_PREFIX", ")", "return", "cli_config", ".", "get", "(", "'servicefabric'", ",", "name", ",", "fallback", ")" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
valid
get_config_bool
Checks if a config value is set to a valid bool value.
rcctl/rcctl/config.py
def get_config_bool(name): """Checks if a config value is set to a valid bool value.""" cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) return cli_config.getboolean('servicefabric', name, False)
def get_config_bool(name): """Checks if a config value is set to a valid bool value.""" cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) return cli_config.getboolean('servicefabric', name, False)
[ "Checks", "if", "a", "config", "value", "is", "set", "to", "a", "valid", "bool", "value", "." ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/config.py#L27-L31
[ "def", "get_config_bool", "(", "name", ")", ":", "cli_config", "=", "CLIConfig", "(", "SF_CLI_CONFIG_DIR", ",", "SF_CLI_ENV_VAR_PREFIX", ")", "return", "cli_config", ".", "getboolean", "(", "'servicefabric'", ",", "name", ",", "False", ")" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
valid
set_config_value
Set a config by name to a value.
rcctl/rcctl/config.py
def set_config_value(name, value): """Set a config by name to a value.""" cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) cli_config.set_value('servicefabric', name, value)
def set_config_value(name, value): """Set a config by name to a value.""" cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) cli_config.set_value('servicefabric', name, value)
[ "Set", "a", "config", "by", "name", "to", "a", "value", "." ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/config.py#L33-L37
[ "def", "set_config_value", "(", "name", ",", "value", ")", ":", "cli_config", "=", "CLIConfig", "(", "SF_CLI_CONFIG_DIR", ",", "SF_CLI_ENV_VAR_PREFIX", ")", "cli_config", ".", "set_value", "(", "'servicefabric'", ",", "name", ",", "value", ")" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
valid
cert_info
Path to certificate related files, either a single file path or a tuple. In the case of no security, returns None.
rcctl/rcctl/config.py
def cert_info(): """Path to certificate related files, either a single file path or a tuple. In the case of no security, returns None.""" sec_type = security_type() if sec_type == 'pem': return get_config_value('pem_path', fallback=None) if sec_type == 'cert': cert_path = get_config_value('cert_path', fallback=None) key_path = get_config_value('key_path', fallback=None) return cert_path, key_path return None
def cert_info(): """Path to certificate related files, either a single file path or a tuple. In the case of no security, returns None.""" sec_type = security_type() if sec_type == 'pem': return get_config_value('pem_path', fallback=None) if sec_type == 'cert': cert_path = get_config_value('cert_path', fallback=None) key_path = get_config_value('key_path', fallback=None) return cert_path, key_path return None
[ "Path", "to", "certificate", "related", "files", "either", "a", "single", "file", "path", "or", "a", "tuple", ".", "In", "the", "case", "of", "no", "security", "returns", "None", "." ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/config.py#L80-L92
[ "def", "cert_info", "(", ")", ":", "sec_type", "=", "security_type", "(", ")", "if", "sec_type", "==", "'pem'", ":", "return", "get_config_value", "(", "'pem_path'", ",", "fallback", "=", "None", ")", "if", "sec_type", "==", "'cert'", ":", "cert_path", "=", "get_config_value", "(", "'cert_path'", ",", "fallback", "=", "None", ")", "key_path", "=", "get_config_value", "(", "'key_path'", ",", "fallback", "=", "None", ")", "return", "cert_path", ",", "key_path", "return", "None" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
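Since cert_info returns a single PEM path, a (cert, key) tuple, or None, callers have to branch on the shape of the result; a sketch under that assumption:

    from rcctl.config import cert_info  # import path assumed

    info = cert_info()
    if info is None:
        print('no client certificate configured')
    elif isinstance(info, tuple):
        cert_path, key_path = info   # 'cert' security type
    else:
        pem_path = info              # 'pem' security type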
valid
set_aad_cache
Set AAD token cache.
rcctl/rcctl/config.py
def set_aad_cache(token, cache): """Set AAD token cache.""" set_config_value('aad_token', jsonpickle.encode(token)) set_config_value('aad_cache', jsonpickle.encode(cache))
def set_aad_cache(token, cache): """Set AAD token cache.""" set_config_value('aad_token', jsonpickle.encode(token)) set_config_value('aad_cache', jsonpickle.encode(cache))
[ "Set", "AAD", "token", "cache", "." ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/config.py#L99-L102
[ "def", "set_aad_cache", "(", "token", ",", "cache", ")", ":", "set_config_value", "(", "'aad_token'", ",", "jsonpickle", ".", "encode", "(", "token", ")", ")", "set_config_value", "(", "'aad_cache'", ",", "jsonpickle", ".", "encode", "(", "cache", ")", ")" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
valid
set_aad_metadata
Set AAD metadata.
rcctl/rcctl/config.py
def set_aad_metadata(uri, resource, client): """Set AAD metadata.""" set_config_value('authority_uri', uri) set_config_value('aad_resource', resource) set_config_value('aad_client', client)
def set_aad_metadata(uri, resource, client): """Set AAD metadata.""" set_config_value('authority_uri', uri) set_config_value('aad_resource', resource) set_config_value('aad_client', client)
[ "Set", "AAD", "metadata", "." ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/config.py#L110-L114
[ "def", "set_aad_metadata", "(", "uri", ",", "resource", ",", "client", ")", ":", "set_config_value", "(", "'authority_uri'", ",", "uri", ")", "set_config_value", "(", "'aad_resource'", ",", "resource", ")", "set_config_value", "(", "'aad_client'", ",", "client", ")" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
valid
set_auth
Set certificate usage paths
rcctl/rcctl/config.py
def set_auth(pem=None, cert=None, key=None, aad=False): """Set certificate usage paths""" if any([cert, key]) and pem: raise ValueError('Cannot specify both pem and cert or key') if any([cert, key]) and not all([cert, key]): raise ValueError('Must specify both cert and key') if pem: set_config_value('security', 'pem') set_config_value('pem_path', pem) elif cert or key: set_config_value('security', 'cert') set_config_value('cert_path', cert) set_config_value('key_path', key) elif aad: set_config_value('security', 'aad') else: set_config_value('security', 'none')
def set_auth(pem=None, cert=None, key=None, aad=False): """Set certificate usage paths""" if any([cert, key]) and pem: raise ValueError('Cannot specify both pem and cert or key') if any([cert, key]) and not all([cert, key]): raise ValueError('Must specify both cert and key') if pem: set_config_value('security', 'pem') set_config_value('pem_path', pem) elif cert or key: set_config_value('security', 'cert') set_config_value('cert_path', cert) set_config_value('key_path', key) elif aad: set_config_value('security', 'aad') else: set_config_value('security', 'none')
[ "Set", "certificate", "usage", "paths" ]
shalabhms/reliable-collections-cli
python
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/config.py#L116-L135
[ "def", "set_auth", "(", "pem", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "aad", "=", "False", ")", ":", "if", "any", "(", "[", "cert", ",", "key", "]", ")", "and", "pem", ":", "raise", "ValueError", "(", "'Cannot specify both pem and cert or key'", ")", "if", "any", "(", "[", "cert", ",", "key", "]", ")", "and", "not", "all", "(", "[", "cert", ",", "key", "]", ")", ":", "raise", "ValueError", "(", "'Must specify both cert and key'", ")", "if", "pem", ":", "set_config_value", "(", "'security'", ",", "'pem'", ")", "set_config_value", "(", "'pem_path'", ",", "pem", ")", "elif", "cert", "or", "key", ":", "set_config_value", "(", "'security'", ",", "'cert'", ")", "set_config_value", "(", "'cert_path'", ",", "cert", ")", "set_config_value", "(", "'key_path'", ",", "key", ")", "elif", "aad", ":", "set_config_value", "(", "'security'", ",", "'aad'", ")", "else", ":", "set_config_value", "(", "'security'", ",", "'none'", ")" ]
195d69816fb5a6e1e9ab0ab66b606b1248b4780d
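A sketch of the valid call shapes for set_auth and the guard that rejects mixing pem with cert/key; all file paths are hypothetical:

    from rcctl.config import set_auth  # import path assumed

    set_auth(pem='/tmp/client.pem')                          # 'pem' mode
    set_auth(cert='/tmp/client.crt', key='/tmp/client.key')  # 'cert' mode
    set_auth(aad=True)                                       # 'aad' mode
    set_auth()                                               # 'none' mode
    try:
        set_auth(pem='/tmp/client.pem', cert='/tmp/client.crt', key='/tmp/client.key')
    except ValueError as err:
        print(err)  # Cannot specify both pem and cert or key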
valid
filter_objlist
Returns a list of the objects in olist whose attribute fieldname equals fieldval Parameters ---------- olist: list of objects fieldname: string fieldval: anything Returns ------- list of objects
boyle/utils/strings.py
def filter_objlist(olist, fieldname, fieldval): """ Returns a list with of the objects in olist that have a fieldname valued as fieldval Parameters ---------- olist: list of objects fieldname: string fieldval: anything Returns ------- list of objets """ return [x for x in olist if getattr(x, fieldname) == fieldval]
def filter_objlist(olist, fieldname, fieldval): """ Returns a list with of the objects in olist that have a fieldname valued as fieldval Parameters ---------- olist: list of objects fieldname: string fieldval: anything Returns ------- list of objets """ return [x for x in olist if getattr(x, fieldname) == fieldval]
[ "Returns", "a", "list", "with", "of", "the", "objects", "in", "olist", "that", "have", "a", "fieldname", "valued", "as", "fieldval" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L15-L31
[ "def", "filter_objlist", "(", "olist", ",", "fieldname", ",", "fieldval", ")", ":", "return", "[", "x", "for", "x", "in", "olist", "if", "getattr", "(", "x", ",", "fieldname", ")", "==", "fieldval", "]" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
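A worked example for filter_objlist with a tiny namedtuple; the type and values are illustrative only:

    from collections import namedtuple
    from boyle.utils.strings import filter_objlist  # import path assumed

    Scan = namedtuple('Scan', ['subject', 'modality'])
    scans = [Scan('s01', 'T1'), Scan('s02', 'fMRI'), Scan('s03', 'T1')]
    t1_scans = filter_objlist(scans, 'modality', 'T1')
    # -> [Scan(subject='s01', modality='T1'), Scan(subject='s03', modality='T1')]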
valid
filter_list
Parameters ---------- lst: list filt: function Unary string filter function Returns ------- list List of items that passed the filter Example ------- >>> l = ['12123123', 'N123213'] >>> filt = re.compile('\d*').match >>> nu_l = filter_list(l, filt)
boyle/utils/strings.py
def filter_list(lst, filt): """ Parameters ---------- lst: list filter: function Unary string filter function Returns ------- list List of items that passed the filter Example ------- >>> l = ['12123123', 'N123213'] >>> filt = re.compile('\d*').match >>> nu_l = list_filter(l, filt) """ return [m for s in lst for m in (filt(s),) if m]
def filter_list(lst, filt): """ Parameters ---------- lst: list filter: function Unary string filter function Returns ------- list List of items that passed the filter Example ------- >>> l = ['12123123', 'N123213'] >>> filt = re.compile('\d*').match >>> nu_l = list_filter(l, filt) """ return [m for s in lst for m in (filt(s),) if m]
[ "Parameters", "----------", "lst", ":", "list" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L34-L54
[ "def", "filter_list", "(", "lst", ",", "filt", ")", ":", "return", "[", "m", "for", "s", "in", "lst", "for", "m", "in", "(", "filt", "(", "s", ")", ",", ")", "if", "m", "]" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
match_list
Parameters ---------- lst: list of str pattern: string group_names: list of strings See re.MatchObject group docstring Returns ------- list of strings Filtered list, with the strings that match the pattern
boyle/utils/strings.py
def match_list(lst, pattern, group_names=[]): """ Parameters ---------- lst: list of str pattern: string group_names: list of strings See re.MatchObject group docstring Returns ------- list of strings Filtered list, with the strings that match the pattern """ filtfn = re.compile(pattern).match filtlst = filter_list(lst, filtfn) if not group_names: return [m.string for m in filtlst] else: return [m.group(*group_names) for m in filtlst]
def match_list(lst, pattern, group_names=[]): """ Parameters ---------- lst: list of str pattern: string group_names: list of strings See re.MatchObject group docstring Returns ------- list of strings Filtered list, with the strings that match the pattern """ filtfn = re.compile(pattern).match filtlst = filter_list(lst, filtfn) if not group_names: return [m.string for m in filtlst] else: return [m.group(*group_names) for m in filtlst]
[ "Parameters", "----------", "lst", ":", "list", "of", "str" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L57-L78
[ "def", "match_list", "(", "lst", ",", "pattern", ",", "group_names", "=", "[", "]", ")", ":", "filtfn", "=", "re", ".", "compile", "(", "pattern", ")", ".", "match", "filtlst", "=", "filter_list", "(", "lst", ",", "filtfn", ")", "if", "not", "group_names", ":", "return", "[", "m", ".", "string", "for", "m", "in", "filtlst", "]", "else", ":", "return", "[", "m", ".", "group", "(", "group_names", ")", "for", "m", "in", "filtlst", "]" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
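A sketch of match_list with and without group_names; the second call assumes the group names are unpacked into the call (m.group(*group_names), as in the version above):

    from boyle.utils.strings import match_list  # import path assumed

    files = ['sub01_T1.nii', 'sub02_T1.nii', 'notes.txt']
    match_list(files, r'sub\d+_T1\.nii')
    # -> ['sub01_T1.nii', 'sub02_T1.nii'] (the full matched strings)
    match_list(files, r'(?P<subj>sub\d+)_T1\.nii', group_names=['subj'])
    # -> ['sub01', 'sub02'] (only the named group)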
valid
append_to_keys
Parameters ---------- adict: dict Dictionary whose keys will be prefixed preffix: str Prefix to prepend to each key Returns ------- dict New dict with preffix prepended to every key; values are kept unchanged
boyle/utils/strings.py
def append_to_keys(adict, preffix): """ Parameters ---------- adict: preffix: Returns ------- """ return {preffix + str(key): (value if isinstance(value, dict) else value) for key, value in list(adict.items())}
def append_to_keys(adict, preffix): """ Parameters ---------- adict: preffix: Returns ------- """ return {preffix + str(key): (value if isinstance(value, dict) else value) for key, value in list(adict.items())}
[ "Parameters", "----------", "adict", ":", "preffix", ":" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L99-L111
[ "def", "append_to_keys", "(", "adict", ",", "preffix", ")", ":", "return", "{", "preffix", "+", "str", "(", "key", ")", ":", "(", "value", "if", "isinstance", "(", "value", ",", "dict", ")", "else", "value", ")", "for", "key", ",", "value", "in", "list", "(", "adict", ".", "items", "(", ")", ")", "}" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
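A one-line demonstration of append_to_keys; note that the ternary in the implementation returns value in both branches, so values pass through unchanged:

    from boyle.utils.strings import append_to_keys  # import path assumed

    append_to_keys({'age': 30, 'meta': {'x': 1}}, 'subj_')
    # -> {'subj_age': 30, 'subj_meta': {'x': 1}}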
valid
is_valid_regex
Checks whether the re module can compile the given regular expression. Parameters ---------- string: str Returns ------- boolean
boyle/utils/strings.py
def is_valid_regex(string): """ Checks whether the re module can compile the given regular expression. Parameters ---------- string: str Returns ------- boolean """ try: re.compile(string) is_valid = True except re.error: is_valid = False return is_valid
def is_valid_regex(string): """ Checks whether the re module can compile the given regular expression. Parameters ---------- string: str Returns ------- boolean """ try: re.compile(string) is_valid = True except re.error: is_valid = False return is_valid
[ "Checks", "whether", "the", "re", "module", "can", "compile", "the", "given", "regular", "expression", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L127-L144
[ "def", "is_valid_regex", "(", "string", ")", ":", "try", ":", "re", ".", "compile", "(", "string", ")", "is_valid", "=", "True", "except", "re", ".", "error", ":", "is_valid", "=", "False", "return", "is_valid" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
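Two quick checks that exercise both branches of is_valid_regex:

    from boyle.utils.strings import is_valid_regex  # import path assumed

    is_valid_regex(r'\d+')   # True: compiles fine
    is_valid_regex('([a-z')  # False: unbalanced group raises re.error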
valid
is_regex
TODO: improve this! Returns True if the given string is considered a regular expression, False otherwise. It will be considered a regex if it contains any of the characters \ ( + ^ $ and is correctly compiled by re.compile :param string: str
boyle/utils/strings.py
def is_regex(string): """ TODO: improve this! Returns True if the given string is considered a regular expression, False otherwise. It will be considered a regex if starts with a non alphabetic character and then correctly compiled by re.compile :param string: str """ is_regex = False regex_chars = ['\\', '(', '+', '^', '$'] for c in regex_chars: if string.find(c) > -1: return is_valid_regex(string) return is_regex
def is_regex(string): """ TODO: improve this! Returns True if the given string is considered a regular expression, False otherwise. It will be considered a regex if starts with a non alphabetic character and then correctly compiled by re.compile :param string: str """ is_regex = False regex_chars = ['\\', '(', '+', '^', '$'] for c in regex_chars: if string.find(c) > -1: return is_valid_regex(string) return is_regex
[ "TODO", ":", "improve", "this!" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L147-L164
[ "def", "is_regex", "(", "string", ")", ":", "is_regex", "=", "False", "regex_chars", "=", "[", "'\\\\'", ",", "'('", ",", "'+'", ",", "'^'", ",", "'$'", "]", "for", "c", "in", "regex_chars", ":", "if", "string", ".", "find", "(", "c", ")", ">", "-", "1", ":", "return", "is_valid_regex", "(", "string", ")", "return", "is_regex" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
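A sketch of is_regex; it only inspects the fixed character set \ ( + ^ $ before trying to compile:

    from boyle.utils.strings import is_regex  # import path assumed

    is_regex(r'^sub\d+$')  # True: contains '^'/'$' and compiles
    is_regex('subject01')  # False: none of the checked characters present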
valid
is_fnmatch_regex
Returns True if the given string is considered a fnmatch regular expression, False otherwise. It will look for any of the characters ! * $ in the string. :param string: str
boyle/utils/strings.py
def is_fnmatch_regex(string): """ Returns True if the given string is considered a fnmatch regular expression, False otherwise. It will look for :param string: str """ is_regex = False regex_chars = ['!', '*', '$'] for c in regex_chars: if string.find(c) > -1: return True return is_regex
def is_fnmatch_regex(string): """ Returns True if the given string is considered a fnmatch regular expression, False otherwise. It will look for :param string: str """ is_regex = False regex_chars = ['!', '*', '$'] for c in regex_chars: if string.find(c) > -1: return True return is_regex
[ "Returns", "True", "if", "the", "given", "string", "is", "considered", "a", "fnmatch", "regular", "expression", "False", "otherwise", ".", "It", "will", "look", "for" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L167-L181
[ "def", "is_fnmatch_regex", "(", "string", ")", ":", "is_regex", "=", "False", "regex_chars", "=", "[", "'!'", ",", "'*'", ",", "'$'", "]", "for", "c", "in", "regex_chars", ":", "if", "string", ".", "find", "(", "c", ")", ">", "-", "1", ":", "return", "True", "return", "is_regex" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
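Quick checks for is_fnmatch_regex, which only looks for the characters ! * $:

    from boyle.utils.strings import is_fnmatch_regex  # import path assumed

    is_fnmatch_regex('*.nii.gz')   # True: contains '*'
    is_fnmatch_regex('subject01')  # False: no '!', '*' or '$'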
valid
where_is
Return index of the nth match found of pattern in strings Parameters ---------- strings: list of str List of strings pattern: str Pattern to be matched n: int Number of times the match must happen to return the item index. lookup_func: callable Function to match each item in strings to the pattern, e.g., re.match or re.search. Returns ------- index: int Index of the nth item that matches the pattern. If there are fewer than n matches, returns -1
boyle/utils/strings.py
def where_is(strings, pattern, n=1, lookup_func=re.match): """Return index of the nth match found of pattern in strings Parameters ---------- strings: list of str List of strings pattern: str Pattern to be matched nth: int Number of times the match must happen to return the item index. lookup_func: callable Function to match each item in strings to the pattern, e.g., re.match or re.search. Returns ------- index: int Index of the nth item that matches the pattern. If there are no n matches will return -1 """ count = 0 for idx, item in enumerate(strings): if lookup_func(pattern, item): count += 1 if count == n: return idx return -1
def where_is(strings, pattern, n=1, lookup_func=re.match): """Return index of the nth match found of pattern in strings Parameters ---------- strings: list of str List of strings pattern: str Pattern to be matched nth: int Number of times the match must happen to return the item index. lookup_func: callable Function to match each item in strings to the pattern, e.g., re.match or re.search. Returns ------- index: int Index of the nth item that matches the pattern. If there are no n matches will return -1 """ count = 0 for idx, item in enumerate(strings): if lookup_func(pattern, item): count += 1 if count == n: return idx return -1
[ "Return", "index", "of", "the", "nth", "match", "found", "of", "pattern", "in", "strings" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/strings.py#L209-L238
[ "def", "where_is", "(", "strings", ",", "pattern", ",", "n", "=", "1", ",", "lookup_func", "=", "re", ".", "match", ")", ":", "count", "=", "0", "for", "idx", ",", "item", "in", "enumerate", "(", "strings", ")", ":", "if", "lookup_func", "(", "pattern", ",", "item", ")", ":", "count", "+=", "1", "if", "count", "==", "n", ":", "return", "idx", "return", "-", "1" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
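A sketch of where_is locating the nth match; the default lookup_func, re.match, anchors at the start of each item:

    from boyle.utils.strings import where_is  # import path assumed

    names = ['sub01_T1', 'sub02_rest', 'sub03_rest']
    where_is(names, r'sub\d+_rest')        # -> 1 (first match)
    where_is(names, r'sub\d+_rest', n=2)   # -> 2 (second match)
    where_is(names, r'sub\d+_rest', n=3)   # -> -1 (fewer than 3 matches)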
valid
generate_config
Generate a dcm2nii configuration file that disables the interactive mode.
boyle/dicom/convert.py
def generate_config(output_directory): """ Generate a dcm2nii configuration file that disable the interactive mode. """ if not op.isdir(output_directory): os.makedirs(output_directory) config_file = op.join(output_directory, "config.ini") open_file = open(config_file, "w") open_file.write("[BOOL]\nManualNIfTIConv=0\n") open_file.close() return config_file
def generate_config(output_directory): """ Generate a dcm2nii configuration file that disable the interactive mode. """ if not op.isdir(output_directory): os.makedirs(output_directory) config_file = op.join(output_directory, "config.ini") open_file = open(config_file, "w") open_file.write("[BOOL]\nManualNIfTIConv=0\n") open_file.close() return config_file
[ "Generate", "a", "dcm2nii", "configuration", "file", "that", "disable", "the", "interactive", "mode", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/convert.py#L30-L41
[ "def", "generate_config", "(", "output_directory", ")", ":", "if", "not", "op", ".", "isdir", "(", "output_directory", ")", ":", "os", ".", "makedirs", "(", "output_directory", ")", "config_file", "=", "op", ".", "join", "(", "output_directory", ",", "\"config.ini\"", ")", "open_file", "=", "open", "(", "config_file", ",", "\"w\"", ")", "open_file", ".", "write", "(", "\"[BOOL]\\nManualNIfTIConv=0\\n\"", ")", "open_file", ".", "close", "(", ")", "return", "config_file" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
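A minimal sketch for generate_config; the output folder is hypothetical and is created if missing:

    from boyle.dicom.convert import generate_config  # import path assumed

    cfg = generate_config('/tmp/dcm2nii_work')
    # cfg == '/tmp/dcm2nii_work/config.ini', containing:
    # [BOOL]
    # ManualNIfTIConv=0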
valid
add_meta_to_nii
Add slice duration and acquisition times to the header of the nifti1 file in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tag values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_file: str Path to the NifTI file to modify. dicom_file: str Path to the DICOM file from where to get the meta data. dcm_tags: list of (name, tag) pairs Tags from the DICOM file to read and store in the nifti file.
boyle/dicom/convert.py
def add_meta_to_nii(nii_file, dicom_file, dcm_tags=''): """ Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tags values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_files: str Path to the NifTI file to modify. dicom_file: str Paths to the DICOM file from where to get the meta data. dcm_tags: list of str List of tags from the DICOM file to read and store in the nifti file. """ # Load a dicom image dcmimage = dicom.read_file(dicom_file) # Load the nifti1 image image = nibabel.load(nii_file) # Check the we have a nifti1 format image if not isinstance(image, nibabel.nifti1.Nifti1Image): raise Exception( "Only Nifti1 image are supported not '{0}'.".format( type(image))) # check if dcm_tags is one string, if yes put it in a list: if isinstance(dcm_tags, str): dcm_tags = [dcm_tags] # Fill the nifti1 header header = image.get_header() # slice_duration: Time for 1 slice repetition_time = float(dcmimage[("0x0018", "0x0080")].value) header.set_dim_info(slice=2) nb_slices = header.get_n_slices() # Force round to 0 digit after coma. If more, nibabel completes to # 6 digits with random numbers... slice_duration = round(repetition_time / nb_slices, 0) header.set_slice_duration(slice_duration) # add free dicom fields if dcm_tags: content = ["{0}={1}".format(name, dcmimage[tag].value) for name, tag in dcm_tags] free_field = numpy.array(";".join(content), dtype=header["descrip"].dtype) image.get_header()["descrip"] = free_field # Update the image header image.update_header() # Save the filled image nibabel.save(image, nii_file)
def add_meta_to_nii(nii_file, dicom_file, dcm_tags=''): """ Add slice duration and acquisition times to the headers of the nifit1 files in `nii_file`. It will add the repetition time of the DICOM file (field: {0x0018, 0x0080, DS, Repetition Time}) to the NifTI file as well as any other tag in `dcm_tags`. All selected DICOM tags values are set in the `descrip` nifti header field. Note that this will modify the header content of `nii_file`. Parameters ---------- nii_files: str Path to the NifTI file to modify. dicom_file: str Paths to the DICOM file from where to get the meta data. dcm_tags: list of str List of tags from the DICOM file to read and store in the nifti file. """ # Load a dicom image dcmimage = dicom.read_file(dicom_file) # Load the nifti1 image image = nibabel.load(nii_file) # Check the we have a nifti1 format image if not isinstance(image, nibabel.nifti1.Nifti1Image): raise Exception( "Only Nifti1 image are supported not '{0}'.".format( type(image))) # check if dcm_tags is one string, if yes put it in a list: if isinstance(dcm_tags, str): dcm_tags = [dcm_tags] # Fill the nifti1 header header = image.get_header() # slice_duration: Time for 1 slice repetition_time = float(dcmimage[("0x0018", "0x0080")].value) header.set_dim_info(slice=2) nb_slices = header.get_n_slices() # Force round to 0 digit after coma. If more, nibabel completes to # 6 digits with random numbers... slice_duration = round(repetition_time / nb_slices, 0) header.set_slice_duration(slice_duration) # add free dicom fields if dcm_tags: content = ["{0}={1}".format(name, dcmimage[tag].value) for name, tag in dcm_tags] free_field = numpy.array(";".join(content), dtype=header["descrip"].dtype) image.get_header()["descrip"] = free_field # Update the image header image.update_header() # Save the filled image nibabel.save(image, nii_file)
[ "Add", "slice", "duration", "and", "acquisition", "times", "to", "the", "headers", "of", "the", "nifit1", "files", "in", "nii_file", ".", "It", "will", "add", "the", "repetition", "time", "of", "the", "DICOM", "file", "(", "field", ":", "{", "0x0018", "0x0080", "DS", "Repetition", "Time", "}", ")", "to", "the", "NifTI", "file", "as", "well", "as", "any", "other", "tag", "in", "dcm_tags", ".", "All", "selected", "DICOM", "tags", "values", "are", "set", "in", "the", "descrip", "nifti", "header", "field", ".", "Note", "that", "this", "will", "modify", "the", "header", "content", "of", "nii_file", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/convert.py#L44-L102
[ "def", "add_meta_to_nii", "(", "nii_file", ",", "dicom_file", ",", "dcm_tags", "=", "''", ")", ":", "# Load a dicom image", "dcmimage", "=", "dicom", ".", "read_file", "(", "dicom_file", ")", "# Load the nifti1 image", "image", "=", "nibabel", ".", "load", "(", "nii_file", ")", "# Check the we have a nifti1 format image", "if", "not", "isinstance", "(", "image", ",", "nibabel", ".", "nifti1", ".", "Nifti1Image", ")", ":", "raise", "Exception", "(", "\"Only Nifti1 image are supported not '{0}'.\"", ".", "format", "(", "type", "(", "image", ")", ")", ")", "# check if dcm_tags is one string, if yes put it in a list:", "if", "isinstance", "(", "dcm_tags", ",", "str", ")", ":", "dcm_tags", "=", "[", "dcm_tags", "]", "# Fill the nifti1 header", "header", "=", "image", ".", "get_header", "(", ")", "# slice_duration: Time for 1 slice", "repetition_time", "=", "float", "(", "dcmimage", "[", "(", "\"0x0018\"", ",", "\"0x0080\"", ")", "]", ".", "value", ")", "header", ".", "set_dim_info", "(", "slice", "=", "2", ")", "nb_slices", "=", "header", ".", "get_n_slices", "(", ")", "# Force round to 0 digit after coma. If more, nibabel completes to", "# 6 digits with random numbers...", "slice_duration", "=", "round", "(", "repetition_time", "/", "nb_slices", ",", "0", ")", "header", ".", "set_slice_duration", "(", "slice_duration", ")", "# add free dicom fields", "if", "dcm_tags", ":", "content", "=", "[", "\"{0}={1}\"", ".", "format", "(", "name", ",", "dcmimage", "[", "tag", "]", ".", "value", ")", "for", "name", ",", "tag", "in", "dcm_tags", "]", "free_field", "=", "numpy", ".", "array", "(", "\";\"", ".", "join", "(", "content", ")", ",", "dtype", "=", "header", "[", "\"descrip\"", "]", ".", "dtype", ")", "image", ".", "get_header", "(", ")", "[", "\"descrip\"", "]", "=", "free_field", "# Update the image header", "image", ".", "update_header", "(", ")", "# Save the filled image", "nibabel", ".", "save", "(", "image", ",", "nii_file", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
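A hedged usage sketch for add_meta_to_nii; the paths are hypothetical, and note that the implementation unpacks for name, tag in dcm_tags, so each entry must be a (name, tag) pair:

    from boyle.dicom.convert import add_meta_to_nii  # import path assumed

    # rewrite epi.nii's header in place, also copying EchoTime (0018,0081)
    add_meta_to_nii('epi.nii', 'slice001.dcm',
                    dcm_tags=[('EchoTime', ('0x0018', '0x0081'))])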
valid
call_dcm2nii
Converts all DICOM files within `work_dir` into one or more NifTi files by calling dcm2nii on this folder. Parameters ---------- work_dir: str Path to the folder that contains the DICOM files arguments: str String containing all the flag arguments for `dcm2nii` CLI. Returns ------- sys_code: int dcm2nii execution return code
boyle/dicom/convert.py
def call_dcm2nii(work_dir, arguments=''): """Converts all DICOM files within `work_dir` into one or more NifTi files by calling dcm2nii on this folder. Parameters ---------- work_dir: str Path to the folder that contain the DICOM files arguments: str String containing all the flag arguments for `dcm2nii` CLI. Returns ------- sys_code: int dcm2nii execution return code """ if not op.exists(work_dir): raise IOError('Folder {} not found.'.format(work_dir)) cmd_line = 'dcm2nii {0} "{1}"'.format(arguments, work_dir) log.info(cmd_line) return subprocess.check_call(cmd_line, shell=True)
def call_dcm2nii(work_dir, arguments=''): """Converts all DICOM files within `work_dir` into one or more NifTi files by calling dcm2nii on this folder. Parameters ---------- work_dir: str Path to the folder that contain the DICOM files arguments: str String containing all the flag arguments for `dcm2nii` CLI. Returns ------- sys_code: int dcm2nii execution return code """ if not op.exists(work_dir): raise IOError('Folder {} not found.'.format(work_dir)) cmd_line = 'dcm2nii {0} "{1}"'.format(arguments, work_dir) log.info(cmd_line) return subprocess.check_call(cmd_line, shell=True)
[ "Converts", "all", "DICOM", "files", "within", "work_dir", "into", "one", "or", "more", "NifTi", "files", "by", "calling", "dcm2nii", "on", "this", "folder", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/convert.py#L105-L127
[ "def", "call_dcm2nii", "(", "work_dir", ",", "arguments", "=", "''", ")", ":", "if", "not", "op", ".", "exists", "(", "work_dir", ")", ":", "raise", "IOError", "(", "'Folder {} not found.'", ".", "format", "(", "work_dir", ")", ")", "cmd_line", "=", "'dcm2nii {0} \"{1}\"'", ".", "format", "(", "arguments", ",", "work_dir", ")", "log", ".", "info", "(", "cmd_line", ")", "return", "subprocess", ".", "check_call", "(", "cmd_line", ",", "shell", "=", "True", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
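A sketch for call_dcm2nii; it assumes the MRIcron dcm2nii binary is on PATH, and the flags shown are from the classic tool and may vary by build:

    from boyle.dicom.convert import call_dcm2nii  # import path assumed

    # gzip the output (-g y), skip dates in filenames (-d n)
    call_dcm2nii('./dicoms', arguments='-g y -d n')
    # raises subprocess.CalledProcessError on a non-zero exit code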
valid
convert_dcm2nii
Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir` to Nifti and save the Nifti file in `output_dir` with a `filename` prefix. Parameters ---------- input_dir: str Path to the folder that contains the DICOM files output_dir: str Path to the folder where to save the NifTI file filename: str Output file basename Returns ------- filepaths: list of str List of file paths created in `output_dir`.
boyle/dicom/convert.py
def convert_dcm2nii(input_dir, output_dir, filename): """ Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir` to Nifti and save the Nifti file in `output_dir` with a `filename` prefix. Parameters ---------- input_dir: str Path to the folder that contains the DICOM files output_dir: str Path to the folder where to save the NifTI file filename: str Output file basename Returns ------- filepaths: list of str List of file paths created in `output_dir`. """ # a few checks before doing the job if not op.exists(input_dir): raise IOError('Expected an existing folder in {}.'.format(input_dir)) if not op.exists(output_dir): raise IOError('Expected an existing output folder in {}.'.format(output_dir)) # create a temporary folder for dcm2nii export tmpdir = tempfile.TemporaryDirectory(prefix='dcm2nii_') # call dcm2nii arguments = '-o "{}" -i y'.format(tmpdir.name) try: call_out = call_dcm2nii(input_dir, arguments) except: raise else: log.info('Converted "{}" to nifti.'.format(input_dir)) # get the filenames of the files that dcm2nii produced filenames = glob(op.join(tmpdir.name, '*.nii*')) # cleanup `filenames`, using only the post-processed (reoriented, cropped, etc.) images by dcm2nii cleaned_filenames = remove_dcm2nii_underprocessed(filenames) # copy files to the output_dir filepaths = [] for srcpath in cleaned_filenames: dstpath = op.join(output_dir, filename) realpath = copy_w_plus(srcpath, dstpath) filepaths.append(realpath) # copy any other file produced by dcm2nii that is not a NifTI file, e.g., *.bvals, *.bvecs, etc. basename = op.basename(remove_ext(srcpath)) aux_files = set(glob(op.join(tmpdir.name, '{}.*' .format(basename)))) - \ set(glob(op.join(tmpdir.name, '{}.nii*'.format(basename)))) for aux_file in aux_files: aux_dstpath = copy_w_ext(aux_file, output_dir, remove_ext(op.basename(realpath))) filepaths.append(aux_dstpath) return filepaths
def convert_dcm2nii(input_dir, output_dir, filename): """ Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir` to Nifti and save the Nifti file in `output_dir` with a `filename` prefix. Parameters ---------- input_dir: str Path to the folder that contains the DICOM files output_dir: str Path to the folder where to save the NifTI file filename: str Output file basename Returns ------- filepaths: list of str List of file paths created in `output_dir`. """ # a few checks before doing the job if not op.exists(input_dir): raise IOError('Expected an existing folder in {}.'.format(input_dir)) if not op.exists(output_dir): raise IOError('Expected an existing output folder in {}.'.format(output_dir)) # create a temporary folder for dcm2nii export tmpdir = tempfile.TemporaryDirectory(prefix='dcm2nii_') # call dcm2nii arguments = '-o "{}" -i y'.format(tmpdir.name) try: call_out = call_dcm2nii(input_dir, arguments) except: raise else: log.info('Converted "{}" to nifti.'.format(input_dir)) # get the filenames of the files that dcm2nii produced filenames = glob(op.join(tmpdir.name, '*.nii*')) # cleanup `filenames`, using only the post-processed (reoriented, cropped, etc.) images by dcm2nii cleaned_filenames = remove_dcm2nii_underprocessed(filenames) # copy files to the output_dir filepaths = [] for srcpath in cleaned_filenames: dstpath = op.join(output_dir, filename) realpath = copy_w_plus(srcpath, dstpath) filepaths.append(realpath) # copy any other file produced by dcm2nii that is not a NifTI file, e.g., *.bvals, *.bvecs, etc. basename = op.basename(remove_ext(srcpath)) aux_files = set(glob(op.join(tmpdir.name, '{}.*' .format(basename)))) - \ set(glob(op.join(tmpdir.name, '{}.nii*'.format(basename)))) for aux_file in aux_files: aux_dstpath = copy_w_ext(aux_file, output_dir, remove_ext(op.basename(realpath))) filepaths.append(aux_dstpath) return filepaths
[ "Call", "MRICron", "s", "dcm2nii", "to", "convert", "the", "DICOM", "files", "inside", "input_dir", "to", "Nifti", "and", "save", "the", "Nifti", "file", "in", "output_dir", "with", "a", "filename", "prefix", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/convert.py#L130-L190
[ "def", "convert_dcm2nii", "(", "input_dir", ",", "output_dir", ",", "filename", ")", ":", "# a few checks before doing the job", "if", "not", "op", ".", "exists", "(", "input_dir", ")", ":", "raise", "IOError", "(", "'Expected an existing folder in {}.'", ".", "format", "(", "input_dir", ")", ")", "if", "not", "op", ".", "exists", "(", "output_dir", ")", ":", "raise", "IOError", "(", "'Expected an existing output folder in {}.'", ".", "format", "(", "output_dir", ")", ")", "# create a temporary folder for dcm2nii export", "tmpdir", "=", "tempfile", ".", "TemporaryDirectory", "(", "prefix", "=", "'dcm2nii_'", ")", "# call dcm2nii", "arguments", "=", "'-o \"{}\" -i y'", ".", "format", "(", "tmpdir", ".", "name", ")", "try", ":", "call_out", "=", "call_dcm2nii", "(", "input_dir", ",", "arguments", ")", "except", ":", "raise", "else", ":", "log", ".", "info", "(", "'Converted \"{}\" to nifti.'", ".", "format", "(", "input_dir", ")", ")", "# get the filenames of the files that dcm2nii produced", "filenames", "=", "glob", "(", "op", ".", "join", "(", "tmpdir", ".", "name", ",", "'*.nii*'", ")", ")", "# cleanup `filenames`, using only the post-processed (reoriented, cropped, etc.) images by dcm2nii", "cleaned_filenames", "=", "remove_dcm2nii_underprocessed", "(", "filenames", ")", "# copy files to the output_dir", "filepaths", "=", "[", "]", "for", "srcpath", "in", "cleaned_filenames", ":", "dstpath", "=", "op", ".", "join", "(", "output_dir", ",", "filename", ")", "realpath", "=", "copy_w_plus", "(", "srcpath", ",", "dstpath", ")", "filepaths", ".", "append", "(", "realpath", ")", "# copy any other file produced by dcm2nii that is not a NifTI file, e.g., *.bvals, *.bvecs, etc.", "basename", "=", "op", ".", "basename", "(", "remove_ext", "(", "srcpath", ")", ")", "aux_files", "=", "set", "(", "glob", "(", "op", ".", "join", "(", "tmpdir", ".", "name", ",", "'{}.*'", ".", "format", "(", "basename", ")", ")", ")", ")", "-", "set", "(", "glob", "(", "op", ".", "join", "(", "tmpdir", ".", "name", ",", "'{}.nii*'", ".", "format", "(", "basename", ")", ")", ")", ")", "for", "aux_file", "in", "aux_files", ":", "aux_dstpath", "=", "copy_w_ext", "(", "aux_file", ",", "output_dir", ",", "remove_ext", "(", "op", ".", "basename", "(", "realpath", ")", ")", ")", "filepaths", ".", "append", "(", "aux_dstpath", ")", "return", "filepaths" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
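A usage sketch for convert_dcm2nii; both folders must already exist (the function raises IOError otherwise) and the paths are hypothetical:

    from boyle.dicom.convert import convert_dcm2nii  # import path assumed

    paths = convert_dcm2nii('/data/dicom/sub01', '/data/nifti', 'sub01_T1')
    # paths lists the copied NifTI file(s) plus any .bval/.bvec side files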
valid
remove_dcm2nii_underprocessed
Return a subset of `filepaths`. Keep only the files whose basename is not a suffix of any other file's basename, i.e., the longest (most processed) variant of each chain. This works because dcm2nii prepends a prefix character for each processing step it applies automatically in the DICOM to NifTI conversion. Parameters ---------- filepaths: iterable of str Returns ------- cleaned_paths: iterable of str
boyle/dicom/convert.py
def remove_dcm2nii_underprocessed(filepaths): """ Return a subset of `filepaths`. Keep only the files that have a basename longer than the others with same suffix. This works based on that dcm2nii appends a preffix character for each processing step it does automatically in the DICOM to NifTI conversion. Parameters ---------- filepaths: iterable of str Returns ------- cleaned_paths: iterable of str """ cln_flist = [] # sort them by size len_sorted = sorted(filepaths, key=len) for idx, fpath in enumerate(len_sorted): remove = False # get the basename and the rest of the files fname = op.basename(fpath) rest = len_sorted[idx+1:] # check if the basename is in the basename of the rest of the files for rest_fpath in rest: rest_file = op.basename(rest_fpath) if rest_file.endswith(fname): remove = True break if not remove: cln_flist.append(fpath) return cln_flist
def remove_dcm2nii_underprocessed(filepaths): """ Return a subset of `filepaths`. Keep only the files that have a basename longer than the others with same suffix. This works based on that dcm2nii appends a preffix character for each processing step it does automatically in the DICOM to NifTI conversion. Parameters ---------- filepaths: iterable of str Returns ------- cleaned_paths: iterable of str """ cln_flist = [] # sort them by size len_sorted = sorted(filepaths, key=len) for idx, fpath in enumerate(len_sorted): remove = False # get the basename and the rest of the files fname = op.basename(fpath) rest = len_sorted[idx+1:] # check if the basename is in the basename of the rest of the files for rest_fpath in rest: rest_file = op.basename(rest_fpath) if rest_file.endswith(fname): remove = True break if not remove: cln_flist.append(fpath) return cln_flist
[ "Return", "a", "subset", "of", "filepaths", ".", "Keep", "only", "the", "files", "that", "have", "a", "basename", "longer", "than", "the", "others", "with", "same", "suffix", ".", "This", "works", "based", "on", "that", "dcm2nii", "appends", "a", "preffix", "character", "for", "each", "processing", "step", "it", "does", "automatically", "in", "the", "DICOM", "to", "NifTI", "conversion", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/dicom/convert.py#L193-L229
[ "def", "remove_dcm2nii_underprocessed", "(", "filepaths", ")", ":", "cln_flist", "=", "[", "]", "# sort them by size", "len_sorted", "=", "sorted", "(", "filepaths", ",", "key", "=", "len", ")", "for", "idx", ",", "fpath", "in", "enumerate", "(", "len_sorted", ")", ":", "remove", "=", "False", "# get the basename and the rest of the files", "fname", "=", "op", ".", "basename", "(", "fpath", ")", "rest", "=", "len_sorted", "[", "idx", "+", "1", ":", "]", "# check if the basename is in the basename of the rest of the files", "for", "rest_fpath", "in", "rest", ":", "rest_file", "=", "op", ".", "basename", "(", "rest_fpath", ")", "if", "rest_file", ".", "endswith", "(", "fname", ")", ":", "remove", "=", "True", "break", "if", "not", "remove", ":", "cln_flist", ".", "append", "(", "fpath", ")", "return", "cln_flist" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
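A worked example of the suffix filtering; because dcm2nii prefixes a character per processing step (e.g. 'o' for reoriented, 'co' for cropped and reoriented), only the longest variant of each chain survives:

    from boyle.dicom.convert import remove_dcm2nii_underprocessed  # path assumed

    files = ['/tmp/t1.nii', '/tmp/ot1.nii', '/tmp/cot1.nii', '/tmp/epi.nii']
    remove_dcm2nii_underprocessed(files)
    # -> ['/tmp/epi.nii', '/tmp/cot1.nii']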
valid
dictify
Transform a named tuple into a dictionary
boyle/more_collections.py
def dictify(a_named_tuple): """Transform a named tuple into a dictionary""" return dict((s, getattr(a_named_tuple, s)) for s in a_named_tuple._fields)
def dictify(a_named_tuple): """Transform a named tuple into a dictionary""" return dict((s, getattr(a_named_tuple, s)) for s in a_named_tuple._fields)
[ "Transform", "a", "named", "tuple", "into", "a", "dictionary" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/more_collections.py#L6-L8
[ "def", "dictify", "(", "a_named_tuple", ")", ":", "return", "dict", "(", "(", "s", ",", "getattr", "(", "a_named_tuple", ",", "s", ")", ")", "for", "s", "in", "a_named_tuple", ".", "_fields", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
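A tiny demonstration of dictify; note that namedtuple's own _asdict() is the stdlib equivalent:

    from collections import namedtuple
    from boyle.more_collections import dictify  # import path assumed

    Point = namedtuple('Point', ['x', 'y'])
    dictify(Point(1, 2))  # -> {'x': 1, 'y': 2}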
valid
merge_dict_of_lists
Extend lists within a dict of lists. The indices indicate which list has to be extended by which other list. Parameters ---------- adict: OrderedDict An ordered dictionary of lists indices: list or tuple of 2 iterables of int, both having the same length The indices of the lists that have to be merged; the iterables are read pair by pair, the first being the index of the list that will be extended with the list at the second index. The indices can be constructed with Numpy e.g., indices = np.where(square_matrix) pop_later: bool If True will pop out the lists that are indicated in the second list of indices. copy: bool If True will copy the input adict before modifying it (note: dict.copy is shallow, so the inner lists are still shared with the input). Returns ------- Dictionary of lists Raises ------ IndexError If the indices are out of range
boyle/more_collections.py
def merge_dict_of_lists(adict, indices, pop_later=True, copy=True): """Extend the within a dict of lists. The indices will indicate which list have to be extended by which other list. Parameters ---------- adict: OrderedDict An ordered dictionary of lists indices: list or tuple of 2 iterables of int, bot having the same length The indices of the lists that have to be merged, both iterables items will be read pair by pair, the first is the index to the list that will be extended with the list of the second index. The indices can be constructed with Numpy e.g., indices = np.where(square_matrix) pop_later: bool If True will oop out the lists that are indicated in the second list of indices. copy: bool If True will perform a deep copy of the input adict before modifying it, hence not changing the original input. Returns ------- Dictionary of lists Raises ------ IndexError If the indices are out of range """ def check_indices(idxs, x): for i in chain(*idxs): if i < 0 or i >= x: raise IndexError("Given indices are out of dict range.") check_indices(indices, len(adict)) rdict = adict.copy() if copy else adict dict_keys = list(rdict.keys()) for i, j in zip(*indices): rdict[dict_keys[i]].extend(rdict[dict_keys[j]]) if pop_later: for i, j in zip(*indices): rdict.pop(dict_keys[j], '') return rdict
def merge_dict_of_lists(adict, indices, pop_later=True, copy=True): """Extend the within a dict of lists. The indices will indicate which list have to be extended by which other list. Parameters ---------- adict: OrderedDict An ordered dictionary of lists indices: list or tuple of 2 iterables of int, bot having the same length The indices of the lists that have to be merged, both iterables items will be read pair by pair, the first is the index to the list that will be extended with the list of the second index. The indices can be constructed with Numpy e.g., indices = np.where(square_matrix) pop_later: bool If True will oop out the lists that are indicated in the second list of indices. copy: bool If True will perform a deep copy of the input adict before modifying it, hence not changing the original input. Returns ------- Dictionary of lists Raises ------ IndexError If the indices are out of range """ def check_indices(idxs, x): for i in chain(*idxs): if i < 0 or i >= x: raise IndexError("Given indices are out of dict range.") check_indices(indices, len(adict)) rdict = adict.copy() if copy else adict dict_keys = list(rdict.keys()) for i, j in zip(*indices): rdict[dict_keys[i]].extend(rdict[dict_keys[j]]) if pop_later: for i, j in zip(*indices): rdict.pop(dict_keys[j], '') return rdict
[ "Extend", "the", "within", "a", "dict", "of", "lists", ".", "The", "indices", "will", "indicate", "which", "list", "have", "to", "be", "extended", "by", "which", "other", "list", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/more_collections.py#L11-L61
[ "def", "merge_dict_of_lists", "(", "adict", ",", "indices", ",", "pop_later", "=", "True", ",", "copy", "=", "True", ")", ":", "def", "check_indices", "(", "idxs", ",", "x", ")", ":", "for", "i", "in", "chain", "(", "*", "idxs", ")", ":", "if", "i", "<", "0", "or", "i", ">=", "x", ":", "raise", "IndexError", "(", "\"Given indices are out of dict range.\"", ")", "check_indices", "(", "indices", ",", "len", "(", "adict", ")", ")", "rdict", "=", "adict", ".", "copy", "(", ")", "if", "copy", "else", "adict", "dict_keys", "=", "list", "(", "rdict", ".", "keys", "(", ")", ")", "for", "i", ",", "j", "in", "zip", "(", "*", "indices", ")", ":", "rdict", "[", "dict_keys", "[", "i", "]", "]", ".", "extend", "(", "rdict", "[", "dict_keys", "[", "j", "]", "]", ")", "if", "pop_later", ":", "for", "i", ",", "j", "in", "zip", "(", "*", "indices", ")", ":", "rdict", ".", "pop", "(", "dict_keys", "[", "j", "]", ",", "''", ")", "return", "rdict" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
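A worked example for merge_dict_of_lists; the indices are positional into the ordered keys, and since dict.copy() is shallow the inner lists of the input are mutated as well:

    from collections import OrderedDict
    from boyle.more_collections import merge_dict_of_lists  # path assumed

    d = OrderedDict([('a', [1]), ('b', [2]), ('c', [3])])
    merge_dict_of_lists(d, ([0], [2]))
    # -> OrderedDict([('a', [1, 3]), ('b', [2])]): 'a' extended with 'c', 'c' popped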
valid
append_dict_values
Return a dict of lists from a list of dicts with the same keys. For each dict in list_of_dicts it will look up the values of the given keys and append them to the output dict. Parameters ---------- list_of_dicts: list of dicts keys: list of str List of keys to create in the output dict If None will use all keys in the first element of list_of_dicts Returns ------- DefaultOrderedDict of lists
boyle/more_collections.py
def append_dict_values(list_of_dicts, keys=None): """ Return a dict of lists from a list of dicts with the same keys. For each dict in list_of_dicts with look for the values of the given keys and append it to the output dict. Parameters ---------- list_of_dicts: list of dicts keys: list of str List of keys to create in the output dict If None will use all keys in the first element of list_of_dicts Returns ------- DefaultOrderedDict of lists """ if keys is None: keys = list(list_of_dicts[0].keys()) dict_of_lists = DefaultOrderedDict(list) for d in list_of_dicts: for k in keys: dict_of_lists[k].append(d[k]) return dict_of_lists
def append_dict_values(list_of_dicts, keys=None): """ Return a dict of lists from a list of dicts with the same keys. For each dict in list_of_dicts with look for the values of the given keys and append it to the output dict. Parameters ---------- list_of_dicts: list of dicts keys: list of str List of keys to create in the output dict If None will use all keys in the first element of list_of_dicts Returns ------- DefaultOrderedDict of lists """ if keys is None: keys = list(list_of_dicts[0].keys()) dict_of_lists = DefaultOrderedDict(list) for d in list_of_dicts: for k in keys: dict_of_lists[k].append(d[k]) return dict_of_lists
[ "Return", "a", "dict", "of", "lists", "from", "a", "list", "of", "dicts", "with", "the", "same", "keys", ".", "For", "each", "dict", "in", "list_of_dicts", "with", "look", "for", "the", "values", "of", "the", "given", "keys", "and", "append", "it", "to", "the", "output", "dict", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/more_collections.py#L64-L88
[ "def", "append_dict_values", "(", "list_of_dicts", ",", "keys", "=", "None", ")", ":", "if", "keys", "is", "None", ":", "keys", "=", "list", "(", "list_of_dicts", "[", "0", "]", ".", "keys", "(", ")", ")", "dict_of_lists", "=", "DefaultOrderedDict", "(", "list", ")", "for", "d", "in", "list_of_dicts", ":", "for", "k", "in", "keys", ":", "dict_of_lists", "[", "k", "]", ".", "append", "(", "d", "[", "k", "]", ")", "return", "dict_of_lists" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
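A quick sketch turning a list of row dicts into columns with append_dict_values; keys default to those of the first row:

    from boyle.more_collections import append_dict_values  # path assumed

    rows = [{'id': 1, 'grp': 'A'}, {'id': 2, 'grp': 'B'}]
    cols = append_dict_values(rows)
    # cols['id'] -> [1, 2]; cols['grp'] -> ['A', 'B']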
valid
import_pyfile
Imports the contents of filepath as a Python module. :param filepath: string :param mod_name: string Name of the module when imported :return: module Imported module
boyle/utils/imports.py
def import_pyfile(filepath, mod_name=None): """ Imports the contents of filepath as a Python module. :param filepath: string :param mod_name: string Name of the module when imported :return: module Imported module """ import sys if sys.version_info.major == 3: import importlib.machinery loader = importlib.machinery.SourceFileLoader(mod_name or '', filepath) mod = loader.load_module() else: import imp mod = imp.load_source(mod_name, filepath) return mod
def import_pyfile(filepath, mod_name=None): """ Imports the contents of filepath as a Python module. :param filepath: string :param mod_name: string Name of the module when imported :return: module Imported module """ import sys if sys.version_info.major == 3: import importlib.machinery loader = importlib.machinery.SourceFileLoader(mod_name or '', filepath) mod = loader.load_module() else: import imp mod = imp.load_source(mod_name, filepath) return mod
[ "Imports", "the", "contents", "of", "filepath", "as", "a", "Python", "module", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/imports.py#L6-L27
[ "def", "import_pyfile", "(", "filepath", ",", "mod_name", "=", "None", ")", ":", "import", "sys", "if", "sys", ".", "version_info", ".", "major", "==", "3", ":", "import", "importlib", ".", "machinery", "loader", "=", "importlib", ".", "machinery", ".", "SourceFileLoader", "(", "''", ",", "filepath", ")", "mod", "=", "loader", ".", "load_module", "(", "mod_name", ")", "else", ":", "import", "imp", "mod", "=", "imp", ".", "load_source", "(", "mod_name", ",", "filepath", ")", "return", "mod" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
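A hedged sketch for import_pyfile, assuming the loader-name fix above and a Python version before 3.12 (where loader.load_module was removed); the settings file and attribute are hypothetical:

    from boyle.utils.imports import import_pyfile  # import path assumed

    cfg = import_pyfile('/etc/myapp/settings.py', mod_name='settings')
    print(cfg.DEBUG)  # any top-level name defined in the file is an attribute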
valid
copy
Copies the files in the built file tree map to destpath. :param configfile: string Path to the FileTreeMap config file :param destpath: string Path to the files destination :param overwrite: bool Overwrite files if they already exist. :param sub_node: string Tree map configuration sub path. Will copy only the contents within this sub-node
scripts/filetree.py
def copy(configfile='', destpath='', overwrite=False, sub_node=''): """Copies the files in the built file tree map to despath. :param configfile: string Path to the FileTreeMap config file :param destpath: string Path to the files destination :param overwrite: bool Overwrite files if they already exist. :param sub_node: string Tree map configuration sub path. Will copy only the contents within this sub-node """ log.info('Running {0} {1} {2}'.format(os.path.basename(__file__), whoami(), locals())) assert(os.path.isfile(configfile)) if os.path.exists(destpath): if os.listdir(destpath): raise FolderAlreadyExists('Folder {0} already exists. Please clean ' 'it or change destpath.'.format(destpath)) else: log.info('Creating folder {0}'.format(destpath)) path(destpath).makedirs_p() from boyle.files.file_tree_map import FileTreeMap file_map = FileTreeMap() try: file_map.from_config_file(configfile) except Exception as e: raise FileTreeMapError(str(e)) if sub_node: sub_map = file_map.get_node(sub_node) if not sub_map: raise FileTreeMapError('Could not find sub node ' '{0}'.format(sub_node)) file_map._filetree = {} file_map._filetree[sub_node] = sub_map try: file_map.copy_to(destpath, overwrite=overwrite) except Exception as e: raise FileTreeMapError(str(e))
def copy(configfile='', destpath='', overwrite=False, sub_node=''): """Copies the files in the built file tree map to despath. :param configfile: string Path to the FileTreeMap config file :param destpath: string Path to the files destination :param overwrite: bool Overwrite files if they already exist. :param sub_node: string Tree map configuration sub path. Will copy only the contents within this sub-node """ log.info('Running {0} {1} {2}'.format(os.path.basename(__file__), whoami(), locals())) assert(os.path.isfile(configfile)) if os.path.exists(destpath): if os.listdir(destpath): raise FolderAlreadyExists('Folder {0} already exists. Please clean ' 'it or change destpath.'.format(destpath)) else: log.info('Creating folder {0}'.format(destpath)) path(destpath).makedirs_p() from boyle.files.file_tree_map import FileTreeMap file_map = FileTreeMap() try: file_map.from_config_file(configfile) except Exception as e: raise FileTreeMapError(str(e)) if sub_node: sub_map = file_map.get_node(sub_node) if not sub_map: raise FileTreeMapError('Could not find sub node ' '{0}'.format(sub_node)) file_map._filetree = {} file_map._filetree[sub_node] = sub_map try: file_map.copy_to(destpath, overwrite=overwrite) except Exception as e: raise FileTreeMapError(str(e))
[ "Copies", "the", "files", "in", "the", "built", "file", "tree", "map", "to", "despath", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/scripts/filetree.py#L20-L72
[ "def", "copy", "(", "configfile", "=", "''", ",", "destpath", "=", "''", ",", "overwrite", "=", "False", ",", "sub_node", "=", "''", ")", ":", "log", ".", "info", "(", "'Running {0} {1} {2}'", ".", "format", "(", "os", ".", "path", ".", "basename", "(", "__file__", ")", ",", "whoami", "(", ")", ",", "locals", "(", ")", ")", ")", "assert", "(", "os", ".", "path", ".", "isfile", "(", "configfile", ")", ")", "if", "os", ".", "path", ".", "exists", "(", "destpath", ")", ":", "if", "os", ".", "listdir", "(", "destpath", ")", ":", "raise", "FolderAlreadyExists", "(", "'Folder {0} already exists. Please clean '", "'it or change destpath.'", ".", "format", "(", "destpath", ")", ")", "else", ":", "log", ".", "info", "(", "'Creating folder {0}'", ".", "format", "(", "destpath", ")", ")", "path", "(", "destpath", ")", ".", "makedirs_p", "(", ")", "from", "boyle", ".", "files", ".", "file_tree_map", "import", "FileTreeMap", "file_map", "=", "FileTreeMap", "(", ")", "try", ":", "file_map", ".", "from_config_file", "(", "configfile", ")", "except", "Exception", "as", "e", ":", "raise", "FileTreeMapError", "(", "str", "(", "e", ")", ")", "if", "sub_node", ":", "sub_map", "=", "file_map", ".", "get_node", "(", "sub_node", ")", "if", "not", "sub_map", ":", "raise", "FileTreeMapError", "(", "'Could not find sub node '", "'{0}'", ".", "format", "(", "sub_node", ")", ")", "file_map", ".", "_filetree", "=", "{", "}", "file_map", ".", "_filetree", "[", "sub_node", "]", "=", "sub_map", "try", ":", "file_map", ".", "copy_to", "(", "destpath", ",", "overwrite", "=", "overwrite", ")", "except", "Exception", "as", "e", ":", "raise", "FileTreeMapError", "(", "str", "(", "e", ")", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
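A hedged sketch of invoking the script's copy entry point programmatically; the module path, config file and sub-node name are all assumptions:

    from scripts.filetree import copy  # module path assumed from scripts/filetree.py

    copy(configfile='tree_map.ini', destpath='/data/out',
         overwrite=False, sub_node='anat')
    # raises FolderAlreadyExists if /data/out exists and is non-empty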
valid
idset_with_reference.get_noneid_references
Returns ------- ndarray Array of references in self.reflst whose corresponding id in self is None.
scripts/compare_id_sets.py
def get_noneid_references(self): """ Returns ------- ndarray Array of references in self.reflst whose corresponding id in self is None. """ #return [self.reflst[idx] for idx, idval in enumerate(self) if idval is None] try: nun = np.array(None).astype(self.dtype) return np.array(self.reflst)[self == nun] except Exception: return np.array(self.reflst)[np.array([idval is None for idval in self])]
def get_noneid_references(self): """ Returns ------- ndarray Array of references in self.reflst whose corresponding id in self is None. """ #return [self.reflst[idx] for idx, idval in enumerate(self) if idval is None] try: nun = np.array(None).astype(self.dtype) return np.array(self.reflst)[self == nun] except Exception: return np.array(self.reflst)[np.array([idval is None for idval in self])]
[ "Returns", "-------", "ndarray", "Array", "of", "references", "in", "self", ".", "reflst", "whose", "self", "id", "is", "None", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/scripts/compare_id_sets.py#L157-L170
[ "def", "get_noneid_references", "(", "self", ")", ":", "#return [self.reflst[idx] for idx, idval in enumerate(self) if idval is None]", "try", ":", "nun", "=", "np", ".", "array", "(", "None", ")", ".", "astype", "(", "self", ".", "dtype", ")", "return", "np", ".", "array", "(", "self", ".", "reflst", ")", "[", "self", "==", "nun", "]", "except", ":", "nun", "=", "None", "return", "np", ".", "array", "(", "self", ".", "reflst", ")", "[", "self", "is", "None", "]" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
valid
idset_comparator._print_general_vs_table
Print a table of the differences between idset1 and idset2 (elements in one set but not the other), with their reference values when available. :param idset1: idset or idset_with_reference :param idset2: idset or idset_with_reference
scripts/compare_id_sets.py
def _print_general_vs_table(self, idset1, idset2): """ :param idset1: :param idset2: """ ref1name = '' set1_hasref = isinstance(idset1, idset_with_reference) if set1_hasref: ref1arr = np.array(idset1.reflst) ref1name = idset1.refname ref2name = ref1name set2_hasref = isinstance(idset2, idset_with_reference) if set2_hasref: ref2arr = np.array(idset2.reflst) ref2name = idset2.refname else: ref2name = ref1name #First show a general table hdr11 = '{0} > {1}'.format(idset1.name, idset2.name) hdr12 = '{0} > {1} {2}'.format(idset1.name, idset2.name, ref2name) hdr13 = '{0} < {1}'.format(idset1.name, idset2.name) hdr14 = '{0} < {1} {2}'.format(idset1.name, idset2.name, ref1name) table = [[hdr11, hdr12, hdr13, hdr14]] set1 = set(idset1) set2 = set(idset2) row11 = list(set1 - set2) if set1_hasref: row12 = [ref1arr[np.where(idset1 == nom)][0] for nom in row11] else: row12 = ['Not found' for _ in row11] row13 = list(set2 - set1) if set2_hasref: row14 = [ref2arr[np.where(idset2 == nom)][0] for nom in row13] else: row14 = ['Not found' for _ in row13] tablst = self._tabulate_4_lists(row11, row12, row13, row14) table.extend(tablst) if len(table) > 1: print(tabulate(table, headers='firstrow')) print('\n')
def _print_general_vs_table(self, idset1, idset2): """Print a table of the ids (with their references, when available) that appear in only one of the two given id sets. :param idset1: idset or idset_with_reference :param idset2: idset or idset_with_reference """ ref1name = '' set1_hasref = isinstance(idset1, idset_with_reference) if set1_hasref: ref1arr = np.array(idset1.reflst) ref1name = idset1.refname ref2name = ref1name set2_hasref = isinstance(idset2, idset_with_reference) if set2_hasref: ref2arr = np.array(idset2.reflst) ref2name = idset2.refname # First show a general table. Each reference column is labelled with the # refname of the set the references are taken from. hdr11 = '{0} > {1}'.format(idset1.name, idset2.name) hdr12 = '{0} > {1} {2}'.format(idset1.name, idset2.name, ref1name) hdr13 = '{0} < {1}'.format(idset1.name, idset2.name) hdr14 = '{0} < {1} {2}'.format(idset1.name, idset2.name, ref2name) table = [[hdr11, hdr12, hdr13, hdr14]] set1 = set(idset1) set2 = set(idset2) # ids in idset1 that are missing from idset2, with their references row11 = list(set1 - set2) if set1_hasref: row12 = [ref1arr[np.where(idset1 == nom)][0] for nom in row11] else: row12 = ['Not found' for _ in row11] # ids in idset2 that are missing from idset1, with their references row13 = list(set2 - set1) if set2_hasref: row14 = [ref2arr[np.where(idset2 == nom)][0] for nom in row13] else: row14 = ['Not found' for _ in row13] tablst = self._tabulate_4_lists(row11, row12, row13, row14) table.extend(tablst) if len(table) > 1: print(tabulate(table, headers='firstrow')) print('\n')
[ ":", "param", "idset1", ":", ":", "param", "idset2", ":" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/scripts/compare_id_sets.py#L233-L278
[ "def", "_print_general_vs_table", "(", "self", ",", "idset1", ",", "idset2", ")", ":", "ref1name", "=", "''", "set1_hasref", "=", "isinstance", "(", "idset1", ",", "idset_with_reference", ")", "if", "set1_hasref", ":", "ref1arr", "=", "np", ".", "array", "(", "idset1", ".", "reflst", ")", "ref1name", "=", "idset1", ".", "refname", "ref2name", "=", "ref1name", "set2_hasref", "=", "isinstance", "(", "idset2", ",", "idset_with_reference", ")", "if", "set2_hasref", ":", "ref2arr", "=", "np", ".", "array", "(", "idset2", ".", "reflst", ")", "ref2name", "=", "idset2", ".", "refname", "else", ":", "ref2name", "=", "ref1name", "#First show a general table", "hdr11", "=", "'{0} > {1}'", ".", "format", "(", "idset1", ".", "name", ",", "idset2", ".", "name", ")", "hdr12", "=", "'{0} > {1} {2}'", ".", "format", "(", "idset1", ".", "name", ",", "idset2", ".", "name", ",", "ref2name", ")", "hdr13", "=", "'{0} < {1}'", ".", "format", "(", "idset1", ".", "name", ",", "idset2", ".", "name", ")", "hdr14", "=", "'{0} < {1} {2}'", ".", "format", "(", "idset1", ".", "name", ",", "idset2", ".", "name", ",", "ref1name", ")", "table", "=", "[", "[", "hdr11", ",", "hdr12", ",", "hdr13", ",", "hdr14", "]", "]", "set1", "=", "set", "(", "idset1", ")", "set2", "=", "set", "(", "idset2", ")", "row11", "=", "list", "(", "set1", "-", "set2", ")", "if", "set1_hasref", ":", "row12", "=", "[", "ref1arr", "[", "np", ".", "where", "(", "idset1", "==", "nom", ")", "]", "[", "0", "]", "for", "nom", "in", "row11", "]", "else", ":", "row12", "=", "[", "'Not found'", "for", "_", "in", "row11", "]", "row13", "=", "list", "(", "set2", "-", "set1", ")", "if", "set2_hasref", ":", "row14", "=", "[", "ref2arr", "[", "np", ".", "where", "(", "idset2", "==", "nom", ")", "]", "[", "0", "]", "for", "nom", "in", "row13", "]", "else", ":", "row14", "=", "[", "'Not found'", "for", "_", "in", "row13", "]", "tablst", "=", "self", ".", "_tabulate_4_lists", "(", "row11", ",", "row12", ",", "row13", ",", "row14", ")", "table", ".", "extend", "(", "tablst", ")", "if", "len", "(", "table", ")", ">", "1", ":", "print", "(", "tabulate", "(", "table", ",", "headers", "=", "'firstrow'", ")", ")", "print", "(", "'\\n'", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
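To see the shape of the table `_print_general_vs_table` prints, here is the same layout built directly with tabulate; the header labels and values are illustrative only:

from tabulate import tabulate

# ids only in set A with their references, ids only in B with theirs
table = [['A > B', 'A > B names', 'A < B', 'A < B names'],
         ['s07',   'dora',        's12',   'emma']]
print(tabulate(table, headers='firstrow'))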
valid
idset_comparator._print_foreign_repetition_table
Print a table of the ids repeated in idset2 together with the reference values they match in idset1. :param idset1: idset_with_reference :param idset2: idset
scripts/compare_id_sets.py
def _print_foreign_repetition_table(self, idset1, idset2): """Print a table of the ids repeated in idset2 together with the reference values they match in idset1. :param idset1: idset_with_reference :param idset2: idset """ assert(isinstance(idset1, idset_with_reference)) assert(isinstance(idset2, idset)) reps = idset2.get_repetitions() if len(reps) < 1: return refs = np.array(idset1.reflst) table = [['{0} {1} values of repetitions in {2}'.format(idset1.name, idset1.refname, idset2.name), '']] for rep in reps: if np.any(idset1 == rep): matches = refs[np.where(idset1 == rep)] # print the repeated id only on its first row, blank afterwards myrep = rep for m in matches: table.append([myrep, m]) myrep = '' print(tabulate(table, headers='firstrow')) print('\n')
def _print_foreign_repetition_table(self, idset1, idset2): """Print a table of the ids repeated in idset2 together with the reference values they match in idset1. :param idset1: idset_with_reference :param idset2: idset """ assert(isinstance(idset1, idset_with_reference)) assert(isinstance(idset2, idset)) reps = idset2.get_repetitions() if len(reps) < 1: return refs = np.array(idset1.reflst) table = [['{0} {1} values of repetitions in {2}'.format(idset1.name, idset1.refname, idset2.name), '']] for rep in reps: if np.any(idset1 == rep): matches = refs[np.where(idset1 == rep)] # print the repeated id only on its first row, blank afterwards myrep = rep for m in matches: table.append([myrep, m]) myrep = '' print(tabulate(table, headers='firstrow')) print('\n')
[ ":", "param", "idset1", ":", ":", "param", "idset2", ":" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/scripts/compare_id_sets.py#L280-L308
[ "def", "_print_foreign_repetition_table", "(", "self", ",", "idset1", ",", "idset2", ")", ":", "assert", "(", "isinstance", "(", "idset1", ",", "idset_with_reference", ")", ")", "assert", "(", "isinstance", "(", "idset2", ",", "idset", ")", ")", "reps", "=", "idset2", ".", "get_repetitions", "(", ")", "if", "len", "(", "reps", ")", "<", "1", ":", "return", "refs", "=", "np", ".", "array", "(", "idset1", ".", "reflst", ")", "table", "=", "[", "[", "'{0} {1} values of repetitions in {2}'", ".", "format", "(", "idset1", ".", "name", ",", "idset1", ".", "refname", ",", "idset2", ".", "name", ")", ",", "''", "]", "]", "for", "rep", "in", "reps", ":", "if", "np", ".", "any", "(", "idset1", "==", "rep", ")", ":", "matches", "=", "refs", "[", "np", ".", "where", "(", "idset1", "==", "rep", ")", "]", "myrep", "=", "rep", "for", "m", "in", "matches", ":", "table", ".", "append", "(", "[", "myrep", ",", "m", "]", ")", "myrep", "=", "''", "print", "(", "tabulate", "(", "table", ",", "headers", "=", "'firstrow'", ")", ")", "print", "(", "'\\n'", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
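The repetition table pairs each id repeated in idset2 with every reference it matches in idset1, printing the id only on its first row; an illustrative layout built with tabulate:

from tabulate import tabulate

table = [['set1 names values of repetitions in set2', ''],
         ['s03', 'carl'],
         ['',    'carla']]
print(tabulate(table, headers='firstrow'))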
valid
idset_comparator.print_compare_idsets_one_ref
idset1_name: string key of an idset_with_reference idset2_name: string key of an idset
scripts/compare_id_sets.py
def print_compare_idsets_one_ref(self, idset1_name, idset2_name): """ idset1_name: string key of an idset_with_reference idset2_name: string key of an idset """ try: idset1 = self[idset1_name] idset2 = self[idset2_name] except KeyError: log.error('Error compare_idsets: getting keys {0} and {1}'.format(idset1_name, idset2_name)) raise assert(isinstance(idset1, idset_with_reference)) assert(isinstance(idset2, idset)) self._print_general_vs_table(idset1, idset2) self._print_foreign_repetition_table(idset1, idset2)
def print_compare_idsets_one_ref(self, idset1_name, idset2_name): """ idset1_name: string key of an idset_with_reference idset2_name: string key of an idset """ try: idset1 = self[idset1_name] idset2 = self[idset2_name] except KeyError: log.error('Error compare_idsets: getting keys {0} and {1}'.format(idset1_name, idset2_name)) raise assert(isinstance(idset1, idset_with_reference)) assert(isinstance(idset2, idset)) self._print_general_vs_table(idset1, idset2) self._print_foreign_repetition_table(idset1, idset2)
[ "idset1_name", ":", "string", "key", "of", "an", "idset_with_reference" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/scripts/compare_id_sets.py#L342-L364
[ "def", "print_compare_idsets_one_ref", "(", "self", ",", "idset1_name", ",", "idset2_name", ")", ":", "try", ":", "idset1", "=", "self", "[", "idset1_name", "]", "idset2", "=", "self", "[", "idset2_name", "]", "except", "KeyError", "as", "ke", ":", "log", ".", "error", "(", "'Error compare_idsets: getting keys {0} and {1}'", ".", "format", "(", "idset1_name", ",", "idset2_name", ")", ")", "import", "sys", ",", "pdb", "pdb", ".", "post_mortem", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "raise", "assert", "(", "isinstance", "(", "idset1", ",", "idset_with_reference", ")", ")", "assert", "(", "isinstance", "(", "idset2", ",", "idset", ")", ")", "self", ".", "_print_general_vs_table", "(", "idset1", ",", "idset2", ")", "self", ".", "_print_foreign_repetition_table", "(", "idset1", ",", "idset2", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
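A usage sketch for the public comparison entry point; it assumes `idset_comparator` is a dict-like container keyed by set name and that the `idset`/`idset_with_reference` constructor signatures are as guessed here (neither is shown in this record):

import numpy as np

ids1 = np.array(['s01', 's02'], dtype=object)
names1 = ['anna', 'bob']
ids2 = np.array(['s02', 's03', 's03'], dtype=object)   # 's03' is repeated

ic = idset_comparator()
ic['clinical'] = idset_with_reference(ids1, name='clinical', reflst=names1, refname='name')
ic['imaging'] = idset(ids2, name='imaging')

# prints the general difference table and the foreign-repetition table
ic.print_compare_idsets_one_ref('clinical', 'imaging')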
valid
convert_sav
Transforms the input .sav SPSS file into another format. If you don't specify an outputfile, it will reuse the inputfile path and change its extension to the one matching otype ('.csv' by default).
scripts/convert_sav.py
def convert_sav(inputfile, outputfile=None, method='rpy2', otype='csv'): """ Transforms the input .sav SPSS file into another format. If you don't specify an outputfile, it will reuse the inputfile path and change its extension to the one matching otype ('.csv' by default). """ assert(os.path.isfile(inputfile)) assert(method == 'rpy2' or method == 'savread') if method == 'rpy2': df = sav_to_pandas_rpy2(inputfile) elif method == 'savread': df = sav_to_pandas_savreader(inputfile) otype_exts = {'csv': '.csv', 'hdf': '.h5', 'stata': '.dta', 'json': '.json', 'pickle': '.pickle', 'excel': '.xls', 'html': '.html'} if outputfile is None: outputfile = inputfile.replace(path(inputfile).ext, '') outputfile = add_extension_if_needed(outputfile, otype_exts[otype]) if otype == 'csv': df.to_csv(outputfile) elif otype == 'hdf': df.to_hdf(outputfile, os.path.basename(outputfile)) elif otype == 'stata': df.to_stata(outputfile) elif otype == 'json': df.to_json(outputfile) elif otype == 'pickle': df.to_pickle(outputfile) elif otype == 'excel': df.to_excel(outputfile) elif otype == 'html': df.to_html(outputfile) else: df.to_csv(outputfile)
def convert_sav(inputfile, outputfile=None, method='rpy2', otype='csv'): """ Transforms the input .sav SPSS file into another format. If you don't specify an outputfile, it will reuse the inputfile path and change its extension to the one matching otype ('.csv' by default). """ assert(os.path.isfile(inputfile)) assert(method == 'rpy2' or method == 'savread') if method == 'rpy2': df = sav_to_pandas_rpy2(inputfile) elif method == 'savread': df = sav_to_pandas_savreader(inputfile) otype_exts = {'csv': '.csv', 'hdf': '.h5', 'stata': '.dta', 'json': '.json', 'pickle': '.pickle', 'excel': '.xls', 'html': '.html'} if outputfile is None: outputfile = inputfile.replace(path(inputfile).ext, '') outputfile = add_extension_if_needed(outputfile, otype_exts[otype]) if otype == 'csv': df.to_csv(outputfile) elif otype == 'hdf': df.to_hdf(outputfile, os.path.basename(outputfile)) elif otype == 'stata': df.to_stata(outputfile) elif otype == 'json': df.to_json(outputfile) elif otype == 'pickle': df.to_pickle(outputfile) elif otype == 'excel': df.to_excel(outputfile) elif otype == 'html': df.to_html(outputfile) else: df.to_csv(outputfile)
[ "Transforms", "the", "input", ".", "sav", "SPSS", "file", "into", "other", "format", ".", "If", "you", "don", "t", "specify", "an", "outputfile", "it", "will", "use", "the", "inputfile", "and", "change", "its", "extension", "to", ".", "csv" ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/scripts/convert_sav.py#L24-L65
[ "def", "convert_sav", "(", "inputfile", ",", "outputfile", "=", "None", ",", "method", "=", "'rpy2'", ",", "otype", "=", "'csv'", ")", ":", "assert", "(", "os", ".", "path", ".", "isfile", "(", "inputfile", ")", ")", "assert", "(", "method", "==", "'rpy2'", "or", "method", "==", "'savread'", ")", "if", "method", "==", "'rpy2'", ":", "df", "=", "sav_to_pandas_rpy2", "(", "inputfile", ")", "elif", "method", "==", "'savread'", ":", "df", "=", "sav_to_pandas_savreader", "(", "inputfile", ")", "otype_exts", "=", "{", "'csv'", ":", "'.csv'", ",", "'hdf'", ":", "'.h5'", ",", "'stata'", ":", "'.dta'", ",", "'json'", ":", "'.json'", ",", "'pickle'", ":", "'.pickle'", ",", "'excel'", ":", "'.xls'", ",", "'html'", ":", "'.html'", "}", "if", "outputfile", "is", "None", ":", "outputfile", "=", "inputfile", ".", "replace", "(", "path", "(", "inputfile", ")", ".", "ext", ",", "''", ")", "outputfile", "=", "add_extension_if_needed", "(", "outputfile", ",", "otype_exts", "[", "otype", "]", ")", "if", "otype", "==", "'csv'", ":", "df", ".", "to_csv", "(", "outputfile", ")", "elif", "otype", "==", "'hdf'", ":", "df", ".", "to_hdf", "(", "outputfile", ",", "os", ".", "path", ".", "basename", "(", "outputfile", ")", ")", "elif", "otype", "==", "'stata'", ":", "df", ".", "to_stata", "(", "outputfile", ")", "elif", "otype", "==", "'json'", ":", "df", ".", "to_json", "(", "outputfile", ")", "elif", "otype", "==", "'pickle'", ":", "df", ".", "to_pickle", "(", "outputfile", ")", "elif", "otype", "==", "'excel'", ":", "df", ".", "to_excel", "(", "outputfile", ")", "elif", "otype", "==", "'html'", ":", "df", ".", "to_html", "(", "outputfile", ")", "else", ":", "df", ".", "to_csv", "(", "outputfile", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
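Two usage sketches for `convert_sav`; the .sav paths are placeholders:

# write survey.h5 next to survey.sav, reading through rpy2
convert_sav('data/survey.sav', method='rpy2', otype='hdf')

# explicit output path; otype defaults to 'csv'
convert_sav('data/survey.sav', outputfile='data/survey.csv')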
valid
load_mask
Load a Nifti mask volume. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. allow_empty: boolean, optional Allow loading an empty mask (full of 0 values) Returns ------- nibabel.Nifti1Image with boolean data.
boyle/nifti/mask.py
def load_mask(image, allow_empty=True): """Load a Nifti mask volume. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. allow_empty: boolean, optional Allow loading an empty mask (full of 0 values) Returns ------- nibabel.Nifti1Image with boolean data. """ img = check_img(image, make_it_3d=True) values = np.unique(img.get_data()) if len(values) == 1: # We accept a single value if it is not 0 (full true mask). if values[0] == 0 and not allow_empty: raise ValueError('Given mask is invalid because it masks all data') elif len(values) == 2: # If there are 2 different values, one of them must be 0 (background) if 0 not in values: raise ValueError('Background of the mask must be represented with 0.' ' Given mask contains: {}.'.format(values)) else: # If there are more than 2 values, the mask is invalid raise ValueError('Given mask is not made of 2 values: {}. ' 'Cannot interpret as true or false'.format(values)) return nib.Nifti1Image(as_ndarray(get_img_data(img), dtype=bool), img.get_affine(), img.get_header())
def load_mask(image, allow_empty=True): """Load a Nifti mask volume. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. allow_empty: boolean, optional Allow loading an empty mask (full of 0 values) Returns ------- nibabel.Nifti1Image with boolean data. """ img = check_img(image, make_it_3d=True) values = np.unique(img.get_data()) if len(values) == 1: # We accept a single value if it is not 0 (full true mask). if values[0] == 0 and not allow_empty: raise ValueError('Given mask is invalid because it masks all data') elif len(values) == 2: # If there are 2 different values, one of them must be 0 (background) if 0 not in values: raise ValueError('Background of the mask must be represented with 0.' ' Given mask contains: {}.'.format(values)) else: # If there are more than 2 values, the mask is invalid raise ValueError('Given mask is not made of 2 values: {}. ' 'Cannot interpret as true or false'.format(values)) return nib.Nifti1Image(as_ndarray(get_img_data(img), dtype=bool), img.get_affine(), img.get_header())
[ "Load", "a", "Nifti", "mask", "volume", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/mask.py#L23-L62
[ "def", "load_mask", "(", "image", ",", "allow_empty", "=", "True", ")", ":", "img", "=", "check_img", "(", "image", ",", "make_it_3d", "=", "True", ")", "values", "=", "np", ".", "unique", "(", "img", ".", "get_data", "(", ")", ")", "if", "len", "(", "values", ")", "==", "1", ":", "# We accept a single value if it is not 0 (full true mask).", "if", "values", "[", "0", "]", "==", "0", "and", "not", "allow_empty", ":", "raise", "ValueError", "(", "'Given mask is invalid because it masks all data'", ")", "elif", "len", "(", "values", ")", "==", "2", ":", "# If there are 2 different values, one of them must be 0 (background)", "if", "0", "not", "in", "values", ":", "raise", "ValueError", "(", "'Background of the mask must be represented with 0.'", "' Given mask contains: {}.'", ".", "format", "(", "values", ")", ")", "elif", "len", "(", "values", ")", "!=", "2", ":", "# If there are more than 2 values, the mask is invalid", "raise", "ValueError", "(", "'Given mask is not made of 2 values: {}. '", "'Cannot interpret as true or false'", ".", "format", "(", "values", ")", ")", "return", "nib", ".", "Nifti1Image", "(", "as_ndarray", "(", "get_img_data", "(", "img", ")", ",", "dtype", "=", "bool", ")", ",", "img", ".", "get_affine", "(", ")", ",", "img", ".", "get_header", "(", ")", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
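A short sketch of loading and inspecting a mask with `load_mask`; the filename is a placeholder and the import path follows this record's path field:

import numpy as np
from boyle.nifti.mask import load_mask

mask = load_mask('gm_mask.nii.gz')           # nibabel.Nifti1Image with boolean data
print(mask.get_data().dtype)                 # bool
print(np.count_nonzero(mask.get_data()))     # voxels inside the mask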
valid
load_mask_data
Load a Nifti mask volume and return its data matrix as boolean and affine. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. allow_empty: boolean, optional Allow loading an empty mask (full of 0 values) Returns ------- numpy.ndarray with dtype==bool, numpy.ndarray of affine transformation
boyle/nifti/mask.py
def load_mask_data(image, allow_empty=True): """Load a Nifti mask volume and return its data matrix as boolean and affine. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. allow_empty: boolean, optional Allow loading an empty mask (full of 0 values) Returns ------- numpy.ndarray with dtype==bool, numpy.ndarray of affine transformation """ mask = load_mask(image, allow_empty=allow_empty) return get_img_data(mask), mask.get_affine()
def load_mask_data(image, allow_empty=True): """Load a Nifti mask volume and return its data matrix as boolean and affine. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. allow_empty: boolean, optional Allow loading an empty mask (full of 0 values) Returns ------- numpy.ndarray with dtype==bool, numpy.ndarray of affine transformation """ mask = load_mask(image, allow_empty=allow_empty) return get_img_data(mask), mask.get_affine()
[ "Load", "a", "Nifti", "mask", "volume", "and", "return", "its", "data", "matrix", "as", "boolean", "and", "affine", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/mask.py#L65-L86
[ "def", "load_mask_data", "(", "image", ",", "allow_empty", "=", "True", ")", ":", "mask", "=", "load_mask", "(", "image", ",", "allow_empty", "=", "allow_empty", ")", "return", "get_img_data", "(", "mask", ")", ",", "mask", ".", "get_affine", "(", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
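`load_mask_data` is the array-level counterpart, returning the boolean volume and its affine; the filename is again a placeholder:

from boyle.nifti.mask import load_mask_data

mask_data, affine = load_mask_data('gm_mask.nii.gz')
print(mask_data.shape, mask_data.dtype)   # e.g. (91, 109, 91) bool
print(affine.shape)                       # (4, 4)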
valid
union_mask
Creates a binarised mask with the union of the files in filelist. Parameters ---------- filelist: list of img-like object or boyle.nifti.NeuroImage or str List of paths to the volume files containing the ROIs. Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. Returns ------- ndarray of bools Mask volume Raises ------ ValueError
boyle/nifti/mask.py
def union_mask(filelist): """ Creates a binarised mask with the union of the files in filelist. Parameters ---------- filelist: list of img-like object or boyle.nifti.NeuroImage or str List of paths to the volume files containing the ROIs. Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. Returns ------- ndarray of bools Mask volume Raises ------ ValueError """ firstimg = check_img(filelist[0]) mask = np.zeros_like(firstimg.get_data()) # create space for all features and read from subjects try: for volf in filelist: roiimg = check_img(volf) check_img_compatibility(firstimg, roiimg) mask += get_img_data(roiimg) except Exception as exc: raise ValueError('Error joining mask {} and {}.'.format(repr_imgs(firstimg), repr_imgs(volf))) from exc else: return as_ndarray(mask > 0, dtype=bool)
def union_mask(filelist): """ Creates a binarised mask with the union of the files in filelist. Parameters ---------- filelist: list of img-like object or boyle.nifti.NeuroImage or str List of paths to the volume files containing the ROIs. Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. Returns ------- ndarray of bools Mask volume Raises ------ ValueError """ firstimg = check_img(filelist[0]) mask = np.zeros_like(firstimg.get_data()) # create space for all features and read from subjects try: for volf in filelist: roiimg = check_img(volf) check_img_compatibility(firstimg, roiimg) mask += get_img_data(roiimg) except Exception as exc: raise ValueError('Error joining mask {} and {}.'.format(repr_imgs(firstimg), repr_imgs(volf))) from exc else: return as_ndarray(mask > 0, dtype=bool)
[ "Creates", "a", "binarised", "mask", "with", "the", "union", "of", "the", "files", "in", "filelist", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/mask.py#L113-L149
[ "def", "union_mask", "(", "filelist", ")", ":", "firstimg", "=", "check_img", "(", "filelist", "[", "0", "]", ")", "mask", "=", "np", ".", "zeros_like", "(", "firstimg", ".", "get_data", "(", ")", ")", "# create space for all features and read from subjects", "try", ":", "for", "volf", "in", "filelist", ":", "roiimg", "=", "check_img", "(", "volf", ")", "check_img_compatibility", "(", "firstimg", ",", "roiimg", ")", "mask", "+=", "get_img_data", "(", "roiimg", ")", "except", "Exception", "as", "exc", ":", "raise", "ValueError", "(", "'Error joining mask {} and {}.'", ".", "format", "(", "repr_imgs", "(", "firstimg", ")", ",", "repr_imgs", "(", "volf", ")", ")", ")", "from", "exc", "else", ":", "return", "as_ndarray", "(", "mask", ">", "0", ",", "dtype", "=", "bool", ")" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
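A sketch of building a union mask from several ROI volumes; the file names are placeholders, and all volumes must be compatible in shape and affine:

from boyle.nifti.mask import union_mask

roi_files = ['roi_hippocampus.nii.gz', 'roi_amygdala.nii.gz']
mask = union_mask(roi_files)   # ndarray of bools, True wherever any ROI is nonzero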
valid
apply_mask
Read a Nifti file nii_file and a mask Nifti file. Returns the voxels in nii_file that are within the mask, the mask indices and the mask shape. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. mask_img: img-like object or boyle.nifti.NeuroImage or str 3D mask array: True where a voxel should be used. See img description. Returns ------- vol[mask_indices], mask_indices Note ---- nii_file and mask_file must have the same shape. Raises ------ NiftiFilesNotCompatible, ValueError
boyle/nifti/mask.py
def apply_mask(image, mask_img): """Read a Nifti file nii_file and a mask Nifti file. Returns the voxels in nii_file that are within the mask, the mask indices and the mask shape. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. mask_img: img-like object or boyle.nifti.NeuroImage or str 3D mask array: True where a voxel should be used. See img description. Returns ------- vol[mask_indices], mask_indices Note ---- nii_file and mask_file must have the same shape. Raises ------ NiftiFilesNotCompatible, ValueError """ img = check_img(image) mask = check_img(mask_img) check_img_compatibility(img, mask) vol = img.get_data() mask_data, _ = load_mask_data(mask) return vol[mask_data], mask_data
def apply_mask(image, mask_img): """Read a Nifti file nii_file and a mask Nifti file. Returns the voxels in nii_file that are within the mask, the mask indices and the mask shape. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. mask_img: img-like object or boyle.nifti.NeuroImage or str 3D mask array: True where a voxel should be used. See img description. Returns ------- vol[mask_indices], mask_indices Note ---- nii_file and mask_file must have the same shape. Raises ------ NiftiFilesNotCompatible, ValueError """ img = check_img(image) mask = check_img(mask_img) check_img_compatibility(img, mask) vol = img.get_data() mask_data, _ = load_mask_data(mask) return vol[mask_data], mask_data
[ "Read", "a", "Nifti", "file", "nii_file", "and", "a", "mask", "Nifti", "file", ".", "Returns", "the", "voxels", "in", "nii_file", "that", "are", "within", "the", "mask", "the", "mask", "indices", "and", "the", "mask", "shape", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/mask.py#L152-L190
[ "def", "apply_mask", "(", "image", ",", "mask_img", ")", ":", "img", "=", "check_img", "(", "image", ")", "mask", "=", "check_img", "(", "mask_img", ")", "check_img_compatibility", "(", "img", ",", "mask", ")", "vol", "=", "img", ".", "get_data", "(", ")", "mask_data", ",", "_", "=", "load_mask_data", "(", "mask", ")", "return", "vol", "[", "mask_data", "]", ",", "mask_data" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
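`apply_mask` returns the masked voxel values and the boolean mask used to index them, which makes a round-trip through `vector_to_volume` straightforward; file names are placeholders:

from boyle.nifti.mask import apply_mask, vector_to_volume

values, mask_data = apply_mask('subject_t1.nii.gz', 'gm_mask.nii.gz')
# values is 1D, one entry per True voxel; put them back into a volume:
vol = vector_to_volume(values, mask_data)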
valid
apply_mask_4d
Read a Nifti file nii_file and a mask Nifti file. Extract the signals in nii_file that are within the mask, the mask indices and the mask shape. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. mask_img: img-like object or boyle.nifti.NeuroImage or str 3D mask array: True where a voxel should be used. See img description. smooth_mm: float #TBD (optional) The size in mm of the FWHM Gaussian kernel to smooth the signal. If True, remove_nans is True. remove_nans: bool #TBD If remove_nans is True (default), the non-finite values (NaNs and infs) found in the images will be replaced by zeros. Returns ------- session_series, mask_data session_series: numpy.ndarray 2D array of series with shape (voxel number, image number) Note ---- nii_file and mask_file must have the same shape. Raises ------ FileNotFound, NiftiFilesNotCompatible
boyle/nifti/mask.py
def apply_mask_4d(image, mask_img): # , smooth_mm=None, remove_nans=True): """Read a Nifti file nii_file and a mask Nifti file. Extract the signals in nii_file that are within the mask, the mask indices and the mask shape. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. mask_img: img-like object or boyle.nifti.NeuroImage or str 3D mask array: True where a voxel should be used. See img description. smooth_mm: float #TBD (optional) The size in mm of the FWHM Gaussian kernel to smooth the signal. If True, remove_nans is True. remove_nans: bool #TBD If remove_nans is True (default), the non-finite values (NaNs and infs) found in the images will be replaced by zeros. Returns ------- session_series, mask_data session_series: numpy.ndarray 2D array of series with shape (voxel number, image number) Note ---- nii_file and mask_file must have the same shape. Raises ------ FileNotFound, NiftiFilesNotCompatible """ img = check_img(image) mask = check_img(mask_img) check_img_compatibility(img, mask, only_check_3d=True) vol = get_data(img) series, mask_data = _apply_mask_to_4d_data(vol, mask) return series, mask_data
def apply_mask_4d(image, mask_img): # , smooth_mm=None, remove_nans=True): """Read a Nifti file nii_file and a mask Nifti file. Extract the signals in nii_file that are within the mask, the mask indices and the mask shape. Parameters ---------- image: img-like object or boyle.nifti.NeuroImage or str Can either be: - a file path to a Nifti image - any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image. If niimg is a string, consider it as a path to Nifti image and call nibabel.load on it. If it is an object, check if get_data() and get_affine() methods are present, raise TypeError otherwise. mask_img: img-like object or boyle.nifti.NeuroImage or str 3D mask array: True where a voxel should be used. See img description. smooth_mm: float #TBD (optional) The size in mm of the FWHM Gaussian kernel to smooth the signal. If True, remove_nans is True. remove_nans: bool #TBD If remove_nans is True (default), the non-finite values (NaNs and infs) found in the images will be replaced by zeros. Returns ------- session_series, mask_data session_series: numpy.ndarray 2D array of series with shape (voxel number, image number) Note ---- nii_file and mask_file must have the same shape. Raises ------ FileNotFound, NiftiFilesNotCompatible """ img = check_img(image) mask = check_img(mask_img) check_img_compatibility(img, mask, only_check_3d=True) vol = get_data(img) series, mask_data = _apply_mask_to_4d_data(vol, mask) return series, mask_data
[ "Read", "a", "Nifti", "file", "nii_file", "and", "a", "mask", "Nifti", "file", ".", "Extract", "the", "signals", "in", "nii_file", "that", "are", "within", "the", "mask", "the", "mask", "indices", "and", "the", "mask", "shape", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/mask.py#L193-L241
[ "def", "apply_mask_4d", "(", "image", ",", "mask_img", ")", ":", "# , smooth_mm=None, remove_nans=True):", "img", "=", "check_img", "(", "image", ")", "mask", "=", "check_img", "(", "mask_img", ")", "check_img_compatibility", "(", "img", ",", "mask", ",", "only_check_3d", "=", "True", ")", "vol", "=", "get_data", "(", "img", ")", "series", ",", "mask_data", "=", "_apply_mask_to_4d_data", "(", "vol", ",", "mask", ")", "return", "series", ",", "mask_data" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
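For 4D images the masked result is a 2D matrix with one row per voxel and one column per volume; file names are placeholders:

from boyle.nifti.mask import apply_mask_4d

series, mask_data = apply_mask_4d('subject_bold.nii.gz', 'gm_mask.nii.gz')
print(series.shape)   # (n_voxels_in_mask, n_timepoints)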
valid
vector_to_volume
Transform a given vector to a volume. This is a reshape function for 3D flattened and maybe masked vectors. Parameters ---------- arr: np.array 1-Dimensional array mask: numpy.ndarray Mask image. Must have 3 dimensions, bool dtype. Returns ------- np.ndarray
boyle/nifti/mask.py
def vector_to_volume(arr, mask, order='C'): """Transform a given vector to a volume. This is a reshape function for 3D flattened and maybe masked vectors. Parameters ---------- arr: np.array 1-Dimensional array mask: numpy.ndarray Mask image. Must have 3 dimensions, bool dtype. Returns ------- np.ndarray """ if mask.dtype != np.bool: raise ValueError("mask must be a boolean array") # flatten trivially 2-dimensional vectors (shape (n, 1) or (1, n)) # before the dimensionality check, otherwise this branch is unreachable if arr.ndim == 2 and any(v == 1 for v in arr.shape): log.debug('Got an array of shape {}, flattening for my purposes.'.format(arr.shape)) arr = arr.flatten() if arr.ndim != 1: raise ValueError("vector must be a 1-dimensional array") volume = np.zeros(mask.shape[:3], dtype=arr.dtype, order=order) volume[mask] = arr return volume
def vector_to_volume(arr, mask, order='C'): """Transform a given vector to a volume. This is a reshape function for 3D flattened and maybe masked vectors. Parameters ---------- arr: np.array 1-Dimensional array mask: numpy.ndarray Mask image. Must have 3 dimensions, bool dtype. Returns ------- np.ndarray """ if mask.dtype != np.bool: raise ValueError("mask must be a boolean array") # flatten trivially 2-dimensional vectors (shape (n, 1) or (1, n)) # before the dimensionality check, otherwise this branch is unreachable if arr.ndim == 2 and any(v == 1 for v in arr.shape): log.debug('Got an array of shape {}, flattening for my purposes.'.format(arr.shape)) arr = arr.flatten() if arr.ndim != 1: raise ValueError("vector must be a 1-dimensional array") volume = np.zeros(mask.shape[:3], dtype=arr.dtype, order=order) volume[mask] = arr return volume
[ "Transform", "a", "given", "vector", "to", "a", "volume", ".", "This", "is", "a", "reshape", "function", "for", "3D", "flattened", "and", "maybe", "masked", "vectors", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/mask.py#L267-L295
[ "def", "vector_to_volume", "(", "arr", ",", "mask", ",", "order", "=", "'C'", ")", ":", "if", "mask", ".", "dtype", "!=", "np", ".", "bool", ":", "raise", "ValueError", "(", "\"mask must be a boolean array\"", ")", "if", "arr", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"vector must be a 1-dimensional array\"", ")", "if", "arr", ".", "ndim", "==", "2", "and", "any", "(", "v", "==", "1", "for", "v", "in", "arr", ".", "shape", ")", ":", "log", ".", "debug", "(", "'Got an array of shape {}, flattening for my purposes.'", ".", "format", "(", "arr", ".", "shape", ")", ")", "arr", "=", "arr", ".", "flatten", "(", ")", "volume", "=", "np", ".", "zeros", "(", "mask", ".", "shape", "[", ":", "3", "]", ",", "dtype", "=", "arr", ".", "dtype", ",", "order", "=", "order", ")", "volume", "[", "mask", "]", "=", "arr", "return", "volume" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
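A self-contained round-trip sketch for `vector_to_volume` using a toy mask:

import numpy as np
from boyle.nifti.mask import vector_to_volume

mask = np.zeros((4, 4, 4), dtype=bool)
mask[1:3, 1:3, 1:3] = True                  # 8 voxels inside the mask

vec = np.arange(mask.sum(), dtype=float)    # one value per masked voxel
vol = vector_to_volume(vec, mask)
assert vol.shape == mask.shape
assert np.all(vol[mask] == vec)             # values land back in the masked positions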
valid
matrix_to_4dvolume
Transform a given matrix to a 4D volume. This is a reshape function for 4D flattened masked matrices where the second dimension of the matrix corresponds to the original 4th dimension. Parameters ---------- arr: numpy.array 2D numpy.array mask: numpy.ndarray Mask image. Must have 3 dimensions, bool dtype. order: {'C', 'F'}, optional Memory layout of the output volume. Returns ------- data: numpy.ndarray Unmasked data. Shape: (mask.shape[0], mask.shape[1], mask.shape[2], arr.shape[1])
boyle/nifti/mask.py
def matrix_to_4dvolume(arr, mask, order='C'): """Transform a given matrix to a 4D volume. This is a reshape function for 4D flattened masked matrices where the second dimension of the matrix corresponds to the original 4th dimension. Parameters ---------- arr: numpy.array 2D numpy.array mask: numpy.ndarray Mask image. Must have 3 dimensions, bool dtype. order: {'C', 'F'}, optional Memory layout of the output volume. Returns ------- data: numpy.ndarray Unmasked data. Shape: (mask.shape[0], mask.shape[1], mask.shape[2], arr.shape[1]) """ if mask.dtype != np.bool: raise ValueError("mask must be a boolean array") if arr.ndim != 2: raise ValueError("arr must be a 2-dimensional array") if mask.sum() != arr.shape[0]: # raise an error if the shape of arr is not what expected raise ValueError('Expected arr of shape ({}, samples). Got {}.'.format(mask.sum(), arr.shape)) data = np.zeros(mask.shape + (arr.shape[1],), dtype=arr.dtype, order=order) data[mask, :] = arr return data
def matrix_to_4dvolume(arr, mask, order='C'): """Transform a given matrix to a 4D volume. This is a reshape function for 4D flattened masked matrices where the second dimension of the matrix corresponds to the original 4th dimension. Parameters ---------- arr: numpy.array 2D numpy.array mask: numpy.ndarray Mask image. Must have 3 dimensions, bool dtype. order: {'C', 'F'}, optional Memory layout of the output volume. Returns ------- data: numpy.ndarray Unmasked data. Shape: (mask.shape[0], mask.shape[1], mask.shape[2], arr.shape[1]) """ if mask.dtype != np.bool: raise ValueError("mask must be a boolean array") if arr.ndim != 2: raise ValueError("arr must be a 2-dimensional array") if mask.sum() != arr.shape[0]: # raise an error if the shape of arr is not what expected raise ValueError('Expected arr of shape ({}, samples). Got {}.'.format(mask.sum(), arr.shape)) data = np.zeros(mask.shape + (arr.shape[1],), dtype=arr.dtype, order=order) data[mask, :] = arr return data
[ "Transform", "a", "given", "vector", "to", "a", "volume", ".", "This", "is", "a", "reshape", "function", "for", "4D", "flattened", "masked", "matrices", "where", "the", "second", "dimension", "of", "the", "matrix", "corresponds", "to", "the", "original", "4th", "dimension", "." ]
Neurita/boyle
python
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/nifti/mask.py#L298-L333
[ "def", "matrix_to_4dvolume", "(", "arr", ",", "mask", ",", "order", "=", "'C'", ")", ":", "if", "mask", ".", "dtype", "!=", "np", ".", "bool", ":", "raise", "ValueError", "(", "\"mask must be a boolean array\"", ")", "if", "arr", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"X must be a 2-dimensional array\"", ")", "if", "mask", ".", "sum", "(", ")", "!=", "arr", ".", "shape", "[", "0", "]", ":", "# raise an error if the shape of arr is not what expected", "raise", "ValueError", "(", "'Expected arr of shape ({}, samples). Got {}.'", ".", "format", "(", "mask", ".", "sum", "(", ")", ",", "arr", ".", "shape", ")", ")", "data", "=", "np", ".", "zeros", "(", "mask", ".", "shape", "+", "(", "arr", ".", "shape", "[", "1", "]", ",", ")", ",", "dtype", "=", "arr", ".", "dtype", ",", "order", "=", "order", ")", "data", "[", "mask", ",", ":", "]", "=", "arr", "return", "data" ]
2dae7199849395a209c887d5f30506e1de8a9ad9
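And the 4D counterpart, with one column per volume:

import numpy as np
from boyle.nifti.mask import matrix_to_4dvolume

mask = np.zeros((4, 4, 4), dtype=bool)
mask[1:3, 1:3, 1:3] = True

arr = np.random.rand(mask.sum(), 5)   # 8 voxels x 5 volumes
data = matrix_to_4dvolume(arr, mask)
assert data.shape == mask.shape + (5,)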