partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
valid
DataClient.matched_file_count
Returns the number of files matching a pattern in a dataset. :param dataset_id: The ID of the dataset to search for files. :type dataset_id: int :param glob: A pattern which will be matched against files in the dataset. :type glob: str :param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset. :type is_dir: bool :return: The number of matching files :rtype: int
citrination_client/data/client.py
def matched_file_count(self, dataset_id, glob=".", is_dir=False):
    """
    Counts the files in a dataset whose paths match a pattern.

    :param dataset_id: The ID of the dataset to search for files.
    :type dataset_id: int
    :param glob: A pattern which will be matched against files in the dataset.
    :type glob: str
    :param is_dir: A boolean indicating whether or not the pattern should match
        against the beginning of paths in the dataset.
    :type is_dir: bool
    :return: The number of matching files
    :rtype: int
    """
    return len(self.list_files(dataset_id, glob, is_dir))
def matched_file_count(self, dataset_id, glob=".", is_dir=False):
    """
    Counts the files in a dataset whose paths match a pattern.

    :param dataset_id: The ID of the dataset to search for files.
    :type dataset_id: int
    :param glob: A pattern which will be matched against files in the dataset.
    :type glob: str
    :param is_dir: A boolean indicating whether or not the pattern should match
        against the beginning of paths in the dataset.
    :type is_dir: bool
    :return: The number of matching files
    :rtype: int
    """
    return len(self.list_files(dataset_id, glob, is_dir))
[ "Returns", "the", "number", "of", "files", "matching", "a", "pattern", "in", "a", "dataset", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L120-L134
[ "def", "matched_file_count", "(", "self", ",", "dataset_id", ",", "glob", "=", "\".\"", ",", "is_dir", "=", "False", ")", ":", "list_result", "=", "self", ".", "list_files", "(", "dataset_id", ",", "glob", ",", "is_dir", ")", "return", "len", "(", "list_result", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
DataClient.get_ingest_status
Returns the current status of dataset ingestion. If any file uploaded to a dataset is in an error/failure state this endpoint will return error/failure. If any files are still processing, will return processing. :param dataset_id: Dataset identifier :return: Status of dataset ingestion as a string
citrination_client/data/client.py
def get_ingest_status(self, dataset_id):
    """
    Returns the current status of dataset ingestion. If any file uploaded
    to a dataset is in an error/failure state this endpoint will return
    error/failure. If any files are still processing, will return processing.

    :param dataset_id: Dataset identifier
    :return: Status of dataset ingestion as a string
    """
    # This is a read-only status lookup, so the failure message reports a
    # retrieval error (the previous text said "create", a copy-paste slip).
    failure_message = "Failed to get ingest status for dataset {}".format(dataset_id)
    response = self._get_success_json(
        self._get('v1/datasets/' + str(dataset_id) + '/ingest-status',
                  failure_message=failure_message))['data']
    # The 'status' key may be absent from the payload; default to ''.
    return response.get('status', '')
def get_ingest_status(self, dataset_id):
    """
    Returns the current status of dataset ingestion. If any file uploaded
    to a dataset is in an error/failure state this endpoint will return
    error/failure. If any files are still processing, will return processing.

    :param dataset_id: Dataset identifier
    :return: Status of dataset ingestion as a string
    """
    # This is a read-only status lookup, so the failure message reports a
    # retrieval error (the previous text said "create", a copy-paste slip).
    failure_message = "Failed to get ingest status for dataset {}".format(dataset_id)
    response = self._get_success_json(
        self._get('v1/datasets/' + str(dataset_id) + '/ingest-status',
                  failure_message=failure_message))['data']
    # The 'status' key may be absent from the payload; default to ''.
    return response.get('status', '')
[ "Returns", "the", "current", "status", "of", "dataset", "ingestion", ".", "If", "any", "file", "uploaded", "to", "a", "dataset", "is", "in", "an", "error", "/", "failure", "state", "this", "endpoint", "will", "return", "error", "/", "failure", ".", "If", "any", "files", "are", "still", "processing", "will", "return", "processing", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L136-L151
[ "def", "get_ingest_status", "(", "self", ",", "dataset_id", ")", ":", "failure_message", "=", "\"Failed to create dataset ingest status for dataset {}\"", ".", "format", "(", "dataset_id", ")", "response", "=", "self", ".", "_get_success_json", "(", "self", ".", "_get", "(", "'v1/datasets/'", "+", "str", "(", "dataset_id", ")", "+", "'/ingest-status'", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'data'", "]", "if", "'status'", "in", "response", ":", "return", "response", "[", "'status'", "]", "return", "''" ]
409984fc65ce101a620f069263f155303492465c
valid
DataClient.get_dataset_files
Retrieves URLs for the files matched by a glob or a path to a directory in a given dataset. :param dataset_id: The id of the dataset to retrieve files from :type dataset_id: int :param glob: A regex used to select one or more files in the dataset :type glob: str :param is_dir: Whether or not the supplied pattern should be treated as a directory to search in :type is_dir: bool :param version_number: The version number of the dataset to retrieve files from :type version_number: int :return: A list of dataset files whose paths match the provided pattern. :rtype: list of :class:`DatasetFile`
citrination_client/data/client.py
def get_dataset_files(self, dataset_id, glob=".", is_dir=False, version_number=None):
    """
    Retrieves URLs for the files matched by a glob or a path to a directory
    in a given dataset.

    :param dataset_id: The id of the dataset to retrieve files from
    :type dataset_id: int
    :param glob: A regex used to select one or more files in the dataset
    :type glob: str
    :param is_dir: Whether or not the supplied pattern should be treated as a directory to search in
    :type is_dir: bool
    :param version_number: The version number of the dataset to retrieve files from
    :type version_number: int
    :return: A list of dataset files whose paths match the provided pattern.
    :rtype: list of :class:`DatasetFile`
    """
    latest = version_number is None
    payload = {
        "download_request": {
            "glob": glob,
            "isDir": is_dir,
            "latest": latest
        }
    }
    failure_message = "Failed to get matched files in dataset {}".format(dataset_id)
    versions = self._get_success_json(
        self._post_json(routes.matched_files(dataset_id), payload,
                        failure_message=failure_message))['versions']

    # When no version number is requested the server only returns the
    # latest version, so it is always the first (and only) entry.
    if latest:
        version = versions[0]
    else:
        matching = [v for v in versions if v['number'] == version_number]
        if not matching:
            raise ResourceNotFoundException()
        version = matching[0]

    return [DatasetFile(path=f['filename'], url=f['url']) for f in version['files']]
def get_dataset_files(self, dataset_id, glob=".", is_dir=False, version_number=None):
    """
    Retrieves URLs for the files matched by a glob or a path to a directory
    in a given dataset.

    :param dataset_id: The id of the dataset to retrieve files from
    :type dataset_id: int
    :param glob: A regex used to select one or more files in the dataset
    :type glob: str
    :param is_dir: Whether or not the supplied pattern should be treated as a directory to search in
    :type is_dir: bool
    :param version_number: The version number of the dataset to retrieve files from
    :type version_number: int
    :return: A list of dataset files whose paths match the provided pattern.
    :rtype: list of :class:`DatasetFile`
    """
    latest = version_number is None
    payload = {
        "download_request": {
            "glob": glob,
            "isDir": is_dir,
            "latest": latest
        }
    }
    failure_message = "Failed to get matched files in dataset {}".format(dataset_id)
    versions = self._get_success_json(
        self._post_json(routes.matched_files(dataset_id), payload,
                        failure_message=failure_message))['versions']

    # When no version number is requested the server only returns the
    # latest version, so it is always the first (and only) entry.
    if latest:
        version = versions[0]
    else:
        matching = [v for v in versions if v['number'] == version_number]
        if not matching:
            raise ResourceNotFoundException()
        version = matching[0]

    return [DatasetFile(path=f['filename'], url=f['url']) for f in version['files']]
[ "Retrieves", "URLs", "for", "the", "files", "matched", "by", "a", "glob", "or", "a", "path", "to", "a", "directory", "in", "a", "given", "dataset", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L153-L200
[ "def", "get_dataset_files", "(", "self", ",", "dataset_id", ",", "glob", "=", "\".\"", ",", "is_dir", "=", "False", ",", "version_number", "=", "None", ")", ":", "if", "version_number", "is", "None", ":", "latest", "=", "True", "else", ":", "latest", "=", "False", "data", "=", "{", "\"download_request\"", ":", "{", "\"glob\"", ":", "glob", ",", "\"isDir\"", ":", "is_dir", ",", "\"latest\"", ":", "latest", "}", "}", "failure_message", "=", "\"Failed to get matched files in dataset {}\"", ".", "format", "(", "dataset_id", ")", "versions", "=", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "routes", ".", "matched_files", "(", "dataset_id", ")", ",", "data", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'versions'", "]", "# if you don't provide a version number, only the latest", "# will be included in the response body", "if", "version_number", "is", "None", ":", "version", "=", "versions", "[", "0", "]", "else", ":", "try", ":", "version", "=", "list", "(", "filter", "(", "lambda", "v", ":", "v", "[", "'number'", "]", "==", "version_number", ",", "versions", ")", ")", "[", "0", "]", "except", "IndexError", ":", "raise", "ResourceNotFoundException", "(", ")", "return", "list", "(", "map", "(", "lambda", "f", ":", "DatasetFile", "(", "path", "=", "f", "[", "'filename'", "]", ",", "url", "=", "f", "[", "'url'", "]", ")", ",", "version", "[", "'files'", "]", ")", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
DataClient.get_dataset_file
Retrieves a dataset file matching a provided file path :param dataset_id: The id of the dataset to retrieve file from :type dataset_id: int :param file_path: The file path within the dataset :type file_path: str :param version: The dataset version to look for the file in. If nothing is supplied, the latest dataset version will be searched :type version: int :return: A dataset file matching the filepath provided :rtype: :class:`DatasetFile`
citrination_client/data/client.py
def get_dataset_file(self, dataset_id, file_path, version=None):
    """
    Retrieves a dataset file matching a provided file path

    :param dataset_id: The id of the dataset to retrieve file from
    :type dataset_id: int
    :param file_path: The file path within the dataset
    :type file_path: str
    :param version: The dataset version to look for the file in. If nothing is
        supplied, the latest dataset version will be searched
    :type version: int
    :return: A dataset file matching the filepath provided
    :rtype: :class:`DatasetFile`
    """
    # Anchor the pattern so only an exact path match is selected.
    exact_pattern = "^{}$".format(file_path)
    matches = self.get_dataset_files(dataset_id, exact_pattern, version_number=version)
    return matches[0]
def get_dataset_file(self, dataset_id, file_path, version=None):
    """
    Retrieves a dataset file matching a provided file path

    :param dataset_id: The id of the dataset to retrieve file from
    :type dataset_id: int
    :param file_path: The file path within the dataset
    :type file_path: str
    :param version: The dataset version to look for the file in. If nothing is
        supplied, the latest dataset version will be searched
    :type version: int
    :return: A dataset file matching the filepath provided
    :rtype: :class:`DatasetFile`
    """
    # Anchor the pattern so only an exact path match is selected.
    exact_pattern = "^{}$".format(file_path)
    matches = self.get_dataset_files(dataset_id, exact_pattern, version_number=version)
    return matches[0]
[ "Retrieves", "a", "dataset", "file", "matching", "a", "provided", "file", "path" ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L202-L215
[ "def", "get_dataset_file", "(", "self", ",", "dataset_id", ",", "file_path", ",", "version", "=", "None", ")", ":", "return", "self", ".", "get_dataset_files", "(", "dataset_id", ",", "\"^{}$\"", ".", "format", "(", "file_path", ")", ",", "version_number", "=", "version", ")", "[", "0", "]" ]
409984fc65ce101a620f069263f155303492465c
valid
DataClient.download_files
Downloads file(s) to a local destination. :param dataset_files: :type dataset_files: list of :class: `DatasetFile` :param destination: The path to the desired local download destination :type destination: str :param chunk: Whether or not to chunk the file. Default True :type chunk: bool
citrination_client/data/client.py
def download_files(self, dataset_files, destination='.'):
    """
    Downloads file(s) to a local destination.

    :param dataset_files: The file(s) to download; a single :class:`DatasetFile`
        is accepted as a convenience.
    :type dataset_files: list of :class:`DatasetFile`
    :param destination: The path to the desired local download destination
    :type destination: str
    """
    # (The previous docstring documented a `chunk` parameter that does not
    # exist in the signature; it has been removed.)
    if not isinstance(dataset_files, list):
        dataset_files = [dataset_files]

    for f in dataset_files:
        # Strip the leading slash so the dataset path nests under destination.
        filename = f.path.lstrip('/')
        local_path = os.path.join(destination, filename)

        # exist_ok avoids the check-then-create race between isdir() and
        # makedirs() when files share a directory or run concurrently.
        os.makedirs(os.path.dirname(local_path), exist_ok=True)

        # Stream the body straight to disk without buffering it in memory.
        r = requests.get(f.url, stream=True)
        with open(local_path, 'wb') as output_file:
            shutil.copyfileobj(r.raw, output_file)
def download_files(self, dataset_files, destination='.'):
    """
    Downloads file(s) to a local destination.

    :param dataset_files: The file(s) to download; a single :class:`DatasetFile`
        is accepted as a convenience.
    :type dataset_files: list of :class:`DatasetFile`
    :param destination: The path to the desired local download destination
    :type destination: str
    """
    # (The previous docstring documented a `chunk` parameter that does not
    # exist in the signature; it has been removed.)
    if not isinstance(dataset_files, list):
        dataset_files = [dataset_files]

    for f in dataset_files:
        # Strip the leading slash so the dataset path nests under destination.
        filename = f.path.lstrip('/')
        local_path = os.path.join(destination, filename)

        # exist_ok avoids the check-then-create race between isdir() and
        # makedirs() when files share a directory or run concurrently.
        os.makedirs(os.path.dirname(local_path), exist_ok=True)

        # Stream the body straight to disk without buffering it in memory.
        r = requests.get(f.url, stream=True)
        with open(local_path, 'wb') as output_file:
            shutil.copyfileobj(r.raw, output_file)
[ "Downloads", "file", "(", "s", ")", "to", "a", "local", "destination", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L217-L241
[ "def", "download_files", "(", "self", ",", "dataset_files", ",", "destination", "=", "'.'", ")", ":", "if", "not", "isinstance", "(", "dataset_files", ",", "list", ")", ":", "dataset_files", "=", "[", "dataset_files", "]", "for", "f", "in", "dataset_files", ":", "filename", "=", "f", ".", "path", ".", "lstrip", "(", "'/'", ")", "local_path", "=", "os", ".", "path", ".", "join", "(", "destination", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "dirname", "(", "local_path", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "local_path", ")", ")", "r", "=", "requests", ".", "get", "(", "f", ".", "url", ",", "stream", "=", "True", ")", "with", "open", "(", "local_path", ",", "'wb'", ")", "as", "output_file", ":", "shutil", ".", "copyfileobj", "(", "r", ".", "raw", ",", "output_file", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
DataClient.get_pif
Retrieves a PIF from a given dataset. :param dataset_id: The id of the dataset to retrieve PIF from :type dataset_id: int :param uid: The uid of the PIF to retrieve :type uid: str :param dataset_version: The dataset version to look for the PIF in. If nothing is supplied, the latest dataset version will be searched :type dataset_version: int :return: A :class:`Pif` object :rtype: :class:`Pif`
citrination_client/data/client.py
def get_pif(self, dataset_id, uid, dataset_version=None):
    """
    Retrieves a PIF from a given dataset.

    :param dataset_id: The id of the dataset to retrieve PIF from
    :type dataset_id: int
    :param uid: The uid of the PIF to retrieve
    :type uid: str
    :param dataset_version: The dataset version to look for the PIF in. If
        nothing is supplied, the latest dataset version will be searched
    :type dataset_version: int
    :return: A :class:`Pif` object
    :rtype: :class:`Pif`
    """
    failure_message = "An error occurred retrieving PIF {}".format(uid)
    # Identity comparison (`is None`) replaces the fragile `== None` check.
    if dataset_version is None:
        route = routes.pif_dataset_uid(dataset_id, uid)
    else:
        route = routes.pif_dataset_version_uid(dataset_id, uid, dataset_version)
    response = self._get(route, failure_message=failure_message)
    return pif.loads(response.content.decode("utf-8"))
def get_pif(self, dataset_id, uid, dataset_version=None):
    """
    Retrieves a PIF from a given dataset.

    :param dataset_id: The id of the dataset to retrieve PIF from
    :type dataset_id: int
    :param uid: The uid of the PIF to retrieve
    :type uid: str
    :param dataset_version: The dataset version to look for the PIF in. If
        nothing is supplied, the latest dataset version will be searched
    :type dataset_version: int
    :return: A :class:`Pif` object
    :rtype: :class:`Pif`
    """
    failure_message = "An error occurred retrieving PIF {}".format(uid)
    # Identity comparison (`is None`) replaces the fragile `== None` check.
    if dataset_version is None:
        route = routes.pif_dataset_uid(dataset_id, uid)
    else:
        route = routes.pif_dataset_version_uid(dataset_id, uid, dataset_version)
    response = self._get(route, failure_message=failure_message)
    return pif.loads(response.content.decode("utf-8"))
[ "Retrieves", "a", "PIF", "from", "a", "given", "dataset", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L243-L262
[ "def", "get_pif", "(", "self", ",", "dataset_id", ",", "uid", ",", "dataset_version", "=", "None", ")", ":", "failure_message", "=", "\"An error occurred retrieving PIF {}\"", ".", "format", "(", "uid", ")", "if", "dataset_version", "==", "None", ":", "response", "=", "self", ".", "_get", "(", "routes", ".", "pif_dataset_uid", "(", "dataset_id", ",", "uid", ")", ",", "failure_message", "=", "failure_message", ")", "else", ":", "response", "=", "self", ".", "_get", "(", "routes", ".", "pif_dataset_version_uid", "(", "dataset_id", ",", "uid", ",", "dataset_version", ")", ",", "failure_message", "=", "failure_message", ")", "return", "pif", ".", "loads", "(", "response", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
DataClient.create_dataset
Create a new data set. :param name: name of the dataset :type name: str :param description: description for the dataset :type description: str :param public: A boolean indicating whether or not the dataset should be public. :type public: bool :return: The newly created dataset. :rtype: :class:`Dataset`
citrination_client/data/client.py
def create_dataset(self, name=None, description=None, public=False):
    """
    Create a new data set.

    :param name: name of the dataset
    :type name: str
    :param description: description for the dataset
    :type description: str
    :param public: A boolean indicating whether or not the dataset should be public.
    :type public: bool
    :return: The newly created dataset.
    :rtype: :class:`Dataset`
    """
    # Only include the optional attributes that were actually provided.
    attributes = {"public": _convert_bool_to_public_value(public)}
    if name:
        attributes["name"] = name
    if description:
        attributes["description"] = description

    failure_message = "Unable to create dataset"
    result = self._get_success_json(
        self._post_json(routes.create_dataset(), {"dataset": attributes},
                        failure_message=failure_message))
    return _dataset_from_response_dict(result)
def create_dataset(self, name=None, description=None, public=False):
    """
    Create a new data set.

    :param name: name of the dataset
    :type name: str
    :param description: description for the dataset
    :type description: str
    :param public: A boolean indicating whether or not the dataset should be public.
    :type public: bool
    :return: The newly created dataset.
    :rtype: :class:`Dataset`
    """
    # Only include the optional attributes that were actually provided.
    attributes = {"public": _convert_bool_to_public_value(public)}
    if name:
        attributes["name"] = name
    if description:
        attributes["description"] = description

    failure_message = "Unable to create dataset"
    result = self._get_success_json(
        self._post_json(routes.create_dataset(), {"dataset": attributes},
                        failure_message=failure_message))
    return _dataset_from_response_dict(result)
[ "Create", "a", "new", "data", "set", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L264-L288
[ "def", "create_dataset", "(", "self", ",", "name", "=", "None", ",", "description", "=", "None", ",", "public", "=", "False", ")", ":", "data", "=", "{", "\"public\"", ":", "_convert_bool_to_public_value", "(", "public", ")", "}", "if", "name", ":", "data", "[", "\"name\"", "]", "=", "name", "if", "description", ":", "data", "[", "\"description\"", "]", "=", "description", "dataset", "=", "{", "\"dataset\"", ":", "data", "}", "failure_message", "=", "\"Unable to create dataset\"", "result", "=", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "routes", ".", "create_dataset", "(", ")", ",", "dataset", ",", "failure_message", "=", "failure_message", ")", ")", "return", "_dataset_from_response_dict", "(", "result", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
DataClient.update_dataset
Update a data set. :param dataset_id: The ID of the dataset to update :type dataset_id: int :param name: name of the dataset :type name: str :param description: description for the dataset :type description: str :param public: A boolean indicating whether or not the dataset should be public. :type public: bool :return: The updated dataset. :rtype: :class:`Dataset`
citrination_client/data/client.py
def update_dataset(self, dataset_id, name=None, description=None, public=None):
    """
    Update a data set.

    :param dataset_id: The ID of the dataset to update
    :type dataset_id: int
    :param name: name of the dataset
    :type name: str
    :param description: description for the dataset
    :type description: str
    :param public: A boolean indicating whether or not the dataset should be public.
    :type public: bool
    :return: The updated dataset.
    :rtype: :class:`Dataset`
    """
    # Only send the optional attributes the caller actually supplied.
    attributes = {"public": _convert_bool_to_public_value(public)}
    if name:
        attributes["name"] = name
    if description:
        attributes["description"] = description

    failure_message = "Failed to update dataset {}".format(dataset_id)
    response = self._get_success_json(
        self._post_json(routes.update_dataset(dataset_id),
                        data={"dataset": attributes},
                        failure_message=failure_message))
    return _dataset_from_response_dict(response)
def update_dataset(self, dataset_id, name=None, description=None, public=None):
    """
    Update a data set.

    :param dataset_id: The ID of the dataset to update
    :type dataset_id: int
    :param name: name of the dataset
    :type name: str
    :param description: description for the dataset
    :type description: str
    :param public: A boolean indicating whether or not the dataset should be public.
    :type public: bool
    :return: The updated dataset.
    :rtype: :class:`Dataset`
    """
    # Only send the optional attributes the caller actually supplied.
    attributes = {"public": _convert_bool_to_public_value(public)}
    if name:
        attributes["name"] = name
    if description:
        attributes["description"] = description

    failure_message = "Failed to update dataset {}".format(dataset_id)
    response = self._get_success_json(
        self._post_json(routes.update_dataset(dataset_id),
                        data={"dataset": attributes},
                        failure_message=failure_message))
    return _dataset_from_response_dict(response)
[ "Update", "a", "data", "set", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L290-L319
[ "def", "update_dataset", "(", "self", ",", "dataset_id", ",", "name", "=", "None", ",", "description", "=", "None", ",", "public", "=", "None", ")", ":", "data", "=", "{", "\"public\"", ":", "_convert_bool_to_public_value", "(", "public", ")", "}", "if", "name", ":", "data", "[", "\"name\"", "]", "=", "name", "if", "description", ":", "data", "[", "\"description\"", "]", "=", "description", "dataset", "=", "{", "\"dataset\"", ":", "data", "}", "failure_message", "=", "\"Failed to update dataset {}\"", ".", "format", "(", "dataset_id", ")", "response", "=", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "routes", ".", "update_dataset", "(", "dataset_id", ")", ",", "data", "=", "dataset", ",", "failure_message", "=", "failure_message", ")", ")", "return", "_dataset_from_response_dict", "(", "response", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
DataClient.create_dataset_version
Create a new data set version. :param dataset_id: The ID of the dataset for which the version must be bumped. :type dataset_id: int :return: The new dataset version. :rtype: :class:`DatasetVersion`
citrination_client/data/client.py
def create_dataset_version(self, dataset_id):
    """
    Create a new data set version.

    :param dataset_id: The ID of the dataset for which the version must be bumped.
    :type dataset_id: int
    :return: The new dataset version.
    :rtype: :class:`DatasetVersion`
    """
    failure_message = "Failed to create dataset version for dataset {}".format(dataset_id)
    response_json = self._get_success_json(
        self._post_json(routes.create_dataset_version(dataset_id), data={},
                        failure_message=failure_message))
    # The server reports the bumped version as 'dataset_scoped_id'.
    return DatasetVersion(number=response_json['dataset_scoped_id'])
def create_dataset_version(self, dataset_id):
    """
    Create a new data set version.

    :param dataset_id: The ID of the dataset for which the version must be bumped.
    :type dataset_id: int
    :return: The new dataset version.
    :rtype: :class:`DatasetVersion`
    """
    failure_message = "Failed to create dataset version for dataset {}".format(dataset_id)
    response_json = self._get_success_json(
        self._post_json(routes.create_dataset_version(dataset_id), data={},
                        failure_message=failure_message))
    # The server reports the bumped version as 'dataset_scoped_id'.
    return DatasetVersion(number=response_json['dataset_scoped_id'])
[ "Create", "a", "new", "data", "set", "version", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L321-L333
[ "def", "create_dataset_version", "(", "self", ",", "dataset_id", ")", ":", "failure_message", "=", "\"Failed to create dataset version for dataset {}\"", ".", "format", "(", "dataset_id", ")", "number", "=", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "routes", ".", "create_dataset_version", "(", "dataset_id", ")", ",", "data", "=", "{", "}", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'dataset_scoped_id'", "]", "return", "DatasetVersion", "(", "number", "=", "number", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
SearchTemplateClient.get_available_columns
Retrieves the set of columns from the combination of dataset ids given :param dataset_ids: The id of the dataset to retrieve columns from :type dataset_ids: list of int :return: A list of column names from the dataset ids given. :rtype: list of str
citrination_client/views/search_template/client.py
def get_available_columns(self, dataset_ids):
    """
    Retrieves the set of columns from the combination of dataset ids given

    :param dataset_ids: The id of the dataset to retrieve columns from
    :type dataset_ids: list of int
    :return: A list of column names from the dataset ids given.
    :rtype: list of str
    """
    # A single id is accepted as a convenience; normalize it to a list.
    if not isinstance(dataset_ids, list):
        dataset_ids = [dataset_ids]

    failure_message = "Failed to get available columns in dataset(s) {}".format(dataset_ids)
    payload = {"dataset_ids": dataset_ids}
    return self._get_success_json(
        self._post_json('v1/datasets/get-available-columns', payload,
                        failure_message=failure_message))['data']
def get_available_columns(self, dataset_ids):
    """
    Retrieves the set of columns from the combination of dataset ids given

    :param dataset_ids: The id of the dataset to retrieve columns from
    :type dataset_ids: list of int
    :return: A list of column names from the dataset ids given.
    :rtype: list of str
    """
    # A single id is accepted as a convenience; normalize it to a list.
    if not isinstance(dataset_ids, list):
        dataset_ids = [dataset_ids]

    failure_message = "Failed to get available columns in dataset(s) {}".format(dataset_ids)
    payload = {"dataset_ids": dataset_ids}
    return self._get_success_json(
        self._post_json('v1/datasets/get-available-columns', payload,
                        failure_message=failure_message))['data']
[ "Retrieves", "the", "set", "of", "columns", "from", "the", "combination", "of", "dataset", "ids", "given" ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/views/search_template/client.py#L17-L37
[ "def", "get_available_columns", "(", "self", ",", "dataset_ids", ")", ":", "if", "not", "isinstance", "(", "dataset_ids", ",", "list", ")", ":", "dataset_ids", "=", "[", "dataset_ids", "]", "data", "=", "{", "\"dataset_ids\"", ":", "dataset_ids", "}", "failure_message", "=", "\"Failed to get available columns in dataset(s) {}\"", ".", "format", "(", "dataset_ids", ")", "return", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "'v1/datasets/get-available-columns'", ",", "data", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'data'", "]" ]
409984fc65ce101a620f069263f155303492465c
valid
SearchTemplateClient.__generate_search_template
Generates a default search templates from the available columns in the dataset ids given. :param dataset_ids: The id of the dataset to retrieve files from :type dataset_ids: list of int :return: A search template based on the columns in the datasets given
citrination_client/views/search_template/client.py
def __generate_search_template(self, dataset_ids):
    """
    Generates a default search templates from the available columns in the
    dataset ids given.

    :param dataset_ids: The id of the dataset to retrieve files from
    :type dataset_ids: list of int
    :return: A search template based on the columns in the datasets given
    """
    failure_message = "Failed to generate a search template from columns in dataset(s) {}".format(dataset_ids)
    payload = {"dataset_ids": dataset_ids}
    return self._get_success_json(
        self._post_json('v1/search_templates/builders/from-dataset-ids', payload,
                        failure_message=failure_message))['data']
def __generate_search_template(self, dataset_ids): """ Generates a default search templates from the available columns in the dataset ids given. :param dataset_ids: The id of the dataset to retrieve files from :type dataset_ids: list of int :return: A search template based on the columns in the datasets given """ data = { "dataset_ids": dataset_ids } failure_message = "Failed to generate a search template from columns in dataset(s) {}".format(dataset_ids) return self._get_success_json(self._post_json( 'v1/search_templates/builders/from-dataset-ids', data, failure_message=failure_message))['data']
[ "Generates", "a", "default", "search", "templates", "from", "the", "available", "columns", "in", "the", "dataset", "ids", "given", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/views/search_template/client.py#L39-L56
[ "def", "__generate_search_template", "(", "self", ",", "dataset_ids", ")", ":", "data", "=", "{", "\"dataset_ids\"", ":", "dataset_ids", "}", "failure_message", "=", "\"Failed to generate a search template from columns in dataset(s) {}\"", ".", "format", "(", "dataset_ids", ")", "return", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "'v1/search_templates/builders/from-dataset-ids'", ",", "data", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'data'", "]" ]
409984fc65ce101a620f069263f155303492465c
valid
SearchTemplateClient.__prune_search_template
Returns a new search template, but the new template has only the extract_as_keys given. :param extract_as_keys: List of extract as keys to keep :param search_template: The search template to prune :return: New search template with pruned columns
citrination_client/views/search_template/client.py
def __prune_search_template(self, extract_as_keys, search_template): """ Returns a new search template, but the new template has only the extract_as_keys given. :param extract_as_keys: List of extract as keys to keep :param search_template: The search template to prune :return: New search template with pruned columns """ data = { "extract_as_keys": extract_as_keys, "search_template": search_template } failure_message = "Failed to prune a search template" return self._get_success_json(self._post_json( 'v1/search_templates/prune-to-extract-as', data, failure_message=failure_message))['data']
def __prune_search_template(self, extract_as_keys, search_template): """ Returns a new search template, but the new template has only the extract_as_keys given. :param extract_as_keys: List of extract as keys to keep :param search_template: The search template to prune :return: New search template with pruned columns """ data = { "extract_as_keys": extract_as_keys, "search_template": search_template } failure_message = "Failed to prune a search template" return self._get_success_json(self._post_json( 'v1/search_templates/prune-to-extract-as', data, failure_message=failure_message))['data']
[ "Returns", "a", "new", "search", "template", "but", "the", "new", "template", "has", "only", "the", "extract_as_keys", "given", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/views/search_template/client.py#L59-L78
[ "def", "__prune_search_template", "(", "self", ",", "extract_as_keys", ",", "search_template", ")", ":", "data", "=", "{", "\"extract_as_keys\"", ":", "extract_as_keys", ",", "\"search_template\"", ":", "search_template", "}", "failure_message", "=", "\"Failed to prune a search template\"", "return", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "'v1/search_templates/prune-to-extract-as'", ",", "data", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'data'", "]" ]
409984fc65ce101a620f069263f155303492465c
valid
QueryEncoder.default
Convert an object to a form ready to dump to json. :param obj: Object being serialized. The type of this object must be one of the following: None; a single object derived from the Pio class; or a list of objects, each derived from the Pio class. :return: List of dictionaries, each representing a physical information object, ready to be serialized.
citrination_client/search/query_encoder.py
def default(self, obj): """ Convert an object to a form ready to dump to json. :param obj: Object being serialized. The type of this object must be one of the following: None; a single object derived from the Pio class; or a list of objects, each derived from the Pio class. :return: List of dictionaries, each representing a physical information object, ready to be serialized. """ if obj is None: return [] elif isinstance(obj, list): return [i.as_dictionary() for i in obj] elif isinstance(obj, dict): return self._keys_to_camel_case(obj) else: return obj.as_dictionary()
def default(self, obj): """ Convert an object to a form ready to dump to json. :param obj: Object being serialized. The type of this object must be one of the following: None; a single object derived from the Pio class; or a list of objects, each derived from the Pio class. :return: List of dictionaries, each representing a physical information object, ready to be serialized. """ if obj is None: return [] elif isinstance(obj, list): return [i.as_dictionary() for i in obj] elif isinstance(obj, dict): return self._keys_to_camel_case(obj) else: return obj.as_dictionary()
[ "Convert", "an", "object", "to", "a", "form", "ready", "to", "dump", "to", "json", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/search/query_encoder.py#L11-L25
[ "def", "default", "(", "self", ",", "obj", ")", ":", "if", "obj", "is", "None", ":", "return", "[", "]", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "return", "[", "i", ".", "as_dictionary", "(", ")", "for", "i", "in", "obj", "]", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "self", ".", "_keys_to_camel_case", "(", "obj", ")", "else", ":", "return", "obj", ".", "as_dictionary", "(", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
QueryEncoder._keys_to_camel_case
Make a copy of a dictionary with all keys converted to camel case. This is just calls to_camel_case on each of the keys in the dictionary and returns a new dictionary. :param obj: Dictionary to convert keys to camel case. :return: Dictionary with the input values and all keys in camel case
citrination_client/search/query_encoder.py
def _keys_to_camel_case(self, obj): """ Make a copy of a dictionary with all keys converted to camel case. This is just calls to_camel_case on each of the keys in the dictionary and returns a new dictionary. :param obj: Dictionary to convert keys to camel case. :return: Dictionary with the input values and all keys in camel case """ return dict((to_camel_case(key), value) for (key, value) in obj.items())
def _keys_to_camel_case(self, obj): """ Make a copy of a dictionary with all keys converted to camel case. This is just calls to_camel_case on each of the keys in the dictionary and returns a new dictionary. :param obj: Dictionary to convert keys to camel case. :return: Dictionary with the input values and all keys in camel case """ return dict((to_camel_case(key), value) for (key, value) in obj.items())
[ "Make", "a", "copy", "of", "a", "dictionary", "with", "all", "keys", "converted", "to", "camel", "case", ".", "This", "is", "just", "calls", "to_camel_case", "on", "each", "of", "the", "keys", "in", "the", "dictionary", "and", "returns", "a", "new", "dictionary", "." ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/search/query_encoder.py#L27-L34
[ "def", "_keys_to_camel_case", "(", "self", ",", "obj", ")", ":", "return", "dict", "(", "(", "to_camel_case", "(", "key", ")", ",", "value", ")", "for", "(", "key", ",", "value", ")", "in", "obj", ".", "items", "(", ")", ")" ]
409984fc65ce101a620f069263f155303492465c
valid
ModelTemplateClient.validate
Runs the template against the validation endpoint, returns a message indicating status of the templte :param ml_template: Template to validate :return: OK or error message if validation failed
citrination_client/views/model_template/client.py
def validate(self, ml_template): """ Runs the template against the validation endpoint, returns a message indicating status of the templte :param ml_template: Template to validate :return: OK or error message if validation failed """ data = { "ml_template": ml_template } failure_message = "ML template validation invoke failed" res = self._get_success_json(self._post_json( 'ml_templates/validate', data, failure_message=failure_message))['data'] if res['valid']: return 'OK' return res['reason']
def validate(self, ml_template): """ Runs the template against the validation endpoint, returns a message indicating status of the templte :param ml_template: Template to validate :return: OK or error message if validation failed """ data = { "ml_template": ml_template } failure_message = "ML template validation invoke failed" res = self._get_success_json(self._post_json( 'ml_templates/validate', data, failure_message=failure_message))['data'] if res['valid']: return 'OK' return res['reason']
[ "Runs", "the", "template", "against", "the", "validation", "endpoint", "returns", "a", "message", "indicating", "status", "of", "the", "templte" ]
CitrineInformatics/python-citrination-client
python
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/views/model_template/client.py#L15-L34
[ "def", "validate", "(", "self", ",", "ml_template", ")", ":", "data", "=", "{", "\"ml_template\"", ":", "ml_template", "}", "failure_message", "=", "\"ML template validation invoke failed\"", "res", "=", "self", ".", "_get_success_json", "(", "self", ".", "_post_json", "(", "'ml_templates/validate'", ",", "data", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'data'", "]", "if", "res", "[", "'valid'", "]", ":", "return", "'OK'", "return", "res", "[", "'reason'", "]" ]
409984fc65ce101a620f069263f155303492465c
valid
_validate_course_key
Validation helper
organizations/api.py
def _validate_course_key(course_key): """ Validation helper """ if not validators.course_key_is_valid(course_key): exceptions.raise_exception( "CourseKey", course_key, exceptions.InvalidCourseKeyException )
def _validate_course_key(course_key): """ Validation helper """ if not validators.course_key_is_valid(course_key): exceptions.raise_exception( "CourseKey", course_key, exceptions.InvalidCourseKeyException )
[ "Validation", "helper" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/api.py#L17-L24
[ "def", "_validate_course_key", "(", "course_key", ")", ":", "if", "not", "validators", ".", "course_key_is_valid", "(", "course_key", ")", ":", "exceptions", ".", "raise_exception", "(", "\"CourseKey\"", ",", "course_key", ",", "exceptions", ".", "InvalidCourseKeyException", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
_validate_organization_data
Validation helper
organizations/api.py
def _validate_organization_data(organization_data): """ Validation helper """ if not validators.organization_data_is_valid(organization_data): exceptions.raise_exception( "Organization", organization_data, exceptions.InvalidOrganizationException )
def _validate_organization_data(organization_data): """ Validation helper """ if not validators.organization_data_is_valid(organization_data): exceptions.raise_exception( "Organization", organization_data, exceptions.InvalidOrganizationException )
[ "Validation", "helper" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/api.py#L27-L34
[ "def", "_validate_organization_data", "(", "organization_data", ")", ":", "if", "not", "validators", ".", "organization_data_is_valid", "(", "organization_data", ")", ":", "exceptions", ".", "raise_exception", "(", "\"Organization\"", ",", "organization_data", ",", "exceptions", ".", "InvalidOrganizationException", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
add_organization_course
Adds a organization-course link to the system
organizations/api.py
def add_organization_course(organization_data, course_key): """ Adds a organization-course link to the system """ _validate_course_key(course_key) _validate_organization_data(organization_data) data.create_organization_course( organization=organization_data, course_key=course_key )
def add_organization_course(organization_data, course_key): """ Adds a organization-course link to the system """ _validate_course_key(course_key) _validate_organization_data(organization_data) data.create_organization_course( organization=organization_data, course_key=course_key )
[ "Adds", "a", "organization", "-", "course", "link", "to", "the", "system" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/api.py#L86-L95
[ "def", "add_organization_course", "(", "organization_data", ",", "course_key", ")", ":", "_validate_course_key", "(", "course_key", ")", "_validate_organization_data", "(", "organization_data", ")", "data", ".", "create_organization_course", "(", "organization", "=", "organization_data", ",", "course_key", "=", "course_key", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
remove_organization_course
Removes the specfied course from the specified organization
organizations/api.py
def remove_organization_course(organization, course_key): """ Removes the specfied course from the specified organization """ _validate_organization_data(organization) _validate_course_key(course_key) return data.delete_organization_course(course_key=course_key, organization=organization)
def remove_organization_course(organization, course_key): """ Removes the specfied course from the specified organization """ _validate_organization_data(organization) _validate_course_key(course_key) return data.delete_organization_course(course_key=course_key, organization=organization)
[ "Removes", "the", "specfied", "course", "from", "the", "specified", "organization" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/api.py#L107-L113
[ "def", "remove_organization_course", "(", "organization", ",", "course_key", ")", ":", "_validate_organization_data", "(", "organization", ")", "_validate_course_key", "(", "course_key", ")", "return", "data", ".", "delete_organization_course", "(", "course_key", "=", "course_key", ",", "organization", "=", "organization", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
raise_exception
Exception helper
organizations/exceptions.py
def raise_exception(entity_type, entity, exception): """ Exception helper """ raise exception( u'The {} you have provided is not valid: {}'.format(entity_type, entity).encode('utf-8') )
def raise_exception(entity_type, entity, exception): """ Exception helper """ raise exception( u'The {} you have provided is not valid: {}'.format(entity_type, entity).encode('utf-8') )
[ "Exception", "helper" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/exceptions.py#L16-L20
[ "def", "raise_exception", "(", "entity_type", ",", "entity", ",", "exception", ")", ":", "raise", "exception", "(", "u'The {} you have provided is not valid: {}'", ".", "format", "(", "entity_type", ",", "entity", ")", ".", "encode", "(", "'utf-8'", ")", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
course_key_is_valid
Course key object validation
organizations/validators.py
def course_key_is_valid(course_key): """ Course key object validation """ if course_key is None: return False try: CourseKey.from_string(text_type(course_key)) except (InvalidKeyError, UnicodeDecodeError): return False return True
def course_key_is_valid(course_key): """ Course key object validation """ if course_key is None: return False try: CourseKey.from_string(text_type(course_key)) except (InvalidKeyError, UnicodeDecodeError): return False return True
[ "Course", "key", "object", "validation" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/validators.py#L10-L20
[ "def", "course_key_is_valid", "(", "course_key", ")", ":", "if", "course_key", "is", "None", ":", "return", "False", "try", ":", "CourseKey", ".", "from_string", "(", "text_type", "(", "course_key", ")", ")", "except", "(", "InvalidKeyError", ",", "UnicodeDecodeError", ")", ":", "return", "False", "return", "True" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
organization_data_is_valid
Organization data validation
organizations/validators.py
def organization_data_is_valid(organization_data): """ Organization data validation """ if organization_data is None: return False if 'id' in organization_data and not organization_data.get('id'): return False if 'name' in organization_data and not organization_data.get('name'): return False return True
def organization_data_is_valid(organization_data): """ Organization data validation """ if organization_data is None: return False if 'id' in organization_data and not organization_data.get('id'): return False if 'name' in organization_data and not organization_data.get('name'): return False return True
[ "Organization", "data", "validation" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/validators.py#L23-L33
[ "def", "organization_data_is_valid", "(", "organization_data", ")", ":", "if", "organization_data", "is", "None", ":", "return", "False", "if", "'id'", "in", "organization_data", "and", "not", "organization_data", ".", "get", "(", "'id'", ")", ":", "return", "False", "if", "'name'", "in", "organization_data", "and", "not", "organization_data", ".", "get", "(", "'name'", ")", ":", "return", "False", "return", "True" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
_activate_organization
Activates an inactivated (soft-deleted) organization as well as any inactive relationships
organizations/data.py
def _activate_organization(organization): """ Activates an inactivated (soft-deleted) organization as well as any inactive relationships """ [_activate_organization_course_relationship(record) for record in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=False)] [_activate_record(record) for record in internal.Organization.objects.filter(id=organization.id, active=False)]
def _activate_organization(organization): """ Activates an inactivated (soft-deleted) organization as well as any inactive relationships """ [_activate_organization_course_relationship(record) for record in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=False)] [_activate_record(record) for record in internal.Organization.objects.filter(id=organization.id, active=False)]
[ "Activates", "an", "inactivated", "(", "soft", "-", "deleted", ")", "organization", "as", "well", "as", "any", "inactive", "relationships" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L53-L61
[ "def", "_activate_organization", "(", "organization", ")", ":", "[", "_activate_organization_course_relationship", "(", "record", ")", "for", "record", "in", "internal", ".", "OrganizationCourse", ".", "objects", ".", "filter", "(", "organization_id", "=", "organization", ".", "id", ",", "active", "=", "False", ")", "]", "[", "_activate_record", "(", "record", ")", "for", "record", "in", "internal", ".", "Organization", ".", "objects", ".", "filter", "(", "id", "=", "organization", ".", "id", ",", "active", "=", "False", ")", "]" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
_inactivate_organization
Inactivates an activated organization as well as any active relationships
organizations/data.py
def _inactivate_organization(organization): """ Inactivates an activated organization as well as any active relationships """ [_inactivate_organization_course_relationship(record) for record in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=True)] [_inactivate_record(record) for record in internal.Organization.objects.filter(id=organization.id, active=True)]
def _inactivate_organization(organization): """ Inactivates an activated organization as well as any active relationships """ [_inactivate_organization_course_relationship(record) for record in internal.OrganizationCourse.objects.filter(organization_id=organization.id, active=True)] [_inactivate_record(record) for record in internal.Organization.objects.filter(id=organization.id, active=True)]
[ "Inactivates", "an", "activated", "organization", "as", "well", "as", "any", "active", "relationships" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L64-L72
[ "def", "_inactivate_organization", "(", "organization", ")", ":", "[", "_inactivate_organization_course_relationship", "(", "record", ")", "for", "record", "in", "internal", ".", "OrganizationCourse", ".", "objects", ".", "filter", "(", "organization_id", "=", "organization", ".", "id", ",", "active", "=", "True", ")", "]", "[", "_inactivate_record", "(", "record", ")", "for", "record", "in", "internal", ".", "Organization", ".", "objects", ".", "filter", "(", "id", "=", "organization", ".", "id", ",", "active", "=", "True", ")", "]" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
_activate_organization_course_relationship
Activates an inactive organization-course relationship
organizations/data.py
def _activate_organization_course_relationship(relationship): # pylint: disable=invalid-name """ Activates an inactive organization-course relationship """ # If the relationship doesn't exist or the organization isn't active we'll want to raise an error relationship = internal.OrganizationCourse.objects.get( id=relationship.id, active=False, organization__active=True ) _activate_record(relationship)
def _activate_organization_course_relationship(relationship): # pylint: disable=invalid-name """ Activates an inactive organization-course relationship """ # If the relationship doesn't exist or the organization isn't active we'll want to raise an error relationship = internal.OrganizationCourse.objects.get( id=relationship.id, active=False, organization__active=True ) _activate_record(relationship)
[ "Activates", "an", "inactive", "organization", "-", "course", "relationship" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L75-L85
[ "def", "_activate_organization_course_relationship", "(", "relationship", ")", ":", "# pylint: disable=invalid-name", "# If the relationship doesn't exist or the organization isn't active we'll want to raise an error", "relationship", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "get", "(", "id", "=", "relationship", ".", "id", ",", "active", "=", "False", ",", "organization__active", "=", "True", ")", "_activate_record", "(", "relationship", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
_inactivate_organization_course_relationship
Inactivates an active organization-course relationship
organizations/data.py
def _inactivate_organization_course_relationship(relationship): # pylint: disable=invalid-name """ Inactivates an active organization-course relationship """ relationship = internal.OrganizationCourse.objects.get( id=relationship.id, active=True ) _inactivate_record(relationship)
def _inactivate_organization_course_relationship(relationship): # pylint: disable=invalid-name """ Inactivates an active organization-course relationship """ relationship = internal.OrganizationCourse.objects.get( id=relationship.id, active=True ) _inactivate_record(relationship)
[ "Inactivates", "an", "active", "organization", "-", "course", "relationship" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L88-L96
[ "def", "_inactivate_organization_course_relationship", "(", "relationship", ")", ":", "# pylint: disable=invalid-name", "relationship", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "get", "(", "id", "=", "relationship", ".", "id", ",", "active", "=", "True", ")", "_inactivate_record", "(", "relationship", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
create_organization
Inserts a new organization into app/local state given the following dictionary: { 'name': string, 'description': string } Returns an updated dictionary including a new 'id': integer field/value
organizations/data.py
def create_organization(organization): """ Inserts a new organization into app/local state given the following dictionary: { 'name': string, 'description': string } Returns an updated dictionary including a new 'id': integer field/value """ # Trust, but verify... if not organization.get('name'): exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) organization_obj = serializers.deserialize_organization(organization) try: organization = internal.Organization.objects.get( name=organization_obj.name, ) # If the organization exists, but was inactivated, we can simply turn it back on if not organization.active: _activate_organization(organization_obj) except internal.Organization.DoesNotExist: organization = internal.Organization.objects.create( name=organization_obj.name, short_name=organization_obj.short_name, description=organization_obj.description, logo=organization_obj.logo, active=True ) return serializers.serialize_organization(organization)
def create_organization(organization): """ Inserts a new organization into app/local state given the following dictionary: { 'name': string, 'description': string } Returns an updated dictionary including a new 'id': integer field/value """ # Trust, but verify... if not organization.get('name'): exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) organization_obj = serializers.deserialize_organization(organization) try: organization = internal.Organization.objects.get( name=organization_obj.name, ) # If the organization exists, but was inactivated, we can simply turn it back on if not organization.active: _activate_organization(organization_obj) except internal.Organization.DoesNotExist: organization = internal.Organization.objects.create( name=organization_obj.name, short_name=organization_obj.short_name, description=organization_obj.description, logo=organization_obj.logo, active=True ) return serializers.serialize_organization(organization)
[ "Inserts", "a", "new", "organization", "into", "app", "/", "local", "state", "given", "the", "following", "dictionary", ":", "{", "name", ":", "string", "description", ":", "string", "}", "Returns", "an", "updated", "dictionary", "including", "a", "new", "id", ":", "integer", "field", "/", "value" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L100-L128
[ "def", "create_organization", "(", "organization", ")", ":", "# Trust, but verify...", "if", "not", "organization", ".", "get", "(", "'name'", ")", ":", "exceptions", ".", "raise_exception", "(", "\"organization\"", ",", "organization", ",", "exceptions", ".", "InvalidOrganizationException", ")", "organization_obj", "=", "serializers", ".", "deserialize_organization", "(", "organization", ")", "try", ":", "organization", "=", "internal", ".", "Organization", ".", "objects", ".", "get", "(", "name", "=", "organization_obj", ".", "name", ",", ")", "# If the organization exists, but was inactivated, we can simply turn it back on", "if", "not", "organization", ".", "active", ":", "_activate_organization", "(", "organization_obj", ")", "except", "internal", ".", "Organization", ".", "DoesNotExist", ":", "organization", "=", "internal", ".", "Organization", ".", "objects", ".", "create", "(", "name", "=", "organization_obj", ".", "name", ",", "short_name", "=", "organization_obj", ".", "short_name", ",", "description", "=", "organization_obj", ".", "description", ",", "logo", "=", "organization_obj", ".", "logo", ",", "active", "=", "True", ")", "return", "serializers", ".", "serialize_organization", "(", "organization", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
update_organization
Updates an existing organization in app/local state Returns a dictionary representation of the object
organizations/data.py
def update_organization(organization): """ Updates an existing organization in app/local state Returns a dictionary representation of the object """ organization_obj = serializers.deserialize_organization(organization) try: organization = internal.Organization.objects.get(id=organization_obj.id) organization.name = organization_obj.name organization.short_name = organization_obj.short_name organization.description = organization_obj.description organization.logo = organization_obj.logo organization.active = organization_obj.active except internal.Organization.DoesNotExist: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) return serializers.serialize_organization(organization)
def update_organization(organization): """ Updates an existing organization in app/local state Returns a dictionary representation of the object """ organization_obj = serializers.deserialize_organization(organization) try: organization = internal.Organization.objects.get(id=organization_obj.id) organization.name = organization_obj.name organization.short_name = organization_obj.short_name organization.description = organization_obj.description organization.logo = organization_obj.logo organization.active = organization_obj.active except internal.Organization.DoesNotExist: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) return serializers.serialize_organization(organization)
[ "Updates", "an", "existing", "organization", "in", "app", "/", "local", "state", "Returns", "a", "dictionary", "representation", "of", "the", "object" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L131-L146
[ "def", "update_organization", "(", "organization", ")", ":", "organization_obj", "=", "serializers", ".", "deserialize_organization", "(", "organization", ")", "try", ":", "organization", "=", "internal", ".", "Organization", ".", "objects", ".", "get", "(", "id", "=", "organization_obj", ".", "id", ")", "organization", ".", "name", "=", "organization_obj", ".", "name", "organization", ".", "short_name", "=", "organization_obj", ".", "short_name", "organization", ".", "description", "=", "organization_obj", ".", "description", "organization", ".", "logo", "=", "organization_obj", ".", "logo", "organization", ".", "active", "=", "organization_obj", ".", "active", "except", "internal", ".", "Organization", ".", "DoesNotExist", ":", "exceptions", ".", "raise_exception", "(", "\"organization\"", ",", "organization", ",", "exceptions", ".", "InvalidOrganizationException", ")", "return", "serializers", ".", "serialize_organization", "(", "organization", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
fetch_organization
Retrieves a specific organization from app/local state Returns a dictionary representation of the object
organizations/data.py
def fetch_organization(organization_id): """ Retrieves a specific organization from app/local state Returns a dictionary representation of the object """ organization = {'id': organization_id} if not organization_id: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) organizations = serializers.serialize_organizations( internal.Organization.objects.filter(id=organization_id, active=True) ) if not organizations: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) return organizations[0]
def fetch_organization(organization_id): """ Retrieves a specific organization from app/local state Returns a dictionary representation of the object """ organization = {'id': organization_id} if not organization_id: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) organizations = serializers.serialize_organizations( internal.Organization.objects.filter(id=organization_id, active=True) ) if not organizations: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) return organizations[0]
[ "Retrieves", "a", "specific", "organization", "from", "app", "/", "local", "state", "Returns", "a", "dictionary", "representation", "of", "the", "object" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L158-L171
[ "def", "fetch_organization", "(", "organization_id", ")", ":", "organization", "=", "{", "'id'", ":", "organization_id", "}", "if", "not", "organization_id", ":", "exceptions", ".", "raise_exception", "(", "\"organization\"", ",", "organization", ",", "exceptions", ".", "InvalidOrganizationException", ")", "organizations", "=", "serializers", ".", "serialize_organizations", "(", "internal", ".", "Organization", ".", "objects", ".", "filter", "(", "id", "=", "organization_id", ",", "active", "=", "True", ")", ")", "if", "not", "organizations", ":", "exceptions", ".", "raise_exception", "(", "\"organization\"", ",", "organization", ",", "exceptions", ".", "InvalidOrganizationException", ")", "return", "organizations", "[", "0", "]" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
fetch_organization_by_short_name
Retrieves a specific organization from app/local state by short name Returns a dictionary representation of the object
organizations/data.py
def fetch_organization_by_short_name(organization_short_name): """ Retrieves a specific organization from app/local state by short name Returns a dictionary representation of the object """ organization = {'short_name': organization_short_name} if not organization_short_name: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) organizations = serializers.serialize_organizations(internal.Organization.objects.filter( active=True, short_name=organization_short_name )) if not organizations: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) return organizations[0]
def fetch_organization_by_short_name(organization_short_name): """ Retrieves a specific organization from app/local state by short name Returns a dictionary representation of the object """ organization = {'short_name': organization_short_name} if not organization_short_name: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) organizations = serializers.serialize_organizations(internal.Organization.objects.filter( active=True, short_name=organization_short_name )) if not organizations: exceptions.raise_exception("organization", organization, exceptions.InvalidOrganizationException) return organizations[0]
[ "Retrieves", "a", "specific", "organization", "from", "app", "/", "local", "state", "by", "short", "name", "Returns", "a", "dictionary", "representation", "of", "the", "object" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L174-L187
[ "def", "fetch_organization_by_short_name", "(", "organization_short_name", ")", ":", "organization", "=", "{", "'short_name'", ":", "organization_short_name", "}", "if", "not", "organization_short_name", ":", "exceptions", ".", "raise_exception", "(", "\"organization\"", ",", "organization", ",", "exceptions", ".", "InvalidOrganizationException", ")", "organizations", "=", "serializers", ".", "serialize_organizations", "(", "internal", ".", "Organization", ".", "objects", ".", "filter", "(", "active", "=", "True", ",", "short_name", "=", "organization_short_name", ")", ")", "if", "not", "organizations", ":", "exceptions", ".", "raise_exception", "(", "\"organization\"", ",", "organization", ",", "exceptions", ".", "InvalidOrganizationException", ")", "return", "organizations", "[", "0", "]" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
create_organization_course
Inserts a new organization-course relationship into app/local state No response currently defined for this operation
organizations/data.py
def create_organization_course(organization, course_key): """ Inserts a new organization-course relationship into app/local state No response currently defined for this operation """ organization_obj = serializers.deserialize_organization(organization) try: relationship = internal.OrganizationCourse.objects.get( organization=organization_obj, course_id=text_type(course_key) ) # If the relationship exists, but was inactivated, we can simply turn it back on if not relationship.active: _activate_organization_course_relationship(relationship) except internal.OrganizationCourse.DoesNotExist: relationship = internal.OrganizationCourse.objects.create( organization=organization_obj, course_id=text_type(course_key), active=True )
def create_organization_course(organization, course_key): """ Inserts a new organization-course relationship into app/local state No response currently defined for this operation """ organization_obj = serializers.deserialize_organization(organization) try: relationship = internal.OrganizationCourse.objects.get( organization=organization_obj, course_id=text_type(course_key) ) # If the relationship exists, but was inactivated, we can simply turn it back on if not relationship.active: _activate_organization_course_relationship(relationship) except internal.OrganizationCourse.DoesNotExist: relationship = internal.OrganizationCourse.objects.create( organization=organization_obj, course_id=text_type(course_key), active=True )
[ "Inserts", "a", "new", "organization", "-", "course", "relationship", "into", "app", "/", "local", "state", "No", "response", "currently", "defined", "for", "this", "operation" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L198-L217
[ "def", "create_organization_course", "(", "organization", ",", "course_key", ")", ":", "organization_obj", "=", "serializers", ".", "deserialize_organization", "(", "organization", ")", "try", ":", "relationship", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "get", "(", "organization", "=", "organization_obj", ",", "course_id", "=", "text_type", "(", "course_key", ")", ")", "# If the relationship exists, but was inactivated, we can simply turn it back on", "if", "not", "relationship", ".", "active", ":", "_activate_organization_course_relationship", "(", "relationship", ")", "except", "internal", ".", "OrganizationCourse", ".", "DoesNotExist", ":", "relationship", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "create", "(", "organization", "=", "organization_obj", ",", "course_id", "=", "text_type", "(", "course_key", ")", ",", "active", "=", "True", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
delete_organization_course
Removes an existing organization-course relationship from app/local state No response currently defined for this operation
organizations/data.py
def delete_organization_course(organization, course_key): """ Removes an existing organization-course relationship from app/local state No response currently defined for this operation """ try: relationship = internal.OrganizationCourse.objects.get( organization=organization['id'], course_id=text_type(course_key), active=True, ) _inactivate_organization_course_relationship(relationship) except internal.OrganizationCourse.DoesNotExist: # If we're being asked to delete an organization-course link # that does not exist in the database then our work is done pass
def delete_organization_course(organization, course_key): """ Removes an existing organization-course relationship from app/local state No response currently defined for this operation """ try: relationship = internal.OrganizationCourse.objects.get( organization=organization['id'], course_id=text_type(course_key), active=True, ) _inactivate_organization_course_relationship(relationship) except internal.OrganizationCourse.DoesNotExist: # If we're being asked to delete an organization-course link # that does not exist in the database then our work is done pass
[ "Removes", "an", "existing", "organization", "-", "course", "relationship", "from", "app", "/", "local", "state", "No", "response", "currently", "defined", "for", "this", "operation" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L220-L235
[ "def", "delete_organization_course", "(", "organization", ",", "course_key", ")", ":", "try", ":", "relationship", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "get", "(", "organization", "=", "organization", "[", "'id'", "]", ",", "course_id", "=", "text_type", "(", "course_key", ")", ",", "active", "=", "True", ",", ")", "_inactivate_organization_course_relationship", "(", "relationship", ")", "except", "internal", ".", "OrganizationCourse", ".", "DoesNotExist", ":", "# If we're being asked to delete an organization-course link", "# that does not exist in the database then our work is done", "pass" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
fetch_organization_courses
Retrieves the set of courses currently linked to the specified organization
organizations/data.py
def fetch_organization_courses(organization): """ Retrieves the set of courses currently linked to the specified organization """ organization_obj = serializers.deserialize_organization(organization) queryset = internal.OrganizationCourse.objects.filter( organization=organization_obj, active=True ).select_related('organization') return [serializers.serialize_organization_with_course(organization) for organization in queryset]
def fetch_organization_courses(organization): """ Retrieves the set of courses currently linked to the specified organization """ organization_obj = serializers.deserialize_organization(organization) queryset = internal.OrganizationCourse.objects.filter( organization=organization_obj, active=True ).select_related('organization') return [serializers.serialize_organization_with_course(organization) for organization in queryset]
[ "Retrieves", "the", "set", "of", "courses", "currently", "linked", "to", "the", "specified", "organization" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L238-L247
[ "def", "fetch_organization_courses", "(", "organization", ")", ":", "organization_obj", "=", "serializers", ".", "deserialize_organization", "(", "organization", ")", "queryset", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "filter", "(", "organization", "=", "organization_obj", ",", "active", "=", "True", ")", ".", "select_related", "(", "'organization'", ")", "return", "[", "serializers", ".", "serialize_organization_with_course", "(", "organization", ")", "for", "organization", "in", "queryset", "]" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
fetch_course_organizations
Retrieves the organizations linked to the specified course
organizations/data.py
def fetch_course_organizations(course_key): """ Retrieves the organizations linked to the specified course """ queryset = internal.OrganizationCourse.objects.filter( course_id=text_type(course_key), active=True ).select_related('organization') return [serializers.serialize_organization_with_course(organization) for organization in queryset]
def fetch_course_organizations(course_key): """ Retrieves the organizations linked to the specified course """ queryset = internal.OrganizationCourse.objects.filter( course_id=text_type(course_key), active=True ).select_related('organization') return [serializers.serialize_organization_with_course(organization) for organization in queryset]
[ "Retrieves", "the", "organizations", "linked", "to", "the", "specified", "course" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L250-L258
[ "def", "fetch_course_organizations", "(", "course_key", ")", ":", "queryset", "=", "internal", ".", "OrganizationCourse", ".", "objects", ".", "filter", "(", "course_id", "=", "text_type", "(", "course_key", ")", ",", "active", "=", "True", ")", ".", "select_related", "(", "'organization'", ")", "return", "[", "serializers", ".", "serialize_organization_with_course", "(", "organization", ")", "for", "organization", "in", "queryset", "]" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
delete_course_references
Inactivates references to course keys within this app (ref: receivers.py and api.py)
organizations/data.py
def delete_course_references(course_key): """ Inactivates references to course keys within this app (ref: receivers.py and api.py) """ [_inactivate_record(record) for record in internal.OrganizationCourse.objects.filter( course_id=text_type(course_key), active=True )]
def delete_course_references(course_key): """ Inactivates references to course keys within this app (ref: receivers.py and api.py) """ [_inactivate_record(record) for record in internal.OrganizationCourse.objects.filter( course_id=text_type(course_key), active=True )]
[ "Inactivates", "references", "to", "course", "keys", "within", "this", "app", "(", "ref", ":", "receivers", ".", "py", "and", "api", ".", "py", ")" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/data.py#L261-L268
[ "def", "delete_course_references", "(", "course_key", ")", ":", "[", "_inactivate_record", "(", "record", ")", "for", "record", "in", "internal", ".", "OrganizationCourse", ".", "objects", ".", "filter", "(", "course_id", "=", "text_type", "(", "course_key", ")", ",", "active", "=", "True", ")", "]" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
serialize_organization
Organization object-to-dict serialization
organizations/serializers.py
def serialize_organization(organization): """ Organization object-to-dict serialization """ return { 'id': organization.id, 'name': organization.name, 'short_name': organization.short_name, 'description': organization.description, 'logo': organization.logo }
def serialize_organization(organization): """ Organization object-to-dict serialization """ return { 'id': organization.id, 'name': organization.name, 'short_name': organization.short_name, 'description': organization.description, 'logo': organization.logo }
[ "Organization", "object", "-", "to", "-", "dict", "serialization" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/serializers.py#L18-L28
[ "def", "serialize_organization", "(", "organization", ")", ":", "return", "{", "'id'", ":", "organization", ".", "id", ",", "'name'", ":", "organization", ".", "name", ",", "'short_name'", ":", "organization", ".", "short_name", ",", "'description'", ":", "organization", ".", "description", ",", "'logo'", ":", "organization", ".", "logo", "}" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
serialize_organization_with_course
OrganizationCourse serialization (composite object)
organizations/serializers.py
def serialize_organization_with_course(organization_course): """ OrganizationCourse serialization (composite object) """ return { 'id': organization_course.organization.id, 'name': organization_course.organization.name, 'short_name': organization_course.organization.short_name, 'description': organization_course.organization.description, 'logo': organization_course.organization.logo, 'course_id': organization_course.course_id }
def serialize_organization_with_course(organization_course): """ OrganizationCourse serialization (composite object) """ return { 'id': organization_course.organization.id, 'name': organization_course.organization.name, 'short_name': organization_course.organization.short_name, 'description': organization_course.organization.description, 'logo': organization_course.organization.logo, 'course_id': organization_course.course_id }
[ "OrganizationCourse", "serialization", "(", "composite", "object", ")" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/serializers.py#L31-L42
[ "def", "serialize_organization_with_course", "(", "organization_course", ")", ":", "return", "{", "'id'", ":", "organization_course", ".", "organization", ".", "id", ",", "'name'", ":", "organization_course", ".", "organization", ".", "name", ",", "'short_name'", ":", "organization_course", ".", "organization", ".", "short_name", ",", "'description'", ":", "organization_course", ".", "organization", ".", "description", ",", "'logo'", ":", "organization_course", ".", "organization", ".", "logo", ",", "'course_id'", ":", "organization_course", ".", "course_id", "}" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
deserialize_organization
Organization dict-to-object serialization
organizations/serializers.py
def deserialize_organization(organization_dict): """ Organization dict-to-object serialization """ return models.Organization( id=organization_dict.get('id'), name=organization_dict.get('name', ''), short_name=organization_dict.get('short_name', ''), description=organization_dict.get('description', ''), logo=organization_dict.get('logo', '') )
def deserialize_organization(organization_dict): """ Organization dict-to-object serialization """ return models.Organization( id=organization_dict.get('id'), name=organization_dict.get('name', ''), short_name=organization_dict.get('short_name', ''), description=organization_dict.get('description', ''), logo=organization_dict.get('logo', '') )
[ "Organization", "dict", "-", "to", "-", "object", "serialization" ]
edx/edx-organizations
python
https://github.com/edx/edx-organizations/blob/51000d5d359d880a6eb3a79345f60744f1982c00/organizations/serializers.py#L53-L63
[ "def", "deserialize_organization", "(", "organization_dict", ")", ":", "return", "models", ".", "Organization", "(", "id", "=", "organization_dict", ".", "get", "(", "'id'", ")", ",", "name", "=", "organization_dict", ".", "get", "(", "'name'", ",", "''", ")", ",", "short_name", "=", "organization_dict", ".", "get", "(", "'short_name'", ",", "''", ")", ",", "description", "=", "organization_dict", ".", "get", "(", "'description'", ",", "''", ")", ",", "logo", "=", "organization_dict", ".", "get", "(", "'logo'", ",", "''", ")", ")" ]
51000d5d359d880a6eb3a79345f60744f1982c00
valid
ImageExtractor.check_large_images
\ although slow the best way to determine the best image is to download them and check the actual dimensions of the image when on disk so we'll go through a phased approach... 1. get a list of ALL images from the parent node 2. filter out any bad image names that we know of (gifs, ads, etc..) 3. do a head request on each file to make sure it meets our bare requirements 4. any images left over let's do a full GET request, download em to disk and check their dimensions 5. Score images based on different factors like height/width and possibly things like color density
goose3/extractors/images.py
def check_large_images(self, node, parent_depth_level, sibling_depth_level): """\ although slow the best way to determine the best image is to download them and check the actual dimensions of the image when on disk so we'll go through a phased approach... 1. get a list of ALL images from the parent node 2. filter out any bad image names that we know of (gifs, ads, etc..) 3. do a head request on each file to make sure it meets our bare requirements 4. any images left over let's do a full GET request, download em to disk and check their dimensions 5. Score images based on different factors like height/width and possibly things like color density """ good_images = self.get_image_candidates(node) if good_images: scored_images = self.fetch_images(good_images, parent_depth_level) if scored_images: highscore_image = sorted(list(scored_images.items()), key=lambda x: x[1], reverse=True)[0][0] main_image = Image() main_image._src = highscore_image.src main_image._width = highscore_image.width main_image._height = highscore_image.height main_image._extraction_type = "bigimage" score_len = len(scored_images) main_image._confidence_score = 100 / score_len if score_len > 0 else 0 return main_image depth_obj = self.get_depth_level(node, parent_depth_level, sibling_depth_level) if depth_obj: return self.check_large_images(depth_obj.node, depth_obj.parent_depth, depth_obj.sibling_depth) return None
def check_large_images(self, node, parent_depth_level, sibling_depth_level): """\ although slow the best way to determine the best image is to download them and check the actual dimensions of the image when on disk so we'll go through a phased approach... 1. get a list of ALL images from the parent node 2. filter out any bad image names that we know of (gifs, ads, etc..) 3. do a head request on each file to make sure it meets our bare requirements 4. any images left over let's do a full GET request, download em to disk and check their dimensions 5. Score images based on different factors like height/width and possibly things like color density """ good_images = self.get_image_candidates(node) if good_images: scored_images = self.fetch_images(good_images, parent_depth_level) if scored_images: highscore_image = sorted(list(scored_images.items()), key=lambda x: x[1], reverse=True)[0][0] main_image = Image() main_image._src = highscore_image.src main_image._width = highscore_image.width main_image._height = highscore_image.height main_image._extraction_type = "bigimage" score_len = len(scored_images) main_image._confidence_score = 100 / score_len if score_len > 0 else 0 return main_image depth_obj = self.get_depth_level(node, parent_depth_level, sibling_depth_level) if depth_obj: return self.check_large_images(depth_obj.node, depth_obj.parent_depth, depth_obj.sibling_depth) return None
[ "\\", "although", "slow", "the", "best", "way", "to", "determine", "the", "best", "image", "is", "to", "download", "them", "and", "check", "the", "actual", "dimensions", "of", "the", "image", "when", "on", "disk", "so", "we", "ll", "go", "through", "a", "phased", "approach", "...", "1", ".", "get", "a", "list", "of", "ALL", "images", "from", "the", "parent", "node", "2", ".", "filter", "out", "any", "bad", "image", "names", "that", "we", "know", "of", "(", "gifs", "ads", "etc", "..", ")", "3", ".", "do", "a", "head", "request", "on", "each", "file", "to", "make", "sure", "it", "meets", "our", "bare", "requirements", "4", ".", "any", "images", "left", "over", "let", "s", "do", "a", "full", "GET", "request", "download", "em", "to", "disk", "and", "check", "their", "dimensions", "5", ".", "Score", "images", "based", "on", "different", "factors", "like", "height", "/", "width", "and", "possibly", "things", "like", "color", "density" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L95-L130
[ "def", "check_large_images", "(", "self", ",", "node", ",", "parent_depth_level", ",", "sibling_depth_level", ")", ":", "good_images", "=", "self", ".", "get_image_candidates", "(", "node", ")", "if", "good_images", ":", "scored_images", "=", "self", ".", "fetch_images", "(", "good_images", ",", "parent_depth_level", ")", "if", "scored_images", ":", "highscore_image", "=", "sorted", "(", "list", "(", "scored_images", ".", "items", "(", ")", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ",", "reverse", "=", "True", ")", "[", "0", "]", "[", "0", "]", "main_image", "=", "Image", "(", ")", "main_image", ".", "_src", "=", "highscore_image", ".", "src", "main_image", ".", "_width", "=", "highscore_image", ".", "width", "main_image", ".", "_height", "=", "highscore_image", ".", "height", "main_image", ".", "_extraction_type", "=", "\"bigimage\"", "score_len", "=", "len", "(", "scored_images", ")", "main_image", ".", "_confidence_score", "=", "100", "/", "score_len", "if", "score_len", ">", "0", "else", "0", "return", "main_image", "depth_obj", "=", "self", ".", "get_depth_level", "(", "node", ",", "parent_depth_level", ",", "sibling_depth_level", ")", "if", "depth_obj", ":", "return", "self", ".", "check_large_images", "(", "depth_obj", ".", "node", ",", "depth_obj", ".", "parent_depth", ",", "depth_obj", ".", "sibling_depth", ")", "return", "None" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ImageExtractor.is_banner_dimensions
\ returns true if we think this is kind of a bannery dimension like 600 / 100 = 6 may be a fishy dimension for a good image
goose3/extractors/images.py
def is_banner_dimensions(width, height): """\ returns true if we think this is kind of a bannery dimension like 600 / 100 = 6 may be a fishy dimension for a good image """ if width == height: return False if width > height: diff = float(width / height) if diff > 5: return True if height > width: diff = float(height / width) if diff > 5: return True return False
def is_banner_dimensions(width, height): """\ returns true if we think this is kind of a bannery dimension like 600 / 100 = 6 may be a fishy dimension for a good image """ if width == height: return False if width > height: diff = float(width / height) if diff > 5: return True if height > width: diff = float(height / width) if diff > 5: return True return False
[ "\\", "returns", "true", "if", "we", "think", "this", "is", "kind", "of", "a", "bannery", "dimension", "like", "600", "/", "100", "=", "6", "may", "be", "a", "fishy", "dimension", "for", "a", "good", "image" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L213-L231
[ "def", "is_banner_dimensions", "(", "width", ",", "height", ")", ":", "if", "width", "==", "height", ":", "return", "False", "if", "width", ">", "height", ":", "diff", "=", "float", "(", "width", "/", "height", ")", "if", "diff", ">", "5", ":", "return", "True", "if", "height", ">", "width", ":", "diff", "=", "float", "(", "height", "/", "width", ")", "if", "diff", ">", "5", ":", "return", "True", "return", "False" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ImageExtractor.is_valid_filename
\ will check the image src against a list of bad image files we know of like buttons, etc...
goose3/extractors/images.py
def is_valid_filename(self, image_node): """\ will check the image src against a list of bad image files we know of like buttons, etc... """ src = self.parser.getAttribute(image_node, attr='src') if not src: return False if self.badimages_names_re.search(src): return False return True
def is_valid_filename(self, image_node): """\ will check the image src against a list of bad image files we know of like buttons, etc... """ src = self.parser.getAttribute(image_node, attr='src') if not src: return False if self.badimages_names_re.search(src): return False return True
[ "\\", "will", "check", "the", "image", "src", "against", "a", "list", "of", "bad", "image", "files", "we", "know", "of", "like", "buttons", "etc", "..." ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L250-L263
[ "def", "is_valid_filename", "(", "self", ",", "image_node", ")", ":", "src", "=", "self", ".", "parser", ".", "getAttribute", "(", "image_node", ",", "attr", "=", "'src'", ")", "if", "not", "src", ":", "return", "False", "if", "self", ".", "badimages_names_re", ".", "search", "(", "src", ")", ":", "return", "False", "return", "True" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ImageExtractor.get_images_bytesize_match
\ loop through all the images and find the ones that have the best bytez to even make them a candidate
goose3/extractors/images.py
def get_images_bytesize_match(self, images): """\ loop through all the images and find the ones that have the best bytez to even make them a candidate """ cnt = 0 max_bytes_size = 15728640 good_images = [] for image in images: if cnt > 30: return good_images src = self.parser.getAttribute(image, attr='src') src = self.build_image_path(src) src = self.add_schema_if_none(src) local_image = self.get_local_image(src) if local_image: filesize = local_image.bytes if (filesize == 0 or filesize > self.images_min_bytes) and filesize < max_bytes_size: good_images.append(image) else: images.remove(image) cnt += 1 return good_images if len(good_images) > 0 else None
def get_images_bytesize_match(self, images): """\ loop through all the images and find the ones that have the best bytez to even make them a candidate """ cnt = 0 max_bytes_size = 15728640 good_images = [] for image in images: if cnt > 30: return good_images src = self.parser.getAttribute(image, attr='src') src = self.build_image_path(src) src = self.add_schema_if_none(src) local_image = self.get_local_image(src) if local_image: filesize = local_image.bytes if (filesize == 0 or filesize > self.images_min_bytes) and filesize < max_bytes_size: good_images.append(image) else: images.remove(image) cnt += 1 return good_images if len(good_images) > 0 else None
[ "\\", "loop", "through", "all", "the", "images", "and", "find", "the", "ones", "that", "have", "the", "best", "bytez", "to", "even", "make", "them", "a", "candidate" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L275-L297
[ "def", "get_images_bytesize_match", "(", "self", ",", "images", ")", ":", "cnt", "=", "0", "max_bytes_size", "=", "15728640", "good_images", "=", "[", "]", "for", "image", "in", "images", ":", "if", "cnt", ">", "30", ":", "return", "good_images", "src", "=", "self", ".", "parser", ".", "getAttribute", "(", "image", ",", "attr", "=", "'src'", ")", "src", "=", "self", ".", "build_image_path", "(", "src", ")", "src", "=", "self", ".", "add_schema_if_none", "(", "src", ")", "local_image", "=", "self", ".", "get_local_image", "(", "src", ")", "if", "local_image", ":", "filesize", "=", "local_image", ".", "bytes", "if", "(", "filesize", "==", "0", "or", "filesize", ">", "self", ".", "images_min_bytes", ")", "and", "filesize", "<", "max_bytes_size", ":", "good_images", ".", "append", "(", "image", ")", "else", ":", "images", ".", "remove", "(", "image", ")", "cnt", "+=", "1", "return", "good_images", "if", "len", "(", "good_images", ")", ">", "0", "else", "None" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ImageExtractor.check_link_tag
\ checks to see if we were able to find open link_src on this page
goose3/extractors/images.py
def check_link_tag(self): """\ checks to see if we were able to find open link_src on this page """ node = self.article.raw_doc meta = self.parser.getElementsByTag(node, tag='link', attr='rel', value='image_src') for item in meta: src = self.parser.getAttribute(item, attr='href') if src: return self.get_image(src, extraction_type='linktag') return None
def check_link_tag(self): """\ checks to see if we were able to find open link_src on this page """ node = self.article.raw_doc meta = self.parser.getElementsByTag(node, tag='link', attr='rel', value='image_src') for item in meta: src = self.parser.getAttribute(item, attr='href') if src: return self.get_image(src, extraction_type='linktag') return None
[ "\\", "checks", "to", "see", "if", "we", "were", "able", "to", "find", "open", "link_src", "on", "this", "page" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L303-L314
[ "def", "check_link_tag", "(", "self", ")", ":", "node", "=", "self", ".", "article", ".", "raw_doc", "meta", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "node", ",", "tag", "=", "'link'", ",", "attr", "=", "'rel'", ",", "value", "=", "'image_src'", ")", "for", "item", "in", "meta", ":", "src", "=", "self", ".", "parser", ".", "getAttribute", "(", "item", ",", "attr", "=", "'href'", ")", "if", "src", ":", "return", "self", ".", "get_image", "(", "src", ",", "extraction_type", "=", "'linktag'", ")", "return", "None" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ImageExtractor.check_known_schemas
\ checks to see if we were able to find the image via known schemas: Supported Schemas - Open Graph - schema.org
goose3/extractors/images.py
def check_known_schemas(self): """\ checks to see if we were able to find the image via known schemas: Supported Schemas - Open Graph - schema.org """ if 'image' in self.article.opengraph: return self.get_image(self.article.opengraph["image"], extraction_type='opengraph') elif (self.article.schema and 'image' in self.article.schema and "url" in self.article.schema["image"]): return self.get_image(self.article.schema["image"]["url"], extraction_type='schema.org') return None
def check_known_schemas(self): """\ checks to see if we were able to find the image via known schemas: Supported Schemas - Open Graph - schema.org """ if 'image' in self.article.opengraph: return self.get_image(self.article.opengraph["image"], extraction_type='opengraph') elif (self.article.schema and 'image' in self.article.schema and "url" in self.article.schema["image"]): return self.get_image(self.article.schema["image"]["url"], extraction_type='schema.org') return None
[ "\\", "checks", "to", "see", "if", "we", "were", "able", "to", "find", "the", "image", "via", "known", "schemas", ":" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L316-L331
[ "def", "check_known_schemas", "(", "self", ")", ":", "if", "'image'", "in", "self", ".", "article", ".", "opengraph", ":", "return", "self", ".", "get_image", "(", "self", ".", "article", ".", "opengraph", "[", "\"image\"", "]", ",", "extraction_type", "=", "'opengraph'", ")", "elif", "(", "self", ".", "article", ".", "schema", "and", "'image'", "in", "self", ".", "article", ".", "schema", "and", "\"url\"", "in", "self", ".", "article", ".", "schema", "[", "\"image\"", "]", ")", ":", "return", "self", ".", "get_image", "(", "self", ".", "article", ".", "schema", "[", "\"image\"", "]", "[", "\"url\"", "]", ",", "extraction_type", "=", "'schema.org'", ")", "return", "None" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ImageExtractor.get_local_image
\ returns the bytes of the image file on disk
goose3/extractors/images.py
def get_local_image(self, src): """\ returns the bytes of the image file on disk """ return ImageUtils.store_image(self.fetcher, self.article.link_hash, src, self.config)
def get_local_image(self, src): """\ returns the bytes of the image file on disk """ return ImageUtils.store_image(self.fetcher, self.article.link_hash, src, self.config)
[ "\\", "returns", "the", "bytes", "of", "the", "image", "file", "on", "disk" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L333-L337
[ "def", "get_local_image", "(", "self", ",", "src", ")", ":", "return", "ImageUtils", ".", "store_image", "(", "self", ".", "fetcher", ",", "self", ".", "article", ".", "link_hash", ",", "src", ",", "self", ".", "config", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ImageExtractor.build_image_path
\ This method will take an image path and build out the absolute path to that image * using the initial url we crawled so we can find a link to the image if they use relative urls like ../myimage.jpg
goose3/extractors/images.py
def build_image_path(self, src): """\ This method will take an image path and build out the absolute path to that image * using the initial url we crawled so we can find a link to the image if they use relative urls like ../myimage.jpg """ o = urlparse(src) # we have a full url if o.netloc != '': return o.geturl() # we have a relative url return urljoin(self.article.final_url, src)
def build_image_path(self, src): """\ This method will take an image path and build out the absolute path to that image * using the initial url we crawled so we can find a link to the image if they use relative urls like ../myimage.jpg """ o = urlparse(src) # we have a full url if o.netloc != '': return o.geturl() # we have a relative url return urljoin(self.article.final_url, src)
[ "\\", "This", "method", "will", "take", "an", "image", "path", "and", "build", "out", "the", "absolute", "path", "to", "that", "image", "*", "using", "the", "initial", "url", "we", "crawled", "so", "we", "can", "find", "a", "link", "to", "the", "image", "if", "they", "use", "relative", "urls", "like", "..", "/", "myimage", ".", "jpg" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/images.py#L392-L405
[ "def", "build_image_path", "(", "self", ",", "src", ")", ":", "o", "=", "urlparse", "(", "src", ")", "# we have a full url", "if", "o", ".", "netloc", "!=", "''", ":", "return", "o", ".", "geturl", "(", ")", "# we have a relative url", "return", "urljoin", "(", "self", ".", "article", ".", "final_url", ",", "src", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
VideoExtractor.get_video
Create a video object from a video embed
goose3/extractors/videos.py
def get_video(self, node): """ Create a video object from a video embed """ video = Video() video._embed_code = self.get_embed_code(node) video._embed_type = self.get_embed_type(node) video._width = self.get_width(node) video._height = self.get_height(node) video._src = self.get_src(node) video._provider = self.get_provider(video.src) return video
def get_video(self, node): """ Create a video object from a video embed """ video = Video() video._embed_code = self.get_embed_code(node) video._embed_type = self.get_embed_type(node) video._width = self.get_width(node) video._height = self.get_height(node) video._src = self.get_src(node) video._provider = self.get_provider(video.src) return video
[ "Create", "a", "video", "object", "from", "a", "video", "embed" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/videos.py#L67-L78
[ "def", "get_video", "(", "self", ",", "node", ")", ":", "video", "=", "Video", "(", ")", "video", ".", "_embed_code", "=", "self", ".", "get_embed_code", "(", "node", ")", "video", ".", "_embed_type", "=", "self", ".", "get_embed_type", "(", "node", ")", "video", ".", "_width", "=", "self", ".", "get_width", "(", "node", ")", "video", ".", "_height", "=", "self", ".", "get_height", "(", "node", ")", "video", ".", "_src", "=", "self", ".", "get_src", "(", "node", ")", "video", ".", "_provider", "=", "self", ".", "get_provider", "(", "video", ".", "src", ")", "return", "video" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
get_encodings_from_content
Code from: https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py Return encodings from given content string. :param content: string to extract encodings from.
goose3/text.py
def get_encodings_from_content(content): """ Code from: https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py Return encodings from given content string. :param content: string to extract encodings from. """ if isinstance(content, bytes): find_charset = re.compile( br'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I ).findall find_xml = re.compile( br'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]' ).findall return [encoding.decode('utf-8') for encoding in find_charset(content) + find_xml(content)] else: find_charset = re.compile( r'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I ).findall find_xml = re.compile( r'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]' ).findall return find_charset(content) + find_xml(content)
def get_encodings_from_content(content): """ Code from: https://github.com/sigmavirus24/requests-toolbelt/blob/master/requests_toolbelt/utils/deprecated.py Return encodings from given content string. :param content: string to extract encodings from. """ if isinstance(content, bytes): find_charset = re.compile( br'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I ).findall find_xml = re.compile( br'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]' ).findall return [encoding.decode('utf-8') for encoding in find_charset(content) + find_xml(content)] else: find_charset = re.compile( r'<meta.*?charset=["\']*([a-z0-9\-_]+?) *?["\'>]', flags=re.I ).findall find_xml = re.compile( r'^<\?xml.*?encoding=["\']*([a-z0-9\-_]+?) *?["\'>]' ).findall return find_charset(content) + find_xml(content)
[ "Code", "from", ":", "https", ":", "//", "github", ".", "com", "/", "sigmavirus24", "/", "requests", "-", "toolbelt", "/", "blob", "/", "master", "/", "requests_toolbelt", "/", "utils", "/", "deprecated", ".", "py", "Return", "encodings", "from", "given", "content", "string", ".", ":", "param", "content", ":", "string", "to", "extract", "encodings", "from", "." ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/text.py#L35-L60
[ "def", "get_encodings_from_content", "(", "content", ")", ":", "if", "isinstance", "(", "content", ",", "bytes", ")", ":", "find_charset", "=", "re", ".", "compile", "(", "br'<meta.*?charset=[\"\\']*([a-z0-9\\-_]+?) *?[\"\\'>]'", ",", "flags", "=", "re", ".", "I", ")", ".", "findall", "find_xml", "=", "re", ".", "compile", "(", "br'^<\\?xml.*?encoding=[\"\\']*([a-z0-9\\-_]+?) *?[\"\\'>]'", ")", ".", "findall", "return", "[", "encoding", ".", "decode", "(", "'utf-8'", ")", "for", "encoding", "in", "find_charset", "(", "content", ")", "+", "find_xml", "(", "content", ")", "]", "else", ":", "find_charset", "=", "re", ".", "compile", "(", "r'<meta.*?charset=[\"\\']*([a-z0-9\\-_]+?) *?[\"\\'>]'", ",", "flags", "=", "re", ".", "I", ")", ".", "findall", "find_xml", "=", "re", ".", "compile", "(", "r'^<\\?xml.*?encoding=[\"\\']*([a-z0-9\\-_]+?) *?[\"\\'>]'", ")", ".", "findall", "return", "find_charset", "(", "content", ")", "+", "find_xml", "(", "content", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ImageUtils.store_image
\ Writes an image src http string to disk as a temporary file and returns the LocallyStoredImage object that has the info you should need on the image
goose3/utils/images.py
def store_image(cls, http_client, link_hash, src, config): """\ Writes an image src http string to disk as a temporary file and returns the LocallyStoredImage object that has the info you should need on the image """ # check for a cache hit already on disk image = cls.read_localfile(link_hash, src, config) if image: return image # no cache found; do something else # parse base64 image if src.startswith('data:image'): image = cls.write_localfile_base64(link_hash, src, config) return image # download the image data = http_client.fetch(src) if data: image = cls.write_localfile(data, link_hash, src, config) if image: return image return None
def store_image(cls, http_client, link_hash, src, config): """\ Writes an image src http string to disk as a temporary file and returns the LocallyStoredImage object that has the info you should need on the image """ # check for a cache hit already on disk image = cls.read_localfile(link_hash, src, config) if image: return image # no cache found; do something else # parse base64 image if src.startswith('data:image'): image = cls.write_localfile_base64(link_hash, src, config) return image # download the image data = http_client.fetch(src) if data: image = cls.write_localfile(data, link_hash, src, config) if image: return image return None
[ "\\", "Writes", "an", "image", "src", "http", "string", "to", "disk", "as", "a", "temporary", "file", "and", "returns", "the", "LocallyStoredImage", "object", "that", "has", "the", "info", "you", "should", "need", "on", "the", "image" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/utils/images.py#L60-L85
[ "def", "store_image", "(", "cls", ",", "http_client", ",", "link_hash", ",", "src", ",", "config", ")", ":", "# check for a cache hit already on disk", "image", "=", "cls", ".", "read_localfile", "(", "link_hash", ",", "src", ",", "config", ")", "if", "image", ":", "return", "image", "# no cache found; do something else", "# parse base64 image", "if", "src", ".", "startswith", "(", "'data:image'", ")", ":", "image", "=", "cls", ".", "write_localfile_base64", "(", "link_hash", ",", "src", ",", "config", ")", "return", "image", "# download the image", "data", "=", "http_client", ".", "fetch", "(", "src", ")", "if", "data", ":", "image", "=", "cls", ".", "write_localfile", "(", "data", ",", "link_hash", ",", "src", ",", "config", ")", "if", "image", ":", "return", "image", "return", "None" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
Configuration.known_context_patterns
val must be an ArticleContextPattern, a dictionary, or list of \ dictionaries e.g., {'attr': 'class', 'value': 'my-article-class'} or [{'attr': 'class', 'value': 'my-article-class'}, {'attr': 'id', 'value': 'my-article-id'}]
goose3/configuration.py
def known_context_patterns(self, val): ''' val must be an ArticleContextPattern, a dictionary, or list of \ dictionaries e.g., {'attr': 'class', 'value': 'my-article-class'} or [{'attr': 'class', 'value': 'my-article-class'}, {'attr': 'id', 'value': 'my-article-id'}] ''' def create_pat_from_dict(val): '''Helper function used to create an ArticleContextPattern from a dictionary ''' if "tag" in val: pat = ArticleContextPattern(tag=val["tag"]) if "attr" in val: pat.attr = val["attr"] pat.value = val["value"] elif "attr" in val: pat = ArticleContextPattern(attr=val["attr"], value=val["value"]) if "domain" in val: pat.domain = val["domain"] return pat if isinstance(val, list): self._known_context_patterns = [ x if isinstance(x, ArticleContextPattern) else create_pat_from_dict(x) for x in val ] + self.known_context_patterns elif isinstance(val, ArticleContextPattern): self._known_context_patterns.insert(0, val) elif isinstance(val, dict): self._known_context_patterns.insert(0, create_pat_from_dict(val)) else: raise Exception("Unknown type: {}. Use a ArticleContextPattern.".format(type(val)))
def known_context_patterns(self, val): ''' val must be an ArticleContextPattern, a dictionary, or list of \ dictionaries e.g., {'attr': 'class', 'value': 'my-article-class'} or [{'attr': 'class', 'value': 'my-article-class'}, {'attr': 'id', 'value': 'my-article-id'}] ''' def create_pat_from_dict(val): '''Helper function used to create an ArticleContextPattern from a dictionary ''' if "tag" in val: pat = ArticleContextPattern(tag=val["tag"]) if "attr" in val: pat.attr = val["attr"] pat.value = val["value"] elif "attr" in val: pat = ArticleContextPattern(attr=val["attr"], value=val["value"]) if "domain" in val: pat.domain = val["domain"] return pat if isinstance(val, list): self._known_context_patterns = [ x if isinstance(x, ArticleContextPattern) else create_pat_from_dict(x) for x in val ] + self.known_context_patterns elif isinstance(val, ArticleContextPattern): self._known_context_patterns.insert(0, val) elif isinstance(val, dict): self._known_context_patterns.insert(0, create_pat_from_dict(val)) else: raise Exception("Unknown type: {}. Use a ArticleContextPattern.".format(type(val)))
[ "val", "must", "be", "an", "ArticleContextPattern", "a", "dictionary", "or", "list", "of", "\\", "dictionaries", "e", ".", "g", ".", "{", "attr", ":", "class", "value", ":", "my", "-", "article", "-", "class", "}", "or", "[", "{", "attr", ":", "class", "value", ":", "my", "-", "article", "-", "class", "}", "{", "attr", ":", "id", "value", ":", "my", "-", "article", "-", "id", "}", "]" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/configuration.py#L216-L249
[ "def", "known_context_patterns", "(", "self", ",", "val", ")", ":", "def", "create_pat_from_dict", "(", "val", ")", ":", "'''Helper function used to create an ArticleContextPattern from a dictionary\n '''", "if", "\"tag\"", "in", "val", ":", "pat", "=", "ArticleContextPattern", "(", "tag", "=", "val", "[", "\"tag\"", "]", ")", "if", "\"attr\"", "in", "val", ":", "pat", ".", "attr", "=", "val", "[", "\"attr\"", "]", "pat", ".", "value", "=", "val", "[", "\"value\"", "]", "elif", "\"attr\"", "in", "val", ":", "pat", "=", "ArticleContextPattern", "(", "attr", "=", "val", "[", "\"attr\"", "]", ",", "value", "=", "val", "[", "\"value\"", "]", ")", "if", "\"domain\"", "in", "val", ":", "pat", ".", "domain", "=", "val", "[", "\"domain\"", "]", "return", "pat", "if", "isinstance", "(", "val", ",", "list", ")", ":", "self", ".", "_known_context_patterns", "=", "[", "x", "if", "isinstance", "(", "x", ",", "ArticleContextPattern", ")", "else", "create_pat_from_dict", "(", "x", ")", "for", "x", "in", "val", "]", "+", "self", ".", "known_context_patterns", "elif", "isinstance", "(", "val", ",", "ArticleContextPattern", ")", ":", "self", ".", "_known_context_patterns", ".", "insert", "(", "0", ",", "val", ")", "elif", "isinstance", "(", "val", ",", "dict", ")", ":", "self", ".", "_known_context_patterns", ".", "insert", "(", "0", ",", "create_pat_from_dict", "(", "val", ")", ")", "else", ":", "raise", "Exception", "(", "\"Unknown type: {}. Use a ArticleContextPattern.\"", ".", "format", "(", "type", "(", "val", ")", ")", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
Configuration.known_publish_date_tags
val must be a dictionary or list of dictionaries e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'} or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}, {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}]
goose3/configuration.py
def known_publish_date_tags(self, val): ''' val must be a dictionary or list of dictionaries e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'} or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}, {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}] ''' def create_pat_from_dict(val): '''Helper function used to create an PublishDatePattern from a dictionary ''' if "tag" in val: pat = PublishDatePattern(tag=val["tag"]) if "attribute" in val: pat.attr = val["attribute"] pat.value = val["value"] elif "attribute" in val: pat = PublishDatePattern(attr=val["attribute"], value=val["value"], content=val["content"]) if "subcontent" in val: pat.subcontent = val["subcontent"] if "domain" in val: pat.domain = val["domain"] return pat if isinstance(val, list): self._known_publish_date_tags = [ x if isinstance(x, PublishDatePattern) else create_pat_from_dict(x) for x in val ] + self.known_publish_date_tags elif isinstance(val, PublishDatePattern): self._known_publish_date_tags.insert(0, val) elif isinstance(val, dict): self._known_publish_date_tags.insert(0, create_pat_from_dict(val)) else: raise Exception("Unknown type: {}. Use a PublishDatePattern.".format(type(val)))
def known_publish_date_tags(self, val): ''' val must be a dictionary or list of dictionaries e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'} or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}, {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}] ''' def create_pat_from_dict(val): '''Helper function used to create an PublishDatePattern from a dictionary ''' if "tag" in val: pat = PublishDatePattern(tag=val["tag"]) if "attribute" in val: pat.attr = val["attribute"] pat.value = val["value"] elif "attribute" in val: pat = PublishDatePattern(attr=val["attribute"], value=val["value"], content=val["content"]) if "subcontent" in val: pat.subcontent = val["subcontent"] if "domain" in val: pat.domain = val["domain"] return pat if isinstance(val, list): self._known_publish_date_tags = [ x if isinstance(x, PublishDatePattern) else create_pat_from_dict(x) for x in val ] + self.known_publish_date_tags elif isinstance(val, PublishDatePattern): self._known_publish_date_tags.insert(0, val) elif isinstance(val, dict): self._known_publish_date_tags.insert(0, create_pat_from_dict(val)) else: raise Exception("Unknown type: {}. Use a PublishDatePattern.".format(type(val)))
[ "val", "must", "be", "a", "dictionary", "or", "list", "of", "dictionaries", "e", ".", "g", ".", "{", "attrribute", ":", "name", "value", ":", "my", "-", "pubdate", "content", ":", "datetime", "}", "or", "[", "{", "attrribute", ":", "name", "value", ":", "my", "-", "pubdate", "content", ":", "datetime", "}", "{", "attrribute", ":", "property", "value", ":", "pub_time", "content", ":", "content", "}", "]" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/configuration.py#L262-L297
[ "def", "known_publish_date_tags", "(", "self", ",", "val", ")", ":", "def", "create_pat_from_dict", "(", "val", ")", ":", "'''Helper function used to create an PublishDatePattern from a dictionary\n '''", "if", "\"tag\"", "in", "val", ":", "pat", "=", "PublishDatePattern", "(", "tag", "=", "val", "[", "\"tag\"", "]", ")", "if", "\"attribute\"", "in", "val", ":", "pat", ".", "attr", "=", "val", "[", "\"attribute\"", "]", "pat", ".", "value", "=", "val", "[", "\"value\"", "]", "elif", "\"attribute\"", "in", "val", ":", "pat", "=", "PublishDatePattern", "(", "attr", "=", "val", "[", "\"attribute\"", "]", ",", "value", "=", "val", "[", "\"value\"", "]", ",", "content", "=", "val", "[", "\"content\"", "]", ")", "if", "\"subcontent\"", "in", "val", ":", "pat", ".", "subcontent", "=", "val", "[", "\"subcontent\"", "]", "if", "\"domain\"", "in", "val", ":", "pat", ".", "domain", "=", "val", "[", "\"domain\"", "]", "return", "pat", "if", "isinstance", "(", "val", ",", "list", ")", ":", "self", ".", "_known_publish_date_tags", "=", "[", "x", "if", "isinstance", "(", "x", ",", "PublishDatePattern", ")", "else", "create_pat_from_dict", "(", "x", ")", "for", "x", "in", "val", "]", "+", "self", ".", "known_publish_date_tags", "elif", "isinstance", "(", "val", ",", "PublishDatePattern", ")", ":", "self", ".", "_known_publish_date_tags", ".", "insert", "(", "0", ",", "val", ")", "elif", "isinstance", "(", "val", ",", "dict", ")", ":", "self", ".", "_known_publish_date_tags", ".", "insert", "(", "0", ",", "create_pat_from_dict", "(", "val", ")", ")", "else", ":", "raise", "Exception", "(", "\"Unknown type: {}. Use a PublishDatePattern.\"", ".", "format", "(", "type", "(", "val", ")", ")", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
Configuration.known_author_patterns
val must be a dictionary or list of dictionaries e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'} or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}, {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}]
goose3/configuration.py
def known_author_patterns(self, val): ''' val must be a dictionary or list of dictionaries e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'} or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}, {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}] ''' def create_pat_from_dict(val): '''Helper function used to create an AuthorPatterns from a dictionary ''' if "tag" in val: pat = AuthorPattern(tag=val["tag"]) if "attribute" in val: pat.attr = val["attribute"] pat.value = val["value"] elif "attribute" in val: pat = AuthorPattern(attr=val["attribute"], value=val["value"], content=val["content"]) if "subpattern" in val: pat.subpattern = create_pat_from_dict(val["subpattern"]) return pat if isinstance(val, list): self._known_author_patterns = [ x if isinstance(x, AuthorPattern) else create_pat_from_dict(x) for x in val ] + self.known_author_patterns elif isinstance(val, AuthorPattern): self._known_author_patterns.insert(0, val) elif isinstance(val, dict): self._known_author_patterns.insert(0, create_pat_from_dict(val)) else: raise Exception("Unknown type: {}. Use an AuthorPattern.".format(type(val)))
def known_author_patterns(self, val): ''' val must be a dictionary or list of dictionaries e.g., {'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'} or [{'attrribute': 'name', 'value': 'my-pubdate', 'content': 'datetime'}, {'attrribute': 'property', 'value': 'pub_time', 'content': 'content'}] ''' def create_pat_from_dict(val): '''Helper function used to create an AuthorPatterns from a dictionary ''' if "tag" in val: pat = AuthorPattern(tag=val["tag"]) if "attribute" in val: pat.attr = val["attribute"] pat.value = val["value"] elif "attribute" in val: pat = AuthorPattern(attr=val["attribute"], value=val["value"], content=val["content"]) if "subpattern" in val: pat.subpattern = create_pat_from_dict(val["subpattern"]) return pat if isinstance(val, list): self._known_author_patterns = [ x if isinstance(x, AuthorPattern) else create_pat_from_dict(x) for x in val ] + self.known_author_patterns elif isinstance(val, AuthorPattern): self._known_author_patterns.insert(0, val) elif isinstance(val, dict): self._known_author_patterns.insert(0, create_pat_from_dict(val)) else: raise Exception("Unknown type: {}. Use an AuthorPattern.".format(type(val)))
[ "val", "must", "be", "a", "dictionary", "or", "list", "of", "dictionaries", "e", ".", "g", ".", "{", "attrribute", ":", "name", "value", ":", "my", "-", "pubdate", "content", ":", "datetime", "}", "or", "[", "{", "attrribute", ":", "name", "value", ":", "my", "-", "pubdate", "content", ":", "datetime", "}", "{", "attrribute", ":", "property", "value", ":", "pub_time", "content", ":", "content", "}", "]" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/configuration.py#L310-L343
[ "def", "known_author_patterns", "(", "self", ",", "val", ")", ":", "def", "create_pat_from_dict", "(", "val", ")", ":", "'''Helper function used to create an AuthorPatterns from a dictionary\n '''", "if", "\"tag\"", "in", "val", ":", "pat", "=", "AuthorPattern", "(", "tag", "=", "val", "[", "\"tag\"", "]", ")", "if", "\"attribute\"", "in", "val", ":", "pat", ".", "attr", "=", "val", "[", "\"attribute\"", "]", "pat", ".", "value", "=", "val", "[", "\"value\"", "]", "elif", "\"attribute\"", "in", "val", ":", "pat", "=", "AuthorPattern", "(", "attr", "=", "val", "[", "\"attribute\"", "]", ",", "value", "=", "val", "[", "\"value\"", "]", ",", "content", "=", "val", "[", "\"content\"", "]", ")", "if", "\"subpattern\"", "in", "val", ":", "pat", ".", "subpattern", "=", "create_pat_from_dict", "(", "val", "[", "\"subpattern\"", "]", ")", "return", "pat", "if", "isinstance", "(", "val", ",", "list", ")", ":", "self", ".", "_known_author_patterns", "=", "[", "x", "if", "isinstance", "(", "x", ",", "AuthorPattern", ")", "else", "create_pat_from_dict", "(", "x", ")", "for", "x", "in", "val", "]", "+", "self", ".", "known_author_patterns", "elif", "isinstance", "(", "val", ",", "AuthorPattern", ")", ":", "self", ".", "_known_author_patterns", ".", "insert", "(", "0", ",", "val", ")", "elif", "isinstance", "(", "val", ",", "dict", ")", ":", "self", ".", "_known_author_patterns", ".", "insert", "(", "0", ",", "create_pat_from_dict", "(", "val", ")", ")", "else", ":", "raise", "Exception", "(", "\"Unknown type: {}. Use an AuthorPattern.\"", ".", "format", "(", "type", "(", "val", ")", ")", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ContentExtractor.get_siblings_content
adds any siblings that may have a decent score to this node
goose3/extractors/content.py
def get_siblings_content(self, current_sibling, baselinescore_siblings_para): """ adds any siblings that may have a decent score to this node """ if current_sibling.tag == 'p' and self.parser.getText(current_sibling): tmp = current_sibling if tmp.tail: tmp = deepcopy(tmp) tmp.tail = '' return [tmp] else: potential_paragraphs = self.parser.getElementsByTag(current_sibling, tag='p') if potential_paragraphs is None: return None paragraphs = list() for first_paragraph in potential_paragraphs: text = self.parser.getText(first_paragraph) if text: # no len(text) > 0 word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text) paragraph_score = word_stats.get_stopword_count() sibling_baseline_score = float(.30) high_link_density = self.is_highlink_density(first_paragraph) score = float(baselinescore_siblings_para * sibling_baseline_score) if score < paragraph_score and not high_link_density: para = self.parser.createElement(tag='p', text=text, tail=None) paragraphs.append(para) return paragraphs
def get_siblings_content(self, current_sibling, baselinescore_siblings_para): """ adds any siblings that may have a decent score to this node """ if current_sibling.tag == 'p' and self.parser.getText(current_sibling): tmp = current_sibling if tmp.tail: tmp = deepcopy(tmp) tmp.tail = '' return [tmp] else: potential_paragraphs = self.parser.getElementsByTag(current_sibling, tag='p') if potential_paragraphs is None: return None paragraphs = list() for first_paragraph in potential_paragraphs: text = self.parser.getText(first_paragraph) if text: # no len(text) > 0 word_stats = self.stopwords_class(language=self.get_language()).get_stopword_count(text) paragraph_score = word_stats.get_stopword_count() sibling_baseline_score = float(.30) high_link_density = self.is_highlink_density(first_paragraph) score = float(baselinescore_siblings_para * sibling_baseline_score) if score < paragraph_score and not high_link_density: para = self.parser.createElement(tag='p', text=text, tail=None) paragraphs.append(para) return paragraphs
[ "adds", "any", "siblings", "that", "may", "have", "a", "decent", "score", "to", "this", "node" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/content.py#L198-L225
[ "def", "get_siblings_content", "(", "self", ",", "current_sibling", ",", "baselinescore_siblings_para", ")", ":", "if", "current_sibling", ".", "tag", "==", "'p'", "and", "self", ".", "parser", ".", "getText", "(", "current_sibling", ")", ":", "tmp", "=", "current_sibling", "if", "tmp", ".", "tail", ":", "tmp", "=", "deepcopy", "(", "tmp", ")", "tmp", ".", "tail", "=", "''", "return", "[", "tmp", "]", "else", ":", "potential_paragraphs", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "current_sibling", ",", "tag", "=", "'p'", ")", "if", "potential_paragraphs", "is", "None", ":", "return", "None", "paragraphs", "=", "list", "(", ")", "for", "first_paragraph", "in", "potential_paragraphs", ":", "text", "=", "self", ".", "parser", ".", "getText", "(", "first_paragraph", ")", "if", "text", ":", "# no len(text) > 0", "word_stats", "=", "self", ".", "stopwords_class", "(", "language", "=", "self", ".", "get_language", "(", ")", ")", ".", "get_stopword_count", "(", "text", ")", "paragraph_score", "=", "word_stats", ".", "get_stopword_count", "(", ")", "sibling_baseline_score", "=", "float", "(", ".30", ")", "high_link_density", "=", "self", ".", "is_highlink_density", "(", "first_paragraph", ")", "score", "=", "float", "(", "baselinescore_siblings_para", "*", "sibling_baseline_score", ")", "if", "score", "<", "paragraph_score", "and", "not", "high_link_density", ":", "para", "=", "self", ".", "parser", ".", "createElement", "(", "tag", "=", "'p'", ",", "text", "=", "text", ",", "tail", "=", "None", ")", "paragraphs", ".", "append", "(", "para", ")", "return", "paragraphs" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ContentExtractor.is_highlink_density
checks the density of links within a node, is there not much text and most of it contains linky shit? if so it's no good
goose3/extractors/content.py
def is_highlink_density(self, element): """ checks the density of links within a node, is there not much text and most of it contains linky shit? if so it's no good """ links = self.parser.getElementsByTag(element, tag='a') if not links: return False text = self.parser.getText(element) words = text.split(' ') words_number = float(len(words)) link_text_parts = [] for link in links: link_text_parts.append(self.parser.getText(link)) link_text = ''.join(link_text_parts) link_words = link_text.split(' ') number_of_link_words = float(len(link_words)) number_of_links = float(len(links)) link_divisor = float(number_of_link_words / words_number) score = float(link_divisor * number_of_links) if score >= 1.0: return True return False
def is_highlink_density(self, element): """ checks the density of links within a node, is there not much text and most of it contains linky shit? if so it's no good """ links = self.parser.getElementsByTag(element, tag='a') if not links: return False text = self.parser.getText(element) words = text.split(' ') words_number = float(len(words)) link_text_parts = [] for link in links: link_text_parts.append(self.parser.getText(link)) link_text = ''.join(link_text_parts) link_words = link_text.split(' ') number_of_link_words = float(len(link_words)) number_of_links = float(len(links)) link_divisor = float(number_of_link_words / words_number) score = float(link_divisor * number_of_links) if score >= 1.0: return True return False
[ "checks", "the", "density", "of", "links", "within", "a", "node", "is", "there", "not", "much", "text", "and", "most", "of", "it", "contains", "linky", "shit?", "if", "so", "it", "s", "no", "good" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/content.py#L281-L306
[ "def", "is_highlink_density", "(", "self", ",", "element", ")", ":", "links", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "element", ",", "tag", "=", "'a'", ")", "if", "not", "links", ":", "return", "False", "text", "=", "self", ".", "parser", ".", "getText", "(", "element", ")", "words", "=", "text", ".", "split", "(", "' '", ")", "words_number", "=", "float", "(", "len", "(", "words", ")", ")", "link_text_parts", "=", "[", "]", "for", "link", "in", "links", ":", "link_text_parts", ".", "append", "(", "self", ".", "parser", ".", "getText", "(", "link", ")", ")", "link_text", "=", "''", ".", "join", "(", "link_text_parts", ")", "link_words", "=", "link_text", ".", "split", "(", "' '", ")", "number_of_link_words", "=", "float", "(", "len", "(", "link_words", ")", ")", "number_of_links", "=", "float", "(", "len", "(", "links", ")", ")", "link_divisor", "=", "float", "(", "number_of_link_words", "/", "words_number", ")", "score", "=", "float", "(", "link_divisor", "*", "number_of_links", ")", "if", "score", ">=", "1.0", ":", "return", "True", "return", "False" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ContentExtractor.nodes_to_check
\ returns a list of nodes we want to search on like paragraphs and tables
goose3/extractors/content.py
def nodes_to_check(self, docs): """\ returns a list of nodes we want to search on like paragraphs and tables """ nodes_to_check = [] for doc in docs: for tag in ['p', 'pre', 'td']: items = self.parser.getElementsByTag(doc, tag=tag) nodes_to_check += items return nodes_to_check
def nodes_to_check(self, docs): """\ returns a list of nodes we want to search on like paragraphs and tables """ nodes_to_check = [] for doc in docs: for tag in ['p', 'pre', 'td']: items = self.parser.getElementsByTag(doc, tag=tag) nodes_to_check += items return nodes_to_check
[ "\\", "returns", "a", "list", "of", "nodes", "we", "want", "to", "search", "on", "like", "paragraphs", "and", "tables" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/content.py#L321-L332
[ "def", "nodes_to_check", "(", "self", ",", "docs", ")", ":", "nodes_to_check", "=", "[", "]", "for", "doc", "in", "docs", ":", "for", "tag", "in", "[", "'p'", ",", "'pre'", ",", "'td'", "]", ":", "items", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "doc", ",", "tag", "=", "tag", ")", "nodes_to_check", "+=", "items", "return", "nodes_to_check" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
ContentExtractor.post_cleanup
\ remove any divs that looks like non-content, clusters of links, or paras with no gusto
goose3/extractors/content.py
def post_cleanup(self): """\ remove any divs that looks like non-content, clusters of links, or paras with no gusto """ parse_tags = ['p'] if self.config.parse_lists: parse_tags.extend(['ul', 'ol']) if self.config.parse_headers: parse_tags.extend(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']) target_node = self.article.top_node node = self.add_siblings(target_node) for elm in self.parser.getChildren(node): e_tag = self.parser.getTag(elm) if e_tag not in parse_tags: if (self.is_highlink_density(elm) or self.is_table_and_no_para_exist(elm) or not self.is_nodescore_threshold_met(node, elm)): self.parser.remove(elm) return node
def post_cleanup(self): """\ remove any divs that looks like non-content, clusters of links, or paras with no gusto """ parse_tags = ['p'] if self.config.parse_lists: parse_tags.extend(['ul', 'ol']) if self.config.parse_headers: parse_tags.extend(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']) target_node = self.article.top_node node = self.add_siblings(target_node) for elm in self.parser.getChildren(node): e_tag = self.parser.getTag(elm) if e_tag not in parse_tags: if (self.is_highlink_density(elm) or self.is_table_and_no_para_exist(elm) or not self.is_nodescore_threshold_met(node, elm)): self.parser.remove(elm) return node
[ "\\", "remove", "any", "divs", "that", "looks", "like", "non", "-", "content", "clusters", "of", "links", "or", "paras", "with", "no", "gusto" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/content.py#L355-L374
[ "def", "post_cleanup", "(", "self", ")", ":", "parse_tags", "=", "[", "'p'", "]", "if", "self", ".", "config", ".", "parse_lists", ":", "parse_tags", ".", "extend", "(", "[", "'ul'", ",", "'ol'", "]", ")", "if", "self", ".", "config", ".", "parse_headers", ":", "parse_tags", ".", "extend", "(", "[", "'h1'", ",", "'h2'", ",", "'h3'", ",", "'h4'", ",", "'h5'", ",", "'h6'", "]", ")", "target_node", "=", "self", ".", "article", ".", "top_node", "node", "=", "self", ".", "add_siblings", "(", "target_node", ")", "for", "elm", "in", "self", ".", "parser", ".", "getChildren", "(", "node", ")", ":", "e_tag", "=", "self", ".", "parser", ".", "getTag", "(", "elm", ")", "if", "e_tag", "not", "in", "parse_tags", ":", "if", "(", "self", ".", "is_highlink_density", "(", "elm", ")", "or", "self", ".", "is_table_and_no_para_exist", "(", "elm", ")", "or", "not", "self", ".", "is_nodescore_threshold_met", "(", "node", ",", "elm", ")", ")", ":", "self", ".", "parser", ".", "remove", "(", "elm", ")", "return", "node" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
Article.infos
dict: The summation of all data available about the extracted article Note: Read only
goose3/article.py
def infos(self): ''' dict: The summation of all data available about the extracted article Note: Read only ''' data = { "meta": { "description": self.meta_description, "lang": self.meta_lang, "keywords": self.meta_keywords, "favicon": self.meta_favicon, "canonical": self.canonical_link, "encoding": self.meta_encoding }, "image": None, "domain": self.domain, "title": self.title, "cleaned_text": self.cleaned_text, "opengraph": self.opengraph, "tags": self.tags, "tweets": self.tweets, "movies": [], "links": self.links, "authors": self.authors, "publish_date": self.publish_date } # image if self.top_image is not None: data['image'] = { 'url': self.top_image.src, 'width': self.top_image.width, 'height': self.top_image.height, 'type': 'image' } # movies for movie in self.movies: data['movies'].append({ 'embed_type': movie.embed_type, 'provider': movie.provider, 'width': movie.width, 'height': movie.height, 'embed_code': movie.embed_code, 'src': movie.src, }) return data
def infos(self): ''' dict: The summation of all data available about the extracted article Note: Read only ''' data = { "meta": { "description": self.meta_description, "lang": self.meta_lang, "keywords": self.meta_keywords, "favicon": self.meta_favicon, "canonical": self.canonical_link, "encoding": self.meta_encoding }, "image": None, "domain": self.domain, "title": self.title, "cleaned_text": self.cleaned_text, "opengraph": self.opengraph, "tags": self.tags, "tweets": self.tweets, "movies": [], "links": self.links, "authors": self.authors, "publish_date": self.publish_date } # image if self.top_image is not None: data['image'] = { 'url': self.top_image.src, 'width': self.top_image.width, 'height': self.top_image.height, 'type': 'image' } # movies for movie in self.movies: data['movies'].append({ 'embed_type': movie.embed_type, 'provider': movie.provider, 'width': movie.width, 'height': movie.height, 'embed_code': movie.embed_code, 'src': movie.src, }) return data
[ "dict", ":", "The", "summation", "of", "all", "data", "available", "about", "the", "extracted", "article" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/article.py#L271-L318
[ "def", "infos", "(", "self", ")", ":", "data", "=", "{", "\"meta\"", ":", "{", "\"description\"", ":", "self", ".", "meta_description", ",", "\"lang\"", ":", "self", ".", "meta_lang", ",", "\"keywords\"", ":", "self", ".", "meta_keywords", ",", "\"favicon\"", ":", "self", ".", "meta_favicon", ",", "\"canonical\"", ":", "self", ".", "canonical_link", ",", "\"encoding\"", ":", "self", ".", "meta_encoding", "}", ",", "\"image\"", ":", "None", ",", "\"domain\"", ":", "self", ".", "domain", ",", "\"title\"", ":", "self", ".", "title", ",", "\"cleaned_text\"", ":", "self", ".", "cleaned_text", ",", "\"opengraph\"", ":", "self", ".", "opengraph", ",", "\"tags\"", ":", "self", ".", "tags", ",", "\"tweets\"", ":", "self", ".", "tweets", ",", "\"movies\"", ":", "[", "]", ",", "\"links\"", ":", "self", ".", "links", ",", "\"authors\"", ":", "self", ".", "authors", ",", "\"publish_date\"", ":", "self", ".", "publish_date", "}", "# image", "if", "self", ".", "top_image", "is", "not", "None", ":", "data", "[", "'image'", "]", "=", "{", "'url'", ":", "self", ".", "top_image", ".", "src", ",", "'width'", ":", "self", ".", "top_image", ".", "width", ",", "'height'", ":", "self", ".", "top_image", ".", "height", ",", "'type'", ":", "'image'", "}", "# movies", "for", "movie", "in", "self", ".", "movies", ":", "data", "[", "'movies'", "]", ".", "append", "(", "{", "'embed_type'", ":", "movie", ".", "embed_type", ",", "'provider'", ":", "movie", ".", "provider", ",", "'width'", ":", "movie", ".", "width", ",", "'height'", ":", "movie", ".", "height", ",", "'embed_code'", ":", "movie", ".", "embed_code", ",", "'src'", ":", "movie", ".", "src", ",", "}", ")", "return", "data" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
TitleExtractor.clean_title
Clean title with the use of og:site_name in this case try to get rid of site name and use TITLE_SPLITTERS to reformat title
goose3/extractors/title.py
def clean_title(self, title): """Clean title with the use of og:site_name in this case try to get rid of site name and use TITLE_SPLITTERS to reformat title """ # check if we have the site name in opengraph data if "site_name" in list(self.article.opengraph.keys()): site_name = self.article.opengraph['site_name'] # remove the site name from title title = title.replace(site_name, '').strip() elif (self.article.schema and "publisher" in self.article.schema and "name" in self.article.schema["publisher"]): site_name = self.article.schema["publisher"]["name"] # remove the site name from title title = title.replace(site_name, '').strip() # try to remove the domain from url if self.article.domain: pattern = re.compile(self.article.domain, re.IGNORECASE) title = pattern.sub("", title).strip() # split the title in words # TechCrunch | my wonderfull article # my wonderfull article | TechCrunch title_words = title.split() # check if first letter is in TITLE_SPLITTERS # if so remove it if title_words and title_words[0] in TITLE_SPLITTERS: title_words.pop(0) # check for a title that is empty or consists of only a # title splitter to avoid a IndexError below if not title_words: return "" # check if last letter is in TITLE_SPLITTERS # if so remove it if title_words[-1] in TITLE_SPLITTERS: title_words.pop(-1) # rebuild the title title = " ".join(title_words).strip() return title
def clean_title(self, title): """Clean title with the use of og:site_name in this case try to get rid of site name and use TITLE_SPLITTERS to reformat title """ # check if we have the site name in opengraph data if "site_name" in list(self.article.opengraph.keys()): site_name = self.article.opengraph['site_name'] # remove the site name from title title = title.replace(site_name, '').strip() elif (self.article.schema and "publisher" in self.article.schema and "name" in self.article.schema["publisher"]): site_name = self.article.schema["publisher"]["name"] # remove the site name from title title = title.replace(site_name, '').strip() # try to remove the domain from url if self.article.domain: pattern = re.compile(self.article.domain, re.IGNORECASE) title = pattern.sub("", title).strip() # split the title in words # TechCrunch | my wonderfull article # my wonderfull article | TechCrunch title_words = title.split() # check if first letter is in TITLE_SPLITTERS # if so remove it if title_words and title_words[0] in TITLE_SPLITTERS: title_words.pop(0) # check for a title that is empty or consists of only a # title splitter to avoid a IndexError below if not title_words: return "" # check if last letter is in TITLE_SPLITTERS # if so remove it if title_words[-1] in TITLE_SPLITTERS: title_words.pop(-1) # rebuild the title title = " ".join(title_words).strip() return title
[ "Clean", "title", "with", "the", "use", "of", "og", ":", "site_name", "in", "this", "case", "try", "to", "get", "rid", "of", "site", "name", "and", "use", "TITLE_SPLITTERS", "to", "reformat", "title" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/title.py#L33-L77
[ "def", "clean_title", "(", "self", ",", "title", ")", ":", "# check if we have the site name in opengraph data", "if", "\"site_name\"", "in", "list", "(", "self", ".", "article", ".", "opengraph", ".", "keys", "(", ")", ")", ":", "site_name", "=", "self", ".", "article", ".", "opengraph", "[", "'site_name'", "]", "# remove the site name from title", "title", "=", "title", ".", "replace", "(", "site_name", ",", "''", ")", ".", "strip", "(", ")", "elif", "(", "self", ".", "article", ".", "schema", "and", "\"publisher\"", "in", "self", ".", "article", ".", "schema", "and", "\"name\"", "in", "self", ".", "article", ".", "schema", "[", "\"publisher\"", "]", ")", ":", "site_name", "=", "self", ".", "article", ".", "schema", "[", "\"publisher\"", "]", "[", "\"name\"", "]", "# remove the site name from title", "title", "=", "title", ".", "replace", "(", "site_name", ",", "''", ")", ".", "strip", "(", ")", "# try to remove the domain from url", "if", "self", ".", "article", ".", "domain", ":", "pattern", "=", "re", ".", "compile", "(", "self", ".", "article", ".", "domain", ",", "re", ".", "IGNORECASE", ")", "title", "=", "pattern", ".", "sub", "(", "\"\"", ",", "title", ")", ".", "strip", "(", ")", "# split the title in words", "# TechCrunch | my wonderfull article", "# my wonderfull article | TechCrunch", "title_words", "=", "title", ".", "split", "(", ")", "# check if first letter is in TITLE_SPLITTERS", "# if so remove it", "if", "title_words", "and", "title_words", "[", "0", "]", "in", "TITLE_SPLITTERS", ":", "title_words", ".", "pop", "(", "0", ")", "# check for a title that is empty or consists of only a", "# title splitter to avoid a IndexError below", "if", "not", "title_words", ":", "return", "\"\"", "# check if last letter is in TITLE_SPLITTERS", "# if so remove it", "if", "title_words", "[", "-", "1", "]", "in", "TITLE_SPLITTERS", ":", "title_words", ".", "pop", "(", "-", "1", ")", "# rebuild the title", "title", "=", "\" \"", ".", "join", "(", "title_words", 
")", ".", "strip", "(", ")", "return", "title" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
TitleExtractor.get_title
\ Fetch the article title and analyze it
goose3/extractors/title.py
def get_title(self): """\ Fetch the article title and analyze it """ title = '' # rely on opengraph in case we have the data if "title" in list(self.article.opengraph.keys()): return self.clean_title(self.article.opengraph['title']) elif self.article.schema and "headline" in self.article.schema: return self.clean_title(self.article.schema['headline']) # try to fetch the meta headline meta_headline = self.parser.getElementsByTag(self.article.doc, tag="meta", attr="name", value="headline") if meta_headline is not None and len(meta_headline) > 0: title = self.parser.getAttribute(meta_headline[0], 'content') return self.clean_title(title) # otherwise use the title meta title_element = self.parser.getElementsByTag(self.article.doc, tag='title') if title_element is not None and len(title_element) > 0: title = self.parser.getText(title_element[0]) return self.clean_title(title) return title
def get_title(self): """\ Fetch the article title and analyze it """ title = '' # rely on opengraph in case we have the data if "title" in list(self.article.opengraph.keys()): return self.clean_title(self.article.opengraph['title']) elif self.article.schema and "headline" in self.article.schema: return self.clean_title(self.article.schema['headline']) # try to fetch the meta headline meta_headline = self.parser.getElementsByTag(self.article.doc, tag="meta", attr="name", value="headline") if meta_headline is not None and len(meta_headline) > 0: title = self.parser.getAttribute(meta_headline[0], 'content') return self.clean_title(title) # otherwise use the title meta title_element = self.parser.getElementsByTag(self.article.doc, tag='title') if title_element is not None and len(title_element) > 0: title = self.parser.getText(title_element[0]) return self.clean_title(title) return title
[ "\\", "Fetch", "the", "article", "title", "and", "analyze", "it" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/title.py#L79-L106
[ "def", "get_title", "(", "self", ")", ":", "title", "=", "''", "# rely on opengraph in case we have the data", "if", "\"title\"", "in", "list", "(", "self", ".", "article", ".", "opengraph", ".", "keys", "(", ")", ")", ":", "return", "self", ".", "clean_title", "(", "self", ".", "article", ".", "opengraph", "[", "'title'", "]", ")", "elif", "self", ".", "article", ".", "schema", "and", "\"headline\"", "in", "self", ".", "article", ".", "schema", ":", "return", "self", ".", "clean_title", "(", "self", ".", "article", ".", "schema", "[", "'headline'", "]", ")", "# try to fetch the meta headline", "meta_headline", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "self", ".", "article", ".", "doc", ",", "tag", "=", "\"meta\"", ",", "attr", "=", "\"name\"", ",", "value", "=", "\"headline\"", ")", "if", "meta_headline", "is", "not", "None", "and", "len", "(", "meta_headline", ")", ">", "0", ":", "title", "=", "self", ".", "parser", ".", "getAttribute", "(", "meta_headline", "[", "0", "]", ",", "'content'", ")", "return", "self", ".", "clean_title", "(", "title", ")", "# otherwise use the title meta", "title_element", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "self", ".", "article", ".", "doc", ",", "tag", "=", "'title'", ")", "if", "title_element", "is", "not", "None", "and", "len", "(", "title_element", ")", ">", "0", ":", "title", "=", "self", ".", "parser", ".", "getText", "(", "title_element", "[", "0", "]", ")", "return", "self", ".", "clean_title", "(", "title", ")", "return", "title" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
MetasExtractor.get_canonical_link
if the article has meta canonical link set in the url
goose3/extractors/metas.py
def get_canonical_link(self): """ if the article has meta canonical link set in the url """ if self.article.final_url: kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'canonical'} meta = self.parser.getElementsByTag(self.article.doc, **kwargs) if meta is not None and len(meta) > 0: href = self.parser.getAttribute(meta[0], 'href') if href: href = href.strip() o = urlparse(href) if not o.hostname: tmp = urlparse(self.article.final_url) domain = '%s://%s' % (tmp.scheme, tmp.hostname) href = urljoin(domain, href) return href return self.article.final_url
def get_canonical_link(self): """ if the article has meta canonical link set in the url """ if self.article.final_url: kwargs = {'tag': 'link', 'attr': 'rel', 'value': 'canonical'} meta = self.parser.getElementsByTag(self.article.doc, **kwargs) if meta is not None and len(meta) > 0: href = self.parser.getAttribute(meta[0], 'href') if href: href = href.strip() o = urlparse(href) if not o.hostname: tmp = urlparse(self.article.final_url) domain = '%s://%s' % (tmp.scheme, tmp.hostname) href = urljoin(domain, href) return href return self.article.final_url
[ "if", "the", "article", "has", "meta", "canonical", "link", "set", "in", "the", "url" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/metas.py#L57-L74
[ "def", "get_canonical_link", "(", "self", ")", ":", "if", "self", ".", "article", ".", "final_url", ":", "kwargs", "=", "{", "'tag'", ":", "'link'", ",", "'attr'", ":", "'rel'", ",", "'value'", ":", "'canonical'", "}", "meta", "=", "self", ".", "parser", ".", "getElementsByTag", "(", "self", ".", "article", ".", "doc", ",", "*", "*", "kwargs", ")", "if", "meta", "is", "not", "None", "and", "len", "(", "meta", ")", ">", "0", ":", "href", "=", "self", ".", "parser", ".", "getAttribute", "(", "meta", "[", "0", "]", ",", "'href'", ")", "if", "href", ":", "href", "=", "href", ".", "strip", "(", ")", "o", "=", "urlparse", "(", "href", ")", "if", "not", "o", ".", "hostname", ":", "tmp", "=", "urlparse", "(", "self", ".", "article", ".", "final_url", ")", "domain", "=", "'%s://%s'", "%", "(", "tmp", ".", "scheme", ",", "tmp", ".", "hostname", ")", "href", "=", "urljoin", "(", "domain", ",", "href", ")", "return", "href", "return", "self", ".", "article", ".", "final_url" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
OutputFormatter.make_list_elms_pretty
make any list element read like a list
goose3/outputformatters.py
def make_list_elms_pretty(self): """ make any list element read like a list """ for elm in self.parser.getElementsByTag(self.top_node, tag='li'): elm.text = r'• {}'.format(elm.text)
def make_list_elms_pretty(self): """ make any list element read like a list """ for elm in self.parser.getElementsByTag(self.top_node, tag='li'): elm.text = r'• {}'.format(elm.text)
[ "make", "any", "list", "element", "read", "like", "a", "list" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/outputformatters.py#L104-L108
[ "def", "make_list_elms_pretty", "(", "self", ")", ":", "for", "elm", "in", "self", ".", "parser", ".", "getElementsByTag", "(", "self", ".", "top_node", ",", "tag", "=", "'li'", ")", ":", "elm", ".", "text", "=", "r'• {}'.f", "o", "rmat(e", "l", "m.t", "e", "xt)", "" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
Goose.close
Close the network connection and perform any other required cleanup Note: Auto closed when using goose as a context manager or when garbage collected
goose3/__init__.py
def close(self): ''' Close the network connection and perform any other required cleanup Note: Auto closed when using goose as a context manager or when garbage collected ''' if self.fetcher is not None: self.shutdown_network() self.finalizer.atexit = False
def close(self): ''' Close the network connection and perform any other required cleanup Note: Auto closed when using goose as a context manager or when garbage collected ''' if self.fetcher is not None: self.shutdown_network() self.finalizer.atexit = False
[ "Close", "the", "network", "connection", "and", "perform", "any", "other", "required", "cleanup" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/__init__.py#L94-L101
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "fetcher", "is", "not", "None", ":", "self", ".", "shutdown_network", "(", ")", "self", ".", "finalizer", ".", "atexit", "=", "False" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
Goose.extract
Extract the most likely article content from the html page Args: url (str): URL to pull and parse raw_html (str): String representation of the HTML page Returns: Article: Representation of the article contents \ including other parsed and extracted metadata
goose3/__init__.py
def extract(self, url=None, raw_html=None): ''' Extract the most likely article content from the html page Args: url (str): URL to pull and parse raw_html (str): String representation of the HTML page Returns: Article: Representation of the article contents \ including other parsed and extracted metadata ''' crawl_candidate = CrawlCandidate(self.config, url, raw_html) return self.__crawl(crawl_candidate)
def extract(self, url=None, raw_html=None): ''' Extract the most likely article content from the html page Args: url (str): URL to pull and parse raw_html (str): String representation of the HTML page Returns: Article: Representation of the article contents \ including other parsed and extracted metadata ''' crawl_candidate = CrawlCandidate(self.config, url, raw_html) return self.__crawl(crawl_candidate)
[ "Extract", "the", "most", "likely", "article", "content", "from", "the", "html", "page" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/__init__.py#L103-L113
[ "def", "extract", "(", "self", ",", "url", "=", "None", ",", "raw_html", "=", "None", ")", ":", "crawl_candidate", "=", "CrawlCandidate", "(", "self", ".", "config", ",", "url", ",", "raw_html", ")", "return", "self", ".", "__crawl", "(", "crawl_candidate", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
Goose.__crawl
wrap the crawling functionality
goose3/__init__.py
def __crawl(self, crawl_candidate): ''' wrap the crawling functionality ''' def crawler_wrapper(parser, parsers_lst, crawl_candidate): try: crawler = Crawler(self.config, self.fetcher) article = crawler.crawl(crawl_candidate) except (UnicodeDecodeError, ValueError) as ex: if parsers_lst: parser = parsers_lst.pop(0) # remove it also! return crawler_wrapper(parser, parsers_lst, crawl_candidate) else: raise ex return article # use the wrapper parsers = list(self.config.available_parsers) parsers.remove(self.config.parser_class) return crawler_wrapper(self.config.parser_class, parsers, crawl_candidate)
def __crawl(self, crawl_candidate): ''' wrap the crawling functionality ''' def crawler_wrapper(parser, parsers_lst, crawl_candidate): try: crawler = Crawler(self.config, self.fetcher) article = crawler.crawl(crawl_candidate) except (UnicodeDecodeError, ValueError) as ex: if parsers_lst: parser = parsers_lst.pop(0) # remove it also! return crawler_wrapper(parser, parsers_lst, crawl_candidate) else: raise ex return article # use the wrapper parsers = list(self.config.available_parsers) parsers.remove(self.config.parser_class) return crawler_wrapper(self.config.parser_class, parsers, crawl_candidate)
[ "wrap", "the", "crawling", "functionality" ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/__init__.py#L123-L140
[ "def", "__crawl", "(", "self", ",", "crawl_candidate", ")", ":", "def", "crawler_wrapper", "(", "parser", ",", "parsers_lst", ",", "crawl_candidate", ")", ":", "try", ":", "crawler", "=", "Crawler", "(", "self", ".", "config", ",", "self", ".", "fetcher", ")", "article", "=", "crawler", ".", "crawl", "(", "crawl_candidate", ")", "except", "(", "UnicodeDecodeError", ",", "ValueError", ")", "as", "ex", ":", "if", "parsers_lst", ":", "parser", "=", "parsers_lst", ".", "pop", "(", "0", ")", "# remove it also!", "return", "crawler_wrapper", "(", "parser", ",", "parsers_lst", ",", "crawl_candidate", ")", "else", ":", "raise", "ex", "return", "article", "# use the wrapper", "parsers", "=", "list", "(", "self", ".", "config", ".", "available_parsers", ")", "parsers", ".", "remove", "(", "self", ".", "config", ".", "parser_class", ")", "return", "crawler_wrapper", "(", "self", ".", "config", ".", "parser_class", ",", "parsers", ",", "crawl_candidate", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
smart_unicode
Returns a unicode object representing 's'. Treats bytestrings using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects.
goose3/utils/encoding.py
def smart_unicode(string, encoding='utf-8', strings_only=False, errors='strict'): """ Returns a unicode object representing 's'. Treats bytestrings using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects. """ # if isinstance(s, Promise): # # The input is the result of a gettext_lazy() call. # return s return force_unicode(string, encoding, strings_only, errors)
def smart_unicode(string, encoding='utf-8', strings_only=False, errors='strict'): """ Returns a unicode object representing 's'. Treats bytestrings using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects. """ # if isinstance(s, Promise): # # The input is the result of a gettext_lazy() call. # return s return force_unicode(string, encoding, strings_only, errors)
[ "Returns", "a", "unicode", "object", "representing", "s", ".", "Treats", "bytestrings", "using", "the", "encoding", "codec", "." ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/utils/encoding.py#L28-L38
[ "def", "smart_unicode", "(", "string", ",", "encoding", "=", "'utf-8'", ",", "strings_only", "=", "False", ",", "errors", "=", "'strict'", ")", ":", "# if isinstance(s, Promise):", "# # The input is the result of a gettext_lazy() call.", "# return s", "return", "force_unicode", "(", "string", ",", "encoding", ",", "strings_only", ",", "errors", ")" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
force_unicode
Similar to smart_unicode, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects.
goose3/utils/encoding.py
def force_unicode(string, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_unicode, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% in performance when s # is an instance of unicode. This function gets called often in that # setting. if isinstance(string, str): return string if strings_only and is_protected_type(string): return string try: if not isinstance(string, str): if hasattr(string, '__unicode__'): string = string.__unicode__() else: try: string = str(string, encoding, errors) except UnicodeEncodeError: if not isinstance(string, Exception): raise # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII data without special # handling to display as a string. We need to handle this # without raising a further exception. We do an # approximation to what the Exception's standard str() # output should be. string = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in string]) elif not isinstance(string, str): # Note: We use .decode() here, instead of unicode(s, encoding, # errors), so that if s is a SafeString, it ends up being a # SafeUnicode at the end. string = string.decode(encoding, errors) except UnicodeDecodeError as ex: if not isinstance(string, Exception): raise DjangoUnicodeDecodeError(string, *ex.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. string = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in string]) return string
def force_unicode(string, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_unicode, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first, saves 30-40% in performance when s # is an instance of unicode. This function gets called often in that # setting. if isinstance(string, str): return string if strings_only and is_protected_type(string): return string try: if not isinstance(string, str): if hasattr(string, '__unicode__'): string = string.__unicode__() else: try: string = str(string, encoding, errors) except UnicodeEncodeError: if not isinstance(string, Exception): raise # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII data without special # handling to display as a string. We need to handle this # without raising a further exception. We do an # approximation to what the Exception's standard str() # output should be. string = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in string]) elif not isinstance(string, str): # Note: We use .decode() here, instead of unicode(s, encoding, # errors), so that if s is a SafeString, it ends up being a # SafeUnicode at the end. string = string.decode(encoding, errors) except UnicodeDecodeError as ex: if not isinstance(string, Exception): raise DjangoUnicodeDecodeError(string, *ex.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. string = ' '.join([force_unicode(arg, encoding, strings_only, errors) for arg in string]) return string
[ "Similar", "to", "smart_unicode", "except", "that", "lazy", "instances", "are", "resolved", "to", "strings", "rather", "than", "kept", "as", "lazy", "objects", "." ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/utils/encoding.py#L52-L101
[ "def", "force_unicode", "(", "string", ",", "encoding", "=", "'utf-8'", ",", "strings_only", "=", "False", ",", "errors", "=", "'strict'", ")", ":", "# Handle the common case first, saves 30-40% in performance when s", "# is an instance of unicode. This function gets called often in that", "# setting.", "if", "isinstance", "(", "string", ",", "str", ")", ":", "return", "string", "if", "strings_only", "and", "is_protected_type", "(", "string", ")", ":", "return", "string", "try", ":", "if", "not", "isinstance", "(", "string", ",", "str", ")", ":", "if", "hasattr", "(", "string", ",", "'__unicode__'", ")", ":", "string", "=", "string", ".", "__unicode__", "(", ")", "else", ":", "try", ":", "string", "=", "str", "(", "string", ",", "encoding", ",", "errors", ")", "except", "UnicodeEncodeError", ":", "if", "not", "isinstance", "(", "string", ",", "Exception", ")", ":", "raise", "# If we get to here, the caller has passed in an Exception", "# subclass populated with non-ASCII data without special", "# handling to display as a string. We need to handle this", "# without raising a further exception. 
We do an", "# approximation to what the Exception's standard str()", "# output should be.", "string", "=", "' '", ".", "join", "(", "[", "force_unicode", "(", "arg", ",", "encoding", ",", "strings_only", ",", "errors", ")", "for", "arg", "in", "string", "]", ")", "elif", "not", "isinstance", "(", "string", ",", "str", ")", ":", "# Note: We use .decode() here, instead of unicode(s, encoding,", "# errors), so that if s is a SafeString, it ends up being a", "# SafeUnicode at the end.", "string", "=", "string", ".", "decode", "(", "encoding", ",", "errors", ")", "except", "UnicodeDecodeError", "as", "ex", ":", "if", "not", "isinstance", "(", "string", ",", "Exception", ")", ":", "raise", "DjangoUnicodeDecodeError", "(", "string", ",", "*", "ex", ".", "args", ")", "else", ":", "# If we get to here, the caller has passed in an Exception", "# subclass populated with non-ASCII bytestring data without a", "# working unicode method. Try to handle this without raising a", "# further exception by individually forcing the exception args", "# to unicode.", "string", "=", "' '", ".", "join", "(", "[", "force_unicode", "(", "arg", ",", "encoding", ",", "strings_only", ",", "errors", ")", "for", "arg", "in", "string", "]", ")", "return", "string" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
smart_str
Returns a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects.
goose3/utils/encoding.py
def smart_str(string, encoding='utf-8', strings_only=False, errors='strict'): """ Returns a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects. """ if strings_only and isinstance(string, (type(None), int)): return string # if isinstance(s, Promise): # return unicode(s).encode(encoding, errors) if isinstance(string, str): try: return string.encode(encoding, errors) except UnicodeEncodeError: return string.encode('utf-8', errors) elif not isinstance(string, bytes): try: return str(string).encode(encoding, errors) except UnicodeEncodeError: if isinstance(string, Exception): # An Exception subclass containing non-ASCII data that doesn't # know how to print itself properly. We shouldn't raise a # further exception. return ' '.join([smart_str(arg, encoding, strings_only, errors) for arg in string]) return str(string).encode(encoding, errors) else: return string
def smart_str(string, encoding='utf-8', strings_only=False, errors='strict'): """ Returns a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects. """ if strings_only and isinstance(string, (type(None), int)): return string # if isinstance(s, Promise): # return unicode(s).encode(encoding, errors) if isinstance(string, str): try: return string.encode(encoding, errors) except UnicodeEncodeError: return string.encode('utf-8', errors) elif not isinstance(string, bytes): try: return str(string).encode(encoding, errors) except UnicodeEncodeError: if isinstance(string, Exception): # An Exception subclass containing non-ASCII data that doesn't # know how to print itself properly. We shouldn't raise a # further exception. return ' '.join([smart_str(arg, encoding, strings_only, errors) for arg in string]) return str(string).encode(encoding, errors) else: return string
[ "Returns", "a", "bytestring", "version", "of", "s", "encoded", "as", "specified", "in", "encoding", "." ]
goose3/goose3
python
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/utils/encoding.py#L104-L131
[ "def", "smart_str", "(", "string", ",", "encoding", "=", "'utf-8'", ",", "strings_only", "=", "False", ",", "errors", "=", "'strict'", ")", ":", "if", "strings_only", "and", "isinstance", "(", "string", ",", "(", "type", "(", "None", ")", ",", "int", ")", ")", ":", "return", "string", "# if isinstance(s, Promise):", "# return unicode(s).encode(encoding, errors)", "if", "isinstance", "(", "string", ",", "str", ")", ":", "try", ":", "return", "string", ".", "encode", "(", "encoding", ",", "errors", ")", "except", "UnicodeEncodeError", ":", "return", "string", ".", "encode", "(", "'utf-8'", ",", "errors", ")", "elif", "not", "isinstance", "(", "string", ",", "bytes", ")", ":", "try", ":", "return", "str", "(", "string", ")", ".", "encode", "(", "encoding", ",", "errors", ")", "except", "UnicodeEncodeError", ":", "if", "isinstance", "(", "string", ",", "Exception", ")", ":", "# An Exception subclass containing non-ASCII data that doesn't", "# know how to print itself properly. We shouldn't raise a", "# further exception.", "return", "' '", ".", "join", "(", "[", "smart_str", "(", "arg", ",", "encoding", ",", "strings_only", ",", "errors", ")", "for", "arg", "in", "string", "]", ")", "return", "str", "(", "string", ")", ".", "encode", "(", "encoding", ",", "errors", ")", "else", ":", "return", "string" ]
e6994b1b1826af2720a091d1bff5ca15594f558d
valid
QuillAdmin.get_urls
Add URLs needed to handle image uploads.
quill/admin.py
def get_urls(self): """Add URLs needed to handle image uploads.""" urls = patterns( '', url(r'^upload/$', self.admin_site.admin_view(self.handle_upload), name='quill-file-upload'), ) return urls + super(QuillAdmin, self).get_urls()
def get_urls(self): """Add URLs needed to handle image uploads.""" urls = patterns( '', url(r'^upload/$', self.admin_site.admin_view(self.handle_upload), name='quill-file-upload'), ) return urls + super(QuillAdmin, self).get_urls()
[ "Add", "URLs", "needed", "to", "handle", "image", "uploads", "." ]
coremke/django-quill
python
https://github.com/coremke/django-quill/blob/6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f/quill/admin.py#L15-L21
[ "def", "get_urls", "(", "self", ")", ":", "urls", "=", "patterns", "(", "''", ",", "url", "(", "r'^upload/$'", ",", "self", ".", "admin_site", ".", "admin_view", "(", "self", ".", "handle_upload", ")", ",", "name", "=", "'quill-file-upload'", ")", ",", ")", "return", "urls", "+", "super", "(", "QuillAdmin", ",", "self", ")", ".", "get_urls", "(", ")" ]
6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f
valid
QuillAdmin.handle_upload
Handle file uploads from WYSIWYG.
quill/admin.py
def handle_upload(self, request): """Handle file uploads from WYSIWYG.""" if request.method != 'POST': raise Http404 if request.is_ajax(): try: filename = request.GET['quillUploadFile'] data = request is_raw = True except KeyError: return HttpResponseBadRequest("Invalid file upload.") else: if len(request.FILES) != 1: return HttpResponseBadRequest("Can only upload 1 file at a time.") try: data = request.FILES['quillUploadFile'] filename = data.name is_raw = False except KeyError: return HttpResponseBadRequest('Missing image `quillUploadFile`.') url = save_file(data, filename, is_raw, default_storage) response_data = {} response_data['url'] = url # Response content type needs to be text/html here or else # IE will try to download the file. return HttpResponse(json.dumps(response_data), content_type="text/html; charset=utf-8")
def handle_upload(self, request): """Handle file uploads from WYSIWYG.""" if request.method != 'POST': raise Http404 if request.is_ajax(): try: filename = request.GET['quillUploadFile'] data = request is_raw = True except KeyError: return HttpResponseBadRequest("Invalid file upload.") else: if len(request.FILES) != 1: return HttpResponseBadRequest("Can only upload 1 file at a time.") try: data = request.FILES['quillUploadFile'] filename = data.name is_raw = False except KeyError: return HttpResponseBadRequest('Missing image `quillUploadFile`.') url = save_file(data, filename, is_raw, default_storage) response_data = {} response_data['url'] = url # Response content type needs to be text/html here or else # IE will try to download the file. return HttpResponse(json.dumps(response_data), content_type="text/html; charset=utf-8")
[ "Handle", "file", "uploads", "from", "WYSIWYG", "." ]
coremke/django-quill
python
https://github.com/coremke/django-quill/blob/6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f/quill/admin.py#L23-L51
[ "def", "handle_upload", "(", "self", ",", "request", ")", ":", "if", "request", ".", "method", "!=", "'POST'", ":", "raise", "Http404", "if", "request", ".", "is_ajax", "(", ")", ":", "try", ":", "filename", "=", "request", ".", "GET", "[", "'quillUploadFile'", "]", "data", "=", "request", "is_raw", "=", "True", "except", "KeyError", ":", "return", "HttpResponseBadRequest", "(", "\"Invalid file upload.\"", ")", "else", ":", "if", "len", "(", "request", ".", "FILES", ")", "!=", "1", ":", "return", "HttpResponseBadRequest", "(", "\"Can only upload 1 file at a time.\"", ")", "try", ":", "data", "=", "request", ".", "FILES", "[", "'quillUploadFile'", "]", "filename", "=", "data", ".", "name", "is_raw", "=", "False", "except", "KeyError", ":", "return", "HttpResponseBadRequest", "(", "'Missing image `quillUploadFile`.'", ")", "url", "=", "save_file", "(", "data", ",", "filename", ",", "is_raw", ",", "default_storage", ")", "response_data", "=", "{", "}", "response_data", "[", "'url'", "]", "=", "url", "# Response content type needs to be text/html here or else", "# IE will try to download the file.", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "response_data", ")", ",", "content_type", "=", "\"text/html; charset=utf-8\"", ")" ]
6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f
valid
QuillEditorWidget.render
Render the Quill WYSIWYG.
quill/widgets.py
def render(self, name, value, attrs={}): """Render the Quill WYSIWYG.""" if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) quill_app = apps.get_app_config('quill') quill_config = getattr(quill_app, self.config) return mark_safe(render_to_string(quill_config['template'], { 'final_attrs': flatatt(final_attrs), 'value': value, 'id': final_attrs['id'], 'config': self.config, }))
def render(self, name, value, attrs={}): """Render the Quill WYSIWYG.""" if value is None: value = '' final_attrs = self.build_attrs(attrs, name=name) quill_app = apps.get_app_config('quill') quill_config = getattr(quill_app, self.config) return mark_safe(render_to_string(quill_config['template'], { 'final_attrs': flatatt(final_attrs), 'value': value, 'id': final_attrs['id'], 'config': self.config, }))
[ "Render", "the", "Quill", "WYSIWYG", "." ]
coremke/django-quill
python
https://github.com/coremke/django-quill/blob/6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f/quill/widgets.py#L35-L48
[ "def", "render", "(", "self", ",", "name", ",", "value", ",", "attrs", "=", "{", "}", ")", ":", "if", "value", "is", "None", ":", "value", "=", "''", "final_attrs", "=", "self", ".", "build_attrs", "(", "attrs", ",", "name", "=", "name", ")", "quill_app", "=", "apps", ".", "get_app_config", "(", "'quill'", ")", "quill_config", "=", "getattr", "(", "quill_app", ",", "self", ".", "config", ")", "return", "mark_safe", "(", "render_to_string", "(", "quill_config", "[", "'template'", "]", ",", "{", "'final_attrs'", ":", "flatatt", "(", "final_attrs", ")", ",", "'value'", ":", "value", ",", "'id'", ":", "final_attrs", "[", "'id'", "]", ",", "'config'", ":", "self", ".", "config", ",", "}", ")", ")" ]
6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f
valid
RichTextField.formfield
Get the form for field.
quill/fields.py
def formfield(self, **kwargs): """Get the form for field.""" defaults = { 'form_class': RichTextFormField, 'config': self.config, } defaults.update(kwargs) return super(RichTextField, self).formfield(**defaults)
def formfield(self, **kwargs): """Get the form for field.""" defaults = { 'form_class': RichTextFormField, 'config': self.config, } defaults.update(kwargs) return super(RichTextField, self).formfield(**defaults)
[ "Get", "the", "form", "for", "field", "." ]
coremke/django-quill
python
https://github.com/coremke/django-quill/blob/6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f/quill/fields.py#L20-L27
[ "def", "formfield", "(", "self", ",", "*", "*", "kwargs", ")", ":", "defaults", "=", "{", "'form_class'", ":", "RichTextFormField", ",", "'config'", ":", "self", ".", "config", ",", "}", "defaults", ".", "update", "(", "kwargs", ")", "return", "super", "(", "RichTextField", ",", "self", ")", ".", "formfield", "(", "*", "*", "defaults", ")" ]
6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f
valid
render_toolbar
Render the toolbar for the given config.
quill/templatetags/quill_tags.py
def render_toolbar(context, config): """Render the toolbar for the given config.""" quill_config = getattr(quill_app, config) t = template.loader.get_template(quill_config['toolbar_template']) return t.render(context)
def render_toolbar(context, config): """Render the toolbar for the given config.""" quill_config = getattr(quill_app, config) t = template.loader.get_template(quill_config['toolbar_template']) return t.render(context)
[ "Render", "the", "toolbar", "for", "the", "given", "config", "." ]
coremke/django-quill
python
https://github.com/coremke/django-quill/blob/6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f/quill/templatetags/quill_tags.py#L26-L30
[ "def", "render_toolbar", "(", "context", ",", "config", ")", ":", "quill_config", "=", "getattr", "(", "quill_app", ",", "config", ")", "t", "=", "template", ".", "loader", ".", "get_template", "(", "quill_config", "[", "'toolbar_template'", "]", ")", "return", "t", ".", "render", "(", "context", ")" ]
6c5ace1a96e291f0a8e401f6d61d634dd0cb7c9f
valid
get_meta_image_url
Resize an image for metadata tags, and return an absolute URL to it.
wagtailmetadata/tags.py
def get_meta_image_url(request, image): """ Resize an image for metadata tags, and return an absolute URL to it. """ rendition = image.get_rendition(filter='original') return request.build_absolute_uri(rendition.url)
def get_meta_image_url(request, image): """ Resize an image for metadata tags, and return an absolute URL to it. """ rendition = image.get_rendition(filter='original') return request.build_absolute_uri(rendition.url)
[ "Resize", "an", "image", "for", "metadata", "tags", "and", "return", "an", "absolute", "URL", "to", "it", "." ]
neon-jungle/wagtail-metadata
python
https://github.com/neon-jungle/wagtail-metadata/blob/f8592dc3f2b644fa28de5b3585504899b1bb796a/wagtailmetadata/tags.py#L4-L9
[ "def", "get_meta_image_url", "(", "request", ",", "image", ")", ":", "rendition", "=", "image", ".", "get_rendition", "(", "filter", "=", "'original'", ")", "return", "request", ".", "build_absolute_uri", "(", "rendition", ".", "url", ")" ]
f8592dc3f2b644fa28de5b3585504899b1bb796a
valid
MDP.read
Read and parse mdp file *filename*.
gromacs/fileformats/mdp.py
def read(self, filename=None): """Read and parse mdp file *filename*.""" self._init_filename(filename) def BLANK(i): return "B{0:04d}".format(i) def COMMENT(i): return "C{0:04d}".format(i) data = odict() iblank = icomment = 0 with open(self.real_filename) as mdp: for line in mdp: line = line.strip() if len(line) == 0: iblank += 1 data[BLANK(iblank)] = '' continue m = self.COMMENT.match(line) if m: icomment += 1 data[COMMENT(icomment)] = m.group('value') continue # parameter m = self.PARAMETER.match(line) if m: # check for comments after parameter?? -- currently discarded parameter = m.group('parameter') value = self._transform(m.group('value')) data[parameter] = value else: errmsg = '{filename!r}: unknown line in mdp file, {line!r}'.format(**vars()) self.logger.error(errmsg) raise ParseError(errmsg) super(MDP,self).update(data)
def read(self, filename=None): """Read and parse mdp file *filename*.""" self._init_filename(filename) def BLANK(i): return "B{0:04d}".format(i) def COMMENT(i): return "C{0:04d}".format(i) data = odict() iblank = icomment = 0 with open(self.real_filename) as mdp: for line in mdp: line = line.strip() if len(line) == 0: iblank += 1 data[BLANK(iblank)] = '' continue m = self.COMMENT.match(line) if m: icomment += 1 data[COMMENT(icomment)] = m.group('value') continue # parameter m = self.PARAMETER.match(line) if m: # check for comments after parameter?? -- currently discarded parameter = m.group('parameter') value = self._transform(m.group('value')) data[parameter] = value else: errmsg = '{filename!r}: unknown line in mdp file, {line!r}'.format(**vars()) self.logger.error(errmsg) raise ParseError(errmsg) super(MDP,self).update(data)
[ "Read", "and", "parse", "mdp", "file", "*", "filename", "*", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/mdp.py#L95-L130
[ "def", "read", "(", "self", ",", "filename", "=", "None", ")", ":", "self", ".", "_init_filename", "(", "filename", ")", "def", "BLANK", "(", "i", ")", ":", "return", "\"B{0:04d}\"", ".", "format", "(", "i", ")", "def", "COMMENT", "(", "i", ")", ":", "return", "\"C{0:04d}\"", ".", "format", "(", "i", ")", "data", "=", "odict", "(", ")", "iblank", "=", "icomment", "=", "0", "with", "open", "(", "self", ".", "real_filename", ")", "as", "mdp", ":", "for", "line", "in", "mdp", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "len", "(", "line", ")", "==", "0", ":", "iblank", "+=", "1", "data", "[", "BLANK", "(", "iblank", ")", "]", "=", "''", "continue", "m", "=", "self", ".", "COMMENT", ".", "match", "(", "line", ")", "if", "m", ":", "icomment", "+=", "1", "data", "[", "COMMENT", "(", "icomment", ")", "]", "=", "m", ".", "group", "(", "'value'", ")", "continue", "# parameter", "m", "=", "self", ".", "PARAMETER", ".", "match", "(", "line", ")", "if", "m", ":", "# check for comments after parameter?? -- currently discarded", "parameter", "=", "m", ".", "group", "(", "'parameter'", ")", "value", "=", "self", ".", "_transform", "(", "m", ".", "group", "(", "'value'", ")", ")", "data", "[", "parameter", "]", "=", "value", "else", ":", "errmsg", "=", "'{filename!r}: unknown line in mdp file, {line!r}'", ".", "format", "(", "*", "*", "vars", "(", ")", ")", "self", ".", "logger", ".", "error", "(", "errmsg", ")", "raise", "ParseError", "(", "errmsg", ")", "super", "(", "MDP", ",", "self", ")", ".", "update", "(", "data", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
MDP.write
Write mdp file to *filename*. :Keywords: *filename* output mdp file; default is the filename the mdp was read from *skipempty* : boolean ``True`` removes any parameter lines from output that contain empty values [``False``] .. Note:: Overwrites the file that the mdp was read from if no *filename* supplied.
gromacs/fileformats/mdp.py
def write(self, filename=None, skipempty=False): """Write mdp file to *filename*. :Keywords: *filename* output mdp file; default is the filename the mdp was read from *skipempty* : boolean ``True`` removes any parameter lines from output that contain empty values [``False``] .. Note:: Overwrites the file that the mdp was read from if no *filename* supplied. """ with open(self.filename(filename, ext='mdp'), 'w') as mdp: for k,v in self.items(): if k[0] == 'B': # blank line mdp.write("\n") elif k[0] == 'C': # comment mdp.write("; {v!s}\n".format(**vars())) else: # parameter = value if skipempty and (v == '' or v is None): continue if isinstance(v, six.string_types) or not hasattr(v, '__iter__'): mdp.write("{k!s} = {v!s}\n".format(**vars())) else: mdp.write("{} = {}\n".format(k,' '.join(map(str, v))))
def write(self, filename=None, skipempty=False): """Write mdp file to *filename*. :Keywords: *filename* output mdp file; default is the filename the mdp was read from *skipempty* : boolean ``True`` removes any parameter lines from output that contain empty values [``False``] .. Note:: Overwrites the file that the mdp was read from if no *filename* supplied. """ with open(self.filename(filename, ext='mdp'), 'w') as mdp: for k,v in self.items(): if k[0] == 'B': # blank line mdp.write("\n") elif k[0] == 'C': # comment mdp.write("; {v!s}\n".format(**vars())) else: # parameter = value if skipempty and (v == '' or v is None): continue if isinstance(v, six.string_types) or not hasattr(v, '__iter__'): mdp.write("{k!s} = {v!s}\n".format(**vars())) else: mdp.write("{} = {}\n".format(k,' '.join(map(str, v))))
[ "Write", "mdp", "file", "to", "*", "filename", "*", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/fileformats/mdp.py#L133-L160
[ "def", "write", "(", "self", ",", "filename", "=", "None", ",", "skipempty", "=", "False", ")", ":", "with", "open", "(", "self", ".", "filename", "(", "filename", ",", "ext", "=", "'mdp'", ")", ",", "'w'", ")", "as", "mdp", ":", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ":", "if", "k", "[", "0", "]", "==", "'B'", ":", "# blank line", "mdp", ".", "write", "(", "\"\\n\"", ")", "elif", "k", "[", "0", "]", "==", "'C'", ":", "# comment", "mdp", ".", "write", "(", "\"; {v!s}\\n\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "else", ":", "# parameter = value", "if", "skipempty", "and", "(", "v", "==", "''", "or", "v", "is", "None", ")", ":", "continue", "if", "isinstance", "(", "v", ",", "six", ".", "string_types", ")", "or", "not", "hasattr", "(", "v", ",", "'__iter__'", ")", ":", "mdp", ".", "write", "(", "\"{k!s} = {v!s}\\n\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "else", ":", "mdp", ".", "write", "(", "\"{} = {}\\n\"", ".", "format", "(", "k", ",", "' '", ".", "join", "(", "map", "(", "str", ",", "v", ")", ")", ")", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
find_gromacs_command
Return *driver* and *name* of the first command that can be found on :envvar:`PATH`
gromacs/run.py
def find_gromacs_command(commands): """Return *driver* and *name* of the first command that can be found on :envvar:`PATH`""" # We could try executing 'name' or 'driver name' but to keep things lean we # just check if the executables can be found and then hope for the best. commands = utilities.asiterable(commands) for command in commands: try: driver, name = command.split() except ValueError: driver, name = None, command executable = driver if driver else name if utilities.which(executable): break else: raise OSError(errno.ENOENT, "No Gromacs executable found in", ", ".join(commands)) return driver, name
def find_gromacs_command(commands): """Return *driver* and *name* of the first command that can be found on :envvar:`PATH`""" # We could try executing 'name' or 'driver name' but to keep things lean we # just check if the executables can be found and then hope for the best. commands = utilities.asiterable(commands) for command in commands: try: driver, name = command.split() except ValueError: driver, name = None, command executable = driver if driver else name if utilities.which(executable): break else: raise OSError(errno.ENOENT, "No Gromacs executable found in", ", ".join(commands)) return driver, name
[ "Return", "*", "driver", "*", "and", "*", "name", "*", "of", "the", "first", "command", "that", "can", "be", "found", "on", ":", "envvar", ":", "PATH" ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L52-L71
[ "def", "find_gromacs_command", "(", "commands", ")", ":", "# We could try executing 'name' or 'driver name' but to keep things lean we", "# just check if the executables can be found and then hope for the best.", "commands", "=", "utilities", ".", "asiterable", "(", "commands", ")", "for", "command", "in", "commands", ":", "try", ":", "driver", ",", "name", "=", "command", ".", "split", "(", ")", "except", "ValueError", ":", "driver", ",", "name", "=", "None", ",", "command", "executable", "=", "driver", "if", "driver", "else", "name", "if", "utilities", ".", "which", "(", "executable", ")", ":", "break", "else", ":", "raise", "OSError", "(", "errno", ".", "ENOENT", ",", "\"No Gromacs executable found in\"", ",", "\", \"", ".", "join", "(", "commands", ")", ")", "return", "driver", ",", "name" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
check_mdrun_success
Check if ``mdrun`` finished successfully. Analyses the output from ``mdrun`` in *logfile*. Right now we are simply looking for the line "Finished mdrun on node" in the last 1kb of the file. (The file must be seeakable.) :Arguments: *logfile* : filename Logfile produced by ``mdrun``. :Returns: ``True`` if all ok, ``False`` if not finished, and ``None`` if the *logfile* cannot be opened
gromacs/run.py
def check_mdrun_success(logfile): """Check if ``mdrun`` finished successfully. Analyses the output from ``mdrun`` in *logfile*. Right now we are simply looking for the line "Finished mdrun on node" in the last 1kb of the file. (The file must be seeakable.) :Arguments: *logfile* : filename Logfile produced by ``mdrun``. :Returns: ``True`` if all ok, ``False`` if not finished, and ``None`` if the *logfile* cannot be opened """ if not os.path.exists(logfile): return None with open(logfile, 'rb') as log: log.seek(-1024, 2) for line in log: line = line.decode('ASCII') if line.startswith("Finished mdrun on"): return True return False
def check_mdrun_success(logfile): """Check if ``mdrun`` finished successfully. Analyses the output from ``mdrun`` in *logfile*. Right now we are simply looking for the line "Finished mdrun on node" in the last 1kb of the file. (The file must be seeakable.) :Arguments: *logfile* : filename Logfile produced by ``mdrun``. :Returns: ``True`` if all ok, ``False`` if not finished, and ``None`` if the *logfile* cannot be opened """ if not os.path.exists(logfile): return None with open(logfile, 'rb') as log: log.seek(-1024, 2) for line in log: line = line.decode('ASCII') if line.startswith("Finished mdrun on"): return True return False
[ "Check", "if", "mdrun", "finished", "successfully", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L314-L336
[ "def", "check_mdrun_success", "(", "logfile", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "logfile", ")", ":", "return", "None", "with", "open", "(", "logfile", ",", "'rb'", ")", "as", "log", ":", "log", ".", "seek", "(", "-", "1024", ",", "2", ")", "for", "line", "in", "log", ":", "line", "=", "line", ".", "decode", "(", "'ASCII'", ")", "if", "line", ".", "startswith", "(", "\"Finished mdrun on\"", ")", ":", "return", "True", "return", "False" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
get_double_or_single_prec_mdrun
Return double precision ``mdrun`` or fall back to single precision. This convenience function tries :func:`gromacs.mdrun_d` first and if it cannot run it, falls back to :func:`gromacs.mdrun` (without further checking). .. versionadded:: 0.5.1
gromacs/run.py
def get_double_or_single_prec_mdrun(): """Return double precision ``mdrun`` or fall back to single precision. This convenience function tries :func:`gromacs.mdrun_d` first and if it cannot run it, falls back to :func:`gromacs.mdrun` (without further checking). .. versionadded:: 0.5.1 """ try: gromacs.mdrun_d(h=True, stdout=False, stderr=False) logger.debug("using double precision gromacs.mdrun_d") return gromacs.mdrun_d except (AttributeError, GromacsError, OSError): # fall back to mdrun if no double precision binary wmsg = "No 'mdrun_d' binary found so trying 'mdrun' instead.\n"\ "(Note that energy minimization runs better with mdrun_d.)" logger.warn(wmsg) warnings.warn(wmsg, category=AutoCorrectionWarning) return gromacs.mdrun
def get_double_or_single_prec_mdrun(): """Return double precision ``mdrun`` or fall back to single precision. This convenience function tries :func:`gromacs.mdrun_d` first and if it cannot run it, falls back to :func:`gromacs.mdrun` (without further checking). .. versionadded:: 0.5.1 """ try: gromacs.mdrun_d(h=True, stdout=False, stderr=False) logger.debug("using double precision gromacs.mdrun_d") return gromacs.mdrun_d except (AttributeError, GromacsError, OSError): # fall back to mdrun if no double precision binary wmsg = "No 'mdrun_d' binary found so trying 'mdrun' instead.\n"\ "(Note that energy minimization runs better with mdrun_d.)" logger.warn(wmsg) warnings.warn(wmsg, category=AutoCorrectionWarning) return gromacs.mdrun
[ "Return", "double", "precision", "mdrun", "or", "fall", "back", "to", "single", "precision", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L339-L358
[ "def", "get_double_or_single_prec_mdrun", "(", ")", ":", "try", ":", "gromacs", ".", "mdrun_d", "(", "h", "=", "True", ",", "stdout", "=", "False", ",", "stderr", "=", "False", ")", "logger", ".", "debug", "(", "\"using double precision gromacs.mdrun_d\"", ")", "return", "gromacs", ".", "mdrun_d", "except", "(", "AttributeError", ",", "GromacsError", ",", "OSError", ")", ":", "# fall back to mdrun if no double precision binary", "wmsg", "=", "\"No 'mdrun_d' binary found so trying 'mdrun' instead.\\n\"", "\"(Note that energy minimization runs better with mdrun_d.)\"", "logger", ".", "warn", "(", "wmsg", ")", "warnings", ".", "warn", "(", "wmsg", ",", "category", "=", "AutoCorrectionWarning", ")", "return", "gromacs", ".", "mdrun" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
MDrunner.commandline
Returns simple command line to invoke mdrun. If :attr:`mpiexec` is set then :meth:`mpicommand` provides the mpi launcher command that prefixes the actual ``mdrun`` invocation: :attr:`mpiexec` [*mpiargs*] :attr:`mdrun` [*mdrun-args*] The *mdrun-args* are set on initializing the class. Override :meth:`mpicommand` to fit your system if the simple default OpenMP launcher is not appropriate.
gromacs/run.py
def commandline(self, **mpiargs): """Returns simple command line to invoke mdrun. If :attr:`mpiexec` is set then :meth:`mpicommand` provides the mpi launcher command that prefixes the actual ``mdrun`` invocation: :attr:`mpiexec` [*mpiargs*] :attr:`mdrun` [*mdrun-args*] The *mdrun-args* are set on initializing the class. Override :meth:`mpicommand` to fit your system if the simple default OpenMP launcher is not appropriate. """ cmd = self.MDRUN.commandline() if self.mpiexec: cmd = self.mpicommand(**mpiargs) + cmd return cmd
def commandline(self, **mpiargs): """Returns simple command line to invoke mdrun. If :attr:`mpiexec` is set then :meth:`mpicommand` provides the mpi launcher command that prefixes the actual ``mdrun`` invocation: :attr:`mpiexec` [*mpiargs*] :attr:`mdrun` [*mdrun-args*] The *mdrun-args* are set on initializing the class. Override :meth:`mpicommand` to fit your system if the simple default OpenMP launcher is not appropriate. """ cmd = self.MDRUN.commandline() if self.mpiexec: cmd = self.mpicommand(**mpiargs) + cmd return cmd
[ "Returns", "simple", "command", "line", "to", "invoke", "mdrun", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L160-L175
[ "def", "commandline", "(", "self", ",", "*", "*", "mpiargs", ")", ":", "cmd", "=", "self", ".", "MDRUN", ".", "commandline", "(", ")", "if", "self", ".", "mpiexec", ":", "cmd", "=", "self", ".", "mpicommand", "(", "*", "*", "mpiargs", ")", "+", "cmd", "return", "cmd" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
MDrunner.mpicommand
Return a list of the mpi command portion of the commandline. Only allows primitive mpi at the moment: *mpiexec* -n *ncores* *mdrun* *mdrun-args* (This is a primitive example for OpenMP. Override it for more complicated cases.)
gromacs/run.py
def mpicommand(self, *args, **kwargs): """Return a list of the mpi command portion of the commandline. Only allows primitive mpi at the moment: *mpiexec* -n *ncores* *mdrun* *mdrun-args* (This is a primitive example for OpenMP. Override it for more complicated cases.) """ if self.mpiexec is None: raise NotImplementedError("Override mpiexec to enable the simple OpenMP launcher") # example implementation ncores = kwargs.pop('ncores', 8) return [self.mpiexec, '-n', str(ncores)]
def mpicommand(self, *args, **kwargs): """Return a list of the mpi command portion of the commandline. Only allows primitive mpi at the moment: *mpiexec* -n *ncores* *mdrun* *mdrun-args* (This is a primitive example for OpenMP. Override it for more complicated cases.) """ if self.mpiexec is None: raise NotImplementedError("Override mpiexec to enable the simple OpenMP launcher") # example implementation ncores = kwargs.pop('ncores', 8) return [self.mpiexec, '-n', str(ncores)]
[ "Return", "a", "list", "of", "the", "mpi", "command", "portion", "of", "the", "commandline", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L177-L190
[ "def", "mpicommand", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "mpiexec", "is", "None", ":", "raise", "NotImplementedError", "(", "\"Override mpiexec to enable the simple OpenMP launcher\"", ")", "# example implementation", "ncores", "=", "kwargs", ".", "pop", "(", "'ncores'", ",", "8", ")", "return", "[", "self", ".", "mpiexec", ",", "'-n'", ",", "str", "(", "ncores", ")", "]" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
MDrunner.run
Execute the mdrun command (possibly as a MPI command) and run the simulation. :Keywords: *pre* a dictionary containing keyword arguments for the :meth:`prehook` *post* a dictionary containing keyword arguments for the :meth:`posthook` *mdrunargs* a dictionary with keyword arguments for :program:`mdrun` which supersede **and update** the defaults given to the class constructor *mpiargs* all other keyword arguments that are processed by :meth:`mpicommand`
gromacs/run.py
def run(self, pre=None, post=None, mdrunargs=None, **mpiargs): """Execute the mdrun command (possibly as a MPI command) and run the simulation. :Keywords: *pre* a dictionary containing keyword arguments for the :meth:`prehook` *post* a dictionary containing keyword arguments for the :meth:`posthook` *mdrunargs* a dictionary with keyword arguments for :program:`mdrun` which supersede **and update** the defaults given to the class constructor *mpiargs* all other keyword arguments that are processed by :meth:`mpicommand` """ if pre is None: pre = {} if post is None: post = {} if mdrunargs is not None: try: self.MDRUN.gmxargs.update(mdrunargs) except (ValueError, TypeError): msg = "mdrunargs must be a dict of mdrun options, not {0}".format(mdrunargs) logger.error(msg) raise cmd = self.commandline(**mpiargs) with utilities.in_dir(self.dirname, create=False): try: self.prehook(**pre) logger.info(" ".join(cmd)) rc = subprocess.call(cmd) except: logger.exception("Failed MD run for unknown reasons.") raise finally: self.posthook(**post) if rc == 0: logger.info("MDrun completed ok, returncode = {0:d}".format(rc)) else: logger.critical("Failure in MDrun, returncode = {0:d}".format(rc)) return rc
def run(self, pre=None, post=None, mdrunargs=None, **mpiargs): """Execute the mdrun command (possibly as a MPI command) and run the simulation. :Keywords: *pre* a dictionary containing keyword arguments for the :meth:`prehook` *post* a dictionary containing keyword arguments for the :meth:`posthook` *mdrunargs* a dictionary with keyword arguments for :program:`mdrun` which supersede **and update** the defaults given to the class constructor *mpiargs* all other keyword arguments that are processed by :meth:`mpicommand` """ if pre is None: pre = {} if post is None: post = {} if mdrunargs is not None: try: self.MDRUN.gmxargs.update(mdrunargs) except (ValueError, TypeError): msg = "mdrunargs must be a dict of mdrun options, not {0}".format(mdrunargs) logger.error(msg) raise cmd = self.commandline(**mpiargs) with utilities.in_dir(self.dirname, create=False): try: self.prehook(**pre) logger.info(" ".join(cmd)) rc = subprocess.call(cmd) except: logger.exception("Failed MD run for unknown reasons.") raise finally: self.posthook(**post) if rc == 0: logger.info("MDrun completed ok, returncode = {0:d}".format(rc)) else: logger.critical("Failure in MDrun, returncode = {0:d}".format(rc)) return rc
[ "Execute", "the", "mdrun", "command", "(", "possibly", "as", "a", "MPI", "command", ")", "and", "run", "the", "simulation", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L200-L243
[ "def", "run", "(", "self", ",", "pre", "=", "None", ",", "post", "=", "None", ",", "mdrunargs", "=", "None", ",", "*", "*", "mpiargs", ")", ":", "if", "pre", "is", "None", ":", "pre", "=", "{", "}", "if", "post", "is", "None", ":", "post", "=", "{", "}", "if", "mdrunargs", "is", "not", "None", ":", "try", ":", "self", ".", "MDRUN", ".", "gmxargs", ".", "update", "(", "mdrunargs", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "msg", "=", "\"mdrunargs must be a dict of mdrun options, not {0}\"", ".", "format", "(", "mdrunargs", ")", "logger", ".", "error", "(", "msg", ")", "raise", "cmd", "=", "self", ".", "commandline", "(", "*", "*", "mpiargs", ")", "with", "utilities", ".", "in_dir", "(", "self", ".", "dirname", ",", "create", "=", "False", ")", ":", "try", ":", "self", ".", "prehook", "(", "*", "*", "pre", ")", "logger", ".", "info", "(", "\" \"", ".", "join", "(", "cmd", ")", ")", "rc", "=", "subprocess", ".", "call", "(", "cmd", ")", "except", ":", "logger", ".", "exception", "(", "\"Failed MD run for unknown reasons.\"", ")", "raise", "finally", ":", "self", ".", "posthook", "(", "*", "*", "post", ")", "if", "rc", "==", "0", ":", "logger", ".", "info", "(", "\"MDrun completed ok, returncode = {0:d}\"", ".", "format", "(", "rc", ")", ")", "else", ":", "logger", ".", "critical", "(", "\"Failure in MDrun, returncode = {0:d}\"", ".", "format", "(", "rc", ")", ")", "return", "rc" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
MDrunner.run_check
Run :program:`mdrun` and check if run completed when it finishes. This works by looking at the mdrun log file for 'Finished mdrun on node'. It is useful to implement robust simulation techniques. :Arguments: *kwargs* are keyword arguments that are passed on to :meth:`run` (typically used for mpi things) :Returns: - ``True`` if run completed successfully - ``False`` otherwise
gromacs/run.py
def run_check(self, **kwargs): """Run :program:`mdrun` and check if run completed when it finishes. This works by looking at the mdrun log file for 'Finished mdrun on node'. It is useful to implement robust simulation techniques. :Arguments: *kwargs* are keyword arguments that are passed on to :meth:`run` (typically used for mpi things) :Returns: - ``True`` if run completed successfully - ``False`` otherwise """ rc = None # set to something in case we ever want to look at it later (and bomb in the try block) try: rc = self.run(**kwargs) except: logger.exception("run_check: caught exception") status = self.check_success() if status: logger.info("run_check: Hooray! mdrun finished successfully") else: logger.error("run_check: mdrun failed to complete run") return status
def run_check(self, **kwargs): """Run :program:`mdrun` and check if run completed when it finishes. This works by looking at the mdrun log file for 'Finished mdrun on node'. It is useful to implement robust simulation techniques. :Arguments: *kwargs* are keyword arguments that are passed on to :meth:`run` (typically used for mpi things) :Returns: - ``True`` if run completed successfully - ``False`` otherwise """ rc = None # set to something in case we ever want to look at it later (and bomb in the try block) try: rc = self.run(**kwargs) except: logger.exception("run_check: caught exception") status = self.check_success() if status: logger.info("run_check: Hooray! mdrun finished successfully") else: logger.error("run_check: mdrun failed to complete run") return status
[ "Run", ":", "program", ":", "mdrun", "and", "check", "if", "run", "completed", "when", "it", "finishes", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L245-L270
[ "def", "run_check", "(", "self", ",", "*", "*", "kwargs", ")", ":", "rc", "=", "None", "# set to something in case we ever want to look at it later (and bomb in the try block)", "try", ":", "rc", "=", "self", ".", "run", "(", "*", "*", "kwargs", ")", "except", ":", "logger", ".", "exception", "(", "\"run_check: caught exception\"", ")", "status", "=", "self", ".", "check_success", "(", ")", "if", "status", ":", "logger", ".", "info", "(", "\"run_check: Hooray! mdrun finished successfully\"", ")", "else", ":", "logger", ".", "error", "(", "\"run_check: mdrun failed to complete run\"", ")", "return", "status" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
MDrunnerMpich2Smpd.prehook
Launch local smpd.
gromacs/run.py
def prehook(self, **kwargs): """Launch local smpd.""" cmd = ['smpd', '-s'] logger.info("Starting smpd: "+" ".join(cmd)) rc = subprocess.call(cmd) return rc
def prehook(self, **kwargs): """Launch local smpd.""" cmd = ['smpd', '-s'] logger.info("Starting smpd: "+" ".join(cmd)) rc = subprocess.call(cmd) return rc
[ "Launch", "local", "smpd", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/run.py#L299-L304
[ "def", "prehook", "(", "self", ",", "*", "*", "kwargs", ")", ":", "cmd", "=", "[", "'smpd'", ",", "'-s'", "]", "logger", ".", "info", "(", "\"Starting smpd: \"", "+", "\" \"", ".", "join", "(", "cmd", ")", ")", "rc", "=", "subprocess", ".", "call", "(", "cmd", ")", "return", "rc" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
_define_canned_commands
Define functions for the top level name space. Definitions are collected here so that they can all be wrapped in a try-except block that avoids code failing when the Gromacs tools are not available --- in some cases they are not necessary to use parts of GromacsWrapper. .. Note:: Any function defined here **must be listed in ``global``**!
gromacs/cbook.py
def _define_canned_commands(): """Define functions for the top level name space. Definitions are collected here so that they can all be wrapped in a try-except block that avoids code failing when the Gromacs tools are not available --- in some cases they are not necessary to use parts of GromacsWrapper. .. Note:: Any function defined here **must be listed in ``global``**! """ global trj_compact, rmsd_backbone, trj_fitted, trj_xyfitted trj_compact = tools.Trjconv(ur='compact', center=True, boxcenter='tric', pbc='mol', input=('protein','system'), doc=""" Writes a compact representation of the system centered on the protein""") rmsd_backbone = tools.G_rms(what='rmsd', fit='rot+trans', input=('Backbone','Backbone'), doc=""" Computes RMSD of backbone after fitting to the backbone.""") trj_fitted = tools.Trjconv(fit='rot+trans', input=('backbone', 'system'), doc=""" Writes a trajectory fitted to the protein backbone. Note that this does *not* center; if center is required, the *input* selection should have the group to be centered on in second position, e.g. ``input = ('backbone', 'Protein', System')``. """) # Gromacs 4.x trj_xyfitted = tools.Trjconv(fit='rotxy+transxy', input=('backbone', 'protein','system'), doc=""" Writes a trajectory fitted to the protein in the XY-plane only. This is useful for membrane proteins. The system *must* be oriented so that the membrane is in the XY plane. The protein backbone is used for the least square fit, centering is done for the whole protein. Note that centering together with fitting does not always work well and that one sometimes need two runs of trjconv: one to center and one to fit. .. Note:: Gromacs 4.x only""")
def _define_canned_commands(): """Define functions for the top level name space. Definitions are collected here so that they can all be wrapped in a try-except block that avoids code failing when the Gromacs tools are not available --- in some cases they are not necessary to use parts of GromacsWrapper. .. Note:: Any function defined here **must be listed in ``global``**! """ global trj_compact, rmsd_backbone, trj_fitted, trj_xyfitted trj_compact = tools.Trjconv(ur='compact', center=True, boxcenter='tric', pbc='mol', input=('protein','system'), doc=""" Writes a compact representation of the system centered on the protein""") rmsd_backbone = tools.G_rms(what='rmsd', fit='rot+trans', input=('Backbone','Backbone'), doc=""" Computes RMSD of backbone after fitting to the backbone.""") trj_fitted = tools.Trjconv(fit='rot+trans', input=('backbone', 'system'), doc=""" Writes a trajectory fitted to the protein backbone. Note that this does *not* center; if center is required, the *input* selection should have the group to be centered on in second position, e.g. ``input = ('backbone', 'Protein', System')``. """) # Gromacs 4.x trj_xyfitted = tools.Trjconv(fit='rotxy+transxy', input=('backbone', 'protein','system'), doc=""" Writes a trajectory fitted to the protein in the XY-plane only. This is useful for membrane proteins. The system *must* be oriented so that the membrane is in the XY plane. The protein backbone is used for the least square fit, centering is done for the whole protein. Note that centering together with fitting does not always work well and that one sometimes need two runs of trjconv: one to center and one to fit. .. Note:: Gromacs 4.x only""")
[ "Define", "functions", "for", "the", "top", "level", "name", "space", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L150-L197
[ "def", "_define_canned_commands", "(", ")", ":", "global", "trj_compact", ",", "rmsd_backbone", ",", "trj_fitted", ",", "trj_xyfitted", "trj_compact", "=", "tools", ".", "Trjconv", "(", "ur", "=", "'compact'", ",", "center", "=", "True", ",", "boxcenter", "=", "'tric'", ",", "pbc", "=", "'mol'", ",", "input", "=", "(", "'protein'", ",", "'system'", ")", ",", "doc", "=", "\"\"\"\nWrites a compact representation of the system centered on the protein\"\"\"", ")", "rmsd_backbone", "=", "tools", ".", "G_rms", "(", "what", "=", "'rmsd'", ",", "fit", "=", "'rot+trans'", ",", "input", "=", "(", "'Backbone'", ",", "'Backbone'", ")", ",", "doc", "=", "\"\"\"\nComputes RMSD of backbone after fitting to the backbone.\"\"\"", ")", "trj_fitted", "=", "tools", ".", "Trjconv", "(", "fit", "=", "'rot+trans'", ",", "input", "=", "(", "'backbone'", ",", "'system'", ")", ",", "doc", "=", "\"\"\"\nWrites a trajectory fitted to the protein backbone.\n\nNote that this does *not* center; if center is required, the *input*\nselection should have the group to be centered on in second position,\ne.g. ``input = ('backbone', 'Protein', System')``.\n\"\"\"", ")", "# Gromacs 4.x", "trj_xyfitted", "=", "tools", ".", "Trjconv", "(", "fit", "=", "'rotxy+transxy'", ",", "input", "=", "(", "'backbone'", ",", "'protein'", ",", "'system'", ")", ",", "doc", "=", "\"\"\"\nWrites a trajectory fitted to the protein in the XY-plane only.\n\nThis is useful for membrane proteins. The system *must* be oriented so\nthat the membrane is in the XY plane. The protein backbone is used\nfor the least square fit, centering is done for the whole protein.\n\nNote that centering together with fitting does not always work well\nand that one sometimes need two runs of trjconv: one to center and\none to fit.\n\n.. Note:: Gromacs 4.x only\"\"\"", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
trj_fitandcenter
Center everything and make a compact representation (pass 1) and fit the system to a reference (pass 2). :Keywords: *s* input structure file (tpr file required to make molecule whole); if a list or tuple is provided then s[0] is used for pass 1 (should be a tpr) and s[1] is used for the fitting step (can be a pdb of the whole system) If a second structure is supplied then it is assumed that the fitted trajectory should *not* be centered. *f* input trajectory *o* output trajectory *input* A list with three groups. The default is ['backbone', 'protein','system'] The fit command uses all three (1st for least square fit, 2nd for centering, 3rd for output), the centered/make-whole stage use 2nd for centering and 3rd for output. *input1* If *input1* is supplied then *input* is used exclusively for the fitting stage (pass 2) and *input1* for the centering (pass 1). *n* Index file used for pass 1 and pass 2. *n1* If *n1* is supplied then index *n1* is only used for pass 1 (centering) and *n* for pass 2 (fitting). *xy* : boolean If ``True`` then only do a rot+trans fit in the xy plane (good for membrane simulations); default is ``False``. *kwargs* All other arguments are passed to :class:`~gromacs.tools.Trjconv`. Note that here we first center the protein and create a compact box, using ``-pbc mol -ur compact -center -boxcenter tric`` and write an intermediate xtc. Then in a second pass we perform a rotation+translation fit (or restricted to the xy plane if *xy* = ``True`` is set) on the intermediate xtc to produce the final trajectory. Doing it in this order has the disadvantage that the solvent box is rotating around the protein but the opposite order (with center/compact second) produces strange artifacts where columns of solvent appear cut out from the box---it probably means that after rotation the information for the periodic boundaries is not correct any more. 
Most kwargs are passed to both invocations of :class:`gromacs.tools.Trjconv` so it does not really make sense to use eg *skip*; in this case do things manually. By default the *input* to the fit command is ('backbone', 'protein','system'); the compact command always uses the second and third group for its purposes or if this fails, prompts the user. Both steps cannot performed in one pass; this is a known limitation of ``trjconv``. An intermediate temporary XTC files is generated which should be automatically cleaned up unless bad things happened. The function tries to honour the input/output formats. For instance, if you want trr output you need to supply a trr file as input and explicitly give the output file also a trr suffix. .. Note:: For big trajectories it can **take a very long time** and consume a **large amount of temporary diskspace**. We follow the `g_spatial documentation`_ in preparing the trajectories:: trjconv -s a.tpr -f a.xtc -o b.xtc -center -boxcenter tric -ur compact -pbc mol trjconv -s a.tpr -f b.xtc -o c.xtc -fit rot+trans .. _`g_spatial documentation`: http://www.gromacs.org/Documentation/Gromacs_Utilities/g_spatial
gromacs/cbook.py
def trj_fitandcenter(xy=False, **kwargs): """Center everything and make a compact representation (pass 1) and fit the system to a reference (pass 2). :Keywords: *s* input structure file (tpr file required to make molecule whole); if a list or tuple is provided then s[0] is used for pass 1 (should be a tpr) and s[1] is used for the fitting step (can be a pdb of the whole system) If a second structure is supplied then it is assumed that the fitted trajectory should *not* be centered. *f* input trajectory *o* output trajectory *input* A list with three groups. The default is ['backbone', 'protein','system'] The fit command uses all three (1st for least square fit, 2nd for centering, 3rd for output), the centered/make-whole stage use 2nd for centering and 3rd for output. *input1* If *input1* is supplied then *input* is used exclusively for the fitting stage (pass 2) and *input1* for the centering (pass 1). *n* Index file used for pass 1 and pass 2. *n1* If *n1* is supplied then index *n1* is only used for pass 1 (centering) and *n* for pass 2 (fitting). *xy* : boolean If ``True`` then only do a rot+trans fit in the xy plane (good for membrane simulations); default is ``False``. *kwargs* All other arguments are passed to :class:`~gromacs.tools.Trjconv`. Note that here we first center the protein and create a compact box, using ``-pbc mol -ur compact -center -boxcenter tric`` and write an intermediate xtc. Then in a second pass we perform a rotation+translation fit (or restricted to the xy plane if *xy* = ``True`` is set) on the intermediate xtc to produce the final trajectory. Doing it in this order has the disadvantage that the solvent box is rotating around the protein but the opposite order (with center/compact second) produces strange artifacts where columns of solvent appear cut out from the box---it probably means that after rotation the information for the periodic boundaries is not correct any more. 
Most kwargs are passed to both invocations of :class:`gromacs.tools.Trjconv` so it does not really make sense to use eg *skip*; in this case do things manually. By default the *input* to the fit command is ('backbone', 'protein','system'); the compact command always uses the second and third group for its purposes or if this fails, prompts the user. Both steps cannot performed in one pass; this is a known limitation of ``trjconv``. An intermediate temporary XTC files is generated which should be automatically cleaned up unless bad things happened. The function tries to honour the input/output formats. For instance, if you want trr output you need to supply a trr file as input and explicitly give the output file also a trr suffix. .. Note:: For big trajectories it can **take a very long time** and consume a **large amount of temporary diskspace**. We follow the `g_spatial documentation`_ in preparing the trajectories:: trjconv -s a.tpr -f a.xtc -o b.xtc -center -boxcenter tric -ur compact -pbc mol trjconv -s a.tpr -f b.xtc -o c.xtc -fit rot+trans .. _`g_spatial documentation`: http://www.gromacs.org/Documentation/Gromacs_Utilities/g_spatial """ if xy: fitmode = 'rotxy+transxy' kwargs.pop('fit', None) else: fitmode = kwargs.pop('fit', 'rot+trans') # user can use progressive, too intrj = kwargs.pop('f', None) # get the correct suffix for the intermediate step: only trr will # keep velocities/forces! 
suffix = os.path.splitext(intrj)[1] if not suffix in ('xtc', 'trr'): suffix = '.xtc' outtrj = kwargs.pop('o', None) ndx = kwargs.pop('n', None) ndxcompact = kwargs.pop('n1', ndx) structures = kwargs.pop('s', None) if type(structures) in (tuple, list): try: compact_structure, fit_structure = structures except: raise ValueError("argument s must be a pair of tpr/pdb files or a single structure file") else: compact_structure = fit_structure = structures inpfit = kwargs.pop('input', ('backbone', 'protein','system')) try: _inpcompact = inpfit[1:] # use 2nd and 3rd group for compact except TypeError: _inpcompact = None inpcompact = kwargs.pop('input1', _inpcompact) # ... or the user supplied ones fd, tmptrj = tempfile.mkstemp(suffix=suffix, prefix='pbc_compact_') logger.info("Input structure for PBC: {compact_structure!r}".format(**vars())) logger.info("Input structure for fit: {fit_structure!r}".format(**vars())) logger.info("Input trajectory: {intrj!r}".format(**vars())) logger.info("Output trajectory: {outtrj!r}".format(**vars())) logger.debug("Writing temporary trajectory {tmptrj!r} (will be auto-cleaned).".format(**vars())) sys.stdout.flush() try: gromacs.trjconv(s=compact_structure, f=intrj, o=tmptrj, n=ndxcompact, ur='compact', center=True, boxcenter='tric', pbc='mol', input=inpcompact, **kwargs) # explicitly set pbc="none" for the fitting stage (anything else will produce rubbish and/or # complaints from Gromacs) kwargs['pbc'] = "none" if compact_structure == fit_structure: # fit as ususal, including centering # (Is center=True really necessary? -- note, if I remove center=True then # I MUST fiddle inpfit as below!!) gromacs.trjconv(s=fit_structure, f=tmptrj, o=outtrj, n=ndx, fit=fitmode, center=True, input=inpfit, **kwargs) else: # make sure that we fit EXACTLY as the user wants inpfit = [inpfit[0], inpfit[-1]] gromacs.trjconv(s=fit_structure, f=tmptrj, o=outtrj, n=ndx, fit=fitmode, input=inpfit, **kwargs) finally: utilities.unlink_gmx(tmptrj)
def trj_fitandcenter(xy=False, **kwargs): """Center everything and make a compact representation (pass 1) and fit the system to a reference (pass 2). :Keywords: *s* input structure file (tpr file required to make molecule whole); if a list or tuple is provided then s[0] is used for pass 1 (should be a tpr) and s[1] is used for the fitting step (can be a pdb of the whole system) If a second structure is supplied then it is assumed that the fitted trajectory should *not* be centered. *f* input trajectory *o* output trajectory *input* A list with three groups. The default is ['backbone', 'protein','system'] The fit command uses all three (1st for least square fit, 2nd for centering, 3rd for output), the centered/make-whole stage use 2nd for centering and 3rd for output. *input1* If *input1* is supplied then *input* is used exclusively for the fitting stage (pass 2) and *input1* for the centering (pass 1). *n* Index file used for pass 1 and pass 2. *n1* If *n1* is supplied then index *n1* is only used for pass 1 (centering) and *n* for pass 2 (fitting). *xy* : boolean If ``True`` then only do a rot+trans fit in the xy plane (good for membrane simulations); default is ``False``. *kwargs* All other arguments are passed to :class:`~gromacs.tools.Trjconv`. Note that here we first center the protein and create a compact box, using ``-pbc mol -ur compact -center -boxcenter tric`` and write an intermediate xtc. Then in a second pass we perform a rotation+translation fit (or restricted to the xy plane if *xy* = ``True`` is set) on the intermediate xtc to produce the final trajectory. Doing it in this order has the disadvantage that the solvent box is rotating around the protein but the opposite order (with center/compact second) produces strange artifacts where columns of solvent appear cut out from the box---it probably means that after rotation the information for the periodic boundaries is not correct any more. 
Most kwargs are passed to both invocations of :class:`gromacs.tools.Trjconv` so it does not really make sense to use eg *skip*; in this case do things manually. By default the *input* to the fit command is ('backbone', 'protein','system'); the compact command always uses the second and third group for its purposes or if this fails, prompts the user. Both steps cannot performed in one pass; this is a known limitation of ``trjconv``. An intermediate temporary XTC files is generated which should be automatically cleaned up unless bad things happened. The function tries to honour the input/output formats. For instance, if you want trr output you need to supply a trr file as input and explicitly give the output file also a trr suffix. .. Note:: For big trajectories it can **take a very long time** and consume a **large amount of temporary diskspace**. We follow the `g_spatial documentation`_ in preparing the trajectories:: trjconv -s a.tpr -f a.xtc -o b.xtc -center -boxcenter tric -ur compact -pbc mol trjconv -s a.tpr -f b.xtc -o c.xtc -fit rot+trans .. _`g_spatial documentation`: http://www.gromacs.org/Documentation/Gromacs_Utilities/g_spatial """ if xy: fitmode = 'rotxy+transxy' kwargs.pop('fit', None) else: fitmode = kwargs.pop('fit', 'rot+trans') # user can use progressive, too intrj = kwargs.pop('f', None) # get the correct suffix for the intermediate step: only trr will # keep velocities/forces! 
suffix = os.path.splitext(intrj)[1] if not suffix in ('xtc', 'trr'): suffix = '.xtc' outtrj = kwargs.pop('o', None) ndx = kwargs.pop('n', None) ndxcompact = kwargs.pop('n1', ndx) structures = kwargs.pop('s', None) if type(structures) in (tuple, list): try: compact_structure, fit_structure = structures except: raise ValueError("argument s must be a pair of tpr/pdb files or a single structure file") else: compact_structure = fit_structure = structures inpfit = kwargs.pop('input', ('backbone', 'protein','system')) try: _inpcompact = inpfit[1:] # use 2nd and 3rd group for compact except TypeError: _inpcompact = None inpcompact = kwargs.pop('input1', _inpcompact) # ... or the user supplied ones fd, tmptrj = tempfile.mkstemp(suffix=suffix, prefix='pbc_compact_') logger.info("Input structure for PBC: {compact_structure!r}".format(**vars())) logger.info("Input structure for fit: {fit_structure!r}".format(**vars())) logger.info("Input trajectory: {intrj!r}".format(**vars())) logger.info("Output trajectory: {outtrj!r}".format(**vars())) logger.debug("Writing temporary trajectory {tmptrj!r} (will be auto-cleaned).".format(**vars())) sys.stdout.flush() try: gromacs.trjconv(s=compact_structure, f=intrj, o=tmptrj, n=ndxcompact, ur='compact', center=True, boxcenter='tric', pbc='mol', input=inpcompact, **kwargs) # explicitly set pbc="none" for the fitting stage (anything else will produce rubbish and/or # complaints from Gromacs) kwargs['pbc'] = "none" if compact_structure == fit_structure: # fit as ususal, including centering # (Is center=True really necessary? -- note, if I remove center=True then # I MUST fiddle inpfit as below!!) gromacs.trjconv(s=fit_structure, f=tmptrj, o=outtrj, n=ndx, fit=fitmode, center=True, input=inpfit, **kwargs) else: # make sure that we fit EXACTLY as the user wants inpfit = [inpfit[0], inpfit[-1]] gromacs.trjconv(s=fit_structure, f=tmptrj, o=outtrj, n=ndx, fit=fitmode, input=inpfit, **kwargs) finally: utilities.unlink_gmx(tmptrj)
[ "Center", "everything", "and", "make", "a", "compact", "representation", "(", "pass", "1", ")", "and", "fit", "the", "system", "to", "a", "reference", "(", "pass", "2", ")", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L212-L343
[ "def", "trj_fitandcenter", "(", "xy", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "xy", ":", "fitmode", "=", "'rotxy+transxy'", "kwargs", ".", "pop", "(", "'fit'", ",", "None", ")", "else", ":", "fitmode", "=", "kwargs", ".", "pop", "(", "'fit'", ",", "'rot+trans'", ")", "# user can use progressive, too", "intrj", "=", "kwargs", ".", "pop", "(", "'f'", ",", "None", ")", "# get the correct suffix for the intermediate step: only trr will", "# keep velocities/forces!", "suffix", "=", "os", ".", "path", ".", "splitext", "(", "intrj", ")", "[", "1", "]", "if", "not", "suffix", "in", "(", "'xtc'", ",", "'trr'", ")", ":", "suffix", "=", "'.xtc'", "outtrj", "=", "kwargs", ".", "pop", "(", "'o'", ",", "None", ")", "ndx", "=", "kwargs", ".", "pop", "(", "'n'", ",", "None", ")", "ndxcompact", "=", "kwargs", ".", "pop", "(", "'n1'", ",", "ndx", ")", "structures", "=", "kwargs", ".", "pop", "(", "'s'", ",", "None", ")", "if", "type", "(", "structures", ")", "in", "(", "tuple", ",", "list", ")", ":", "try", ":", "compact_structure", ",", "fit_structure", "=", "structures", "except", ":", "raise", "ValueError", "(", "\"argument s must be a pair of tpr/pdb files or a single structure file\"", ")", "else", ":", "compact_structure", "=", "fit_structure", "=", "structures", "inpfit", "=", "kwargs", ".", "pop", "(", "'input'", ",", "(", "'backbone'", ",", "'protein'", ",", "'system'", ")", ")", "try", ":", "_inpcompact", "=", "inpfit", "[", "1", ":", "]", "# use 2nd and 3rd group for compact", "except", "TypeError", ":", "_inpcompact", "=", "None", "inpcompact", "=", "kwargs", ".", "pop", "(", "'input1'", ",", "_inpcompact", ")", "# ... 
or the user supplied ones", "fd", ",", "tmptrj", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "suffix", ",", "prefix", "=", "'pbc_compact_'", ")", "logger", ".", "info", "(", "\"Input structure for PBC: {compact_structure!r}\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "logger", ".", "info", "(", "\"Input structure for fit: {fit_structure!r}\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "logger", ".", "info", "(", "\"Input trajectory: {intrj!r}\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "logger", ".", "info", "(", "\"Output trajectory: {outtrj!r}\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "logger", ".", "debug", "(", "\"Writing temporary trajectory {tmptrj!r} (will be auto-cleaned).\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "try", ":", "gromacs", ".", "trjconv", "(", "s", "=", "compact_structure", ",", "f", "=", "intrj", ",", "o", "=", "tmptrj", ",", "n", "=", "ndxcompact", ",", "ur", "=", "'compact'", ",", "center", "=", "True", ",", "boxcenter", "=", "'tric'", ",", "pbc", "=", "'mol'", ",", "input", "=", "inpcompact", ",", "*", "*", "kwargs", ")", "# explicitly set pbc=\"none\" for the fitting stage (anything else will produce rubbish and/or", "# complaints from Gromacs)", "kwargs", "[", "'pbc'", "]", "=", "\"none\"", "if", "compact_structure", "==", "fit_structure", ":", "# fit as ususal, including centering", "# (Is center=True really necessary? 
-- note, if I remove center=True then", "# I MUST fiddle inpfit as below!!)", "gromacs", ".", "trjconv", "(", "s", "=", "fit_structure", ",", "f", "=", "tmptrj", ",", "o", "=", "outtrj", ",", "n", "=", "ndx", ",", "fit", "=", "fitmode", ",", "center", "=", "True", ",", "input", "=", "inpfit", ",", "*", "*", "kwargs", ")", "else", ":", "# make sure that we fit EXACTLY as the user wants", "inpfit", "=", "[", "inpfit", "[", "0", "]", ",", "inpfit", "[", "-", "1", "]", "]", "gromacs", ".", "trjconv", "(", "s", "=", "fit_structure", ",", "f", "=", "tmptrj", ",", "o", "=", "outtrj", ",", "n", "=", "ndx", ",", "fit", "=", "fitmode", ",", "input", "=", "inpfit", ",", "*", "*", "kwargs", ")", "finally", ":", "utilities", ".", "unlink_gmx", "(", "tmptrj", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
cat
Concatenate all parts of a simulation. The xtc, trr, and edr files in *dirname* such as prefix.xtc, prefix.part0002.xtc, prefix.part0003.xtc, ... are 1) moved to the *partsdir* (under *dirname*) 2) concatenated with the Gromacs tools to yield prefix.xtc, prefix.trr, prefix.edr, prefix.gro (or prefix.md) in *dirname* 3) Store these trajectories in *fulldir* .. Note:: Trajectory files are *never* deleted by this function to avoid data loss in case of bugs. You will have to clean up yourself by deleting *dirname*/*partsdir*. Symlinks for the trajectories are *not* handled well and break the function. Use hard links instead. .. Warning:: If an exception occurs when running this function then make doubly and triply sure where your files are before running this function again; otherwise you might **overwrite data**. Possibly you will need to manually move the files from *partsdir* back into the working directory *dirname*; this should onlu overwrite generated files so far but *check carefully*! :Keywords: *prefix* deffnm of the trajectories [md] *resolve_multi" how to deal with multiple "final" gro or pdb files: normally there should only be one but in case of restarting from the checkpoint of a finished simulation one can end up with multiple identical ones. - "pass" : do nothing and log a warning - "guess" : take prefix.pdb or prefix.gro if it exists, otherwise the one of prefix.partNNNN.gro|pdb with the highes NNNN *dirname* change to *dirname* and assume all tarjectories are located there [.] *partsdir* directory where to store the input files (they are moved out of the way); *partsdir* must be manually deleted [parts] *fulldir* directory where to store the final results [full]
gromacs/cbook.py
def cat(prefix="md", dirname=os.path.curdir, partsdir="parts", fulldir="full", resolve_multi="pass"): """Concatenate all parts of a simulation. The xtc, trr, and edr files in *dirname* such as prefix.xtc, prefix.part0002.xtc, prefix.part0003.xtc, ... are 1) moved to the *partsdir* (under *dirname*) 2) concatenated with the Gromacs tools to yield prefix.xtc, prefix.trr, prefix.edr, prefix.gro (or prefix.md) in *dirname* 3) Store these trajectories in *fulldir* .. Note:: Trajectory files are *never* deleted by this function to avoid data loss in case of bugs. You will have to clean up yourself by deleting *dirname*/*partsdir*. Symlinks for the trajectories are *not* handled well and break the function. Use hard links instead. .. Warning:: If an exception occurs when running this function then make doubly and triply sure where your files are before running this function again; otherwise you might **overwrite data**. Possibly you will need to manually move the files from *partsdir* back into the working directory *dirname*; this should onlu overwrite generated files so far but *check carefully*! :Keywords: *prefix* deffnm of the trajectories [md] *resolve_multi" how to deal with multiple "final" gro or pdb files: normally there should only be one but in case of restarting from the checkpoint of a finished simulation one can end up with multiple identical ones. - "pass" : do nothing and log a warning - "guess" : take prefix.pdb or prefix.gro if it exists, otherwise the one of prefix.partNNNN.gro|pdb with the highes NNNN *dirname* change to *dirname* and assume all tarjectories are located there [.] 
*partsdir* directory where to store the input files (they are moved out of the way); *partsdir* must be manually deleted [parts] *fulldir* directory where to store the final results [full] """ gmxcat = {'xtc': gromacs.trjcat, 'trr': gromacs.trjcat, 'edr': gromacs.eneconv, 'log': utilities.cat, } def _cat(prefix, ext, partsdir=partsdir, fulldir=fulldir): filenames = glob_parts(prefix, ext) if ext.startswith('.'): ext = ext[1:] outfile = os.path.join(fulldir, prefix + '.' + ext) if not filenames: return None nonempty_files = [] for f in filenames: if os.stat(f).st_size == 0: logger.warn("File {f!r} is empty, skipping".format(**vars())) continue if os.path.islink(f): # TODO: re-write the symlink to point to the original file errmsg = "Symbolic links do not work (file %(f)r), sorry. " \ "CHECK LOCATION OF FILES MANUALLY BEFORE RUNNING gromacs.cbook.cat() AGAIN!" % vars() logger.exception(errmsg) raise NotImplementedError(errmsg) shutil.move(f, partsdir) nonempty_files.append(f) filepaths = [os.path.join(partsdir, f) for f in nonempty_files] gmxcat[ext](f=filepaths, o=outfile) return outfile _resolve_options = ("pass", "guess") if not resolve_multi in _resolve_options: raise ValueError("resolve_multi must be one of %(_resolve_options)r, " "not %(resolve_multi)r" % vars()) if fulldir == os.path.curdir: wmsg = "Using the current directory as fulldir can potentially lead to data loss if you run this function multiple times." logger.warning(wmsg) warnings.warn(wmsg, category=BadParameterWarning) with utilities.in_dir(dirname, create=False): utilities.mkdir_p(partsdir) utilities.mkdir_p(fulldir) for ext in ('log', 'edr', 'trr', 'xtc'): logger.info("[%(dirname)s] concatenating %(ext)s files...", vars()) outfile = _cat(prefix, ext, partsdir) logger.info("[%(dirname)s] created %(outfile)r", vars()) for ext in ('gro', 'pdb'): # XXX: ugly, make method out of parts? 
filenames = glob_parts(prefix, ext) if len(filenames) == 0: continue # goto next ext elif len(filenames) == 1: pick = filenames[0] else: if resolve_multi == "pass": logger.warning("[%(dirname)s] too many output structures %(filenames)r, " "cannot decide which one --- resolve manually!", vars()) for f in filenames: shutil.move(f, partsdir) continue # goto next ext elif resolve_multi == "guess": pick = prefix + '.' + ext if not pick in filenames: pick = filenames[-1] # filenames are ordered with highest parts at end final = os.path.join(fulldir, prefix + '.' + ext) shutil.copy(pick, final) # copy2 fails on nfs with Darwin at least for f in filenames: shutil.move(f, partsdir) logger.info("[%(dirname)s] collected final structure %(final)r " "(from %(pick)r)", vars()) partsdirpath = utilities.realpath(dirname, partsdir) logger.warn("[%(dirname)s] cat() complete in %(fulldir)r but original files " "in %(partsdirpath)r must be manually removed", vars())
def cat(prefix="md", dirname=os.path.curdir, partsdir="parts", fulldir="full", resolve_multi="pass"): """Concatenate all parts of a simulation. The xtc, trr, and edr files in *dirname* such as prefix.xtc, prefix.part0002.xtc, prefix.part0003.xtc, ... are 1) moved to the *partsdir* (under *dirname*) 2) concatenated with the Gromacs tools to yield prefix.xtc, prefix.trr, prefix.edr, prefix.gro (or prefix.md) in *dirname* 3) Store these trajectories in *fulldir* .. Note:: Trajectory files are *never* deleted by this function to avoid data loss in case of bugs. You will have to clean up yourself by deleting *dirname*/*partsdir*. Symlinks for the trajectories are *not* handled well and break the function. Use hard links instead. .. Warning:: If an exception occurs when running this function then make doubly and triply sure where your files are before running this function again; otherwise you might **overwrite data**. Possibly you will need to manually move the files from *partsdir* back into the working directory *dirname*; this should onlu overwrite generated files so far but *check carefully*! :Keywords: *prefix* deffnm of the trajectories [md] *resolve_multi" how to deal with multiple "final" gro or pdb files: normally there should only be one but in case of restarting from the checkpoint of a finished simulation one can end up with multiple identical ones. - "pass" : do nothing and log a warning - "guess" : take prefix.pdb or prefix.gro if it exists, otherwise the one of prefix.partNNNN.gro|pdb with the highes NNNN *dirname* change to *dirname* and assume all tarjectories are located there [.] 
*partsdir* directory where to store the input files (they are moved out of the way); *partsdir* must be manually deleted [parts] *fulldir* directory where to store the final results [full] """ gmxcat = {'xtc': gromacs.trjcat, 'trr': gromacs.trjcat, 'edr': gromacs.eneconv, 'log': utilities.cat, } def _cat(prefix, ext, partsdir=partsdir, fulldir=fulldir): filenames = glob_parts(prefix, ext) if ext.startswith('.'): ext = ext[1:] outfile = os.path.join(fulldir, prefix + '.' + ext) if not filenames: return None nonempty_files = [] for f in filenames: if os.stat(f).st_size == 0: logger.warn("File {f!r} is empty, skipping".format(**vars())) continue if os.path.islink(f): # TODO: re-write the symlink to point to the original file errmsg = "Symbolic links do not work (file %(f)r), sorry. " \ "CHECK LOCATION OF FILES MANUALLY BEFORE RUNNING gromacs.cbook.cat() AGAIN!" % vars() logger.exception(errmsg) raise NotImplementedError(errmsg) shutil.move(f, partsdir) nonempty_files.append(f) filepaths = [os.path.join(partsdir, f) for f in nonempty_files] gmxcat[ext](f=filepaths, o=outfile) return outfile _resolve_options = ("pass", "guess") if not resolve_multi in _resolve_options: raise ValueError("resolve_multi must be one of %(_resolve_options)r, " "not %(resolve_multi)r" % vars()) if fulldir == os.path.curdir: wmsg = "Using the current directory as fulldir can potentially lead to data loss if you run this function multiple times." logger.warning(wmsg) warnings.warn(wmsg, category=BadParameterWarning) with utilities.in_dir(dirname, create=False): utilities.mkdir_p(partsdir) utilities.mkdir_p(fulldir) for ext in ('log', 'edr', 'trr', 'xtc'): logger.info("[%(dirname)s] concatenating %(ext)s files...", vars()) outfile = _cat(prefix, ext, partsdir) logger.info("[%(dirname)s] created %(outfile)r", vars()) for ext in ('gro', 'pdb'): # XXX: ugly, make method out of parts? 
filenames = glob_parts(prefix, ext) if len(filenames) == 0: continue # goto next ext elif len(filenames) == 1: pick = filenames[0] else: if resolve_multi == "pass": logger.warning("[%(dirname)s] too many output structures %(filenames)r, " "cannot decide which one --- resolve manually!", vars()) for f in filenames: shutil.move(f, partsdir) continue # goto next ext elif resolve_multi == "guess": pick = prefix + '.' + ext if not pick in filenames: pick = filenames[-1] # filenames are ordered with highest parts at end final = os.path.join(fulldir, prefix + '.' + ext) shutil.copy(pick, final) # copy2 fails on nfs with Darwin at least for f in filenames: shutil.move(f, partsdir) logger.info("[%(dirname)s] collected final structure %(final)r " "(from %(pick)r)", vars()) partsdirpath = utilities.realpath(dirname, partsdir) logger.warn("[%(dirname)s] cat() complete in %(fulldir)r but original files " "in %(partsdirpath)r must be manually removed", vars())
[ "Concatenate", "all", "parts", "of", "a", "simulation", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L345-L464
[ "def", "cat", "(", "prefix", "=", "\"md\"", ",", "dirname", "=", "os", ".", "path", ".", "curdir", ",", "partsdir", "=", "\"parts\"", ",", "fulldir", "=", "\"full\"", ",", "resolve_multi", "=", "\"pass\"", ")", ":", "gmxcat", "=", "{", "'xtc'", ":", "gromacs", ".", "trjcat", ",", "'trr'", ":", "gromacs", ".", "trjcat", ",", "'edr'", ":", "gromacs", ".", "eneconv", ",", "'log'", ":", "utilities", ".", "cat", ",", "}", "def", "_cat", "(", "prefix", ",", "ext", ",", "partsdir", "=", "partsdir", ",", "fulldir", "=", "fulldir", ")", ":", "filenames", "=", "glob_parts", "(", "prefix", ",", "ext", ")", "if", "ext", ".", "startswith", "(", "'.'", ")", ":", "ext", "=", "ext", "[", "1", ":", "]", "outfile", "=", "os", ".", "path", ".", "join", "(", "fulldir", ",", "prefix", "+", "'.'", "+", "ext", ")", "if", "not", "filenames", ":", "return", "None", "nonempty_files", "=", "[", "]", "for", "f", "in", "filenames", ":", "if", "os", ".", "stat", "(", "f", ")", ".", "st_size", "==", "0", ":", "logger", ".", "warn", "(", "\"File {f!r} is empty, skipping\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "continue", "if", "os", ".", "path", ".", "islink", "(", "f", ")", ":", "# TODO: re-write the symlink to point to the original file", "errmsg", "=", "\"Symbolic links do not work (file %(f)r), sorry. 
\"", "\"CHECK LOCATION OF FILES MANUALLY BEFORE RUNNING gromacs.cbook.cat() AGAIN!\"", "%", "vars", "(", ")", "logger", ".", "exception", "(", "errmsg", ")", "raise", "NotImplementedError", "(", "errmsg", ")", "shutil", ".", "move", "(", "f", ",", "partsdir", ")", "nonempty_files", ".", "append", "(", "f", ")", "filepaths", "=", "[", "os", ".", "path", ".", "join", "(", "partsdir", ",", "f", ")", "for", "f", "in", "nonempty_files", "]", "gmxcat", "[", "ext", "]", "(", "f", "=", "filepaths", ",", "o", "=", "outfile", ")", "return", "outfile", "_resolve_options", "=", "(", "\"pass\"", ",", "\"guess\"", ")", "if", "not", "resolve_multi", "in", "_resolve_options", ":", "raise", "ValueError", "(", "\"resolve_multi must be one of %(_resolve_options)r, \"", "\"not %(resolve_multi)r\"", "%", "vars", "(", ")", ")", "if", "fulldir", "==", "os", ".", "path", ".", "curdir", ":", "wmsg", "=", "\"Using the current directory as fulldir can potentially lead to data loss if you run this function multiple times.\"", "logger", ".", "warning", "(", "wmsg", ")", "warnings", ".", "warn", "(", "wmsg", ",", "category", "=", "BadParameterWarning", ")", "with", "utilities", ".", "in_dir", "(", "dirname", ",", "create", "=", "False", ")", ":", "utilities", ".", "mkdir_p", "(", "partsdir", ")", "utilities", ".", "mkdir_p", "(", "fulldir", ")", "for", "ext", "in", "(", "'log'", ",", "'edr'", ",", "'trr'", ",", "'xtc'", ")", ":", "logger", ".", "info", "(", "\"[%(dirname)s] concatenating %(ext)s files...\"", ",", "vars", "(", ")", ")", "outfile", "=", "_cat", "(", "prefix", ",", "ext", ",", "partsdir", ")", "logger", ".", "info", "(", "\"[%(dirname)s] created %(outfile)r\"", ",", "vars", "(", ")", ")", "for", "ext", "in", "(", "'gro'", ",", "'pdb'", ")", ":", "# XXX: ugly, make method out of parts?", "filenames", "=", "glob_parts", "(", "prefix", ",", "ext", ")", "if", "len", "(", "filenames", ")", "==", "0", ":", "continue", "# goto next ext", "elif", "len", "(", "filenames", ")", "==", "1", 
":", "pick", "=", "filenames", "[", "0", "]", "else", ":", "if", "resolve_multi", "==", "\"pass\"", ":", "logger", ".", "warning", "(", "\"[%(dirname)s] too many output structures %(filenames)r, \"", "\"cannot decide which one --- resolve manually!\"", ",", "vars", "(", ")", ")", "for", "f", "in", "filenames", ":", "shutil", ".", "move", "(", "f", ",", "partsdir", ")", "continue", "# goto next ext", "elif", "resolve_multi", "==", "\"guess\"", ":", "pick", "=", "prefix", "+", "'.'", "+", "ext", "if", "not", "pick", "in", "filenames", ":", "pick", "=", "filenames", "[", "-", "1", "]", "# filenames are ordered with highest parts at end", "final", "=", "os", ".", "path", ".", "join", "(", "fulldir", ",", "prefix", "+", "'.'", "+", "ext", ")", "shutil", ".", "copy", "(", "pick", ",", "final", ")", "# copy2 fails on nfs with Darwin at least", "for", "f", "in", "filenames", ":", "shutil", ".", "move", "(", "f", ",", "partsdir", ")", "logger", ".", "info", "(", "\"[%(dirname)s] collected final structure %(final)r \"", "\"(from %(pick)r)\"", ",", "vars", "(", ")", ")", "partsdirpath", "=", "utilities", ".", "realpath", "(", "dirname", ",", "partsdir", ")", "logger", ".", "warn", "(", "\"[%(dirname)s] cat() complete in %(fulldir)r but original files \"", "\"in %(partsdirpath)r must be manually removed\"", ",", "vars", "(", ")", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
glob_parts
Find files from a continuation run
gromacs/cbook.py
def glob_parts(prefix, ext): """Find files from a continuation run""" if ext.startswith('.'): ext = ext[1:] files = glob.glob(prefix+'.'+ext) + glob.glob(prefix+'.part[0-9][0-9][0-9][0-9].'+ext) files.sort() # at least some rough sorting... return files
def glob_parts(prefix, ext): """Find files from a continuation run""" if ext.startswith('.'): ext = ext[1:] files = glob.glob(prefix+'.'+ext) + glob.glob(prefix+'.part[0-9][0-9][0-9][0-9].'+ext) files.sort() # at least some rough sorting... return files
[ "Find", "files", "from", "a", "continuation", "run" ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L466-L472
[ "def", "glob_parts", "(", "prefix", ",", "ext", ")", ":", "if", "ext", ".", "startswith", "(", "'.'", ")", ":", "ext", "=", "ext", "[", "1", ":", "]", "files", "=", "glob", ".", "glob", "(", "prefix", "+", "'.'", "+", "ext", ")", "+", "glob", ".", "glob", "(", "prefix", "+", "'.part[0-9][0-9][0-9][0-9].'", "+", "ext", ")", "files", ".", "sort", "(", ")", "# at least some rough sorting...", "return", "files" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
grompp_qtot
Run ``gromacs.grompp`` and return the total charge of the system. :Arguments: The arguments are the ones one would pass to :func:`gromacs.grompp`. :Returns: The total charge as reported Some things to keep in mind: * The stdout output of grompp is only shown when an error occurs. For debugging, look at the log file or screen output and try running the normal :func:`gromacs.grompp` command and analyze the output if the debugging messages are not sufficient. * Check that ``qtot`` is correct. Because the function is based on pattern matching of the informative output of :program:`grompp` it can break when the output format changes. This version recognizes lines like :: ' System has non-zero total charge: -4.000001e+00' using the regular expression :regexp:`System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)`.
gromacs/cbook.py
def grompp_qtot(*args, **kwargs): """Run ``gromacs.grompp`` and return the total charge of the system. :Arguments: The arguments are the ones one would pass to :func:`gromacs.grompp`. :Returns: The total charge as reported Some things to keep in mind: * The stdout output of grompp is only shown when an error occurs. For debugging, look at the log file or screen output and try running the normal :func:`gromacs.grompp` command and analyze the output if the debugging messages are not sufficient. * Check that ``qtot`` is correct. Because the function is based on pattern matching of the informative output of :program:`grompp` it can break when the output format changes. This version recognizes lines like :: ' System has non-zero total charge: -4.000001e+00' using the regular expression :regexp:`System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)`. """ qtot_pattern = re.compile('System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)') # make sure to capture ALL output kwargs['stdout'] = False kwargs['stderr'] = False rc, output, error = grompp_warnonly(*args, **kwargs) gmxoutput = "\n".join([x for x in [output, error] if x is not None]) if rc != 0: # error occured and we want to see the whole output for debugging msg = "grompp_qtot() failed. See warning and screen output for clues." logger.error(msg) import sys sys.stderr.write("=========== grompp (stdout/stderr) ============\n") sys.stderr.write(gmxoutput) sys.stderr.write("===============================================\n") sys.stderr.flush() raise GromacsError(rc, msg) qtot = 0 for line in gmxoutput.split('\n'): m = qtot_pattern.search(line) if m: qtot = float(m.group('qtot')) break logger.info("system total charge qtot = {qtot!r}".format(**vars())) return qtot
def grompp_qtot(*args, **kwargs): """Run ``gromacs.grompp`` and return the total charge of the system. :Arguments: The arguments are the ones one would pass to :func:`gromacs.grompp`. :Returns: The total charge as reported Some things to keep in mind: * The stdout output of grompp is only shown when an error occurs. For debugging, look at the log file or screen output and try running the normal :func:`gromacs.grompp` command and analyze the output if the debugging messages are not sufficient. * Check that ``qtot`` is correct. Because the function is based on pattern matching of the informative output of :program:`grompp` it can break when the output format changes. This version recognizes lines like :: ' System has non-zero total charge: -4.000001e+00' using the regular expression :regexp:`System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)`. """ qtot_pattern = re.compile('System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)') # make sure to capture ALL output kwargs['stdout'] = False kwargs['stderr'] = False rc, output, error = grompp_warnonly(*args, **kwargs) gmxoutput = "\n".join([x for x in [output, error] if x is not None]) if rc != 0: # error occured and we want to see the whole output for debugging msg = "grompp_qtot() failed. See warning and screen output for clues." logger.error(msg) import sys sys.stderr.write("=========== grompp (stdout/stderr) ============\n") sys.stderr.write(gmxoutput) sys.stderr.write("===============================================\n") sys.stderr.flush() raise GromacsError(rc, msg) qtot = 0 for line in gmxoutput.split('\n'): m = qtot_pattern.search(line) if m: qtot = float(m.group('qtot')) break logger.info("system total charge qtot = {qtot!r}".format(**vars())) return qtot
[ "Run", "gromacs", ".", "grompp", "and", "return", "the", "total", "charge", "of", "the", "system", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L589-L637
[ "def", "grompp_qtot", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "qtot_pattern", "=", "re", ".", "compile", "(", "'System has non-zero total charge: *(?P<qtot>[-+]?\\d*\\.\\d+([eE][-+]\\d+)?)'", ")", "# make sure to capture ALL output", "kwargs", "[", "'stdout'", "]", "=", "False", "kwargs", "[", "'stderr'", "]", "=", "False", "rc", ",", "output", ",", "error", "=", "grompp_warnonly", "(", "*", "args", ",", "*", "*", "kwargs", ")", "gmxoutput", "=", "\"\\n\"", ".", "join", "(", "[", "x", "for", "x", "in", "[", "output", ",", "error", "]", "if", "x", "is", "not", "None", "]", ")", "if", "rc", "!=", "0", ":", "# error occured and we want to see the whole output for debugging", "msg", "=", "\"grompp_qtot() failed. See warning and screen output for clues.\"", "logger", ".", "error", "(", "msg", ")", "import", "sys", "sys", ".", "stderr", ".", "write", "(", "\"=========== grompp (stdout/stderr) ============\\n\"", ")", "sys", ".", "stderr", ".", "write", "(", "gmxoutput", ")", "sys", ".", "stderr", ".", "write", "(", "\"===============================================\\n\"", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "raise", "GromacsError", "(", "rc", ",", "msg", ")", "qtot", "=", "0", "for", "line", "in", "gmxoutput", ".", "split", "(", "'\\n'", ")", ":", "m", "=", "qtot_pattern", ".", "search", "(", "line", ")", "if", "m", ":", "qtot", "=", "float", "(", "m", ".", "group", "(", "'qtot'", ")", ")", "break", "logger", ".", "info", "(", "\"system total charge qtot = {qtot!r}\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "return", "qtot" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
_mdp_include_string
Generate a string that can be added to a mdp 'include = ' line.
gromacs/cbook.py
def _mdp_include_string(dirs): """Generate a string that can be added to a mdp 'include = ' line.""" include_paths = [os.path.expanduser(p) for p in dirs] return ' -I'.join([''] + include_paths)
def _mdp_include_string(dirs): """Generate a string that can be added to a mdp 'include = ' line.""" include_paths = [os.path.expanduser(p) for p in dirs] return ' -I'.join([''] + include_paths)
[ "Generate", "a", "string", "that", "can", "be", "added", "to", "a", "mdp", "include", "=", "line", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L639-L642
[ "def", "_mdp_include_string", "(", "dirs", ")", ":", "include_paths", "=", "[", "os", ".", "path", ".", "expanduser", "(", "p", ")", "for", "p", "in", "dirs", "]", "return", "' -I'", ".", "join", "(", "[", "''", "]", "+", "include_paths", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
add_mdp_includes
Set the mdp *include* key in the *kwargs* dict. 1. Add the directory containing *topology*. 2. Add all directories appearing under the key *includes* 3. Generate a string of the form "-Idir1 -Idir2 ..." that is stored under the key *include* (the corresponding mdp parameter) By default, the directories ``.`` and ``..`` are also added to the *include* string for the mdp; when fed into :func:`gromacs.cbook.edit_mdp` it will result in a line such as :: include = -I. -I.. -I../topology_dir .... Note that the user can always override the behaviour by setting the *include* keyword herself; in this case this function does nothing. If no *kwargs* were supplied then a dict is generated with the single *include* entry. :Arguments: *topology* : top filename Topology file; the name of the enclosing directory is added to the include path (if supplied) [``None``] *kwargs* : dict Optional dictionary of mdp keywords; will be modified in place. If it contains the *includes* keyword with either a single string or a list of strings then these paths will be added to the include statement. :Returns: *kwargs* with the *include* keyword added if it did not exist previously; if the keyword already existed, nothing happens. .. Note:: The *kwargs* dict is **modified in place**. This function is a bit of a hack. It might be removed once all setup functions become methods in a nice class.
gromacs/cbook.py
def add_mdp_includes(topology=None, kwargs=None): """Set the mdp *include* key in the *kwargs* dict. 1. Add the directory containing *topology*. 2. Add all directories appearing under the key *includes* 3. Generate a string of the form "-Idir1 -Idir2 ..." that is stored under the key *include* (the corresponding mdp parameter) By default, the directories ``.`` and ``..`` are also added to the *include* string for the mdp; when fed into :func:`gromacs.cbook.edit_mdp` it will result in a line such as :: include = -I. -I.. -I../topology_dir .... Note that the user can always override the behaviour by setting the *include* keyword herself; in this case this function does nothing. If no *kwargs* were supplied then a dict is generated with the single *include* entry. :Arguments: *topology* : top filename Topology file; the name of the enclosing directory is added to the include path (if supplied) [``None``] *kwargs* : dict Optional dictionary of mdp keywords; will be modified in place. If it contains the *includes* keyword with either a single string or a list of strings then these paths will be added to the include statement. :Returns: *kwargs* with the *include* keyword added if it did not exist previously; if the keyword already existed, nothing happens. .. Note:: The *kwargs* dict is **modified in place**. This function is a bit of a hack. It might be removed once all setup functions become methods in a nice class. """ if kwargs is None: kwargs = {} include_dirs = ['.', '..'] # should . & .. always be added? if topology is not None: # half-hack: find additional itps in the same directory as the topology topology_dir = os.path.dirname(topology) include_dirs.append(topology_dir) include_dirs.extend(asiterable(kwargs.pop('includes', []))) # includes can be a list or a string # 1. setdefault: we do nothing if user defined include # 2. modify input in place! kwargs.setdefault('include', _mdp_include_string(include_dirs)) return kwargs
def add_mdp_includes(topology=None, kwargs=None): """Set the mdp *include* key in the *kwargs* dict. 1. Add the directory containing *topology*. 2. Add all directories appearing under the key *includes* 3. Generate a string of the form "-Idir1 -Idir2 ..." that is stored under the key *include* (the corresponding mdp parameter) By default, the directories ``.`` and ``..`` are also added to the *include* string for the mdp; when fed into :func:`gromacs.cbook.edit_mdp` it will result in a line such as :: include = -I. -I.. -I../topology_dir .... Note that the user can always override the behaviour by setting the *include* keyword herself; in this case this function does nothing. If no *kwargs* were supplied then a dict is generated with the single *include* entry. :Arguments: *topology* : top filename Topology file; the name of the enclosing directory is added to the include path (if supplied) [``None``] *kwargs* : dict Optional dictionary of mdp keywords; will be modified in place. If it contains the *includes* keyword with either a single string or a list of strings then these paths will be added to the include statement. :Returns: *kwargs* with the *include* keyword added if it did not exist previously; if the keyword already existed, nothing happens. .. Note:: The *kwargs* dict is **modified in place**. This function is a bit of a hack. It might be removed once all setup functions become methods in a nice class. """ if kwargs is None: kwargs = {} include_dirs = ['.', '..'] # should . & .. always be added? if topology is not None: # half-hack: find additional itps in the same directory as the topology topology_dir = os.path.dirname(topology) include_dirs.append(topology_dir) include_dirs.extend(asiterable(kwargs.pop('includes', []))) # includes can be a list or a string # 1. setdefault: we do nothing if user defined include # 2. modify input in place! kwargs.setdefault('include', _mdp_include_string(include_dirs)) return kwargs
[ "Set", "the", "mdp", "*", "include", "*", "key", "in", "the", "*", "kwargs", "*", "dict", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L644-L697
[ "def", "add_mdp_includes", "(", "topology", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "include_dirs", "=", "[", "'.'", ",", "'..'", "]", "# should . & .. always be added?", "if", "topology", "is", "not", "None", ":", "# half-hack: find additional itps in the same directory as the topology", "topology_dir", "=", "os", ".", "path", ".", "dirname", "(", "topology", ")", "include_dirs", ".", "append", "(", "topology_dir", ")", "include_dirs", ".", "extend", "(", "asiterable", "(", "kwargs", ".", "pop", "(", "'includes'", ",", "[", "]", ")", ")", ")", "# includes can be a list or a string", "# 1. setdefault: we do nothing if user defined include", "# 2. modify input in place!", "kwargs", ".", "setdefault", "(", "'include'", ",", "_mdp_include_string", "(", "include_dirs", ")", ")", "return", "kwargs" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
filter_grompp_options
Returns one dictionary only containing valid :program:`grompp` options and everything else. Option list is hard coded and nased on :class:`~gromacs.tools.grompp` 4.5.3. :Returns: ``(grompp_dict, other_dict)`` .. versionadded:: 0.2.4
gromacs/cbook.py
def filter_grompp_options(**kwargs): """Returns one dictionary only containing valid :program:`grompp` options and everything else. Option list is hard coded and nased on :class:`~gromacs.tools.grompp` 4.5.3. :Returns: ``(grompp_dict, other_dict)`` .. versionadded:: 0.2.4 """ grompp_options = ('f','po','c','r','rb','n','p','pp','o','t','e', # files 'h', 'noh', 'version', 'noversion', 'nice', 'v', 'nov', 'time', 'rmvsbds', 'normvsbds', 'maxwarn', 'zero', 'nozero', 'renum', 'norenum') grompp = dict((k,v) for k,v in kwargs.items() if k in grompp_options) other = dict((k,v) for k,v in kwargs.items() if k not in grompp_options) return grompp, other
def filter_grompp_options(**kwargs): """Returns one dictionary only containing valid :program:`grompp` options and everything else. Option list is hard coded and nased on :class:`~gromacs.tools.grompp` 4.5.3. :Returns: ``(grompp_dict, other_dict)`` .. versionadded:: 0.2.4 """ grompp_options = ('f','po','c','r','rb','n','p','pp','o','t','e', # files 'h', 'noh', 'version', 'noversion', 'nice', 'v', 'nov', 'time', 'rmvsbds', 'normvsbds', 'maxwarn', 'zero', 'nozero', 'renum', 'norenum') grompp = dict((k,v) for k,v in kwargs.items() if k in grompp_options) other = dict((k,v) for k,v in kwargs.items() if k not in grompp_options) return grompp, other
[ "Returns", "one", "dictionary", "only", "containing", "valid", ":", "program", ":", "grompp", "options", "and", "everything", "else", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L699-L714
[ "def", "filter_grompp_options", "(", "*", "*", "kwargs", ")", ":", "grompp_options", "=", "(", "'f'", ",", "'po'", ",", "'c'", ",", "'r'", ",", "'rb'", ",", "'n'", ",", "'p'", ",", "'pp'", ",", "'o'", ",", "'t'", ",", "'e'", ",", "# files", "'h'", ",", "'noh'", ",", "'version'", ",", "'noversion'", ",", "'nice'", ",", "'v'", ",", "'nov'", ",", "'time'", ",", "'rmvsbds'", ",", "'normvsbds'", ",", "'maxwarn'", ",", "'zero'", ",", "'nozero'", ",", "'renum'", ",", "'norenum'", ")", "grompp", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "k", "in", "grompp_options", ")", "other", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "k", "not", "in", "grompp_options", ")", "return", "grompp", ",", "other" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
create_portable_topology
Create a processed topology. The processed (or portable) topology file does not contain any ``#include`` statements and hence can be easily copied around. It also makes it possible to re-grompp without having any special itp files available. :Arguments: *topol* topology file *struct* coordinat (structure) file :Keywords: *processed* name of the new topology file; if not set then it is named like *topol* but with ``pp_`` prepended *includes* path or list of paths of directories in which itp files are searched for *grompp_kwargs** other options for :program:`grompp` such as ``maxwarn=2`` can also be supplied :Returns: full path to the processed topology
gromacs/cbook.py
def create_portable_topology(topol, struct, **kwargs): """Create a processed topology. The processed (or portable) topology file does not contain any ``#include`` statements and hence can be easily copied around. It also makes it possible to re-grompp without having any special itp files available. :Arguments: *topol* topology file *struct* coordinat (structure) file :Keywords: *processed* name of the new topology file; if not set then it is named like *topol* but with ``pp_`` prepended *includes* path or list of paths of directories in which itp files are searched for *grompp_kwargs** other options for :program:`grompp` such as ``maxwarn=2`` can also be supplied :Returns: full path to the processed topology """ _topoldir, _topol = os.path.split(topol) processed = kwargs.pop('processed', os.path.join(_topoldir, 'pp_'+_topol)) grompp_kwargs, mdp_kwargs = filter_grompp_options(**kwargs) mdp_kwargs = add_mdp_includes(topol, mdp_kwargs) with tempfile.NamedTemporaryFile(suffix='.mdp') as mdp: mdp.write('; empty mdp file\ninclude = {include!s}\n'.format(**mdp_kwargs)) mdp.flush() grompp_kwargs['p'] = topol grompp_kwargs['pp'] = processed grompp_kwargs['f'] = mdp.name grompp_kwargs['c'] = struct grompp_kwargs['v'] = False try: gromacs.grompp(**grompp_kwargs) finally: utilities.unlink_gmx('topol.tpr', 'mdout.mdp') return utilities.realpath(processed)
def create_portable_topology(topol, struct, **kwargs): """Create a processed topology. The processed (or portable) topology file does not contain any ``#include`` statements and hence can be easily copied around. It also makes it possible to re-grompp without having any special itp files available. :Arguments: *topol* topology file *struct* coordinat (structure) file :Keywords: *processed* name of the new topology file; if not set then it is named like *topol* but with ``pp_`` prepended *includes* path or list of paths of directories in which itp files are searched for *grompp_kwargs** other options for :program:`grompp` such as ``maxwarn=2`` can also be supplied :Returns: full path to the processed topology """ _topoldir, _topol = os.path.split(topol) processed = kwargs.pop('processed', os.path.join(_topoldir, 'pp_'+_topol)) grompp_kwargs, mdp_kwargs = filter_grompp_options(**kwargs) mdp_kwargs = add_mdp_includes(topol, mdp_kwargs) with tempfile.NamedTemporaryFile(suffix='.mdp') as mdp: mdp.write('; empty mdp file\ninclude = {include!s}\n'.format(**mdp_kwargs)) mdp.flush() grompp_kwargs['p'] = topol grompp_kwargs['pp'] = processed grompp_kwargs['f'] = mdp.name grompp_kwargs['c'] = struct grompp_kwargs['v'] = False try: gromacs.grompp(**grompp_kwargs) finally: utilities.unlink_gmx('topol.tpr', 'mdout.mdp') return utilities.realpath(processed)
[ "Create", "a", "processed", "topology", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L716-L759
[ "def", "create_portable_topology", "(", "topol", ",", "struct", ",", "*", "*", "kwargs", ")", ":", "_topoldir", ",", "_topol", "=", "os", ".", "path", ".", "split", "(", "topol", ")", "processed", "=", "kwargs", ".", "pop", "(", "'processed'", ",", "os", ".", "path", ".", "join", "(", "_topoldir", ",", "'pp_'", "+", "_topol", ")", ")", "grompp_kwargs", ",", "mdp_kwargs", "=", "filter_grompp_options", "(", "*", "*", "kwargs", ")", "mdp_kwargs", "=", "add_mdp_includes", "(", "topol", ",", "mdp_kwargs", ")", "with", "tempfile", ".", "NamedTemporaryFile", "(", "suffix", "=", "'.mdp'", ")", "as", "mdp", ":", "mdp", ".", "write", "(", "'; empty mdp file\\ninclude = {include!s}\\n'", ".", "format", "(", "*", "*", "mdp_kwargs", ")", ")", "mdp", ".", "flush", "(", ")", "grompp_kwargs", "[", "'p'", "]", "=", "topol", "grompp_kwargs", "[", "'pp'", "]", "=", "processed", "grompp_kwargs", "[", "'f'", "]", "=", "mdp", ".", "name", "grompp_kwargs", "[", "'c'", "]", "=", "struct", "grompp_kwargs", "[", "'v'", "]", "=", "False", "try", ":", "gromacs", ".", "grompp", "(", "*", "*", "grompp_kwargs", ")", "finally", ":", "utilities", ".", "unlink_gmx", "(", "'topol.tpr'", ",", "'mdout.mdp'", ")", "return", "utilities", ".", "realpath", "(", "processed", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
get_volume
Return the volume in nm^3 of structure file *f*. (Uses :func:`gromacs.editconf`; error handling is not good)
gromacs/cbook.py
def get_volume(f): """Return the volume in nm^3 of structure file *f*. (Uses :func:`gromacs.editconf`; error handling is not good) """ fd, temp = tempfile.mkstemp('.gro') try: rc,out,err = gromacs.editconf(f=f, o=temp, stdout=False) finally: os.unlink(temp) return [float(x.split()[1]) for x in out.splitlines() if x.startswith('Volume:')][0]
def get_volume(f): """Return the volume in nm^3 of structure file *f*. (Uses :func:`gromacs.editconf`; error handling is not good) """ fd, temp = tempfile.mkstemp('.gro') try: rc,out,err = gromacs.editconf(f=f, o=temp, stdout=False) finally: os.unlink(temp) return [float(x.split()[1]) for x in out.splitlines() if x.startswith('Volume:')][0]
[ "Return", "the", "volume", "in", "nm^3", "of", "structure", "file", "*", "f", "*", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L761-L772
[ "def", "get_volume", "(", "f", ")", ":", "fd", ",", "temp", "=", "tempfile", ".", "mkstemp", "(", "'.gro'", ")", "try", ":", "rc", ",", "out", ",", "err", "=", "gromacs", ".", "editconf", "(", "f", "=", "f", ",", "o", "=", "temp", ",", "stdout", "=", "False", ")", "finally", ":", "os", ".", "unlink", "(", "temp", ")", "return", "[", "float", "(", "x", ".", "split", "(", ")", "[", "1", "]", ")", "for", "x", "in", "out", ".", "splitlines", "(", ")", "if", "x", ".", "startswith", "(", "'Volume:'", ")", "]", "[", "0", "]" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
edit_mdp
Change values in a Gromacs mdp file. Parameters and values are supplied as substitutions, eg ``nsteps=1000``. By default the template mdp file is **overwritten in place**. If a parameter does not exist in the template then it cannot be substituted and the parameter/value pair is returned. The user has to check the returned list in order to make sure that everything worked as expected. At the moment it is not possible to automatically append the new values to the mdp file because of ambiguities when having to replace dashes in parameter names with underscores (see the notes below on dashes/underscores). If a parameter is set to the value ``None`` then it will be ignored. :Arguments: *mdp* : filename filename of input (and output filename of ``new_mdp=None``) *new_mdp* : filename filename of alternative output mdp file [None] *extend_parameters* : string or list of strings single parameter or list of parameters for which the new values should be appended to the existing value in the mdp file. This makes mostly sense for a single parameter, namely 'include', which is set as the default. Set to ``[]`` to disable. ['include'] *substitutions* parameter=value pairs, where parameter is defined by the Gromacs mdp file; dashes in parameter names have to be replaced by underscores. If a value is a list-like object then the items are written as a sequence, joined with spaces, e.g. :: ref_t=[310,310,310] ---> ref_t = 310 310 310 :Returns: Dict of parameters that have *not* been substituted. **Example** :: edit_mdp('md.mdp', new_mdp='long_md.mdp', nsteps=100000, nstxtcout=1000, lincs_iter=2) .. Note:: * Dashes in Gromacs mdp parameters have to be replaced by an underscore when supplied as python keyword arguments (a limitation of python). For example the MDP syntax is ``lincs-iter = 4`` but the corresponding keyword would be ``lincs_iter = 4``. * If the keyword is set as a dict key, eg ``mdp_params['lincs-iter']=4`` then one does not have to substitute. 
* Parameters *aa_bb* and *aa-bb* are considered the same (although this should not be a problem in practice because there are no mdp parameters that only differ by a underscore). * This code is more compact in ``Perl`` as one can use ``s///`` operators: ``s/^(\s*${key}\s*=\s*).*/$1${val}/`` .. SeeAlso:: One can also load the mdp file with :class:`gromacs.formats.MDP`, edit the object (a dict), and save it again.
gromacs/cbook.py
def edit_mdp(mdp, new_mdp=None, extend_parameters=None, **substitutions): """Change values in a Gromacs mdp file. Parameters and values are supplied as substitutions, eg ``nsteps=1000``. By default the template mdp file is **overwritten in place**. If a parameter does not exist in the template then it cannot be substituted and the parameter/value pair is returned. The user has to check the returned list in order to make sure that everything worked as expected. At the moment it is not possible to automatically append the new values to the mdp file because of ambiguities when having to replace dashes in parameter names with underscores (see the notes below on dashes/underscores). If a parameter is set to the value ``None`` then it will be ignored. :Arguments: *mdp* : filename filename of input (and output filename of ``new_mdp=None``) *new_mdp* : filename filename of alternative output mdp file [None] *extend_parameters* : string or list of strings single parameter or list of parameters for which the new values should be appended to the existing value in the mdp file. This makes mostly sense for a single parameter, namely 'include', which is set as the default. Set to ``[]`` to disable. ['include'] *substitutions* parameter=value pairs, where parameter is defined by the Gromacs mdp file; dashes in parameter names have to be replaced by underscores. If a value is a list-like object then the items are written as a sequence, joined with spaces, e.g. :: ref_t=[310,310,310] ---> ref_t = 310 310 310 :Returns: Dict of parameters that have *not* been substituted. **Example** :: edit_mdp('md.mdp', new_mdp='long_md.mdp', nsteps=100000, nstxtcout=1000, lincs_iter=2) .. Note:: * Dashes in Gromacs mdp parameters have to be replaced by an underscore when supplied as python keyword arguments (a limitation of python). For example the MDP syntax is ``lincs-iter = 4`` but the corresponding keyword would be ``lincs_iter = 4``. 
* If the keyword is set as a dict key, eg ``mdp_params['lincs-iter']=4`` then one does not have to substitute. * Parameters *aa_bb* and *aa-bb* are considered the same (although this should not be a problem in practice because there are no mdp parameters that only differ by a underscore). * This code is more compact in ``Perl`` as one can use ``s///`` operators: ``s/^(\s*${key}\s*=\s*).*/$1${val}/`` .. SeeAlso:: One can also load the mdp file with :class:`gromacs.formats.MDP`, edit the object (a dict), and save it again. """ if new_mdp is None: new_mdp = mdp if extend_parameters is None: extend_parameters = ['include'] else: extend_parameters = list(asiterable(extend_parameters)) # None parameters should be ignored (simple way to keep the template defaults) substitutions = {k: v for k,v in substitutions.items() if v is not None} params = list(substitutions.keys()) # list will be reduced for each match def demangled(p): """Return a RE string that matches the parameter.""" return p.replace('_', '[-_]') # must catch either - or _ patterns = {parameter: re.compile("""\ (?P<assignment>\s*{0!s}\s*=\s*) # parameter == everything before the value (?P<value>[^;]*) # value (stop before comment=;) (?P<comment>\s*;.*)? 
# optional comment """.format(demangled(parameter)), re.VERBOSE) for parameter in substitutions} with tempfile.TemporaryFile() as target: with open(mdp, 'rb') as src: logger.info("editing mdp = {0!r}: {1!r}".format(mdp, substitutions.keys())) for line in src: line = line.decode('utf-8') new_line = line.strip() # \n must be stripped to ensure that new line is built without break for p in params[:]: m = patterns[p].match(new_line) if m: # I am too stupid to replace a specific region in the string so I rebuild it # (matching a line and then replacing value requires TWO re calls) #print 'line:' + new_line #print m.groupdict() if m.group('comment') is None: comment = '' else: comment = " "+m.group('comment') assignment = m.group('assignment') if not assignment.endswith(' '): assignment += ' ' # build new line piece-wise: new_line = assignment if p in extend_parameters: # keep original value and add new stuff at end new_line += str(m.group('value')) + ' ' # automatically transform lists into space-separated string values value = " ".join(map(str, asiterable(substitutions[p]))) new_line += value + comment params.remove(p) break target.write((new_line+'\n').encode('utf-8')) target.seek(0) # XXX: Is there a danger of corrupting the original mdp if something went wrong? with open(new_mdp, 'wb') as final: shutil.copyfileobj(target, final) # return all parameters that have NOT been substituted if len(params) > 0: logger.warn("Not substituted in {new_mdp!r}: {params!r}".format(**vars())) return {p: substitutions[p] for p in params}
def edit_mdp(mdp, new_mdp=None, extend_parameters=None, **substitutions): """Change values in a Gromacs mdp file. Parameters and values are supplied as substitutions, eg ``nsteps=1000``. By default the template mdp file is **overwritten in place**. If a parameter does not exist in the template then it cannot be substituted and the parameter/value pair is returned. The user has to check the returned list in order to make sure that everything worked as expected. At the moment it is not possible to automatically append the new values to the mdp file because of ambiguities when having to replace dashes in parameter names with underscores (see the notes below on dashes/underscores). If a parameter is set to the value ``None`` then it will be ignored. :Arguments: *mdp* : filename filename of input (and output filename of ``new_mdp=None``) *new_mdp* : filename filename of alternative output mdp file [None] *extend_parameters* : string or list of strings single parameter or list of parameters for which the new values should be appended to the existing value in the mdp file. This makes mostly sense for a single parameter, namely 'include', which is set as the default. Set to ``[]`` to disable. ['include'] *substitutions* parameter=value pairs, where parameter is defined by the Gromacs mdp file; dashes in parameter names have to be replaced by underscores. If a value is a list-like object then the items are written as a sequence, joined with spaces, e.g. :: ref_t=[310,310,310] ---> ref_t = 310 310 310 :Returns: Dict of parameters that have *not* been substituted. **Example** :: edit_mdp('md.mdp', new_mdp='long_md.mdp', nsteps=100000, nstxtcout=1000, lincs_iter=2) .. Note:: * Dashes in Gromacs mdp parameters have to be replaced by an underscore when supplied as python keyword arguments (a limitation of python). For example the MDP syntax is ``lincs-iter = 4`` but the corresponding keyword would be ``lincs_iter = 4``. 
* If the keyword is set as a dict key, eg ``mdp_params['lincs-iter']=4`` then one does not have to substitute. * Parameters *aa_bb* and *aa-bb* are considered the same (although this should not be a problem in practice because there are no mdp parameters that only differ by a underscore). * This code is more compact in ``Perl`` as one can use ``s///`` operators: ``s/^(\s*${key}\s*=\s*).*/$1${val}/`` .. SeeAlso:: One can also load the mdp file with :class:`gromacs.formats.MDP`, edit the object (a dict), and save it again. """ if new_mdp is None: new_mdp = mdp if extend_parameters is None: extend_parameters = ['include'] else: extend_parameters = list(asiterable(extend_parameters)) # None parameters should be ignored (simple way to keep the template defaults) substitutions = {k: v for k,v in substitutions.items() if v is not None} params = list(substitutions.keys()) # list will be reduced for each match def demangled(p): """Return a RE string that matches the parameter.""" return p.replace('_', '[-_]') # must catch either - or _ patterns = {parameter: re.compile("""\ (?P<assignment>\s*{0!s}\s*=\s*) # parameter == everything before the value (?P<value>[^;]*) # value (stop before comment=;) (?P<comment>\s*;.*)? 
# optional comment """.format(demangled(parameter)), re.VERBOSE) for parameter in substitutions} with tempfile.TemporaryFile() as target: with open(mdp, 'rb') as src: logger.info("editing mdp = {0!r}: {1!r}".format(mdp, substitutions.keys())) for line in src: line = line.decode('utf-8') new_line = line.strip() # \n must be stripped to ensure that new line is built without break for p in params[:]: m = patterns[p].match(new_line) if m: # I am too stupid to replace a specific region in the string so I rebuild it # (matching a line and then replacing value requires TWO re calls) #print 'line:' + new_line #print m.groupdict() if m.group('comment') is None: comment = '' else: comment = " "+m.group('comment') assignment = m.group('assignment') if not assignment.endswith(' '): assignment += ' ' # build new line piece-wise: new_line = assignment if p in extend_parameters: # keep original value and add new stuff at end new_line += str(m.group('value')) + ' ' # automatically transform lists into space-separated string values value = " ".join(map(str, asiterable(substitutions[p]))) new_line += value + comment params.remove(p) break target.write((new_line+'\n').encode('utf-8')) target.seek(0) # XXX: Is there a danger of corrupting the original mdp if something went wrong? with open(new_mdp, 'wb') as final: shutil.copyfileobj(target, final) # return all parameters that have NOT been substituted if len(params) > 0: logger.warn("Not substituted in {new_mdp!r}: {params!r}".format(**vars())) return {p: substitutions[p] for p in params}
[ "Change", "values", "in", "a", "Gromacs", "mdp", "file", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L778-L899
[ "def", "edit_mdp", "(", "mdp", ",", "new_mdp", "=", "None", ",", "extend_parameters", "=", "None", ",", "*", "*", "substitutions", ")", ":", "if", "new_mdp", "is", "None", ":", "new_mdp", "=", "mdp", "if", "extend_parameters", "is", "None", ":", "extend_parameters", "=", "[", "'include'", "]", "else", ":", "extend_parameters", "=", "list", "(", "asiterable", "(", "extend_parameters", ")", ")", "# None parameters should be ignored (simple way to keep the template defaults)", "substitutions", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "substitutions", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "params", "=", "list", "(", "substitutions", ".", "keys", "(", ")", ")", "# list will be reduced for each match", "def", "demangled", "(", "p", ")", ":", "\"\"\"Return a RE string that matches the parameter.\"\"\"", "return", "p", ".", "replace", "(", "'_'", ",", "'[-_]'", ")", "# must catch either - or _", "patterns", "=", "{", "parameter", ":", "re", ".", "compile", "(", "\"\"\"\\\n (?P<assignment>\\s*{0!s}\\s*=\\s*) # parameter == everything before the value\n (?P<value>[^;]*) # value (stop before comment=;)\n (?P<comment>\\s*;.*)? 
# optional comment\n \"\"\"", ".", "format", "(", "demangled", "(", "parameter", ")", ")", ",", "re", ".", "VERBOSE", ")", "for", "parameter", "in", "substitutions", "}", "with", "tempfile", ".", "TemporaryFile", "(", ")", "as", "target", ":", "with", "open", "(", "mdp", ",", "'rb'", ")", "as", "src", ":", "logger", ".", "info", "(", "\"editing mdp = {0!r}: {1!r}\"", ".", "format", "(", "mdp", ",", "substitutions", ".", "keys", "(", ")", ")", ")", "for", "line", "in", "src", ":", "line", "=", "line", ".", "decode", "(", "'utf-8'", ")", "new_line", "=", "line", ".", "strip", "(", ")", "# \\n must be stripped to ensure that new line is built without break", "for", "p", "in", "params", "[", ":", "]", ":", "m", "=", "patterns", "[", "p", "]", ".", "match", "(", "new_line", ")", "if", "m", ":", "# I am too stupid to replace a specific region in the string so I rebuild it", "# (matching a line and then replacing value requires TWO re calls)", "#print 'line:' + new_line", "#print m.groupdict()", "if", "m", ".", "group", "(", "'comment'", ")", "is", "None", ":", "comment", "=", "''", "else", ":", "comment", "=", "\" \"", "+", "m", ".", "group", "(", "'comment'", ")", "assignment", "=", "m", ".", "group", "(", "'assignment'", ")", "if", "not", "assignment", ".", "endswith", "(", "' '", ")", ":", "assignment", "+=", "' '", "# build new line piece-wise:", "new_line", "=", "assignment", "if", "p", "in", "extend_parameters", ":", "# keep original value and add new stuff at end", "new_line", "+=", "str", "(", "m", ".", "group", "(", "'value'", ")", ")", "+", "' '", "# automatically transform lists into space-separated string values", "value", "=", "\" \"", ".", "join", "(", "map", "(", "str", ",", "asiterable", "(", "substitutions", "[", "p", "]", ")", ")", ")", "new_line", "+=", "value", "+", "comment", "params", ".", "remove", "(", "p", ")", "break", "target", ".", "write", "(", "(", "new_line", "+", "'\\n'", ")", ".", "encode", "(", "'utf-8'", ")", ")", "target", ".", "seek", 
"(", "0", ")", "# XXX: Is there a danger of corrupting the original mdp if something went wrong?", "with", "open", "(", "new_mdp", ",", "'wb'", ")", "as", "final", ":", "shutil", ".", "copyfileobj", "(", "target", ",", "final", ")", "# return all parameters that have NOT been substituted", "if", "len", "(", "params", ")", ">", "0", ":", "logger", ".", "warn", "(", "\"Not substituted in {new_mdp!r}: {params!r}\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")", "return", "{", "p", ":", "substitutions", "[", "p", "]", "for", "p", "in", "params", "}" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
edit_txt
Primitive text file stream editor. This function can be used to edit free-form text files such as the topology file. By default it does an **in-place edit** of *filename*. If *newname* is supplied then the edited file is written to *newname*. :Arguments: *filename* input text file *substitutions* substitution commands (see below for format) *newname* output filename; if ``None`` then *filename* is changed in place [``None``] *substitutions* is a list of triplets; the first two elements are regular expression strings, the last is the substitution value. It mimics ``sed`` search and replace. The rules for *substitutions*: .. productionlist:: substitutions: "[" search_replace_tuple, ... "]" search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")" line_match_RE: regular expression that selects the line (uses match) search_RE: regular expression that is searched in the line replacement: replacement string for search_RE Running :func:`edit_txt` does pretty much what a simple :: sed /line_match_RE/s/search_RE/replacement/ with repeated substitution commands does. Special replacement values: - ``None``: the rule is ignored - ``False``: the line is deleted (even if other rules match) .. note:: * No sanity checks are performed and the substitutions must be supplied exactly as shown. * All substitutions are applied to a line; thus the order of the substitution commands may matter when one substitution generates a match for a subsequent rule. * If replacement is set to ``None`` then the whole expression is ignored and whatever is in the template is used. To unset values you must provided an empty string or similar. * Delete a matching line if replacement=``False``.
gromacs/cbook.py
def edit_txt(filename, substitutions, newname=None): """Primitive text file stream editor. This function can be used to edit free-form text files such as the topology file. By default it does an **in-place edit** of *filename*. If *newname* is supplied then the edited file is written to *newname*. :Arguments: *filename* input text file *substitutions* substitution commands (see below for format) *newname* output filename; if ``None`` then *filename* is changed in place [``None``] *substitutions* is a list of triplets; the first two elements are regular expression strings, the last is the substitution value. It mimics ``sed`` search and replace. The rules for *substitutions*: .. productionlist:: substitutions: "[" search_replace_tuple, ... "]" search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")" line_match_RE: regular expression that selects the line (uses match) search_RE: regular expression that is searched in the line replacement: replacement string for search_RE Running :func:`edit_txt` does pretty much what a simple :: sed /line_match_RE/s/search_RE/replacement/ with repeated substitution commands does. Special replacement values: - ``None``: the rule is ignored - ``False``: the line is deleted (even if other rules match) .. note:: * No sanity checks are performed and the substitutions must be supplied exactly as shown. * All substitutions are applied to a line; thus the order of the substitution commands may matter when one substitution generates a match for a subsequent rule. * If replacement is set to ``None`` then the whole expression is ignored and whatever is in the template is used. To unset values you must provided an empty string or similar. * Delete a matching line if replacement=``False``. """ if newname is None: newname = filename # No sanity checks (figure out later how to give decent diagnostics). # Filter out any rules that have None in replacement. 
_substitutions = [{'lRE': re.compile(str(lRE)), 'sRE': re.compile(str(sRE)), 'repl': repl} for lRE,sRE,repl in substitutions if repl is not None] with tempfile.TemporaryFile() as target: with open(filename, 'rb') as src: logger.info("editing txt = {0!r} ({1:d} substitutions)".format(filename, len(substitutions))) for line in src: line = line.decode("utf-8") keep_line = True for subst in _substitutions: m = subst['lRE'].match(line) if m: # apply substition to this line? logger.debug('match: '+line.rstrip()) if subst['repl'] is False: # special rule: delete line keep_line = False else: # standard replacement line = subst['sRE'].sub(str(subst['repl']), line) logger.debug('replaced: '+line.rstrip()) if keep_line: target.write(line.encode('utf-8')) else: logger.debug("Deleting line %r", line) target.seek(0) with open(newname, 'wb') as final: shutil.copyfileobj(target, final) logger.info("edited txt = {newname!r}".format(**vars()))
def edit_txt(filename, substitutions, newname=None): """Primitive text file stream editor. This function can be used to edit free-form text files such as the topology file. By default it does an **in-place edit** of *filename*. If *newname* is supplied then the edited file is written to *newname*. :Arguments: *filename* input text file *substitutions* substitution commands (see below for format) *newname* output filename; if ``None`` then *filename* is changed in place [``None``] *substitutions* is a list of triplets; the first two elements are regular expression strings, the last is the substitution value. It mimics ``sed`` search and replace. The rules for *substitutions*: .. productionlist:: substitutions: "[" search_replace_tuple, ... "]" search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")" line_match_RE: regular expression that selects the line (uses match) search_RE: regular expression that is searched in the line replacement: replacement string for search_RE Running :func:`edit_txt` does pretty much what a simple :: sed /line_match_RE/s/search_RE/replacement/ with repeated substitution commands does. Special replacement values: - ``None``: the rule is ignored - ``False``: the line is deleted (even if other rules match) .. note:: * No sanity checks are performed and the substitutions must be supplied exactly as shown. * All substitutions are applied to a line; thus the order of the substitution commands may matter when one substitution generates a match for a subsequent rule. * If replacement is set to ``None`` then the whole expression is ignored and whatever is in the template is used. To unset values you must provided an empty string or similar. * Delete a matching line if replacement=``False``. """ if newname is None: newname = filename # No sanity checks (figure out later how to give decent diagnostics). # Filter out any rules that have None in replacement. 
_substitutions = [{'lRE': re.compile(str(lRE)), 'sRE': re.compile(str(sRE)), 'repl': repl} for lRE,sRE,repl in substitutions if repl is not None] with tempfile.TemporaryFile() as target: with open(filename, 'rb') as src: logger.info("editing txt = {0!r} ({1:d} substitutions)".format(filename, len(substitutions))) for line in src: line = line.decode("utf-8") keep_line = True for subst in _substitutions: m = subst['lRE'].match(line) if m: # apply substition to this line? logger.debug('match: '+line.rstrip()) if subst['repl'] is False: # special rule: delete line keep_line = False else: # standard replacement line = subst['sRE'].sub(str(subst['repl']), line) logger.debug('replaced: '+line.rstrip()) if keep_line: target.write(line.encode('utf-8')) else: logger.debug("Deleting line %r", line) target.seek(0) with open(newname, 'wb') as final: shutil.copyfileobj(target, final) logger.info("edited txt = {newname!r}".format(**vars()))
[ "Primitive", "text", "file", "stream", "editor", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L901-L983
[ "def", "edit_txt", "(", "filename", ",", "substitutions", ",", "newname", "=", "None", ")", ":", "if", "newname", "is", "None", ":", "newname", "=", "filename", "# No sanity checks (figure out later how to give decent diagnostics).", "# Filter out any rules that have None in replacement.", "_substitutions", "=", "[", "{", "'lRE'", ":", "re", ".", "compile", "(", "str", "(", "lRE", ")", ")", ",", "'sRE'", ":", "re", ".", "compile", "(", "str", "(", "sRE", ")", ")", ",", "'repl'", ":", "repl", "}", "for", "lRE", ",", "sRE", ",", "repl", "in", "substitutions", "if", "repl", "is", "not", "None", "]", "with", "tempfile", ".", "TemporaryFile", "(", ")", "as", "target", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "src", ":", "logger", ".", "info", "(", "\"editing txt = {0!r} ({1:d} substitutions)\"", ".", "format", "(", "filename", ",", "len", "(", "substitutions", ")", ")", ")", "for", "line", "in", "src", ":", "line", "=", "line", ".", "decode", "(", "\"utf-8\"", ")", "keep_line", "=", "True", "for", "subst", "in", "_substitutions", ":", "m", "=", "subst", "[", "'lRE'", "]", ".", "match", "(", "line", ")", "if", "m", ":", "# apply substition to this line?", "logger", ".", "debug", "(", "'match: '", "+", "line", ".", "rstrip", "(", ")", ")", "if", "subst", "[", "'repl'", "]", "is", "False", ":", "# special rule: delete line", "keep_line", "=", "False", "else", ":", "# standard replacement", "line", "=", "subst", "[", "'sRE'", "]", ".", "sub", "(", "str", "(", "subst", "[", "'repl'", "]", ")", ",", "line", ")", "logger", ".", "debug", "(", "'replaced: '", "+", "line", ".", "rstrip", "(", ")", ")", "if", "keep_line", ":", "target", ".", "write", "(", "line", ".", "encode", "(", "'utf-8'", ")", ")", "else", ":", "logger", ".", "debug", "(", "\"Deleting line %r\"", ",", "line", ")", "target", ".", "seek", "(", "0", ")", "with", "open", "(", "newname", ",", "'wb'", ")", "as", "final", ":", "shutil", ".", "copyfileobj", "(", "target", ",", "final", ")", 
"logger", ".", "info", "(", "\"edited txt = {newname!r}\"", ".", "format", "(", "*", "*", "vars", "(", ")", ")", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
remove_molecules_from_topology
Remove autogenerated [ molecules ] entries from *filename*. Valid entries in ``[ molecules ]`` below the default *marker* are removed. For example, a topology file such as :: [ molecules ] Protein 1 SOL 213 ; The next line is the marker! ; Gromacs auto-generated entries follow: SOL 12345 NA+ 15 CL- 16 ; This is a comment that is NOT deleted. SOL 333 would become:: [ molecules ] Protein 1 SOL 213 ; The next line is the marker! ; Gromacs auto-generated entries follow: ; This is a comment that is NOT deleted. Valid molecule lines look like ``SOL 1234``, ``NA 17`` etc. The actual regular expression used is "\s*[\w+_-]+\s+\d+\s*(;.*)?$". In order to use this function, the marker line has to be manually added to the topology file. :Arguments: *filename* The topology file that includes the ``[ molecules ]`` section. It is **edited in place**. *marker* Any ``[ molecules ]`` entries below this pattern (python regular expression) are removed. Leading white space is ignored. ``None`` uses the default as described above.
gromacs/cbook.py
def remove_molecules_from_topology(filename, **kwargs): """Remove autogenerated [ molecules ] entries from *filename*. Valid entries in ``[ molecules ]`` below the default *marker* are removed. For example, a topology file such as :: [ molecules ] Protein 1 SOL 213 ; The next line is the marker! ; Gromacs auto-generated entries follow: SOL 12345 NA+ 15 CL- 16 ; This is a comment that is NOT deleted. SOL 333 would become:: [ molecules ] Protein 1 SOL 213 ; The next line is the marker! ; Gromacs auto-generated entries follow: ; This is a comment that is NOT deleted. Valid molecule lines look like ``SOL 1234``, ``NA 17`` etc. The actual regular expression used is "\s*[\w+_-]+\s+\d+\s*(;.*)?$". In order to use this function, the marker line has to be manually added to the topology file. :Arguments: *filename* The topology file that includes the ``[ molecules ]`` section. It is **edited in place**. *marker* Any ``[ molecules ]`` entries below this pattern (python regular expression) are removed. Leading white space is ignored. ``None`` uses the default as described above. """ marker = kwargs.pop('marker', None) if marker is None: marker = "; Gromacs auto-generated entries follow:" logger.debug("Scrubbed [ molecules ]: marker = %(marker)r", vars()) p_marker = re.compile("\s*{0!s}".format(marker)) p_molecule = re.compile("\s*[\w+_-]+\s+\d+\s*(;.*)?$") with tempfile.TemporaryFile() as target: with open(filename, 'rb') as src: autogenerated = False n_removed = 0 for line in src: line = line.decode('utf-8') if p_marker.match(line): autogenerated = True if autogenerated and p_molecule.match(line): n_removed += 1 continue # remove by skipping target.write(line.encode('utf-8')) if autogenerated and n_removed > 0: target.seek(0) with open(filename, 'wb') as final: # overwrite original! shutil.copyfileobj(target, final) logger.info("Removed %(n_removed)d autogenerated [ molecules ] from " "topol = %(filename)r" % vars()) return n_removed
def remove_molecules_from_topology(filename, **kwargs): """Remove autogenerated [ molecules ] entries from *filename*. Valid entries in ``[ molecules ]`` below the default *marker* are removed. For example, a topology file such as :: [ molecules ] Protein 1 SOL 213 ; The next line is the marker! ; Gromacs auto-generated entries follow: SOL 12345 NA+ 15 CL- 16 ; This is a comment that is NOT deleted. SOL 333 would become:: [ molecules ] Protein 1 SOL 213 ; The next line is the marker! ; Gromacs auto-generated entries follow: ; This is a comment that is NOT deleted. Valid molecule lines look like ``SOL 1234``, ``NA 17`` etc. The actual regular expression used is "\s*[\w+_-]+\s+\d+\s*(;.*)?$". In order to use this function, the marker line has to be manually added to the topology file. :Arguments: *filename* The topology file that includes the ``[ molecules ]`` section. It is **edited in place**. *marker* Any ``[ molecules ]`` entries below this pattern (python regular expression) are removed. Leading white space is ignored. ``None`` uses the default as described above. """ marker = kwargs.pop('marker', None) if marker is None: marker = "; Gromacs auto-generated entries follow:" logger.debug("Scrubbed [ molecules ]: marker = %(marker)r", vars()) p_marker = re.compile("\s*{0!s}".format(marker)) p_molecule = re.compile("\s*[\w+_-]+\s+\d+\s*(;.*)?$") with tempfile.TemporaryFile() as target: with open(filename, 'rb') as src: autogenerated = False n_removed = 0 for line in src: line = line.decode('utf-8') if p_marker.match(line): autogenerated = True if autogenerated and p_molecule.match(line): n_removed += 1 continue # remove by skipping target.write(line.encode('utf-8')) if autogenerated and n_removed > 0: target.seek(0) with open(filename, 'wb') as final: # overwrite original! shutil.copyfileobj(target, final) logger.info("Removed %(n_removed)d autogenerated [ molecules ] from " "topol = %(filename)r" % vars()) return n_removed
[ "Remove", "autogenerated", "[", "molecules", "]", "entries", "from", "*", "filename", "*", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L985-L1051
[ "def", "remove_molecules_from_topology", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "marker", "=", "kwargs", ".", "pop", "(", "'marker'", ",", "None", ")", "if", "marker", "is", "None", ":", "marker", "=", "\"; Gromacs auto-generated entries follow:\"", "logger", ".", "debug", "(", "\"Scrubbed [ molecules ]: marker = %(marker)r\"", ",", "vars", "(", ")", ")", "p_marker", "=", "re", ".", "compile", "(", "\"\\s*{0!s}\"", ".", "format", "(", "marker", ")", ")", "p_molecule", "=", "re", ".", "compile", "(", "\"\\s*[\\w+_-]+\\s+\\d+\\s*(;.*)?$\"", ")", "with", "tempfile", ".", "TemporaryFile", "(", ")", "as", "target", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "src", ":", "autogenerated", "=", "False", "n_removed", "=", "0", "for", "line", "in", "src", ":", "line", "=", "line", ".", "decode", "(", "'utf-8'", ")", "if", "p_marker", ".", "match", "(", "line", ")", ":", "autogenerated", "=", "True", "if", "autogenerated", "and", "p_molecule", ".", "match", "(", "line", ")", ":", "n_removed", "+=", "1", "continue", "# remove by skipping", "target", ".", "write", "(", "line", ".", "encode", "(", "'utf-8'", ")", ")", "if", "autogenerated", "and", "n_removed", ">", "0", ":", "target", ".", "seek", "(", "0", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "final", ":", "# overwrite original!", "shutil", ".", "copyfileobj", "(", "target", ",", "final", ")", "logger", ".", "info", "(", "\"Removed %(n_removed)d autogenerated [ molecules ] from \"", "\"topol = %(filename)r\"", "%", "vars", "(", ")", ")", "return", "n_removed" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
make_ndx_captured
make_ndx that captures all output Standard :func:`~gromacs.make_ndx` command with the input and output pre-set in such a way that it can be conveniently used for :func:`parse_ndxlist`. Example:: ndx_groups = parse_ndxlist(make_ndx_captured(n=ndx)[0]) Note that the convenient :func:`get_ndx_groups` function does exactly that and can probably used in most cases. :Arguments: keywords are passed on to :func:`~gromacs.make_ndx` :Returns: (*returncode*, *output*, ``None``)
gromacs/cbook.py
def make_ndx_captured(**kwargs): """make_ndx that captures all output Standard :func:`~gromacs.make_ndx` command with the input and output pre-set in such a way that it can be conveniently used for :func:`parse_ndxlist`. Example:: ndx_groups = parse_ndxlist(make_ndx_captured(n=ndx)[0]) Note that the convenient :func:`get_ndx_groups` function does exactly that and can probably used in most cases. :Arguments: keywords are passed on to :func:`~gromacs.make_ndx` :Returns: (*returncode*, *output*, ``None``) """ kwargs['stdout']=False # required for proper output as described in doc user_input = kwargs.pop('input',[]) user_input = [cmd for cmd in user_input if cmd != 'q'] # filter any quit kwargs['input'] = user_input + ['', 'q'] # necessary commands return gromacs.make_ndx(**kwargs)
def make_ndx_captured(**kwargs): """make_ndx that captures all output Standard :func:`~gromacs.make_ndx` command with the input and output pre-set in such a way that it can be conveniently used for :func:`parse_ndxlist`. Example:: ndx_groups = parse_ndxlist(make_ndx_captured(n=ndx)[0]) Note that the convenient :func:`get_ndx_groups` function does exactly that and can probably used in most cases. :Arguments: keywords are passed on to :func:`~gromacs.make_ndx` :Returns: (*returncode*, *output*, ``None``) """ kwargs['stdout']=False # required for proper output as described in doc user_input = kwargs.pop('input',[]) user_input = [cmd for cmd in user_input if cmd != 'q'] # filter any quit kwargs['input'] = user_input + ['', 'q'] # necessary commands return gromacs.make_ndx(**kwargs)
[ "make_ndx", "that", "captures", "all", "output" ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1078-L1100
[ "def", "make_ndx_captured", "(", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'stdout'", "]", "=", "False", "# required for proper output as described in doc", "user_input", "=", "kwargs", ".", "pop", "(", "'input'", ",", "[", "]", ")", "user_input", "=", "[", "cmd", "for", "cmd", "in", "user_input", "if", "cmd", "!=", "'q'", "]", "# filter any quit", "kwargs", "[", "'input'", "]", "=", "user_input", "+", "[", "''", ",", "'q'", "]", "# necessary commands", "return", "gromacs", ".", "make_ndx", "(", "*", "*", "kwargs", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
get_ndx_groups
Return a list of index groups in the index file *ndx*. :Arguments: - *ndx* is a Gromacs index file. - kwargs are passed to :func:`make_ndx_captured`. :Returns: list of groups as supplied by :func:`parse_ndxlist` Alternatively, load the index file with :class:`gromacs.formats.NDX` for full control.
gromacs/cbook.py
def get_ndx_groups(ndx, **kwargs): """Return a list of index groups in the index file *ndx*. :Arguments: - *ndx* is a Gromacs index file. - kwargs are passed to :func:`make_ndx_captured`. :Returns: list of groups as supplied by :func:`parse_ndxlist` Alternatively, load the index file with :class:`gromacs.formats.NDX` for full control. """ fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx') kwargs['o'] = tmp_ndx try: g = parse_ndxlist(make_ndx_captured(n=ndx, **kwargs)[1]) finally: utilities.unlink_gmx(tmp_ndx) return g
def get_ndx_groups(ndx, **kwargs): """Return a list of index groups in the index file *ndx*. :Arguments: - *ndx* is a Gromacs index file. - kwargs are passed to :func:`make_ndx_captured`. :Returns: list of groups as supplied by :func:`parse_ndxlist` Alternatively, load the index file with :class:`gromacs.formats.NDX` for full control. """ fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx') kwargs['o'] = tmp_ndx try: g = parse_ndxlist(make_ndx_captured(n=ndx, **kwargs)[1]) finally: utilities.unlink_gmx(tmp_ndx) return g
[ "Return", "a", "list", "of", "index", "groups", "in", "the", "index", "file", "*", "ndx", "*", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1102-L1121
[ "def", "get_ndx_groups", "(", "ndx", ",", "*", "*", "kwargs", ")", ":", "fd", ",", "tmp_ndx", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'.ndx'", ")", "kwargs", "[", "'o'", "]", "=", "tmp_ndx", "try", ":", "g", "=", "parse_ndxlist", "(", "make_ndx_captured", "(", "n", "=", "ndx", ",", "*", "*", "kwargs", ")", "[", "1", "]", ")", "finally", ":", "utilities", ".", "unlink_gmx", "(", "tmp_ndx", ")", "return", "g" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
parse_ndxlist
Parse output from make_ndx to build list of index groups:: groups = parse_ndxlist(output) output should be the standard output from ``make_ndx``, e.g.:: rc,output,junk = gromacs.make_ndx(..., input=('', 'q'), stdout=False, stderr=True) (or simply use rc,output,junk = cbook.make_ndx_captured(...) which presets input, stdout and stderr; of course input can be overriden.) :Returns: The function returns a list of dicts (``groups``) with fields name name of the groups nr number of the group (starts at 0) natoms number of atoms in the group
gromacs/cbook.py
def parse_ndxlist(output): """Parse output from make_ndx to build list of index groups:: groups = parse_ndxlist(output) output should be the standard output from ``make_ndx``, e.g.:: rc,output,junk = gromacs.make_ndx(..., input=('', 'q'), stdout=False, stderr=True) (or simply use rc,output,junk = cbook.make_ndx_captured(...) which presets input, stdout and stderr; of course input can be overriden.) :Returns: The function returns a list of dicts (``groups``) with fields name name of the groups nr number of the group (starts at 0) natoms number of atoms in the group """ m = NDXLIST.search(output) # make sure we pick up a proper full list grouplist = m.group('LIST') return parse_groups(grouplist)
def parse_ndxlist(output): """Parse output from make_ndx to build list of index groups:: groups = parse_ndxlist(output) output should be the standard output from ``make_ndx``, e.g.:: rc,output,junk = gromacs.make_ndx(..., input=('', 'q'), stdout=False, stderr=True) (or simply use rc,output,junk = cbook.make_ndx_captured(...) which presets input, stdout and stderr; of course input can be overriden.) :Returns: The function returns a list of dicts (``groups``) with fields name name of the groups nr number of the group (starts at 0) natoms number of atoms in the group """ m = NDXLIST.search(output) # make sure we pick up a proper full list grouplist = m.group('LIST') return parse_groups(grouplist)
[ "Parse", "output", "from", "make_ndx", "to", "build", "list", "of", "index", "groups", "::" ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1123-L1152
[ "def", "parse_ndxlist", "(", "output", ")", ":", "m", "=", "NDXLIST", ".", "search", "(", "output", ")", "# make sure we pick up a proper full list", "grouplist", "=", "m", ".", "group", "(", "'LIST'", ")", "return", "parse_groups", "(", "grouplist", ")" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9
valid
parse_groups
Parse ``make_ndx`` output and return groups as a list of dicts.
gromacs/cbook.py
def parse_groups(output): """Parse ``make_ndx`` output and return groups as a list of dicts.""" groups = [] for line in output.split('\n'): m = NDXGROUP.match(line) if m: d = m.groupdict() groups.append({'name': d['GROUPNAME'], 'nr': int(d['GROUPNUMBER']), 'natoms': int(d['NATOMS'])}) return groups
def parse_groups(output): """Parse ``make_ndx`` output and return groups as a list of dicts.""" groups = [] for line in output.split('\n'): m = NDXGROUP.match(line) if m: d = m.groupdict() groups.append({'name': d['GROUPNAME'], 'nr': int(d['GROUPNUMBER']), 'natoms': int(d['NATOMS'])}) return groups
[ "Parse", "make_ndx", "output", "and", "return", "groups", "as", "a", "list", "of", "dicts", "." ]
Becksteinlab/GromacsWrapper
python
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L1154-L1164
[ "def", "parse_groups", "(", "output", ")", ":", "groups", "=", "[", "]", "for", "line", "in", "output", ".", "split", "(", "'\\n'", ")", ":", "m", "=", "NDXGROUP", ".", "match", "(", "line", ")", "if", "m", ":", "d", "=", "m", ".", "groupdict", "(", ")", "groups", ".", "append", "(", "{", "'name'", ":", "d", "[", "'GROUPNAME'", "]", ",", "'nr'", ":", "int", "(", "d", "[", "'GROUPNUMBER'", "]", ")", ",", "'natoms'", ":", "int", "(", "d", "[", "'NATOMS'", "]", ")", "}", ")", "return", "groups" ]
d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9