Dataset columns (name: dtype, length or class range):

partition: stringclasses (3 values)
func_name: stringlengths (1 to 134)
docstring: stringlengths (1 to 46.9k)
path: stringlengths (4 to 223)
original_string: stringlengths (75 to 104k)
code: stringlengths (75 to 104k)
docstring_tokens: listlengths (1 to 1.97k)
repo: stringlengths (7 to 55)
language: stringclasses (1 value)
url: stringlengths (87 to 315)
code_tokens: listlengths (19 to 28.4k)
sha: stringlengths (40 to 40)
valid
_StructMessageToJsonObject
Converts Struct message according to Proto3 JSON Specification.
typy/google/protobuf/json_format.py
def _StructMessageToJsonObject(message, unused_including_default=False): """Converts Struct message according to Proto3 JSON Specification.""" fields = message.fields ret = {} for key in fields: ret[key] = _ValueMessageToJsonObject(fields[key]) return ret
def _StructMessageToJsonObject(message, unused_including_default=False): """Converts Struct message according to Proto3 JSON Specification.""" fields = message.fields ret = {} for key in fields: ret[key] = _ValueMessageToJsonObject(fields[key]) return ret
[ "Converts", "Struct", "message", "according", "to", "Proto3", "JSON", "Specification", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/json_format.py#L271-L277
[ "def", "_StructMessageToJsonObject", "(", "message", ",", "unused_including_default", "=", "False", ")", ":", "fields", "=", "message", ".", "fields", "ret", "=", "{", "}", "for", "key", "in", "fields", ":", "ret", "[", "key", "]", "=", "_ValueMessageToJsonObject", "(", "fields", "[", "key", "]", ")", "return", "ret" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
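A minimal usage sketch of the _StructMessageToJsonObject helper above. It assumes the standard struct_pb2 well-known-types module is importable from this fork's package path; the field names and values are illustrative only.

```python
# Hedged sketch: the import path below is an assumption for the ibelie/typy fork.
from typy.google.protobuf import struct_pb2

s = struct_pb2.Struct()
s.fields['name'].string_value = 'alice'   # Struct.fields is a map<string, Value>
s.fields['age'].number_value = 30
print(_StructMessageToJsonObject(s))      # {'name': 'alice', 'age': 30.0} (key order not guaranteed)
```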
valid
Parse
Parses a JSON representation of a protocol message into a message. Args: text: Message JSON representation. message: A protocol beffer message to merge into. Returns: The same message passed as argument. Raises:: ParseError: On JSON parsing problems.
typy/google/protobuf/json_format.py
def Parse(text, message): """Parses a JSON representation of a protocol message into a message. Args: text: Message JSON representation. message: A protocol beffer message to merge into. Returns: The same message passed as argument. Raises:: ParseError: On JSON parsing problems. """ if not isinstance(text, six.text_type): text = text.decode('utf-8') try: if sys.version_info < (2, 7): # object_pair_hook is not supported before python2.7 js = json.loads(text) else: js = json.loads(text, object_pairs_hook=_DuplicateChecker) except ValueError as e: raise ParseError('Failed to load JSON: {0}.'.format(str(e))) _ConvertMessage(js, message) return message
def Parse(text, message): """Parses a JSON representation of a protocol message into a message. Args: text: Message JSON representation. message: A protocol beffer message to merge into. Returns: The same message passed as argument. Raises:: ParseError: On JSON parsing problems. """ if not isinstance(text, six.text_type): text = text.decode('utf-8') try: if sys.version_info < (2, 7): # object_pair_hook is not supported before python2.7 js = json.loads(text) else: js = json.loads(text, object_pairs_hook=_DuplicateChecker) except ValueError as e: raise ParseError('Failed to load JSON: {0}.'.format(str(e))) _ConvertMessage(js, message) return message
[ "Parses", "a", "JSON", "representation", "of", "a", "protocol", "message", "into", "a", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/json_format.py#L298-L321
[ "def", "Parse", "(", "text", ",", "message", ")", ":", "if", "not", "isinstance", "(", "text", ",", "six", ".", "text_type", ")", ":", "text", "=", "text", ".", "decode", "(", "'utf-8'", ")", "try", ":", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ":", "# object_pair_hook is not supported before python2.7", "js", "=", "json", ".", "loads", "(", "text", ")", "else", ":", "js", "=", "json", ".", "loads", "(", "text", ",", "object_pairs_hook", "=", "_DuplicateChecker", ")", "except", "ValueError", "as", "e", ":", "raise", "ParseError", "(", "'Failed to load JSON: {0}.'", ".", "format", "(", "str", "(", "e", ")", ")", ")", "_ConvertMessage", "(", "js", ",", "message", ")", "return", "message" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
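A short, hedged example of calling Parse on a well-known type. The struct_pb2 import path is an assumption for this fork; the JSON text and keys are illustrative.

```python
# Parse merges the JSON text into the given message in place and returns it.
from typy.google.protobuf import struct_pb2

msg = struct_pb2.Struct()
Parse('{"answer": 42, "ok": true}', msg)
print(msg.fields['answer'].number_value)  # 42.0 (Value stores numbers as doubles)
print(msg.fields['ok'].bool_value)        # True
```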
valid
_ConvertFieldValuePair
Convert field value pairs into regular message. Args: js: A JSON object to convert the field value pairs. message: A regular protocol message to record the data. Raises: ParseError: In case of problems converting.
typy/google/protobuf/json_format.py
def _ConvertFieldValuePair(js, message): """Convert field value pairs into regular message. Args: js: A JSON object to convert the field value pairs. message: A regular protocol message to record the data. Raises: ParseError: In case of problems converting. """ names = [] message_descriptor = message.DESCRIPTOR for name in js: try: field = message_descriptor.fields_by_camelcase_name.get(name, None) if not field: raise ParseError( 'Message type "{0}" has no field named "{1}".'.format( message_descriptor.full_name, name)) if name in names: raise ParseError( 'Message type "{0}" should not have multiple "{1}" fields.'.format( message.DESCRIPTOR.full_name, name)) names.append(name) # Check no other oneof field is parsed. if field.containing_oneof is not None: oneof_name = field.containing_oneof.name if oneof_name in names: raise ParseError('Message type "{0}" should not have multiple "{1}" ' 'oneof fields.'.format( message.DESCRIPTOR.full_name, oneof_name)) names.append(oneof_name) value = js[name] if value is None: message.ClearField(field.name) continue # Parse field value. if _IsMapEntry(field): message.ClearField(field.name) _ConvertMapFieldValue(value, message, field) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: message.ClearField(field.name) if not isinstance(value, list): raise ParseError('repeated field {0} must be in [] which is ' '{1}.'.format(name, value)) if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: # Repeated message field. for item in value: sub_message = getattr(message, field.name).add() # None is a null_value in Value. if (item is None and sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'): raise ParseError('null is not allowed to be used as an element' ' in a repeated field.') _ConvertMessage(item, sub_message) else: # Repeated scalar field. for item in value: if item is None: raise ParseError('null is not allowed to be used as an element' ' in a repeated field.') getattr(message, field.name).append( _ConvertScalarFieldValue(item, field)) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: sub_message = getattr(message, field.name) _ConvertMessage(value, sub_message) else: setattr(message, field.name, _ConvertScalarFieldValue(value, field)) except ParseError as e: if field and field.containing_oneof is None: raise ParseError('Failed to parse {0} field: {1}'.format(name, e)) else: raise ParseError(str(e)) except ValueError as e: raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) except TypeError as e: raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
def _ConvertFieldValuePair(js, message): """Convert field value pairs into regular message. Args: js: A JSON object to convert the field value pairs. message: A regular protocol message to record the data. Raises: ParseError: In case of problems converting. """ names = [] message_descriptor = message.DESCRIPTOR for name in js: try: field = message_descriptor.fields_by_camelcase_name.get(name, None) if not field: raise ParseError( 'Message type "{0}" has no field named "{1}".'.format( message_descriptor.full_name, name)) if name in names: raise ParseError( 'Message type "{0}" should not have multiple "{1}" fields.'.format( message.DESCRIPTOR.full_name, name)) names.append(name) # Check no other oneof field is parsed. if field.containing_oneof is not None: oneof_name = field.containing_oneof.name if oneof_name in names: raise ParseError('Message type "{0}" should not have multiple "{1}" ' 'oneof fields.'.format( message.DESCRIPTOR.full_name, oneof_name)) names.append(oneof_name) value = js[name] if value is None: message.ClearField(field.name) continue # Parse field value. if _IsMapEntry(field): message.ClearField(field.name) _ConvertMapFieldValue(value, message, field) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: message.ClearField(field.name) if not isinstance(value, list): raise ParseError('repeated field {0} must be in [] which is ' '{1}.'.format(name, value)) if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: # Repeated message field. for item in value: sub_message = getattr(message, field.name).add() # None is a null_value in Value. if (item is None and sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'): raise ParseError('null is not allowed to be used as an element' ' in a repeated field.') _ConvertMessage(item, sub_message) else: # Repeated scalar field. for item in value: if item is None: raise ParseError('null is not allowed to be used as an element' ' in a repeated field.') getattr(message, field.name).append( _ConvertScalarFieldValue(item, field)) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: sub_message = getattr(message, field.name) _ConvertMessage(value, sub_message) else: setattr(message, field.name, _ConvertScalarFieldValue(value, field)) except ParseError as e: if field and field.containing_oneof is None: raise ParseError('Failed to parse {0} field: {1}'.format(name, e)) else: raise ParseError(str(e)) except ValueError as e: raise ParseError('Failed to parse {0} field: {1}.'.format(name, e)) except TypeError as e: raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
[ "Convert", "field", "value", "pairs", "into", "regular", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/json_format.py#L324-L402
[ "def", "_ConvertFieldValuePair", "(", "js", ",", "message", ")", ":", "names", "=", "[", "]", "message_descriptor", "=", "message", ".", "DESCRIPTOR", "for", "name", "in", "js", ":", "try", ":", "field", "=", "message_descriptor", ".", "fields_by_camelcase_name", ".", "get", "(", "name", ",", "None", ")", "if", "not", "field", ":", "raise", "ParseError", "(", "'Message type \"{0}\" has no field named \"{1}\".'", ".", "format", "(", "message_descriptor", ".", "full_name", ",", "name", ")", ")", "if", "name", "in", "names", ":", "raise", "ParseError", "(", "'Message type \"{0}\" should not have multiple \"{1}\" fields.'", ".", "format", "(", "message", ".", "DESCRIPTOR", ".", "full_name", ",", "name", ")", ")", "names", ".", "append", "(", "name", ")", "# Check no other oneof field is parsed.", "if", "field", ".", "containing_oneof", "is", "not", "None", ":", "oneof_name", "=", "field", ".", "containing_oneof", ".", "name", "if", "oneof_name", "in", "names", ":", "raise", "ParseError", "(", "'Message type \"{0}\" should not have multiple \"{1}\" '", "'oneof fields.'", ".", "format", "(", "message", ".", "DESCRIPTOR", ".", "full_name", ",", "oneof_name", ")", ")", "names", ".", "append", "(", "oneof_name", ")", "value", "=", "js", "[", "name", "]", "if", "value", "is", "None", ":", "message", ".", "ClearField", "(", "field", ".", "name", ")", "continue", "# Parse field value.", "if", "_IsMapEntry", "(", "field", ")", ":", "message", ".", "ClearField", "(", "field", ".", "name", ")", "_ConvertMapFieldValue", "(", "value", ",", "message", ",", "field", ")", "elif", "field", ".", "label", "==", "descriptor", ".", "FieldDescriptor", ".", "LABEL_REPEATED", ":", "message", ".", "ClearField", "(", "field", ".", "name", ")", "if", "not", "isinstance", "(", "value", ",", "list", ")", ":", "raise", "ParseError", "(", "'repeated field {0} must be in [] which is '", "'{1}.'", ".", "format", "(", "name", ",", "value", ")", ")", "if", "field", ".", "cpp_type", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "# Repeated message field.", "for", "item", "in", "value", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", ".", "add", "(", ")", "# None is a null_value in Value.", "if", "(", "item", "is", "None", "and", "sub_message", ".", "DESCRIPTOR", ".", "full_name", "!=", "'google.protobuf.Value'", ")", ":", "raise", "ParseError", "(", "'null is not allowed to be used as an element'", "' in a repeated field.'", ")", "_ConvertMessage", "(", "item", ",", "sub_message", ")", "else", ":", "# Repeated scalar field.", "for", "item", "in", "value", ":", "if", "item", "is", "None", ":", "raise", "ParseError", "(", "'null is not allowed to be used as an element'", "' in a repeated field.'", ")", "getattr", "(", "message", ",", "field", ".", "name", ")", ".", "append", "(", "_ConvertScalarFieldValue", "(", "item", ",", "field", ")", ")", "elif", "field", ".", "cpp_type", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", "_ConvertMessage", "(", "value", ",", "sub_message", ")", "else", ":", "setattr", "(", "message", ",", "field", ".", "name", ",", "_ConvertScalarFieldValue", "(", "value", ",", "field", ")", ")", "except", "ParseError", "as", "e", ":", "if", "field", "and", "field", ".", "containing_oneof", "is", "None", ":", "raise", "ParseError", "(", "'Failed to parse {0} field: {1}'", ".", "format", "(", "name", ",", "e", ")", ")", "else", ":", "raise", 
"ParseError", "(", "str", "(", "e", ")", ")", "except", "ValueError", "as", "e", ":", "raise", "ParseError", "(", "'Failed to parse {0} field: {1}.'", ".", "format", "(", "name", ",", "e", ")", ")", "except", "TypeError", "as", "e", ":", "raise", "ParseError", "(", "'Failed to parse {0} field: {1}.'", ".", "format", "(", "name", ",", "e", ")", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_ConvertMessage
Convert a JSON object into a message. Args: value: A JSON object. message: A WKT or regular protocol message to record the data. Raises: ParseError: In case of convert problems.
typy/google/protobuf/json_format.py
def _ConvertMessage(value, message): """Convert a JSON object into a message. Args: value: A JSON object. message: A WKT or regular protocol message to record the data. Raises: ParseError: In case of convert problems. """ message_descriptor = message.DESCRIPTOR full_name = message_descriptor.full_name if _IsWrapperMessage(message_descriptor): _ConvertWrapperMessage(value, message) elif full_name in _WKTJSONMETHODS: _WKTJSONMETHODS[full_name][1](value, message) else: _ConvertFieldValuePair(value, message)
def _ConvertMessage(value, message): """Convert a JSON object into a message. Args: value: A JSON object. message: A WKT or regular protocol message to record the data. Raises: ParseError: In case of convert problems. """ message_descriptor = message.DESCRIPTOR full_name = message_descriptor.full_name if _IsWrapperMessage(message_descriptor): _ConvertWrapperMessage(value, message) elif full_name in _WKTJSONMETHODS: _WKTJSONMETHODS[full_name][1](value, message) else: _ConvertFieldValuePair(value, message)
[ "Convert", "a", "JSON", "object", "into", "a", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/json_format.py#L405-L422
[ "def", "_ConvertMessage", "(", "value", ",", "message", ")", ":", "message_descriptor", "=", "message", ".", "DESCRIPTOR", "full_name", "=", "message_descriptor", ".", "full_name", "if", "_IsWrapperMessage", "(", "message_descriptor", ")", ":", "_ConvertWrapperMessage", "(", "value", ",", "message", ")", "elif", "full_name", "in", "_WKTJSONMETHODS", ":", "_WKTJSONMETHODS", "[", "full_name", "]", "[", "1", "]", "(", "value", ",", "message", ")", "else", ":", "_ConvertFieldValuePair", "(", "value", ",", "message", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_ConvertValueMessage
Convert a JSON representation into Value message.
typy/google/protobuf/json_format.py
def _ConvertValueMessage(value, message): """Convert a JSON representation into Value message.""" if isinstance(value, dict): _ConvertStructMessage(value, message.struct_value) elif isinstance(value, list): _ConvertListValueMessage(value, message.list_value) elif value is None: message.null_value = 0 elif isinstance(value, bool): message.bool_value = value elif isinstance(value, six.string_types): message.string_value = value elif isinstance(value, _INT_OR_FLOAT): message.number_value = value else: raise ParseError('Unexpected type for Value message.')
def _ConvertValueMessage(value, message): """Convert a JSON representation into Value message.""" if isinstance(value, dict): _ConvertStructMessage(value, message.struct_value) elif isinstance(value, list): _ConvertListValueMessage(value, message.list_value) elif value is None: message.null_value = 0 elif isinstance(value, bool): message.bool_value = value elif isinstance(value, six.string_types): message.string_value = value elif isinstance(value, _INT_OR_FLOAT): message.number_value = value else: raise ParseError('Unexpected type for Value message.')
[ "Convert", "a", "JSON", "representation", "into", "Value", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/json_format.py#L459-L474
[ "def", "_ConvertValueMessage", "(", "value", ",", "message", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "_ConvertStructMessage", "(", "value", ",", "message", ".", "struct_value", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "_ConvertListValueMessage", "(", "value", ",", "message", ".", "list_value", ")", "elif", "value", "is", "None", ":", "message", ".", "null_value", "=", "0", "elif", "isinstance", "(", "value", ",", "bool", ")", ":", "message", ".", "bool_value", "=", "value", "elif", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "message", ".", "string_value", "=", "value", "elif", "isinstance", "(", "value", ",", "_INT_OR_FLOAT", ")", ":", "message", ".", "number_value", "=", "value", "else", ":", "raise", "ParseError", "(", "'Unexpected type for Value message.'", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_ConvertListValueMessage
Convert a JSON representation into ListValue message.
typy/google/protobuf/json_format.py
def _ConvertListValueMessage(value, message): """Convert a JSON representation into ListValue message.""" if not isinstance(value, list): raise ParseError( 'ListValue must be in [] which is {0}.'.format(value)) message.ClearField('values') for item in value: _ConvertValueMessage(item, message.values.add())
def _ConvertListValueMessage(value, message): """Convert a JSON representation into ListValue message.""" if not isinstance(value, list): raise ParseError( 'ListValue must be in [] which is {0}.'.format(value)) message.ClearField('values') for item in value: _ConvertValueMessage(item, message.values.add())
[ "Convert", "a", "JSON", "representation", "into", "ListValue", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/json_format.py#L477-L484
[ "def", "_ConvertListValueMessage", "(", "value", ",", "message", ")", ":", "if", "not", "isinstance", "(", "value", ",", "list", ")", ":", "raise", "ParseError", "(", "'ListValue must be in [] which is {0}.'", ".", "format", "(", "value", ")", ")", "message", ".", "ClearField", "(", "'values'", ")", "for", "item", "in", "value", ":", "_ConvertValueMessage", "(", "item", ",", "message", ".", "values", ".", "add", "(", ")", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_ConvertStructMessage
Convert a JSON representation into Struct message.
typy/google/protobuf/json_format.py
def _ConvertStructMessage(value, message): """Convert a JSON representation into Struct message.""" if not isinstance(value, dict): raise ParseError( 'Struct must be in a dict which is {0}.'.format(value)) for key in value: _ConvertValueMessage(value[key], message.fields[key]) return
def _ConvertStructMessage(value, message): """Convert a JSON representation into Struct message.""" if not isinstance(value, dict): raise ParseError( 'Struct must be in a dict which is {0}.'.format(value)) for key in value: _ConvertValueMessage(value[key], message.fields[key]) return
[ "Convert", "a", "JSON", "representation", "into", "Struct", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/json_format.py#L487-L494
[ "def", "_ConvertStructMessage", "(", "value", ",", "message", ")", ":", "if", "not", "isinstance", "(", "value", ",", "dict", ")", ":", "raise", "ParseError", "(", "'Struct must be in a dict which is {0}.'", ".", "format", "(", "value", ")", ")", "for", "key", "in", "value", ":", "_ConvertValueMessage", "(", "value", "[", "key", "]", ",", "message", ".", "fields", "[", "key", "]", ")", "return" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
update_config
Update config options with the provided dictionary of options.
nbserve/app.py
def update_config(new_config): """ Update config options with the provided dictionary of options. """ flask_app.base_config.update(new_config) # Check for changed working directory. if new_config.has_key('working_directory'): wd = os.path.abspath(new_config['working_directory']) if nbmanager.notebook_dir != wd: if not os.path.exists(wd): raise IOError('Path not found: %s' % wd) nbmanager.notebook_dir = wd
def update_config(new_config): """ Update config options with the provided dictionary of options. """ flask_app.base_config.update(new_config) # Check for changed working directory. if new_config.has_key('working_directory'): wd = os.path.abspath(new_config['working_directory']) if nbmanager.notebook_dir != wd: if not os.path.exists(wd): raise IOError('Path not found: %s' % wd) nbmanager.notebook_dir = wd
[ "Update", "config", "options", "with", "the", "provided", "dictionary", "of", "options", "." ]
robchambers/nbserve
python
https://github.com/robchambers/nbserve/blob/74d820fdd5dd7cdaafae22698dcba9487974bcc5/nbserve/app.py#L62-L73
[ "def", "update_config", "(", "new_config", ")", ":", "flask_app", ".", "base_config", ".", "update", "(", "new_config", ")", "# Check for changed working directory.", "if", "new_config", ".", "has_key", "(", "'working_directory'", ")", ":", "wd", "=", "os", ".", "path", ".", "abspath", "(", "new_config", "[", "'working_directory'", "]", ")", "if", "nbmanager", ".", "notebook_dir", "!=", "wd", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "wd", ")", ":", "raise", "IOError", "(", "'Path not found: %s'", "%", "wd", ")", "nbmanager", ".", "notebook_dir", "=", "wd" ]
74d820fdd5dd7cdaafae22698dcba9487974bcc5
valid
set_config
Reset config options to defaults, and then update (optionally) with the provided dictionary of options.
nbserve/app.py
def set_config(new_config={}): """ Reset config options to defaults, and then update (optionally) with the provided dictionary of options. """ # The default base configuration. flask_app.base_config = dict(working_directory='.', template='collapse-input', debug=False, port=None) update_config(new_config)
def set_config(new_config={}): """ Reset config options to defaults, and then update (optionally) with the provided dictionary of options. """ # The default base configuration. flask_app.base_config = dict(working_directory='.', template='collapse-input', debug=False, port=None) update_config(new_config)
[ "Reset", "config", "options", "to", "defaults", "and", "then", "update", "(", "optionally", ")", "with", "the", "provided", "dictionary", "of", "options", "." ]
robchambers/nbserve
python
https://github.com/robchambers/nbserve/blob/74d820fdd5dd7cdaafae22698dcba9487974bcc5/nbserve/app.py#L76-L84
[ "def", "set_config", "(", "new_config", "=", "{", "}", ")", ":", "# The default base configuration.", "flask_app", ".", "base_config", "=", "dict", "(", "working_directory", "=", "'.'", ",", "template", "=", "'collapse-input'", ",", "debug", "=", "False", ",", "port", "=", "None", ")", "update_config", "(", "new_config", ")" ]
74d820fdd5dd7cdaafae22698dcba9487974bcc5
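A brief usage sketch for the two config helpers above. The option values are illustrative; both functions mutate the module-level flask_app defined elsewhere in nbserve/app.py.

```python
# Reset to defaults, then apply overrides. The working directory must already
# exist, otherwise update_config raises IOError.
set_config({'working_directory': '.', 'debug': True})

# Later, tweak a single option without resetting the rest.
update_config({'template': 'full'})
```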
valid
Command.execute
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: ApplicationException: when execution fails for whatever reason.
pip_services_commons/commands/Command.py
def execute(self, correlation_id, args): """ Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: ApplicationException: when execution fails for whatever reason. """ # Validate arguments if self._schema != None: self.validate_and_throw_exception(correlation_id, args) # Call the function try: return self._function(correlation_id, args) # Intercept unhandled errors except Exception as ex: raise InvocationException( correlation_id, "EXEC_FAILED", "Execution " + self._name + " failed: " + str(ex) ).with_details("command", self._name).wrap(ex)
def execute(self, correlation_id, args): """ Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: ApplicationException: when execution fails for whatever reason. """ # Validate arguments if self._schema != None: self.validate_and_throw_exception(correlation_id, args) # Call the function try: return self._function(correlation_id, args) # Intercept unhandled errors except Exception as ex: raise InvocationException( correlation_id, "EXEC_FAILED", "Execution " + self._name + " failed: " + str(ex) ).with_details("command", self._name).wrap(ex)
[ "Executes", "the", "command", "given", "specific", "arguments", "as", "an", "input", ".", "Args", ":", "correlation_id", ":", "a", "unique", "correlation", "/", "transaction", "id", "args", ":", "command", "arguments", "Returns", ":", "an", "execution", "result", ".", "Raises", ":", "ApplicationException", ":", "when", "execution", "fails", "for", "whatever", "reason", "." ]
pip-services/pip-services-commons-python
python
https://github.com/pip-services/pip-services-commons-python/blob/2205b18c45c60372966c62c1f23ac4fbc31e11b3/pip_services_commons/commands/Command.py#L50-L76
[ "def", "execute", "(", "self", ",", "correlation_id", ",", "args", ")", ":", "# Validate arguments\r", "if", "self", ".", "_schema", "!=", "None", ":", "self", ".", "validate_and_throw_exception", "(", "correlation_id", ",", "args", ")", "# Call the function\r", "try", ":", "return", "self", ".", "_function", "(", "correlation_id", ",", "args", ")", "# Intercept unhandled errors\r", "except", "Exception", "as", "ex", ":", "raise", "InvocationException", "(", "correlation_id", ",", "\"EXEC_FAILED\"", ",", "\"Execution \"", "+", "self", ".", "_name", "+", "\" failed: \"", "+", "str", "(", "ex", ")", ")", ".", "with_details", "(", "\"command\"", ",", "self", ".", "_name", ")", ".", "wrap", "(", "ex", ")" ]
2205b18c45c60372966c62c1f23ac4fbc31e11b3
valid
optimize
**Optimization method based on Brent's method** First, a bracket (a b c) is sought that contains the minimum (b value is smaller than both a or c). The bracket is then recursively halfed. Here we apply some modifications to ensure our suggested point is not too close to either a or c, because that could be problematic with the local approximation. Also, if the bracket does not seem to include the minimum, it is expanded generously in the right direction until it covers it. Thus, this function is fail safe, and will always find a local minimum.
jbopt/optimize1d.py
def optimize(function, x0, cons=[], ftol=0.2, disp=0, plot=False): """ **Optimization method based on Brent's method** First, a bracket (a b c) is sought that contains the minimum (b value is smaller than both a or c). The bracket is then recursively halfed. Here we apply some modifications to ensure our suggested point is not too close to either a or c, because that could be problematic with the local approximation. Also, if the bracket does not seem to include the minimum, it is expanded generously in the right direction until it covers it. Thus, this function is fail safe, and will always find a local minimum. """ if disp > 0: print print ' ===== custom 1d optimization routine ==== ' print print 'initial suggestion on', function, ':', x0 points = [] values = [] def recordfunction(x): v = function(x) points.append(x) values.append(v) return v (a, b, c), (va, vb, vc) = seek_minimum_bracket(recordfunction, x0, cons=cons, ftol=ftol, disp=disp, plot=plot) if disp > 0: print '---------------------------------------------------' print 'found useable minimum bracker after %d evaluations:' % len(points), (a, b, c), (va, vb, vc) if disp > 2: if plot: plot_values(values, points, lastpoint=-1, ftol=ftol) pause() result = brent(recordfunction, a, b, c, va, vb, vc, cons=cons, ftol=ftol, disp=disp, plot=plot) if disp > 0: print '---------------------------------------------------' print 'found minimum after %d evaluations:' % len(points), result if disp > 1 or len(points) > 20: if plot: plot_values(values, points, lastpoint=-1, ftol=ftol) if disp > 2: pause() if disp > 0: print '---------------------------------------------------' print print ' ===== end of custom 1d optimization routine ==== ' print global neval neval += len(points) return result
def optimize(function, x0, cons=[], ftol=0.2, disp=0, plot=False): """ **Optimization method based on Brent's method** First, a bracket (a b c) is sought that contains the minimum (b value is smaller than both a or c). The bracket is then recursively halfed. Here we apply some modifications to ensure our suggested point is not too close to either a or c, because that could be problematic with the local approximation. Also, if the bracket does not seem to include the minimum, it is expanded generously in the right direction until it covers it. Thus, this function is fail safe, and will always find a local minimum. """ if disp > 0: print print ' ===== custom 1d optimization routine ==== ' print print 'initial suggestion on', function, ':', x0 points = [] values = [] def recordfunction(x): v = function(x) points.append(x) values.append(v) return v (a, b, c), (va, vb, vc) = seek_minimum_bracket(recordfunction, x0, cons=cons, ftol=ftol, disp=disp, plot=plot) if disp > 0: print '---------------------------------------------------' print 'found useable minimum bracker after %d evaluations:' % len(points), (a, b, c), (va, vb, vc) if disp > 2: if plot: plot_values(values, points, lastpoint=-1, ftol=ftol) pause() result = brent(recordfunction, a, b, c, va, vb, vc, cons=cons, ftol=ftol, disp=disp, plot=plot) if disp > 0: print '---------------------------------------------------' print 'found minimum after %d evaluations:' % len(points), result if disp > 1 or len(points) > 20: if plot: plot_values(values, points, lastpoint=-1, ftol=ftol) if disp > 2: pause() if disp > 0: print '---------------------------------------------------' print print ' ===== end of custom 1d optimization routine ==== ' print global neval neval += len(points) return result
[ "**", "Optimization", "method", "based", "on", "Brent", "s", "method", "**", "First", "a", "bracket", "(", "a", "b", "c", ")", "is", "sought", "that", "contains", "the", "minimum", "(", "b", "value", "is", "smaller", "than", "both", "a", "or", "c", ")", ".", "The", "bracket", "is", "then", "recursively", "halfed", ".", "Here", "we", "apply", "some", "modifications", "to", "ensure", "our", "suggested", "point", "is", "not", "too", "close", "to", "either", "a", "or", "c", "because", "that", "could", "be", "problematic", "with", "the", "local", "approximation", ".", "Also", "if", "the", "bracket", "does", "not", "seem", "to", "include", "the", "minimum", "it", "is", "expanded", "generously", "in", "the", "right", "direction", "until", "it", "covers", "it", ".", "Thus", "this", "function", "is", "fail", "safe", "and", "will", "always", "find", "a", "local", "minimum", "." ]
JohannesBuchner/jbopt
python
https://github.com/JohannesBuchner/jbopt/blob/11b721ea001625ad7820f71ff684723c71216646/jbopt/optimize1d.py#L270-L322
[ "def", "optimize", "(", "function", ",", "x0", ",", "cons", "=", "[", "]", ",", "ftol", "=", "0.2", ",", "disp", "=", "0", ",", "plot", "=", "False", ")", ":", "if", "disp", ">", "0", ":", "print", "print", "' ===== custom 1d optimization routine ==== '", "print", "print", "'initial suggestion on'", ",", "function", ",", "':'", ",", "x0", "points", "=", "[", "]", "values", "=", "[", "]", "def", "recordfunction", "(", "x", ")", ":", "v", "=", "function", "(", "x", ")", "points", ".", "append", "(", "x", ")", "values", ".", "append", "(", "v", ")", "return", "v", "(", "a", ",", "b", ",", "c", ")", ",", "(", "va", ",", "vb", ",", "vc", ")", "=", "seek_minimum_bracket", "(", "recordfunction", ",", "x0", ",", "cons", "=", "cons", ",", "ftol", "=", "ftol", ",", "disp", "=", "disp", ",", "plot", "=", "plot", ")", "if", "disp", ">", "0", ":", "print", "'---------------------------------------------------'", "print", "'found useable minimum bracker after %d evaluations:'", "%", "len", "(", "points", ")", ",", "(", "a", ",", "b", ",", "c", ")", ",", "(", "va", ",", "vb", ",", "vc", ")", "if", "disp", ">", "2", ":", "if", "plot", ":", "plot_values", "(", "values", ",", "points", ",", "lastpoint", "=", "-", "1", ",", "ftol", "=", "ftol", ")", "pause", "(", ")", "result", "=", "brent", "(", "recordfunction", ",", "a", ",", "b", ",", "c", ",", "va", ",", "vb", ",", "vc", ",", "cons", "=", "cons", ",", "ftol", "=", "ftol", ",", "disp", "=", "disp", ",", "plot", "=", "plot", ")", "if", "disp", ">", "0", ":", "print", "'---------------------------------------------------'", "print", "'found minimum after %d evaluations:'", "%", "len", "(", "points", ")", ",", "result", "if", "disp", ">", "1", "or", "len", "(", "points", ")", ">", "20", ":", "if", "plot", ":", "plot_values", "(", "values", ",", "points", ",", "lastpoint", "=", "-", "1", ",", "ftol", "=", "ftol", ")", "if", "disp", ">", "2", ":", "pause", "(", ")", "if", "disp", ">", "0", ":", "print", "'---------------------------------------------------'", "print", "print", "' ===== end of custom 1d optimization routine ==== '", "print", "global", "neval", "neval", "+=", "len", "(", "points", ")", "return", "result" ]
11b721ea001625ad7820f71ff684723c71216646
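A minimal, hedged sketch of calling optimize on a simple quadratic, matching the signature shown above.

```python
# The quadratic (x - 3)^2 has its minimum at x = 3, so optimize should
# return a point within roughly ftol of 3.0.
xmin = optimize(lambda x: (x - 3.0) ** 2, x0=0.0, ftol=0.01)
```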
valid
cache2errors
This function will attempt to identify 1 sigma errors, assuming your function is a chi^2. For this, the 1-sigma is bracketed. If you were smart enough to build a cache list of [x,y] into your function, you can pass it here. The values bracketing 1 sigma will be used as starting values. If no such values exist, e.g. because all values were very close to the optimum (good starting values), the bracket is expanded.
jbopt/optimize1d.py
def cache2errors(function, cache, disp=0, ftol=0.05): """ This function will attempt to identify 1 sigma errors, assuming your function is a chi^2. For this, the 1-sigma is bracketed. If you were smart enough to build a cache list of [x,y] into your function, you can pass it here. The values bracketing 1 sigma will be used as starting values. If no such values exist, e.g. because all values were very close to the optimum (good starting values), the bracket is expanded. """ vals = numpy.array(sorted(cache, key=lambda x: x[0])) if disp > 0: print ' --- cache2errors --- ', vals vi = vals[:,1].min() def renormedfunc(x): y = function(x) cache.append([x, y]) if disp > 1: print ' renormed:', x, y, y - (vi + 1) return y - (vi + 1) vals[:,1] -= vi + 1 lowmask = vals[:,1] < 0 highmask = vals[:,1] > 0 indices = numpy.arange(len(vals)) b, vb = vals[indices[lowmask][ 0],:] c, vc = vals[indices[lowmask][-1],:] if any(vals[:,0][highmask] < b): if disp > 0: print 'already have bracket' a, va = vals[indices[highmask][vals[:,0][highmask] < b][-1],:] else: a = b va = vb while b > -50: a = b - max(vals[-1,0] - vals[0,0], 1) va = renormedfunc(a) if disp > 0: print 'going further left: %.1f [%.1f] --> %.1f [%.1f]' % (b, vb, a, va) if va > 0: if disp > 0: print 'found outer part' break else: # need to go further b = a vb = va if disp > 0: print 'left bracket', a, b, va, vb if va > 0 and vb < 0: leftroot = scipy.optimize.brentq(renormedfunc, a, b, rtol=ftol) else: if disp > 0: print 'WARNING: border problem found.' leftroot = a if disp > 0: print 'left root', leftroot if any(vals[:,0][highmask] > c): if disp > 0: print 'already have bracket' d, vd = vals[indices[highmask][vals[:,0][highmask] > c][ 0],:] else: d = c vd = vc while c < 50: d = c + max(vals[-1,0] - vals[0,0], 1) vd = renormedfunc(d) if disp > 0: print 'going further right: %.1f [%.1f] --> %.1f [%.1f]' % (c, vc, d, vd) if vd > 0: if disp > 0: print 'found outer part' break else: # need to go further c = d vc = vd if disp > 0: print 'right bracket', c, d, vc, vd if vd > 0 and vc < 0: rightroot = scipy.optimize.brentq(renormedfunc, c, d, rtol=ftol) else: if disp > 0: print 'WARNING: border problem found.' rightroot = d if disp > 0: print 'right root', rightroot assert leftroot < rightroot if disp > 2: fullvals = numpy.array(sorted(cache, key=lambda x: x[0])) fullvals[:,1] -= vi + 1 plt.figure() plt.plot(fullvals[:,0], fullvals[:,1], 's') plt.plot(vals[:,0], vals[:,1], 'o') plt.xlim(a, d) plt.ylim(min(va, vb, vc, vd), max(va, vb, vc, vd)) ymin, ymax = plt.ylim() plt.vlines([leftroot, rightroot], ymin, ymax, linestyles='dotted') plt.savefig('cache_brent.pdf') return leftroot, rightroot
def cache2errors(function, cache, disp=0, ftol=0.05): """ This function will attempt to identify 1 sigma errors, assuming your function is a chi^2. For this, the 1-sigma is bracketed. If you were smart enough to build a cache list of [x,y] into your function, you can pass it here. The values bracketing 1 sigma will be used as starting values. If no such values exist, e.g. because all values were very close to the optimum (good starting values), the bracket is expanded. """ vals = numpy.array(sorted(cache, key=lambda x: x[0])) if disp > 0: print ' --- cache2errors --- ', vals vi = vals[:,1].min() def renormedfunc(x): y = function(x) cache.append([x, y]) if disp > 1: print ' renormed:', x, y, y - (vi + 1) return y - (vi + 1) vals[:,1] -= vi + 1 lowmask = vals[:,1] < 0 highmask = vals[:,1] > 0 indices = numpy.arange(len(vals)) b, vb = vals[indices[lowmask][ 0],:] c, vc = vals[indices[lowmask][-1],:] if any(vals[:,0][highmask] < b): if disp > 0: print 'already have bracket' a, va = vals[indices[highmask][vals[:,0][highmask] < b][-1],:] else: a = b va = vb while b > -50: a = b - max(vals[-1,0] - vals[0,0], 1) va = renormedfunc(a) if disp > 0: print 'going further left: %.1f [%.1f] --> %.1f [%.1f]' % (b, vb, a, va) if va > 0: if disp > 0: print 'found outer part' break else: # need to go further b = a vb = va if disp > 0: print 'left bracket', a, b, va, vb if va > 0 and vb < 0: leftroot = scipy.optimize.brentq(renormedfunc, a, b, rtol=ftol) else: if disp > 0: print 'WARNING: border problem found.' leftroot = a if disp > 0: print 'left root', leftroot if any(vals[:,0][highmask] > c): if disp > 0: print 'already have bracket' d, vd = vals[indices[highmask][vals[:,0][highmask] > c][ 0],:] else: d = c vd = vc while c < 50: d = c + max(vals[-1,0] - vals[0,0], 1) vd = renormedfunc(d) if disp > 0: print 'going further right: %.1f [%.1f] --> %.1f [%.1f]' % (c, vc, d, vd) if vd > 0: if disp > 0: print 'found outer part' break else: # need to go further c = d vc = vd if disp > 0: print 'right bracket', c, d, vc, vd if vd > 0 and vc < 0: rightroot = scipy.optimize.brentq(renormedfunc, c, d, rtol=ftol) else: if disp > 0: print 'WARNING: border problem found.' rightroot = d if disp > 0: print 'right root', rightroot assert leftroot < rightroot if disp > 2: fullvals = numpy.array(sorted(cache, key=lambda x: x[0])) fullvals[:,1] -= vi + 1 plt.figure() plt.plot(fullvals[:,0], fullvals[:,1], 's') plt.plot(vals[:,0], vals[:,1], 'o') plt.xlim(a, d) plt.ylim(min(va, vb, vc, vd), max(va, vb, vc, vd)) ymin, ymax = plt.ylim() plt.vlines([leftroot, rightroot], ymin, ymax, linestyles='dotted') plt.savefig('cache_brent.pdf') return leftroot, rightroot
[ "This", "function", "will", "attempt", "to", "identify", "1", "sigma", "errors", "assuming", "your", "function", "is", "a", "chi^2", ".", "For", "this", "the", "1", "-", "sigma", "is", "bracketed", ".", "If", "you", "were", "smart", "enough", "to", "build", "a", "cache", "list", "of", "[", "x", "y", "]", "into", "your", "function", "you", "can", "pass", "it", "here", ".", "The", "values", "bracketing", "1", "sigma", "will", "be", "used", "as", "starting", "values", ".", "If", "no", "such", "values", "exist", "e", ".", "g", ".", "because", "all", "values", "were", "very", "close", "to", "the", "optimum", "(", "good", "starting", "values", ")", "the", "bracket", "is", "expanded", "." ]
JohannesBuchner/jbopt
python
https://github.com/JohannesBuchner/jbopt/blob/11b721ea001625ad7820f71ff684723c71216646/jbopt/optimize1d.py#L324-L415
[ "def", "cache2errors", "(", "function", ",", "cache", ",", "disp", "=", "0", ",", "ftol", "=", "0.05", ")", ":", "vals", "=", "numpy", ".", "array", "(", "sorted", "(", "cache", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ")", "if", "disp", ">", "0", ":", "print", "' --- cache2errors --- '", ",", "vals", "vi", "=", "vals", "[", ":", ",", "1", "]", ".", "min", "(", ")", "def", "renormedfunc", "(", "x", ")", ":", "y", "=", "function", "(", "x", ")", "cache", ".", "append", "(", "[", "x", ",", "y", "]", ")", "if", "disp", ">", "1", ":", "print", "' renormed:'", ",", "x", ",", "y", ",", "y", "-", "(", "vi", "+", "1", ")", "return", "y", "-", "(", "vi", "+", "1", ")", "vals", "[", ":", ",", "1", "]", "-=", "vi", "+", "1", "lowmask", "=", "vals", "[", ":", ",", "1", "]", "<", "0", "highmask", "=", "vals", "[", ":", ",", "1", "]", ">", "0", "indices", "=", "numpy", ".", "arange", "(", "len", "(", "vals", ")", ")", "b", ",", "vb", "=", "vals", "[", "indices", "[", "lowmask", "]", "[", "0", "]", ",", ":", "]", "c", ",", "vc", "=", "vals", "[", "indices", "[", "lowmask", "]", "[", "-", "1", "]", ",", ":", "]", "if", "any", "(", "vals", "[", ":", ",", "0", "]", "[", "highmask", "]", "<", "b", ")", ":", "if", "disp", ">", "0", ":", "print", "'already have bracket'", "a", ",", "va", "=", "vals", "[", "indices", "[", "highmask", "]", "[", "vals", "[", ":", ",", "0", "]", "[", "highmask", "]", "<", "b", "]", "[", "-", "1", "]", ",", ":", "]", "else", ":", "a", "=", "b", "va", "=", "vb", "while", "b", ">", "-", "50", ":", "a", "=", "b", "-", "max", "(", "vals", "[", "-", "1", ",", "0", "]", "-", "vals", "[", "0", ",", "0", "]", ",", "1", ")", "va", "=", "renormedfunc", "(", "a", ")", "if", "disp", ">", "0", ":", "print", "'going further left: %.1f [%.1f] --> %.1f [%.1f]'", "%", "(", "b", ",", "vb", ",", "a", ",", "va", ")", "if", "va", ">", "0", ":", "if", "disp", ">", "0", ":", "print", "'found outer part'", "break", "else", ":", "# need to go further", "b", "=", "a", "vb", "=", "va", "if", "disp", ">", "0", ":", "print", "'left bracket'", ",", "a", ",", "b", ",", "va", ",", "vb", "if", "va", ">", "0", "and", "vb", "<", "0", ":", "leftroot", "=", "scipy", ".", "optimize", ".", "brentq", "(", "renormedfunc", ",", "a", ",", "b", ",", "rtol", "=", "ftol", ")", "else", ":", "if", "disp", ">", "0", ":", "print", "'WARNING: border problem found.'", "leftroot", "=", "a", "if", "disp", ">", "0", ":", "print", "'left root'", ",", "leftroot", "if", "any", "(", "vals", "[", ":", ",", "0", "]", "[", "highmask", "]", ">", "c", ")", ":", "if", "disp", ">", "0", ":", "print", "'already have bracket'", "d", ",", "vd", "=", "vals", "[", "indices", "[", "highmask", "]", "[", "vals", "[", ":", ",", "0", "]", "[", "highmask", "]", ">", "c", "]", "[", "0", "]", ",", ":", "]", "else", ":", "d", "=", "c", "vd", "=", "vc", "while", "c", "<", "50", ":", "d", "=", "c", "+", "max", "(", "vals", "[", "-", "1", ",", "0", "]", "-", "vals", "[", "0", ",", "0", "]", ",", "1", ")", "vd", "=", "renormedfunc", "(", "d", ")", "if", "disp", ">", "0", ":", "print", "'going further right: %.1f [%.1f] --> %.1f [%.1f]'", "%", "(", "c", ",", "vc", ",", "d", ",", "vd", ")", "if", "vd", ">", "0", ":", "if", "disp", ">", "0", ":", "print", "'found outer part'", "break", "else", ":", "# need to go further", "c", "=", "d", "vc", "=", "vd", "if", "disp", ">", "0", ":", "print", "'right bracket'", ",", "c", ",", "d", ",", "vc", ",", "vd", "if", "vd", ">", "0", "and", "vc", "<", "0", ":", "rightroot", "=", "scipy", ".", "optimize", 
".", "brentq", "(", "renormedfunc", ",", "c", ",", "d", ",", "rtol", "=", "ftol", ")", "else", ":", "if", "disp", ">", "0", ":", "print", "'WARNING: border problem found.'", "rightroot", "=", "d", "if", "disp", ">", "0", ":", "print", "'right root'", ",", "rightroot", "assert", "leftroot", "<", "rightroot", "if", "disp", ">", "2", ":", "fullvals", "=", "numpy", ".", "array", "(", "sorted", "(", "cache", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ")", ")", "fullvals", "[", ":", ",", "1", "]", "-=", "vi", "+", "1", "plt", ".", "figure", "(", ")", "plt", ".", "plot", "(", "fullvals", "[", ":", ",", "0", "]", ",", "fullvals", "[", ":", ",", "1", "]", ",", "'s'", ")", "plt", ".", "plot", "(", "vals", "[", ":", ",", "0", "]", ",", "vals", "[", ":", ",", "1", "]", ",", "'o'", ")", "plt", ".", "xlim", "(", "a", ",", "d", ")", "plt", ".", "ylim", "(", "min", "(", "va", ",", "vb", ",", "vc", ",", "vd", ")", ",", "max", "(", "va", ",", "vb", ",", "vc", ",", "vd", ")", ")", "ymin", ",", "ymax", "=", "plt", ".", "ylim", "(", ")", "plt", ".", "vlines", "(", "[", "leftroot", ",", "rightroot", "]", ",", "ymin", ",", "ymax", ",", "linestyles", "=", "'dotted'", ")", "plt", ".", "savefig", "(", "'cache_brent.pdf'", ")", "return", "leftroot", ",", "rightroot" ]
11b721ea001625ad7820f71ff684723c71216646
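A hedged sketch of how the cache list is meant to be shared between the objective and cache2errors. The objective here is a unit parabola, so the 1-sigma interval (where chi^2 rises by 1 above its minimum) is analytically [0, 2]; the exact bracketing points depend on which values optimize happens to evaluate.

```python
cache = []

def chi2(x):
    # chi^2 with minimum 0 at x = 1; record every evaluation in the cache.
    v = (x - 1.0) ** 2
    cache.append([x, v])
    return v

optimize(chi2, x0=0.0)              # minimize, filling the cache as a side effect
lo, hi = cache2errors(chi2, cache)  # approximately (0.0, 2.0) for this parabola
```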
valid
Timing.end_timing
Completes measuring time interval and updates counter.
pip_services_commons/count/Timing.py
def end_timing(self): """ Completes measuring time interval and updates counter. """ if self._callback != None: elapsed = time.clock() * 1000 - self._start self._callback.end_timing(self._counter, elapsed)
def end_timing(self): """ Completes measuring time interval and updates counter. """ if self._callback != None: elapsed = time.clock() * 1000 - self._start self._callback.end_timing(self._counter, elapsed)
[ "Completes", "measuring", "time", "interval", "and", "updates", "counter", "." ]
pip-services/pip-services-commons-python
python
https://github.com/pip-services/pip-services-commons-python/blob/2205b18c45c60372966c62c1f23ac4fbc31e11b3/pip_services_commons/count/Timing.py#L37-L44
[ "def", "end_timing", "(", "self", ")", ":", "if", "self", ".", "_callback", "!=", "None", ":", "elapsed", "=", "time", ".", "clock", "(", ")", "*", "1000", "-", "self", ".", "_start", "self", ".", "_callback", ".", "end_timing", "(", "self", ".", "_counter", ",", "elapsed", ")" ]
2205b18c45c60372966c62c1f23ac4fbc31e11b3
valid
Duration.ToJsonString
Converts Duration to string format. Returns: A string converted from self. The string format will contains 3, 6, or 9 fractional digits depending on the precision required to represent the exact Duration value. For example: "1s", "1.010s", "1.000000100s", "-3.100s"
typy/google/protobuf/internal/well_known_types.py
def ToJsonString(self): """Converts Duration to string format. Returns: A string converted from self. The string format will contains 3, 6, or 9 fractional digits depending on the precision required to represent the exact Duration value. For example: "1s", "1.010s", "1.000000100s", "-3.100s" """ if self.seconds < 0 or self.nanos < 0: result = '-' seconds = - self.seconds + int((0 - self.nanos) // 1e9) nanos = (0 - self.nanos) % 1e9 else: result = '' seconds = self.seconds + int(self.nanos // 1e9) nanos = self.nanos % 1e9 result += '%d' % seconds if (nanos % 1e9) == 0: # If there are 0 fractional digits, the fractional # point '.' should be omitted when serializing. return result + 's' if (nanos % 1e6) == 0: # Serialize 3 fractional digits. return result + '.%03ds' % (nanos / 1e6) if (nanos % 1e3) == 0: # Serialize 6 fractional digits. return result + '.%06ds' % (nanos / 1e3) # Serialize 9 fractional digits. return result + '.%09ds' % nanos
def ToJsonString(self): """Converts Duration to string format. Returns: A string converted from self. The string format will contains 3, 6, or 9 fractional digits depending on the precision required to represent the exact Duration value. For example: "1s", "1.010s", "1.000000100s", "-3.100s" """ if self.seconds < 0 or self.nanos < 0: result = '-' seconds = - self.seconds + int((0 - self.nanos) // 1e9) nanos = (0 - self.nanos) % 1e9 else: result = '' seconds = self.seconds + int(self.nanos // 1e9) nanos = self.nanos % 1e9 result += '%d' % seconds if (nanos % 1e9) == 0: # If there are 0 fractional digits, the fractional # point '.' should be omitted when serializing. return result + 's' if (nanos % 1e6) == 0: # Serialize 3 fractional digits. return result + '.%03ds' % (nanos / 1e6) if (nanos % 1e3) == 0: # Serialize 6 fractional digits. return result + '.%06ds' % (nanos / 1e3) # Serialize 9 fractional digits. return result + '.%09ds' % nanos
[ "Converts", "Duration", "to", "string", "format", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/internal/well_known_types.py#L241-L270
[ "def", "ToJsonString", "(", "self", ")", ":", "if", "self", ".", "seconds", "<", "0", "or", "self", ".", "nanos", "<", "0", ":", "result", "=", "'-'", "seconds", "=", "-", "self", ".", "seconds", "+", "int", "(", "(", "0", "-", "self", ".", "nanos", ")", "//", "1e9", ")", "nanos", "=", "(", "0", "-", "self", ".", "nanos", ")", "%", "1e9", "else", ":", "result", "=", "''", "seconds", "=", "self", ".", "seconds", "+", "int", "(", "self", ".", "nanos", "//", "1e9", ")", "nanos", "=", "self", ".", "nanos", "%", "1e9", "result", "+=", "'%d'", "%", "seconds", "if", "(", "nanos", "%", "1e9", ")", "==", "0", ":", "# If there are 0 fractional digits, the fractional", "# point '.' should be omitted when serializing.", "return", "result", "+", "'s'", "if", "(", "nanos", "%", "1e6", ")", "==", "0", ":", "# Serialize 3 fractional digits.", "return", "result", "+", "'.%03ds'", "%", "(", "nanos", "/", "1e6", ")", "if", "(", "nanos", "%", "1e3", ")", "==", "0", ":", "# Serialize 6 fractional digits.", "return", "result", "+", "'.%06ds'", "%", "(", "nanos", "/", "1e3", ")", "# Serialize 9 fractional digits.", "return", "result", "+", "'.%09ds'", "%", "nanos" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
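A small sketch of the precision rules described in the docstring above. The duration_pb2 import path is an assumption for this fork, as is the injection of the ToJsonString mixin onto Duration instances; the expected outputs follow directly from the code shown.

```python
from typy.google.protobuf import duration_pb2

d = duration_pb2.Duration(seconds=1, nanos=10000000)
print(d.ToJsonString())   # "1.010s" -- 3 fractional digits, since nanos is a multiple of 1e6

d.nanos = 100
print(d.ToJsonString())   # "1.000000100s" -- full 9 fractional digits
```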
valid
Duration.FromJsonString
Converts a string to Duration. Args: value: A string to be converted. The string must end with 's'. Any fractional digits (or none) are accepted as long as they fit into precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s Raises: ParseError: On parsing problems.
typy/google/protobuf/internal/well_known_types.py
def FromJsonString(self, value): """Converts a string to Duration. Args: value: A string to be converted. The string must end with 's'. Any fractional digits (or none) are accepted as long as they fit into precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s Raises: ParseError: On parsing problems. """ if len(value) < 1 or value[-1] != 's': raise ParseError( 'Duration must end with letter "s": {0}.'.format(value)) try: pos = value.find('.') if pos == -1: self.seconds = int(value[:-1]) self.nanos = 0 else: self.seconds = int(value[:pos]) if value[0] == '-': self.nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9)) else: self.nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9)) except ValueError: raise ParseError( 'Couldn\'t parse duration: {0}.'.format(value))
def FromJsonString(self, value): """Converts a string to Duration. Args: value: A string to be converted. The string must end with 's'. Any fractional digits (or none) are accepted as long as they fit into precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s Raises: ParseError: On parsing problems. """ if len(value) < 1 or value[-1] != 's': raise ParseError( 'Duration must end with letter "s": {0}.'.format(value)) try: pos = value.find('.') if pos == -1: self.seconds = int(value[:-1]) self.nanos = 0 else: self.seconds = int(value[:pos]) if value[0] == '-': self.nanos = int(round(float('-0{0}'.format(value[pos: -1])) *1e9)) else: self.nanos = int(round(float('0{0}'.format(value[pos: -1])) *1e9)) except ValueError: raise ParseError( 'Couldn\'t parse duration: {0}.'.format(value))
[ "Converts", "a", "string", "to", "Duration", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/internal/well_known_types.py#L272-L299
[ "def", "FromJsonString", "(", "self", ",", "value", ")", ":", "if", "len", "(", "value", ")", "<", "1", "or", "value", "[", "-", "1", "]", "!=", "'s'", ":", "raise", "ParseError", "(", "'Duration must end with letter \"s\": {0}.'", ".", "format", "(", "value", ")", ")", "try", ":", "pos", "=", "value", ".", "find", "(", "'.'", ")", "if", "pos", "==", "-", "1", ":", "self", ".", "seconds", "=", "int", "(", "value", "[", ":", "-", "1", "]", ")", "self", ".", "nanos", "=", "0", "else", ":", "self", ".", "seconds", "=", "int", "(", "value", "[", ":", "pos", "]", ")", "if", "value", "[", "0", "]", "==", "'-'", ":", "self", ".", "nanos", "=", "int", "(", "round", "(", "float", "(", "'-0{0}'", ".", "format", "(", "value", "[", "pos", ":", "-", "1", "]", ")", ")", "*", "1e9", ")", ")", "else", ":", "self", ".", "nanos", "=", "int", "(", "round", "(", "float", "(", "'0{0}'", ".", "format", "(", "value", "[", "pos", ":", "-", "1", "]", ")", ")", "*", "1e9", ")", ")", "except", "ValueError", ":", "raise", "ParseError", "(", "'Couldn\\'t parse duration: {0}.'", ".", "format", "(", "value", ")", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
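And the inverse direction, again hedged on the duration_pb2 import path and mixin injection.

```python
d = duration_pb2.Duration()
d.FromJsonString('-3.100s')
print(d.seconds)   # -3
print(d.nanos)     # -100000000
```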
valid
FieldMask.FromJsonString
Converts string to FieldMask according to proto3 JSON spec.
typy/google/protobuf/internal/well_known_types.py
def FromJsonString(self, value): """Converts string to FieldMask according to proto3 JSON spec.""" self.Clear() for path in value.split(','): self.paths.append(path)
def FromJsonString(self, value): """Converts string to FieldMask according to proto3 JSON spec.""" self.Clear() for path in value.split(','): self.paths.append(path)
[ "Converts", "string", "to", "FieldMask", "according", "to", "proto3", "JSON", "spec", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/internal/well_known_types.py#L384-L388
[ "def", "FromJsonString", "(", "self", ",", "value", ")", ":", "self", ".", "Clear", "(", ")", "for", "path", "in", "value", ".", "split", "(", "','", ")", ":", "self", ".", "paths", ".", "append", "(", "path", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
get_doc
Return a CouchDB document, given its ID, revision and database name.
relax/couchdb/shortcuts.py
def get_doc(doc_id, db_name, server_url='http://127.0.0.1:5984/', rev=None): """Return a CouchDB document, given its ID, revision and database name.""" db = get_server(server_url)[db_name] if rev: headers, response = db.resource.get(doc_id, rev=rev) return couchdb.client.Document(response) return db[doc_id]
def get_doc(doc_id, db_name, server_url='http://127.0.0.1:5984/', rev=None): """Return a CouchDB document, given its ID, revision and database name.""" db = get_server(server_url)[db_name] if rev: headers, response = db.resource.get(doc_id, rev=rev) return couchdb.client.Document(response) return db[doc_id]
[ "Return", "a", "CouchDB", "document", "given", "its", "ID", "revision", "and", "database", "name", "." ]
zvoase/django-relax
python
https://github.com/zvoase/django-relax/blob/10bb37bf3a512b290816856a6877c17fa37e930f/relax/couchdb/shortcuts.py#L20-L26
[ "def", "get_doc", "(", "doc_id", ",", "db_name", ",", "server_url", "=", "'http://127.0.0.1:5984/'", ",", "rev", "=", "None", ")", ":", "db", "=", "get_server", "(", "server_url", ")", "[", "db_name", "]", "if", "rev", ":", "headers", ",", "response", "=", "db", ".", "resource", ".", "get", "(", "doc_id", ",", "rev", "=", "rev", ")", "return", "couchdb", ".", "client", ".", "Document", "(", "response", ")", "return", "db", "[", "doc_id", "]" ]
10bb37bf3a512b290816856a6877c17fa37e930f
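A short usage sketch for get_doc; the document ID, database name, and revision below are hypothetical, and a CouchDB server is assumed to be running at the default URL.

```python
doc = get_doc('user:42', 'mydb')                  # latest revision
old = get_doc('user:42', 'mydb', rev='2-9af1bc')  # a specific earlier revision
```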
valid
get_or_create_db
Return an (optionally existing) CouchDB database instance.
relax/couchdb/shortcuts.py
def get_or_create_db(db_name, server_url='http://127.0.0.1:5984/'): """Return an (optionally existing) CouchDB database instance.""" server = get_server(server_url) if db_name in server: return server[db_name] return server.create(db_name)
def get_or_create_db(db_name, server_url='http://127.0.0.1:5984/'): """Return an (optionally existing) CouchDB database instance.""" server = get_server(server_url) if db_name in server: return server[db_name] return server.create(db_name)
[ "Return", "an", "(", "optionally", "existing", ")", "CouchDB", "database", "instance", "." ]
zvoase/django-relax
python
https://github.com/zvoase/django-relax/blob/10bb37bf3a512b290816856a6877c17fa37e930f/relax/couchdb/shortcuts.py#L28-L33
[ "def", "get_or_create_db", "(", "db_name", ",", "server_url", "=", "'http://127.0.0.1:5984/'", ")", ":", "server", "=", "get_server", "(", "server_url", ")", "if", "db_name", "in", "server", ":", "return", "server", "[", "db_name", "]", "return", "server", ".", "create", "(", "db_name", ")" ]
10bb37bf3a512b290816856a6877c17fa37e930f
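Assuming a CouchDB server is reachable at the default http://127.0.0.1:5984/ and the relax package (plus couchdb-python) is installed, the two shortcuts above combine like this; the database and document names are made up:

from relax.couchdb.shortcuts import get_or_create_db, get_doc

db = get_or_create_db('example_db')        # created on first use, reused afterwards
db['doc-1'] = {'title': 'hello'}           # couchdb-python dict-style insert
doc = get_doc('doc-1', 'example_db')       # latest revision by default
print(doc['title'])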
valid
read
Give reST format README for pypi.
setup.py
def read(readme): """Give reST format README for pypi.""" extend = os.path.splitext(readme)[1] if (extend == '.rst'): import codecs return codecs.open(readme, 'r', 'utf-8').read() elif (extend == '.md'): import pypandoc return pypandoc.convert(readme, 'rst')
def read(readme): """Give reST format README for pypi.""" extend = os.path.splitext(readme)[1] if (extend == '.rst'): import codecs return codecs.open(readme, 'r', 'utf-8').read() elif (extend == '.md'): import pypandoc return pypandoc.convert(readme, 'rst')
[ "Give", "reST", "format", "README", "for", "pypi", "." ]
crazy-canux/arguspy
python
https://github.com/crazy-canux/arguspy/blob/e9486b5df61978a990d56bf43de35f3a4cdefcc3/setup.py#L29-L37
[ "def", "read", "(", "readme", ")", ":", "extend", "=", "os", ".", "path", ".", "splitext", "(", "readme", ")", "[", "1", "]", "if", "(", "extend", "==", "'.rst'", ")", ":", "import", "codecs", "return", "codecs", ".", "open", "(", "readme", ",", "'r'", ",", "'utf-8'", ")", ".", "read", "(", ")", "elif", "(", "extend", "==", "'.md'", ")", ":", "import", "pypandoc", "return", "pypandoc", ".", "convert", "(", "readme", ",", "'rst'", ")" ]
e9486b5df61978a990d56bf43de35f3a4cdefcc3
valid
main
Register your own mode and handle method here.
scripts/check_mssql.py
def main(): """Register your own mode and handle method here.""" plugin = Register() if plugin.args.option == 'sql': plugin.sql_handle() elif plugin.args.option == 'database-used': plugin.database_used_handle() elif plugin.args.option == 'databaselog-used': plugin.database_log_used_handle() else: plugin.unknown("Unknown actions.")
def main(): """Register your own mode and handle method here.""" plugin = Register() if plugin.args.option == 'sql': plugin.sql_handle() elif plugin.args.option == 'database-used': plugin.database_used_handle() elif plugin.args.option == 'databaselog-used': plugin.database_log_used_handle() else: plugin.unknown("Unknown actions.")
[ "Register", "your", "own", "mode", "and", "handle", "method", "here", "." ]
crazy-canux/arguspy
python
https://github.com/crazy-canux/arguspy/blob/e9486b5df61978a990d56bf43de35f3a4cdefcc3/scripts/check_mssql.py#L475-L485
[ "def", "main", "(", ")", ":", "plugin", "=", "Register", "(", ")", "if", "plugin", ".", "args", ".", "option", "==", "'sql'", ":", "plugin", ".", "sql_handle", "(", ")", "elif", "plugin", ".", "args", ".", "option", "==", "'database-used'", ":", "plugin", ".", "database_used_handle", "(", ")", "elif", "plugin", ".", "args", ".", "option", "==", "'databaselog-used'", ":", "plugin", ".", "database_log_used_handle", "(", ")", "else", ":", "plugin", ".", "unknown", "(", "\"Unknown actions.\"", ")" ]
e9486b5df61978a990d56bf43de35f3a4cdefcc3
valid
Parser.parse
:param args: arguments :type args: None or string or list of string :return: formatted arguments if specified else ``self.default_args`` :rtype: list of string
headlessvim/arguments.py
def parse(self, args): """ :param args: arguments :type args: None or string or list of string :return: formatted arguments if specified else ``self.default_args`` :rtype: list of string """ if args is None: args = self._default_args if isinstance(args, six.string_types): args = shlex.split(args) return args
def parse(self, args): """ :param args: arguments :type args: None or string or list of string :return: formatted arguments if specified else ``self.default_args`` :rtype: list of string """ if args is None: args = self._default_args if isinstance(args, six.string_types): args = shlex.split(args) return args
[ ":", "param", "args", ":", "arguments", ":", "type", "args", ":", "None", "or", "string", "or", "list", "of", "string", ":", "return", ":", "formatted", "arguments", "if", "specified", "else", "self", ".", "default_args", ":", "rtype", ":", "list", "of", "string" ]
manicmaniac/headlessvim
python
https://github.com/manicmaniac/headlessvim/blob/3e4657f95d981ddf21fd285b7e1b9da2154f9cb9/headlessvim/arguments.py#L24-L35
[ "def", "parse", "(", "self", ",", "args", ")", ":", "if", "args", "is", "None", ":", "args", "=", "self", ".", "_default_args", "if", "isinstance", "(", "args", ",", "six", ".", "string_types", ")", ":", "args", "=", "shlex", ".", "split", "(", "args", ")", "return", "args" ]
3e4657f95d981ddf21fd285b7e1b9da2154f9cb9
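Parser.parse normalises three input shapes (None, a single string, an existing list); here are the same branches as a standalone, runnable sketch with a made-up default argument list:

import shlex

import six


def normalise_args(args, default_args=('-u', 'NONE')):
    # Same three cases as Parser.parse above.
    if args is None:
        args = list(default_args)
    if isinstance(args, six.string_types):
        args = shlex.split(args)
    return args

assert normalise_args(None) == ['-u', 'NONE']
assert normalise_args('-u NONE -N') == ['-u', 'NONE', '-N']
assert normalise_args(['-N']) == ['-N']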
valid
PrivateClient._request
Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses
cbexchange/private.py
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) if method == 'get': response = get(uri, auth=self.auth, params=kwargs.get('params', None)) elif method == 'post': response = post(uri, auth=self.auth, json=kwargs.get('data', None)) else: response = delete(uri, auth=self.auth, json=kwargs.get('data', None)) return self._handle_response(response).json()
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) if method == 'get': response = get(uri, auth=self.auth, params=kwargs.get('params', None)) elif method == 'post': response = post(uri, auth=self.auth, json=kwargs.get('data', None)) else: response = delete(uri, auth=self.auth, json=kwargs.get('data', None)) return self._handle_response(response).json()
[ "Sends", "an", "HTTP", "request", "to", "the", "REST", "API", "and", "receives", "the", "requested", "data", "." ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/private.py#L72-L89
[ "def", "_request", "(", "self", ",", "method", ",", "*", "relative_path_parts", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "self", ".", "_create_api_uri", "(", "*", "relative_path_parts", ")", "if", "method", "==", "'get'", ":", "response", "=", "get", "(", "uri", ",", "auth", "=", "self", ".", "auth", ",", "params", "=", "kwargs", ".", "get", "(", "'params'", ",", "None", ")", ")", "elif", "method", "==", "'post'", ":", "response", "=", "post", "(", "uri", ",", "auth", "=", "self", ".", "auth", ",", "json", "=", "kwargs", ".", "get", "(", "'data'", ",", "None", ")", ")", "else", ":", "response", "=", "delete", "(", "uri", ",", "auth", "=", "self", ".", "auth", ",", "json", "=", "kwargs", ".", "get", "(", "'data'", ",", "None", ")", ")", "return", "self", ".", "_handle_response", "(", "response", ")", ".", "json", "(", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
PrivateClient._place_order
`<https://docs.exchange.coinbase.com/#orders>`_
cbexchange/private.py
def _place_order(self, side, product_id='BTC-USD', client_oid=None, type=None, stp=None, price=None, size=None, funds=None, time_in_force=None, cancel_after=None, post_only=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" data = { 'side':side, 'product_id':product_id, 'client_oid':client_oid, 'type':type, 'stp':stp, 'price':price, 'size':size, 'funds':funds, 'time_in_force':time_in_force, 'cancel_after':cancel_after, 'post_only':post_only } return self._post('orders', data=data)
def _place_order(self, side, product_id='BTC-USD', client_oid=None, type=None, stp=None, price=None, size=None, funds=None, time_in_force=None, cancel_after=None, post_only=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" data = { 'side':side, 'product_id':product_id, 'client_oid':client_oid, 'type':type, 'stp':stp, 'price':price, 'size':size, 'funds':funds, 'time_in_force':time_in_force, 'cancel_after':cancel_after, 'post_only':post_only } return self._post('orders', data=data)
[ "<https", ":", "//", "docs", ".", "exchange", ".", "coinbase", ".", "com", "/", "#orders", ">", "_" ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/private.py#L107-L133
[ "def", "_place_order", "(", "self", ",", "side", ",", "product_id", "=", "'BTC-USD'", ",", "client_oid", "=", "None", ",", "type", "=", "None", ",", "stp", "=", "None", ",", "price", "=", "None", ",", "size", "=", "None", ",", "funds", "=", "None", ",", "time_in_force", "=", "None", ",", "cancel_after", "=", "None", ",", "post_only", "=", "None", ")", ":", "data", "=", "{", "'side'", ":", "side", ",", "'product_id'", ":", "product_id", ",", "'client_oid'", ":", "client_oid", ",", "'type'", ":", "type", ",", "'stp'", ":", "stp", ",", "'price'", ":", "price", ",", "'size'", ":", "size", ",", "'funds'", ":", "funds", ",", "'time_in_force'", ":", "time_in_force", ",", "'cancel_after'", ":", "cancel_after", ",", "'post_only'", ":", "post_only", "}", "return", "self", ".", "_post", "(", "'orders'", ",", "data", "=", "data", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
PrivateClient.place_limit_order
`<https://docs.exchange.coinbase.com/#orders>`_
cbexchange/private.py
def place_limit_order(self, side, price, size, product_id='BTC-USD', client_oid=None, stp=None, time_in_force=None, cancel_after=None, post_only=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" return self._place_order(side, product_id=product_id, client_oid=client_oid, type='limit', stp=stp, price=price, size=size, time_in_force=time_in_force, cancel_after=cancel_after, post_only=post_only)
def place_limit_order(self, side, price, size, product_id='BTC-USD', client_oid=None, stp=None, time_in_force=None, cancel_after=None, post_only=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" return self._place_order(side, product_id=product_id, client_oid=client_oid, type='limit', stp=stp, price=price, size=size, time_in_force=time_in_force, cancel_after=cancel_after, post_only=post_only)
[ "<https", ":", "//", "docs", ".", "exchange", ".", "coinbase", ".", "com", "/", "#orders", ">", "_" ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/private.py#L135-L155
[ "def", "place_limit_order", "(", "self", ",", "side", ",", "price", ",", "size", ",", "product_id", "=", "'BTC-USD'", ",", "client_oid", "=", "None", ",", "stp", "=", "None", ",", "time_in_force", "=", "None", ",", "cancel_after", "=", "None", ",", "post_only", "=", "None", ")", ":", "return", "self", ".", "_place_order", "(", "side", ",", "product_id", "=", "product_id", ",", "client_oid", "=", "client_oid", ",", "type", "=", "'limit'", ",", "stp", "=", "stp", ",", "price", "=", "price", ",", "size", "=", "size", ",", "time_in_force", "=", "time_in_force", ",", "cancel_after", "=", "cancel_after", ",", "post_only", "=", "post_only", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
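A usage sketch for the limit-order helper above; how a PrivateClient instance is constructed and authenticated is not shown in these records, so the client is taken as a ready-made argument and the price/size values are illustrative:

def buy_small_amount(client):
    # `client` is assumed to be an already-authenticated PrivateClient.
    return client.place_limit_order(
        side='buy',
        price='250.00',       # passed straight through into the JSON body
        size='0.01',
        product_id='BTC-USD',
        post_only=True,       # rest on the book instead of taking liquidity
    )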
valid
PrivateClient.place_market_order
`<https://docs.exchange.coinbase.com/#orders>`_
cbexchange/private.py
def place_market_order(self, side, product_id='BTC-USD', size=None, funds=None, client_oid=None, stp=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" return self._place_order(type='market', side=size, product_id=product_id, size=size, funds=funds, client_oid=client_oid, stp=stp)
def place_market_order(self, side, product_id='BTC-USD', size=None, funds=None, client_oid=None, stp=None): """`<https://docs.exchange.coinbase.com/#orders>`_""" return self._place_order(type='market', side=size, product_id=product_id, size=size, funds=funds, client_oid=client_oid, stp=stp)
[ "<https", ":", "//", "docs", ".", "exchange", ".", "coinbase", ".", "com", "/", "#orders", ">", "_" ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/private.py#L157-L171
[ "def", "place_market_order", "(", "self", ",", "side", ",", "product_id", "=", "'BTC-USD'", ",", "size", "=", "None", ",", "funds", "=", "None", ",", "client_oid", "=", "None", ",", "stp", "=", "None", ")", ":", "return", "self", ".", "_place_order", "(", "type", "=", "'market'", ",", "side", "=", "size", ",", "product_id", "=", "product_id", ",", "size", "=", "size", ",", "funds", "=", "funds", ",", "client_oid", "=", "client_oid", ",", "stp", "=", "stp", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
PrivateClient._deposit_withdraw
`<https://docs.exchange.coinbase.com/#depositwithdraw>`_
cbexchange/private.py
def _deposit_withdraw(self, type, amount, coinbase_account_id): """`<https://docs.exchange.coinbase.com/#depositwithdraw>`_""" data = { 'type':type, 'amount':amount, 'coinbase_account_id':coinbase_account_id } return self._post('transfers', data=data)
def _deposit_withdraw(self, type, amount, coinbase_account_id): """`<https://docs.exchange.coinbase.com/#depositwithdraw>`_""" data = { 'type':type, 'amount':amount, 'coinbase_account_id':coinbase_account_id } return self._post('transfers', data=data)
[ "<https", ":", "//", "docs", ".", "exchange", ".", "coinbase", ".", "com", "/", "#depositwithdraw", ">", "_" ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/private.py#L193-L200
[ "def", "_deposit_withdraw", "(", "self", ",", "type", ",", "amount", ",", "coinbase_account_id", ")", ":", "data", "=", "{", "'type'", ":", "type", ",", "'amount'", ":", "amount", ",", "'coinbase_account_id'", ":", "coinbase_account_id", "}", "return", "self", ".", "_post", "(", "'transfers'", ",", "data", "=", "data", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
PrivateClient._new_report
`<https://docs.exchange.coinbase.com/#create-a-new-report>`_
cbexchange/private.py
def _new_report(self, type, start_date, end_date, product_id='BTC-USD', account_id=None, format=None, email=None): """`<https://docs.exchange.coinbase.com/#create-a-new-report>`_""" data = { 'type':type, 'start_date':self._format_iso_time(start_date), 'end_date':self._format_iso_time(end_date), 'product_id':product_id, 'account_id':account_id, 'format':format, 'email':email } return self._post('reports', data=data)
def _new_report(self, type, start_date, end_date, product_id='BTC-USD', account_id=None, format=None, email=None): """`<https://docs.exchange.coinbase.com/#create-a-new-report>`_""" data = { 'type':type, 'start_date':self._format_iso_time(start_date), 'end_date':self._format_iso_time(end_date), 'product_id':product_id, 'account_id':account_id, 'format':format, 'email':email } return self._post('reports', data=data)
[ "<https", ":", "//", "docs", ".", "exchange", ".", "coinbase", ".", "com", "/", "#create", "-", "a", "-", "new", "-", "report", ">", "_" ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/private.py#L210-L228
[ "def", "_new_report", "(", "self", ",", "type", ",", "start_date", ",", "end_date", ",", "product_id", "=", "'BTC-USD'", ",", "account_id", "=", "None", ",", "format", "=", "None", ",", "email", "=", "None", ")", ":", "data", "=", "{", "'type'", ":", "type", ",", "'start_date'", ":", "self", ".", "_format_iso_time", "(", "start_date", ")", ",", "'end_date'", ":", "self", ".", "_format_iso_time", "(", "end_date", ")", ",", "'product_id'", ":", "product_id", ",", "'account_id'", ":", "account_id", ",", "'format'", ":", "format", ",", "'email'", ":", "email", "}", "return", "self", ".", "_post", "(", "'reports'", ",", "data", "=", "data", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
PrivateClient.new_fills_report
`<https://docs.exchange.coinbase.com/#create-a-new-report>`_
cbexchange/private.py
def new_fills_report(self, start_date, end_date, account_id=None, product_id='BTC-USD', format=None, email=None): """`<https://docs.exchange.coinbase.com/#create-a-new-report>`_""" return self._new_report(start_date, 'fills', end_date, account_id, product_id, format, email)
def new_fills_report(self, start_date, end_date, account_id=None, product_id='BTC-USD', format=None, email=None): """`<https://docs.exchange.coinbase.com/#create-a-new-report>`_""" return self._new_report(start_date, 'fills', end_date, account_id, product_id, format, email)
[ "<https", ":", "//", "docs", ".", "exchange", ".", "coinbase", ".", "com", "/", "#create", "-", "a", "-", "new", "-", "report", ">", "_" ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/private.py#L230-L244
[ "def", "new_fills_report", "(", "self", ",", "start_date", ",", "end_date", ",", "account_id", "=", "None", ",", "product_id", "=", "'BTC-USD'", ",", "format", "=", "None", ",", "email", "=", "None", ")", ":", "return", "self", ".", "_new_report", "(", "start_date", ",", "'fills'", ",", "end_date", ",", "account_id", ",", "product_id", ",", "format", ",", "email", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
PrivatePaginationClient._request
Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses
cbexchange/private.py
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) if method == 'get': response = get(uri, auth=self.auth, params=kwargs.get('params', None)) elif method == 'post': response = post(uri, auth=self.auth, json=kwargs.get('data', None)) else: response = delete(uri, auth=self.auth, json=kwargs.get('data', None)) self.is_initial = False self.before_cursor = response.headers.get('cb-before', None) self.after_cursor = response.headers.get('cb-after', None) return self._handle_response(response).json()
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) if method == 'get': response = get(uri, auth=self.auth, params=kwargs.get('params', None)) elif method == 'post': response = post(uri, auth=self.auth, json=kwargs.get('data', None)) else: response = delete(uri, auth=self.auth, json=kwargs.get('data', None)) self.is_initial = False self.before_cursor = response.headers.get('cb-before', None) self.after_cursor = response.headers.get('cb-after', None) return self._handle_response(response).json()
[ "Sends", "an", "HTTP", "request", "to", "the", "REST", "API", "and", "receives", "the", "requested", "data", ".", "Additionally", "sets", "up", "pagination", "cursors", "." ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/private.py#L274-L295
[ "def", "_request", "(", "self", ",", "method", ",", "*", "relative_path_parts", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "self", ".", "_create_api_uri", "(", "*", "relative_path_parts", ")", "if", "method", "==", "'get'", ":", "response", "=", "get", "(", "uri", ",", "auth", "=", "self", ".", "auth", ",", "params", "=", "kwargs", ".", "get", "(", "'params'", ",", "None", ")", ")", "elif", "method", "==", "'post'", ":", "response", "=", "post", "(", "uri", ",", "auth", "=", "self", ".", "auth", ",", "json", "=", "kwargs", ".", "get", "(", "'data'", ",", "None", ")", ")", "else", ":", "response", "=", "delete", "(", "uri", ",", "auth", "=", "self", ".", "auth", ",", "json", "=", "kwargs", ".", "get", "(", "'data'", ",", "None", ")", ")", "self", ".", "is_initial", "=", "False", "self", ".", "before_cursor", "=", "response", ".", "headers", ".", "get", "(", "'cb-before'", ",", "None", ")", "self", ".", "after_cursor", "=", "response", ".", "headers", ".", "get", "(", "'cb-after'", ",", "None", ")", "return", "self", ".", "_handle_response", "(", "response", ")", ".", "json", "(", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
DataStore.fetch
return one record from the collection whose parameters match kwargs --- kwargs should be a dictionary whose keys match column names (in traditional SQL / fields in NoSQL) and whose values are the values of those fields. e.g. kwargs={name='my application name',client_id=12345}
proauth2/data_stores/async_mongo_ds.py
def fetch(self, collection, **kwargs): ''' return one record from the collection whose parameters match kwargs --- kwargs should be a dictionary whose keys match column names (in traditional SQL / fields in NoSQL) and whose values are the values of those fields. e.g. kwargs={name='my application name',client_id=12345} ''' callback = kwargs.pop('callback') data = yield Op(self.db[collection].find_one, kwargs) callback(data)
def fetch(self, collection, **kwargs): ''' return one record from the collection whose parameters match kwargs --- kwargs should be a dictionary whose keys match column names (in traditional SQL / fields in NoSQL) and whose values are the values of those fields. e.g. kwargs={name='my application name',client_id=12345} ''' callback = kwargs.pop('callback') data = yield Op(self.db[collection].find_one, kwargs) callback(data)
[ "return", "one", "record", "from", "the", "collection", "whose", "parameters", "match", "kwargs", "---", "kwargs", "should", "be", "a", "dictionary", "whose", "keys", "match", "column", "names", "(", "in", "traditional", "SQL", "/", "fields", "in", "NoSQL", ")", "and", "whose", "values", "are", "the", "values", "of", "those", "fields", ".", "e", ".", "g", ".", "kwargs", "=", "{", "name", "=", "my", "application", "name", "client_id", "=", "12345", "}" ]
charlesthomas/proauth2
python
https://github.com/charlesthomas/proauth2/blob/f88c8df966a1802414047ed304d02df1dd520097/proauth2/data_stores/async_mongo_ds.py#L31-L42
[ "def", "fetch", "(", "self", ",", "collection", ",", "*", "*", "kwargs", ")", ":", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ")", "data", "=", "yield", "Op", "(", "self", ".", "db", "[", "collection", "]", ".", "find_one", ",", "kwargs", ")", "callback", "(", "data", ")" ]
f88c8df966a1802414047ed304d02df1dd520097
valid
DataStore.remove
remove records from collection whose parameters match kwargs
proauth2/data_stores/async_mongo_ds.py
def remove(self, collection, **kwargs): ''' remove records from collection whose parameters match kwargs ''' callback = kwargs.pop('callback') yield Op(self.db[collection].remove, kwargs) callback()
def remove(self, collection, **kwargs): ''' remove records from collection whose parameters match kwargs ''' callback = kwargs.pop('callback') yield Op(self.db[collection].remove, kwargs) callback()
[ "remove", "records", "from", "collection", "whose", "parameters", "match", "kwargs" ]
charlesthomas/proauth2
python
https://github.com/charlesthomas/proauth2/blob/f88c8df966a1802414047ed304d02df1dd520097/proauth2/data_stores/async_mongo_ds.py#L45-L51
[ "def", "remove", "(", "self", ",", "collection", ",", "*", "*", "kwargs", ")", ":", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ")", "yield", "Op", "(", "self", ".", "db", "[", "collection", "]", ".", "remove", ",", "kwargs", ")", "callback", "(", ")" ]
f88c8df966a1802414047ed304d02df1dd520097
valid
DataStore.store
validate the passed values in kwargs based on the collection, store them in the mongodb collection
proauth2/data_stores/async_mongo_ds.py
def store(self, collection, **kwargs): ''' validate the passed values in kwargs based on the collection, store them in the mongodb collection ''' callback = kwargs.pop('callback') key = validate(collection, **kwargs) data = yield Task(self.fetch, collection, **{key: kwargs[key]}) if data is not None: raise Proauth2Error('duplicate_key') yield Op(self.db[collection].insert, kwargs) callback()
def store(self, collection, **kwargs): ''' validate the passed values in kwargs based on the collection, store them in the mongodb collection ''' callback = kwargs.pop('callback') key = validate(collection, **kwargs) data = yield Task(self.fetch, collection, **{key: kwargs[key]}) if data is not None: raise Proauth2Error('duplicate_key') yield Op(self.db[collection].insert, kwargs) callback()
[ "validate", "the", "passed", "values", "in", "kwargs", "based", "on", "the", "collection", "store", "them", "in", "the", "mongodb", "collection" ]
charlesthomas/proauth2
python
https://github.com/charlesthomas/proauth2/blob/f88c8df966a1802414047ed304d02df1dd520097/proauth2/data_stores/async_mongo_ds.py#L54-L65
[ "def", "store", "(", "self", ",", "collection", ",", "*", "*", "kwargs", ")", ":", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ")", "key", "=", "validate", "(", "collection", ",", "*", "*", "kwargs", ")", "data", "=", "yield", "Task", "(", "self", ".", "fetch", ",", "collection", ",", "*", "*", "{", "key", ":", "kwargs", "[", "key", "]", "}", ")", "if", "data", "is", "not", "None", ":", "raise", "Proauth2Error", "(", "'duplicate_key'", ")", "yield", "Op", "(", "self", ".", "db", "[", "collection", "]", ".", "insert", ",", "kwargs", ")", "callback", "(", ")" ]
f88c8df966a1802414047ed304d02df1dd520097
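The async DataStore above is written against the legacy tornado.gen engine/Task callback style (removed in newer Tornado releases); a hedged sketch of driving store and fetch from such a coroutine, with made-up collection and field names:

from tornado import gen


@gen.engine
def register_application(data_store, callback):
    # `data_store` is assumed to be an instance of the DataStore shown above.
    yield gen.Task(data_store.store, 'applications',
                   name='my application name', client_id=12345)
    app = yield gen.Task(data_store.fetch, 'applications', client_id=12345)
    callback(app)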
valid
generate_api
Generates a factory function to instantiate the API with the given version.
trelloapi/api.py
def generate_api(version): """ Generates a factory function to instantiate the API with the given version. """ def get_partial_api(key, token=None): return TrelloAPI(ENDPOINTS[version], version, key, token=token) get_partial_api.__doc__ = \ """Interfaz REST con Trello. Versión {}""".format(version) return get_partial_api
def generate_api(version): """ Generates a factory function to instantiate the API with the given version. """ def get_partial_api(key, token=None): return TrelloAPI(ENDPOINTS[version], version, key, token=token) get_partial_api.__doc__ = \ """Interfaz REST con Trello. Versión {}""".format(version) return get_partial_api
[ "Generates", "a", "factory", "function", "to", "instantiate", "the", "API", "with", "the", "given", "version", "." ]
nilp0inter/trelloapi
python
https://github.com/nilp0inter/trelloapi/blob/88f4135832548ea71598d50a73943890e1cf9e20/trelloapi/api.py#L131-L143
[ "def", "generate_api", "(", "version", ")", ":", "def", "get_partial_api", "(", "key", ",", "token", "=", "None", ")", ":", "return", "TrelloAPI", "(", "ENDPOINTS", "[", "version", "]", ",", "version", ",", "key", ",", "token", "=", "token", ")", "get_partial_api", ".", "__doc__", "=", "\"\"\"Interfaz REST con Trello. Versión {}\"\"\".", "f", "ormat(", "v", "ersion)", "", "return", "get_partial_api" ]
88f4135832548ea71598d50a73943890e1cf9e20
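generate_api is a small factory-with-docstring pattern; here it is restated as a runnable sketch with the TrelloAPI construction replaced by a plain tuple, so nothing beyond these records is assumed:

def make_versioned_factory(version):
    def factory(key, token=None):
        # Stand-in for TrelloAPI(ENDPOINTS[version], version, key, token=token).
        return (version, key, token)
    factory.__doc__ = """Interfaz REST con Trello. Versión {}""".format(version)
    return factory

v1 = make_versioned_factory(1)
print(v1.__doc__)     # Interfaz REST con Trello. Versión 1
print(v1('APIKEY'))   # (1, 'APIKEY', None)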
valid
TrelloAPI._url
Resolve the URL to this point. >>> trello = TrelloAPIV1('APIKEY') >>> trello.batch._url '1/batch' >>> trello.boards(board_id='BOARD_ID')._url '1/boards/BOARD_ID' >>> trello.boards(board_id='BOARD_ID')(field='FIELD')._url '1/boards/BOARD_ID/FIELD' >>> trello.boards(board_id='BOARD_ID').cards(filter='FILTER')._url '1/boards/BOARD_ID/cards/FILTER'
trelloapi/api.py
def _url(self): """ Resolve the URL to this point. >>> trello = TrelloAPIV1('APIKEY') >>> trello.batch._url '1/batch' >>> trello.boards(board_id='BOARD_ID')._url '1/boards/BOARD_ID' >>> trello.boards(board_id='BOARD_ID')(field='FIELD')._url '1/boards/BOARD_ID/FIELD' >>> trello.boards(board_id='BOARD_ID').cards(filter='FILTER')._url '1/boards/BOARD_ID/cards/FILTER' """ if self._api_arg: mypart = str(self._api_arg) else: mypart = self._name if self._parent: return '/'.join(filter(None, [self._parent._url, mypart])) else: return mypart
def _url(self): """ Resolve the URL to this point. >>> trello = TrelloAPIV1('APIKEY') >>> trello.batch._url '1/batch' >>> trello.boards(board_id='BOARD_ID')._url '1/boards/BOARD_ID' >>> trello.boards(board_id='BOARD_ID')(field='FIELD')._url '1/boards/BOARD_ID/FIELD' >>> trello.boards(board_id='BOARD_ID').cards(filter='FILTER')._url '1/boards/BOARD_ID/cards/FILTER' """ if self._api_arg: mypart = str(self._api_arg) else: mypart = self._name if self._parent: return '/'.join(filter(None, [self._parent._url, mypart])) else: return mypart
[ "Resolve", "the", "URL", "to", "this", "point", "." ]
nilp0inter/trelloapi
python
https://github.com/nilp0inter/trelloapi/blob/88f4135832548ea71598d50a73943890e1cf9e20/trelloapi/api.py#L67-L90
[ "def", "_url", "(", "self", ")", ":", "if", "self", ".", "_api_arg", ":", "mypart", "=", "str", "(", "self", ".", "_api_arg", ")", "else", ":", "mypart", "=", "self", ".", "_name", "if", "self", ".", "_parent", ":", "return", "'/'", ".", "join", "(", "filter", "(", "None", ",", "[", "self", ".", "_parent", ".", "_url", ",", "mypart", "]", ")", ")", "else", ":", "return", "mypart" ]
88f4135832548ea71598d50a73943890e1cf9e20
valid
TrelloAPI._api_call
Makes the HTTP request.
trelloapi/api.py
def _api_call(self, method_name, *args, **kwargs): """ Makes the HTTP request. """ params = kwargs.setdefault('params', {}) params.update({'key': self._apikey}) if self._token is not None: params.update({'token': self._token}) http_method = getattr(requests, method_name) return http_method(TRELLO_URL + self._url, *args, **kwargs)
def _api_call(self, method_name, *args, **kwargs): """ Makes the HTTP request. """ params = kwargs.setdefault('params', {}) params.update({'key': self._apikey}) if self._token is not None: params.update({'token': self._token}) http_method = getattr(requests, method_name) return http_method(TRELLO_URL + self._url, *args, **kwargs)
[ "Makes", "the", "HTTP", "request", "." ]
nilp0inter/trelloapi
python
https://github.com/nilp0inter/trelloapi/blob/88f4135832548ea71598d50a73943890e1cf9e20/trelloapi/api.py#L92-L103
[ "def", "_api_call", "(", "self", ",", "method_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "params", "=", "kwargs", ".", "setdefault", "(", "'params'", ",", "{", "}", ")", "params", ".", "update", "(", "{", "'key'", ":", "self", ".", "_apikey", "}", ")", "if", "self", ".", "_token", "is", "not", "None", ":", "params", ".", "update", "(", "{", "'token'", ":", "self", ".", "_token", "}", ")", "http_method", "=", "getattr", "(", "requests", ",", "method_name", ")", "return", "http_method", "(", "TRELLO_URL", "+", "self", ".", "_url", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
88f4135832548ea71598d50a73943890e1cf9e20
valid
Merge
Parses a text representation of a protocol message into a message. Like Parse(), but allows repeated values for a non-repeated field, and uses the last one. Args: text: Message text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems.
typy/google/protobuf/text_format.py
def Merge(text, message, allow_unknown_extension=False, allow_field_number=False): """Parses an text representation of a protocol message into a message. Like Parse(), but allows repeated values for a non-repeated field, and uses the last one. Args: text: Message text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems. """ return MergeLines(text.split('\n'), message, allow_unknown_extension, allow_field_number)
def Merge(text, message, allow_unknown_extension=False, allow_field_number=False): """Parses an text representation of a protocol message into a message. Like Parse(), but allows repeated values for a non-repeated field, and uses the last one. Args: text: Message text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems. """ return MergeLines(text.split('\n'), message, allow_unknown_extension, allow_field_number)
[ "Parses", "an", "text", "representation", "of", "a", "protocol", "message", "into", "a", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L348-L369
[ "def", "Merge", "(", "text", ",", "message", ",", "allow_unknown_extension", "=", "False", ",", "allow_field_number", "=", "False", ")", ":", "return", "MergeLines", "(", "text", ".", "split", "(", "'\\n'", ")", ",", "message", ",", "allow_unknown_extension", ",", "allow_field_number", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
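A hedged usage sketch for Merge: it needs a compiled message class, which these records do not include, so the addressbook_pb2 module and its Person message are hypothetical; the text_format import path is taken from the record's path field:

from typy.google.protobuf import text_format

import addressbook_pb2  # hypothetical generated module with a Person message

person = addressbook_pb2.Person()
# Merge() tolerates repeated values for a singular field and keeps the last one,
# which is what distinguishes it from Parse().
text_format.Merge('name: "Alice" name: "Bob"', person)
print(person.name)    # Bob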
valid
ParseLines
Parses a text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems.
typy/google/protobuf/text_format.py
def ParseLines(lines, message, allow_unknown_extension=False, allow_field_number=False): """Parses an text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems. """ parser = _Parser(allow_unknown_extension, allow_field_number) return parser.ParseLines(lines, message)
def ParseLines(lines, message, allow_unknown_extension=False, allow_field_number=False): """Parses an text representation of a protocol message into a message. Args: lines: An iterable of lines of a message's text representation. message: A protocol buffer message to merge into. allow_unknown_extension: if True, skip over missing extensions and keep parsing allow_field_number: if True, both field number and field name are allowed. Returns: The same message passed as argument. Raises: ParseError: On text parsing problems. """ parser = _Parser(allow_unknown_extension, allow_field_number) return parser.ParseLines(lines, message)
[ "Parses", "an", "text", "representation", "of", "a", "protocol", "message", "into", "a", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L372-L390
[ "def", "ParseLines", "(", "lines", ",", "message", ",", "allow_unknown_extension", "=", "False", ",", "allow_field_number", "=", "False", ")", ":", "parser", "=", "_Parser", "(", "allow_unknown_extension", ",", "allow_field_number", ")", "return", "parser", ".", "ParseLines", "(", "lines", ",", "message", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_SkipFieldValue
Skips over a field value. Args: tokenizer: A tokenizer to parse the field name and values. Raises: ParseError: In case an invalid field value is found.
typy/google/protobuf/text_format.py
def _SkipFieldValue(tokenizer): """Skips over a field value. Args: tokenizer: A tokenizer to parse the field name and values. Raises: ParseError: In case an invalid field value is found. """ # String/bytes tokens can come in multiple adjacent string literals. # If we can consume one, consume as many as we can. if tokenizer.TryConsumeByteString(): while tokenizer.TryConsumeByteString(): pass return if (not tokenizer.TryConsumeIdentifier() and not tokenizer.TryConsumeInt64() and not tokenizer.TryConsumeUint64() and not tokenizer.TryConsumeFloat()): raise ParseError('Invalid field value: ' + tokenizer.token)
def _SkipFieldValue(tokenizer): """Skips over a field value. Args: tokenizer: A tokenizer to parse the field name and values. Raises: ParseError: In case an invalid field value is found. """ # String/bytes tokens can come in multiple adjacent string literals. # If we can consume one, consume as many as we can. if tokenizer.TryConsumeByteString(): while tokenizer.TryConsumeByteString(): pass return if (not tokenizer.TryConsumeIdentifier() and not tokenizer.TryConsumeInt64() and not tokenizer.TryConsumeUint64() and not tokenizer.TryConsumeFloat()): raise ParseError('Invalid field value: ' + tokenizer.token)
[ "Skips", "over", "a", "field", "value", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L739-L759
[ "def", "_SkipFieldValue", "(", "tokenizer", ")", ":", "# String/bytes tokens can come in multiple adjacent string literals.", "# If we can consume one, consume as many as we can.", "if", "tokenizer", ".", "TryConsumeByteString", "(", ")", ":", "while", "tokenizer", ".", "TryConsumeByteString", "(", ")", ":", "pass", "return", "if", "(", "not", "tokenizer", ".", "TryConsumeIdentifier", "(", ")", "and", "not", "tokenizer", ".", "TryConsumeInt64", "(", ")", "and", "not", "tokenizer", ".", "TryConsumeUint64", "(", ")", "and", "not", "tokenizer", ".", "TryConsumeFloat", "(", ")", ")", ":", "raise", "ParseError", "(", "'Invalid field value: '", "+", "tokenizer", ".", "token", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
ParseInteger
Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown Iff the text is not a valid integer.
typy/google/protobuf/text_format.py
def ParseInteger(text, is_signed=False, is_long=False): """Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown Iff the text is not a valid integer. """ # Do the actual parsing. Exception handling is propagated to caller. try: # We force 32-bit values to int and 64-bit values to long to make # alternate implementations where the distinction is more significant # (e.g. the C++ implementation) simpler. if is_long: result = long(text, 0) else: result = int(text, 0) except ValueError: raise ValueError('Couldn\'t parse integer: %s' % text) # Check if the integer is sane. Exceptions handled by callers. checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] checker.CheckValue(result) return result
def ParseInteger(text, is_signed=False, is_long=False): """Parses an integer. Args: text: The text to parse. is_signed: True if a signed integer must be parsed. is_long: True if a long integer must be parsed. Returns: The integer value. Raises: ValueError: Thrown Iff the text is not a valid integer. """ # Do the actual parsing. Exception handling is propagated to caller. try: # We force 32-bit values to int and 64-bit values to long to make # alternate implementations where the distinction is more significant # (e.g. the C++ implementation) simpler. if is_long: result = long(text, 0) else: result = int(text, 0) except ValueError: raise ValueError('Couldn\'t parse integer: %s' % text) # Check if the integer is sane. Exceptions handled by callers. checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)] checker.CheckValue(result) return result
[ "Parses", "an", "integer", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L1102-L1131
[ "def", "ParseInteger", "(", "text", ",", "is_signed", "=", "False", ",", "is_long", "=", "False", ")", ":", "# Do the actual parsing. Exception handling is propagated to caller.", "try", ":", "# We force 32-bit values to int and 64-bit values to long to make", "# alternate implementations where the distinction is more significant", "# (e.g. the C++ implementation) simpler.", "if", "is_long", ":", "result", "=", "long", "(", "text", ",", "0", ")", "else", ":", "result", "=", "int", "(", "text", ",", "0", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Couldn\\'t parse integer: %s'", "%", "text", ")", "# Check if the integer is sane. Exceptions handled by callers.", "checker", "=", "_INTEGER_CHECKERS", "[", "2", "*", "int", "(", "is_long", ")", "+", "int", "(", "is_signed", ")", "]", "checker", ".", "CheckValue", "(", "result", ")", "return", "result" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
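ParseInteger delegates to int(text, 0), so 0x-prefixed hex is accepted alongside decimal, and the checker selected at the end enforces the signed/unsigned 32/64-bit ranges; a small illustration (import path taken from the record's path field):

from typy.google.protobuf.text_format import ParseInteger

print(ParseInteger('0x10'))                 # 16; unsigned 32-bit by default
print(ParseInteger('-1', is_signed=True))   # negative values need is_signed
try:
    ParseInteger('-1')                      # the unsigned checker rejects this
except ValueError as err:
    print(err)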
valid
_Printer.PrintMessage
Convert protobuf message to text format. Args: message: The protocol buffers message.
typy/google/protobuf/text_format.py
def PrintMessage(self, message): """Convert protobuf message to text format. Args: message: The protocol buffers message. """ fields = message.ListFields() if self.use_index_order: fields.sort(key=lambda x: x[0].index) for field, value in fields: if _IsMapEntry(field): for key in sorted(value): # This is slow for maps with submessage entires because it copies the # entire tree. Unfortunately this would take significant refactoring # of this file to work around. # # TODO(haberman): refactor and optimize if this becomes an issue. entry_submsg = field.message_type._concrete_class( key=key, value=value[key]) self.PrintField(field, entry_submsg) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: for element in value: self.PrintField(field, element) else: self.PrintField(field, value)
def PrintMessage(self, message): """Convert protobuf message to text format. Args: message: The protocol buffers message. """ fields = message.ListFields() if self.use_index_order: fields.sort(key=lambda x: x[0].index) for field, value in fields: if _IsMapEntry(field): for key in sorted(value): # This is slow for maps with submessage entires because it copies the # entire tree. Unfortunately this would take significant refactoring # of this file to work around. # # TODO(haberman): refactor and optimize if this becomes an issue. entry_submsg = field.message_type._concrete_class( key=key, value=value[key]) self.PrintField(field, entry_submsg) elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: for element in value: self.PrintField(field, element) else: self.PrintField(field, value)
[ "Convert", "protobuf", "message", "to", "text", "format", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L208-L232
[ "def", "PrintMessage", "(", "self", ",", "message", ")", ":", "fields", "=", "message", ".", "ListFields", "(", ")", "if", "self", ".", "use_index_order", ":", "fields", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ".", "index", ")", "for", "field", ",", "value", "in", "fields", ":", "if", "_IsMapEntry", "(", "field", ")", ":", "for", "key", "in", "sorted", "(", "value", ")", ":", "# This is slow for maps with submessage entires because it copies the", "# entire tree. Unfortunately this would take significant refactoring", "# of this file to work around.", "#", "# TODO(haberman): refactor and optimize if this becomes an issue.", "entry_submsg", "=", "field", ".", "message_type", ".", "_concrete_class", "(", "key", "=", "key", ",", "value", "=", "value", "[", "key", "]", ")", "self", ".", "PrintField", "(", "field", ",", "entry_submsg", ")", "elif", "field", ".", "label", "==", "descriptor", ".", "FieldDescriptor", ".", "LABEL_REPEATED", ":", "for", "element", "in", "value", ":", "self", ".", "PrintField", "(", "field", ",", "element", ")", "else", ":", "self", ".", "PrintField", "(", "field", ",", "value", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_Printer.PrintFieldValue
Print a single field value (not including name). For repeated fields, the value should be a single element. Args: field: The descriptor of the field to be printed. value: The value of the field.
typy/google/protobuf/text_format.py
def PrintFieldValue(self, field, value): """Print a single field value (not including name). For repeated fields, the value should be a single element. Args: field: The descriptor of the field to be printed. value: The value of the field. """ out = self.out if self.pointy_brackets: openb = '<' closeb = '>' else: openb = '{' closeb = '}' if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: if self.as_one_line: out.write(' %s ' % openb) self.PrintMessage(value) out.write(closeb) else: out.write(' %s\n' % openb) self.indent += 2 self.PrintMessage(value) self.indent -= 2 out.write(' ' * self.indent + closeb) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: enum_value = field.enum_type.values_by_number.get(value, None) if enum_value is not None: out.write(enum_value.name) else: out.write(str(value)) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: out.write('\"') if isinstance(value, six.text_type): out_value = value.encode('utf-8') else: out_value = value if field.type == descriptor.FieldDescriptor.TYPE_BYTES: # We need to escape non-UTF8 chars in TYPE_BYTES field. out_as_utf8 = False else: out_as_utf8 = self.as_utf8 out.write(text_encoding.CEscape(out_value, out_as_utf8)) out.write('\"') elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: if value: out.write('true') else: out.write('false') elif field.cpp_type in _FLOAT_TYPES and self.float_format is not None: out.write('{1:{0}}'.format(self.float_format, value)) else: out.write(str(value))
def PrintFieldValue(self, field, value): """Print a single field value (not including name). For repeated fields, the value should be a single element. Args: field: The descriptor of the field to be printed. value: The value of the field. """ out = self.out if self.pointy_brackets: openb = '<' closeb = '>' else: openb = '{' closeb = '}' if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: if self.as_one_line: out.write(' %s ' % openb) self.PrintMessage(value) out.write(closeb) else: out.write(' %s\n' % openb) self.indent += 2 self.PrintMessage(value) self.indent -= 2 out.write(' ' * self.indent + closeb) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM: enum_value = field.enum_type.values_by_number.get(value, None) if enum_value is not None: out.write(enum_value.name) else: out.write(str(value)) elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING: out.write('\"') if isinstance(value, six.text_type): out_value = value.encode('utf-8') else: out_value = value if field.type == descriptor.FieldDescriptor.TYPE_BYTES: # We need to escape non-UTF8 chars in TYPE_BYTES field. out_as_utf8 = False else: out_as_utf8 = self.as_utf8 out.write(text_encoding.CEscape(out_value, out_as_utf8)) out.write('\"') elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL: if value: out.write('true') else: out.write('false') elif field.cpp_type in _FLOAT_TYPES and self.float_format is not None: out.write('{1:{0}}'.format(self.float_format, value)) else: out.write(str(value))
[ "Print", "a", "single", "field", "value", "(", "not", "including", "name", ")", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L267-L322
[ "def", "PrintFieldValue", "(", "self", ",", "field", ",", "value", ")", ":", "out", "=", "self", ".", "out", "if", "self", ".", "pointy_brackets", ":", "openb", "=", "'<'", "closeb", "=", "'>'", "else", ":", "openb", "=", "'{'", "closeb", "=", "'}'", "if", "field", ".", "cpp_type", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "if", "self", ".", "as_one_line", ":", "out", ".", "write", "(", "' %s '", "%", "openb", ")", "self", ".", "PrintMessage", "(", "value", ")", "out", ".", "write", "(", "closeb", ")", "else", ":", "out", ".", "write", "(", "' %s\\n'", "%", "openb", ")", "self", ".", "indent", "+=", "2", "self", ".", "PrintMessage", "(", "value", ")", "self", ".", "indent", "-=", "2", "out", ".", "write", "(", "' '", "*", "self", ".", "indent", "+", "closeb", ")", "elif", "field", ".", "cpp_type", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_ENUM", ":", "enum_value", "=", "field", ".", "enum_type", ".", "values_by_number", ".", "get", "(", "value", ",", "None", ")", "if", "enum_value", "is", "not", "None", ":", "out", ".", "write", "(", "enum_value", ".", "name", ")", "else", ":", "out", ".", "write", "(", "str", "(", "value", ")", ")", "elif", "field", ".", "cpp_type", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_STRING", ":", "out", ".", "write", "(", "'\\\"'", ")", "if", "isinstance", "(", "value", ",", "six", ".", "text_type", ")", ":", "out_value", "=", "value", ".", "encode", "(", "'utf-8'", ")", "else", ":", "out_value", "=", "value", "if", "field", ".", "type", "==", "descriptor", ".", "FieldDescriptor", ".", "TYPE_BYTES", ":", "# We need to escape non-UTF8 chars in TYPE_BYTES field.", "out_as_utf8", "=", "False", "else", ":", "out_as_utf8", "=", "self", ".", "as_utf8", "out", ".", "write", "(", "text_encoding", ".", "CEscape", "(", "out_value", ",", "out_as_utf8", ")", ")", "out", ".", "write", "(", "'\\\"'", ")", "elif", "field", ".", "cpp_type", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_BOOL", ":", "if", "value", ":", "out", ".", "write", "(", "'true'", ")", "else", ":", "out", ".", "write", "(", "'false'", ")", "elif", "field", ".", "cpp_type", "in", "_FLOAT_TYPES", "and", "self", ".", "float_format", "is", "not", "None", ":", "out", ".", "write", "(", "'{1:{0}}'", ".", "format", "(", "self", ".", "float_format", ",", "value", ")", ")", "else", ":", "out", ".", "write", "(", "str", "(", "value", ")", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_Parser._ParseOrMerge
Converts a text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. message: A protocol buffer message to merge into. Raises: ParseError: On text parsing problems.
typy/google/protobuf/text_format.py
def _ParseOrMerge(self, lines, message): """Converts an text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. message: A protocol buffer message to merge into. Raises: ParseError: On text parsing problems. """ tokenizer = _Tokenizer(lines) while not tokenizer.AtEnd(): self._MergeField(tokenizer, message)
def _ParseOrMerge(self, lines, message): """Converts an text representation of a protocol message into a message. Args: lines: Lines of a message's text representation. message: A protocol buffer message to merge into. Raises: ParseError: On text parsing problems. """ tokenizer = _Tokenizer(lines) while not tokenizer.AtEnd(): self._MergeField(tokenizer, message)
[ "Converts", "an", "text", "representation", "of", "a", "protocol", "message", "into", "a", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L443-L455
[ "def", "_ParseOrMerge", "(", "self", ",", "lines", ",", "message", ")", ":", "tokenizer", "=", "_Tokenizer", "(", "lines", ")", "while", "not", "tokenizer", ".", "AtEnd", "(", ")", ":", "self", ".", "_MergeField", "(", "tokenizer", ",", "message", ")" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_Parser._MergeMessageField
Merges a single message field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems.
typy/google/protobuf/text_format.py
def _MergeMessageField(self, tokenizer, message, field): """Merges a single scalar field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems. """ is_map_entry = _IsMapEntry(field) if tokenizer.TryConsume('<'): end_token = '>' else: tokenizer.Consume('{') end_token = '}' if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: if field.is_extension: sub_message = message.Extensions[field].add() elif is_map_entry: # pylint: disable=protected-access sub_message = field.message_type._concrete_class() else: sub_message = getattr(message, field.name).add() else: if field.is_extension: sub_message = message.Extensions[field] else: sub_message = getattr(message, field.name) sub_message.SetInParent() while not tokenizer.TryConsume(end_token): if tokenizer.AtEnd(): raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) self._MergeField(tokenizer, sub_message) if is_map_entry: value_cpptype = field.message_type.fields_by_name['value'].cpp_type if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: value = getattr(message, field.name)[sub_message.key] value.MergeFrom(sub_message.value) else: getattr(message, field.name)[sub_message.key] = sub_message.value
def _MergeMessageField(self, tokenizer, message, field): """Merges a single scalar field into a message. Args: tokenizer: A tokenizer to parse the field value. message: The message of which field is a member. field: The descriptor of the field to be merged. Raises: ParseError: In case of text parsing problems. """ is_map_entry = _IsMapEntry(field) if tokenizer.TryConsume('<'): end_token = '>' else: tokenizer.Consume('{') end_token = '}' if field.label == descriptor.FieldDescriptor.LABEL_REPEATED: if field.is_extension: sub_message = message.Extensions[field].add() elif is_map_entry: # pylint: disable=protected-access sub_message = field.message_type._concrete_class() else: sub_message = getattr(message, field.name).add() else: if field.is_extension: sub_message = message.Extensions[field] else: sub_message = getattr(message, field.name) sub_message.SetInParent() while not tokenizer.TryConsume(end_token): if tokenizer.AtEnd(): raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,)) self._MergeField(tokenizer, sub_message) if is_map_entry: value_cpptype = field.message_type.fields_by_name['value'].cpp_type if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE: value = getattr(message, field.name)[sub_message.key] value.MergeFrom(sub_message.value) else: getattr(message, field.name)[sub_message.key] = sub_message.value
[ "Merges", "a", "single", "scalar", "field", "into", "a", "message", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L566-L611
[ "def", "_MergeMessageField", "(", "self", ",", "tokenizer", ",", "message", ",", "field", ")", ":", "is_map_entry", "=", "_IsMapEntry", "(", "field", ")", "if", "tokenizer", ".", "TryConsume", "(", "'<'", ")", ":", "end_token", "=", "'>'", "else", ":", "tokenizer", ".", "Consume", "(", "'{'", ")", "end_token", "=", "'}'", "if", "field", ".", "label", "==", "descriptor", ".", "FieldDescriptor", ".", "LABEL_REPEATED", ":", "if", "field", ".", "is_extension", ":", "sub_message", "=", "message", ".", "Extensions", "[", "field", "]", ".", "add", "(", ")", "elif", "is_map_entry", ":", "# pylint: disable=protected-access", "sub_message", "=", "field", ".", "message_type", ".", "_concrete_class", "(", ")", "else", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", ".", "add", "(", ")", "else", ":", "if", "field", ".", "is_extension", ":", "sub_message", "=", "message", ".", "Extensions", "[", "field", "]", "else", ":", "sub_message", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", "sub_message", ".", "SetInParent", "(", ")", "while", "not", "tokenizer", ".", "TryConsume", "(", "end_token", ")", ":", "if", "tokenizer", ".", "AtEnd", "(", ")", ":", "raise", "tokenizer", ".", "ParseErrorPreviousToken", "(", "'Expected \"%s\".'", "%", "(", "end_token", ",", ")", ")", "self", ".", "_MergeField", "(", "tokenizer", ",", "sub_message", ")", "if", "is_map_entry", ":", "value_cpptype", "=", "field", ".", "message_type", ".", "fields_by_name", "[", "'value'", "]", ".", "cpp_type", "if", "value_cpptype", "==", "descriptor", ".", "FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "value", "=", "getattr", "(", "message", ",", "field", ".", "name", ")", "[", "sub_message", ".", "key", "]", "value", ".", "MergeFrom", "(", "sub_message", ".", "value", ")", "else", ":", "getattr", "(", "message", ",", "field", ".", "name", ")", "[", "sub_message", ".", "key", "]", "=", "sub_message", ".", "value" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_Tokenizer.ConsumeIdentifier
Consumes protocol message field identifier. Returns: Identifier string. Raises: ParseError: If an identifier couldn't be consumed.
typy/google/protobuf/text_format.py
def ConsumeIdentifier(self): """Consumes protocol message field identifier. Returns: Identifier string. Raises: ParseError: If an identifier couldn't be consumed. """ result = self.token if not self._IDENTIFIER.match(result): raise self._ParseError('Expected identifier.') self.NextToken() return result
def ConsumeIdentifier(self): """Consumes protocol message field identifier. Returns: Identifier string. Raises: ParseError: If an identifier couldn't be consumed. """ result = self.token if not self._IDENTIFIER.match(result): raise self._ParseError('Expected identifier.') self.NextToken() return result
[ "Consumes", "protocol", "message", "field", "identifier", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L860-L873
[ "def", "ConsumeIdentifier", "(", "self", ")", ":", "result", "=", "self", ".", "token", "if", "not", "self", ".", "_IDENTIFIER", ".", "match", "(", "result", ")", ":", "raise", "self", ".", "_ParseError", "(", "'Expected identifier.'", ")", "self", ".", "NextToken", "(", ")", "return", "result" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_Tokenizer.ConsumeInt32
Consumes a signed 32bit integer number. Returns: The integer parsed. Raises: ParseError: If a signed 32bit integer couldn't be consumed.
typy/google/protobuf/text_format.py
def ConsumeInt32(self): """Consumes a signed 32bit integer number. Returns: The integer parsed. Raises: ParseError: If a signed 32bit integer couldn't be consumed. """ try: result = ParseInteger(self.token, is_signed=True, is_long=False) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
def ConsumeInt32(self): """Consumes a signed 32bit integer number. Returns: The integer parsed. Raises: ParseError: If a signed 32bit integer couldn't be consumed. """ try: result = ParseInteger(self.token, is_signed=True, is_long=False) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
[ "Consumes", "a", "signed", "32bit", "integer", "number", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L875-L889
[ "def", "ConsumeInt32", "(", "self", ")", ":", "try", ":", "result", "=", "ParseInteger", "(", "self", ".", "token", ",", "is_signed", "=", "True", ",", "is_long", "=", "False", ")", "except", "ValueError", "as", "e", ":", "raise", "self", ".", "_ParseError", "(", "str", "(", "e", ")", ")", "self", ".", "NextToken", "(", ")", "return", "result" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
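ConsumeInt32 and its sibling consumers defer the actual bounds checking to ParseInteger with signedness and width flags. Below is a minimal standalone sketch of that kind of range check for a signed 32-bit value; check_int32 is a hypothetical helper for illustration, not the protobuf library's own ParseInteger.

    # Hypothetical helper sketching a signed 32-bit range check; the real
    # tokenizer delegates this to ParseInteger in google.protobuf.text_format.
    def check_int32(text):
        value = int(text, 0)  # accepts decimal and 0x-prefixed hex literals
        if not -2**31 <= value <= 2**31 - 1:
            raise ValueError('%d is out of range for a signed 32-bit integer' % value)
        return value

    check_int32('-42')         # -42
    check_int32('0x7fffffff')  # 2147483647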
valid
_Tokenizer.ConsumeFloat
Consumes a floating point number. Returns: The number parsed. Raises: ParseError: If a floating point number couldn't be consumed.
typy/google/protobuf/text_format.py
def ConsumeFloat(self): """Consumes an floating point number. Returns: The number parsed. Raises: ParseError: If a floating point number couldn't be consumed. """ try: result = ParseFloat(self.token) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
def ConsumeFloat(self): """Consumes an floating point number. Returns: The number parsed. Raises: ParseError: If a floating point number couldn't be consumed. """ try: result = ParseFloat(self.token) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
[ "Consumes", "an", "floating", "point", "number", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L960-L974
[ "def", "ConsumeFloat", "(", "self", ")", ":", "try", ":", "result", "=", "ParseFloat", "(", "self", ".", "token", ")", "except", "ValueError", "as", "e", ":", "raise", "self", ".", "_ParseError", "(", "str", "(", "e", ")", ")", "self", ".", "NextToken", "(", ")", "return", "result" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_Tokenizer.ConsumeBool
Consumes a boolean value. Returns: The bool parsed. Raises: ParseError: If a boolean value couldn't be consumed.
typy/google/protobuf/text_format.py
def ConsumeBool(self): """Consumes a boolean value. Returns: The bool parsed. Raises: ParseError: If a boolean value couldn't be consumed. """ try: result = ParseBool(self.token) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
def ConsumeBool(self): """Consumes a boolean value. Returns: The bool parsed. Raises: ParseError: If a boolean value couldn't be consumed. """ try: result = ParseBool(self.token) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
[ "Consumes", "a", "boolean", "value", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L976-L990
[ "def", "ConsumeBool", "(", "self", ")", ":", "try", ":", "result", "=", "ParseBool", "(", "self", ".", "token", ")", "except", "ValueError", "as", "e", ":", "raise", "self", ".", "_ParseError", "(", "str", "(", "e", ")", ")", "self", ".", "NextToken", "(", ")", "return", "result" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
_Tokenizer._ConsumeSingleByteString
Consume one token of a string literal. String literals (whether bytes or text) can come in multiple adjacent tokens which are automatically concatenated, like in C or Python. This method only consumes one token. Returns: The token parsed. Raises: ParseError: When the wrong format data is found.
typy/google/protobuf/text_format.py
def _ConsumeSingleByteString(self): """Consume one token of a string literal. String literals (whether bytes or text) can come in multiple adjacent tokens which are automatically concatenated, like in C or Python. This method only consumes one token. Returns: The token parsed. Raises: ParseError: When the wrong format data is found. """ text = self.token if len(text) < 1 or text[0] not in _QUOTES: raise self._ParseError('Expected string but found: %r' % (text,)) if len(text) < 2 or text[-1] != text[0]: raise self._ParseError('String missing ending quote: %r' % (text,)) try: result = text_encoding.CUnescape(text[1:-1]) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
def _ConsumeSingleByteString(self): """Consume one token of a string literal. String literals (whether bytes or text) can come in multiple adjacent tokens which are automatically concatenated, like in C or Python. This method only consumes one token. Returns: The token parsed. Raises: ParseError: When the wrong format data is found. """ text = self.token if len(text) < 1 or text[0] not in _QUOTES: raise self._ParseError('Expected string but found: %r' % (text,)) if len(text) < 2 or text[-1] != text[0]: raise self._ParseError('String missing ending quote: %r' % (text,)) try: result = text_encoding.CUnescape(text[1:-1]) except ValueError as e: raise self._ParseError(str(e)) self.NextToken() return result
[ "Consume", "one", "token", "of", "a", "string", "literal", "." ]
ibelie/typy
python
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/text_format.py#L1028-L1052
[ "def", "_ConsumeSingleByteString", "(", "self", ")", ":", "text", "=", "self", ".", "token", "if", "len", "(", "text", ")", "<", "1", "or", "text", "[", "0", "]", "not", "in", "_QUOTES", ":", "raise", "self", ".", "_ParseError", "(", "'Expected string but found: %r'", "%", "(", "text", ",", ")", ")", "if", "len", "(", "text", ")", "<", "2", "or", "text", "[", "-", "1", "]", "!=", "text", "[", "0", "]", ":", "raise", "self", ".", "_ParseError", "(", "'String missing ending quote: %r'", "%", "(", "text", ",", ")", ")", "try", ":", "result", "=", "text_encoding", ".", "CUnescape", "(", "text", "[", "1", ":", "-", "1", "]", ")", "except", "ValueError", "as", "e", ":", "raise", "self", ".", "_ParseError", "(", "str", "(", "e", ")", ")", "self", ".", "NextToken", "(", ")", "return", "result" ]
3616845fb91459aacd8df6bf82c5d91f4542bee7
valid
timestamp
Returns a human-readable timestamp given a Unix timestamp 't' or for the current time. The Unix timestamp is the number of seconds since start of epoch (1970-01-01 00:00:00). When forfilename is True, then spaces and semicolons are replaced with hyphens. The returned string is usable as a (part of a) filename.
dpostools/utils.py
def timestamp(t = None, forfilename=False): """Returns a human-readable timestamp given a Unix timestamp 't' or for the current time. The Unix timestamp is the number of seconds since start of epoch (1970-01-01 00:00:00). When forfilename is True, then spaces and semicolons are replace with hyphens. The returned string is usable as a (part of a) filename. """ datetimesep = ' ' timesep = ':' if forfilename: datetimesep = '-' timesep = '-' return time.strftime('%Y-%m-%d' + datetimesep + '%H' + timesep + '%M' + timesep + '%S', time.localtime(t))
def timestamp(t = None, forfilename=False): """Returns a human-readable timestamp given a Unix timestamp 't' or for the current time. The Unix timestamp is the number of seconds since start of epoch (1970-01-01 00:00:00). When forfilename is True, then spaces and semicolons are replace with hyphens. The returned string is usable as a (part of a) filename. """ datetimesep = ' ' timesep = ':' if forfilename: datetimesep = '-' timesep = '-' return time.strftime('%Y-%m-%d' + datetimesep + '%H' + timesep + '%M' + timesep + '%S', time.localtime(t))
[ "Returns", "a", "human", "-", "readable", "timestamp", "given", "a", "Unix", "timestamp", "t", "or", "for", "the", "current", "time", ".", "The", "Unix", "timestamp", "is", "the", "number", "of", "seconds", "since", "start", "of", "epoch", "(", "1970", "-", "01", "-", "01", "00", ":", "00", ":", "00", ")", ".", "When", "forfilename", "is", "True", "then", "spaces", "and", "semicolons", "are", "replace", "with", "hyphens", ".", "The", "returned", "string", "is", "usable", "as", "a", "(", "part", "of", "a", ")", "filename", "." ]
BlockHub/blockhubdpostools
python
https://github.com/BlockHub/blockhubdpostools/blob/27712cd97cd3658ee54a4330ff3135b51a01d7d1/dpostools/utils.py#L33-L48
[ "def", "timestamp", "(", "t", "=", "None", ",", "forfilename", "=", "False", ")", ":", "datetimesep", "=", "' '", "timesep", "=", "':'", "if", "forfilename", ":", "datetimesep", "=", "'-'", "timesep", "=", "'-'", "return", "time", ".", "strftime", "(", "'%Y-%m-%d'", "+", "datetimesep", "+", "'%H'", "+", "timesep", "+", "'%M'", "+", "timesep", "+", "'%S'", ",", "time", ".", "localtime", "(", "t", ")", ")" ]
27712cd97cd3658ee54a4330ff3135b51a01d7d1
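A minimal usage sketch for the timestamp helper above, assuming the package is importable as dpostools.utils; the exact output depends on the local timezone.

    # Usage sketch; outputs shown are illustrative and timezone-dependent.
    from dpostools import utils

    print(utils.timestamp(0))                    # e.g. '1970-01-01 01:00:00'
    print(utils.timestamp(0, forfilename=True))  # e.g. '1970-01-01-01-00-00'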
valid
arktimestamp
Returns a human-readable timestamp given an Ark timestamp 'arct'. An Ark timestamp is the number of seconds since Genesis block, 2017:03:21 15:55:44.
dpostools/utils.py
def arktimestamp(arkt, forfilename=False): """Returns a human-readable timestamp given an Ark timestamp 'arct'. An Ark timestamp is the number of seconds since Genesis block, 2017:03:21 15:55:44.""" t = arkt + time.mktime((2017, 3, 21, 15, 55, 44, 0, 0, 0)) return '%d %s' % (arkt, timestamp(t))
def arktimestamp(arkt, forfilename=False): """Returns a human-readable timestamp given an Ark timestamp 'arct'. An Ark timestamp is the number of seconds since Genesis block, 2017:03:21 15:55:44.""" t = arkt + time.mktime((2017, 3, 21, 15, 55, 44, 0, 0, 0)) return '%d %s' % (arkt, timestamp(t))
[ "Returns", "a", "human", "-", "readable", "timestamp", "given", "an", "Ark", "timestamp", "arct", ".", "An", "Ark", "timestamp", "is", "the", "number", "of", "seconds", "since", "Genesis", "block", "2017", ":", "03", ":", "21", "15", ":", "55", ":", "44", "." ]
BlockHub/blockhubdpostools
python
https://github.com/BlockHub/blockhubdpostools/blob/27712cd97cd3658ee54a4330ff3135b51a01d7d1/dpostools/utils.py#L51-L57
[ "def", "arktimestamp", "(", "arkt", ",", "forfilename", "=", "False", ")", ":", "t", "=", "arkt", "+", "time", ".", "mktime", "(", "(", "2017", ",", "3", ",", "21", ",", "15", ",", "55", ",", "44", ",", "0", ",", "0", ",", "0", ")", ")", "return", "'%d %s'", "%", "(", "arkt", ",", "timestamp", "(", "t", ")", ")" ]
27712cd97cd3658ee54a4330ff3135b51a01d7d1
valid
arkt_to_unixt
convert ark timestamp to unix timestamp
dpostools/utils.py
def arkt_to_unixt(ark_timestamp): """ convert ark timestamp to unix timestamp""" res = datetime.datetime(2017, 3, 21, 15, 55, 44) + datetime.timedelta(seconds=ark_timestamp) return res.timestamp()
def arkt_to_unixt(ark_timestamp): """ convert ark timestamp to unix timestamp""" res = datetime.datetime(2017, 3, 21, 15, 55, 44) + datetime.timedelta(seconds=ark_timestamp) return res.timestamp()
[ "convert", "ark", "timestamp", "to", "unix", "timestamp" ]
BlockHub/blockhubdpostools
python
https://github.com/BlockHub/blockhubdpostools/blob/27712cd97cd3658ee54a4330ff3135b51a01d7d1/dpostools/utils.py#L65-L68
[ "def", "arkt_to_unixt", "(", "ark_timestamp", ")", ":", "res", "=", "datetime", ".", "datetime", "(", "2017", ",", "3", ",", "21", ",", "15", ",", "55", ",", "44", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "ark_timestamp", ")", "return", "res", ".", "timestamp", "(", ")" ]
27712cd97cd3658ee54a4330ff3135b51a01d7d1
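Both Ark helpers above anchor on the genesis time 2017-03-21 15:55:44. A standalone sketch of the same offset arithmetic, using only the standard library, is shown below; like the module, it treats the genesis time as a naive datetime.

    # Standalone sketch of the Ark-epoch offset used by arktimestamp and
    # arkt_to_unixt; the genesis time is treated as a naive datetime here too.
    import datetime

    ARK_GENESIS = datetime.datetime(2017, 3, 21, 15, 55, 44)

    def ark_to_datetime(ark_timestamp):
        return ARK_GENESIS + datetime.timedelta(seconds=ark_timestamp)

    print(ark_to_datetime(0))      # 2017-03-21 15:55:44
    print(ark_to_datetime(86400))  # 2017-03-22 15:55:44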
valid
Mssql.close
Close the connection.
arguspy/mssql_pymssql.py
def close(self): """Close the connection.""" try: self.conn.close() self.logger.debug("Close connect succeed.") except pymssql.Error as e: self.unknown("Close connect error: %s" % e)
def close(self): """Close the connection.""" try: self.conn.close() self.logger.debug("Close connect succeed.") except pymssql.Error as e: self.unknown("Close connect error: %s" % e)
[ "Close", "the", "connection", "." ]
crazy-canux/arguspy
python
https://github.com/crazy-canux/arguspy/blob/e9486b5df61978a990d56bf43de35f3a4cdefcc3/arguspy/mssql_pymssql.py#L67-L73
[ "def", "close", "(", "self", ")", ":", "try", ":", "self", ".", "conn", ".", "close", "(", ")", "self", ".", "logger", ".", "debug", "(", "\"Close connect succeed.\"", ")", "except", "pymssql", ".", "Error", "as", "e", ":", "self", ".", "unknown", "(", "\"Close connect error: %s\"", "%", "e", ")" ]
e9486b5df61978a990d56bf43de35f3a4cdefcc3
valid
get_version
Extract package __version__
setup.py
def get_version(): """Extract package __version__""" with open(VERSION_FILE, encoding='utf-8') as fp: content = fp.read() match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', content, re.M) if match: return match.group(1) raise RuntimeError("Could not extract package __version__")
def get_version(): """Extract package __version__""" with open(VERSION_FILE, encoding='utf-8') as fp: content = fp.read() match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', content, re.M) if match: return match.group(1) raise RuntimeError("Could not extract package __version__")
[ "Extract", "package", "__version__" ]
suryakencana007/baka_model
python
https://github.com/suryakencana007/baka_model/blob/915c2da9920e973302f5764ae63799acd5ecf0b7/setup.py#L89-L96
[ "def", "get_version", "(", ")", ":", "with", "open", "(", "VERSION_FILE", ",", "encoding", "=", "'utf-8'", ")", "as", "fp", ":", "content", "=", "fp", ".", "read", "(", ")", "match", "=", "re", ".", "search", "(", "r'^__version__ = [\\'\"]([^\\'\"]*)[\\'\"]'", ",", "content", ",", "re", ".", "M", ")", "if", "match", ":", "return", "match", ".", "group", "(", "1", ")", "raise", "RuntimeError", "(", "\"Could not extract package __version__\"", ")" ]
915c2da9920e973302f5764ae63799acd5ecf0b7
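The get_version helper scans VERSION_FILE for a top-level __version__ assignment. The sketch below exercises the same regular expression against an in-memory string, so it can be run without the package's file layout.

    # Demonstration of the same __version__ regex against a literal string;
    # the real function reads VERSION_FILE from disk instead.
    import re

    content = '__version__ = "1.2.3"\n'
    match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', content, re.M)
    print(match.group(1))  # 1.2.3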
valid
de
**Differential evolution** via `inspyred <http://inspyred.github.io/>`_ specially tuned. steady state replacement, n-point crossover, pop size 20, gaussian mutation noise 0.01 & 1e-6. stores intermediate results (can be used for resume, see seeds) :param start: start point :param seeds: list of start points :param vizfunc: callback to do visualization of current best solution :param printfunc: callback to summarize current best solution :param seed: RNG initialization (if set)
jbopt/de.py
def de(output_basename, parameter_names, transform, loglikelihood, prior, nsteps=40000, vizfunc=None, printfunc=None, **problem): """ **Differential evolution** via `inspyred <http://inspyred.github.io/>`_ specially tuned. steady state replacement, n-point crossover, pop size 20, gaussian mutation noise 0.01 & 1e-6. stores intermediate results (can be used for resume, see seeds) :param start: start point :param seeds: list of start points :param vizfunc: callback to do visualization of current best solution :param printfunc: callback to summarize current best solution :param seed: RNG initialization (if set) """ import json import inspyred import random prng = random.Random() if 'seed' in problem: prng.seed(problem['seed']) n_params = len(parameter_names) seeds = problem.get('seeds', []) if 'start' in problem: seeds.append(problem['start']) prefix = output_basename def viz(candidate, args): if vizfunc is not None: vizfunc(candidate) def print_candidate(candidate, l, args): if printfunc is not None: printfunc(cube=candidate, loglikelihood=l) else: print l, candidate def eval_candidate(candidate): params = transform(candidate) l = loglikelihood(params) p = prior(params) if numpy.isinf(p) and p < 0: print ' prior rejection' return -1e300 if numpy.isnan(l): return -1e300 return l, p @inspyred.ec.utilities.memoize @inspyred.ec.evaluators.evaluator def fitness(candidate, args): l, p = eval_candidate(candidate) #print_candidate(candidate, (l + p), args) return (l + p) cutoff_store = 10 def solution_archiver(random, population, archive, args): psize = len(population) population.sort(reverse=True) best = population[0].fitness #print 'BEST: ', best, all_candidates = sorted(population + archive, reverse=True) all_fitness = numpy.array([c.fitness for c in all_candidates]) mask = best - all_fitness > cutoff_store / 3 if mask.sum() < 20: mask = best - all_fitness > cutoff_store newarchive = [c for i, c in enumerate(all_candidates) if i == 0 or all_fitness[i - 1] != c.fitness] print 'ARCHIVE: ', len(archive), len(newarchive) json.dump([{'candidate': [float(f) for f in c.candidate], 'fitness':c.fitness} for c in newarchive], open(prefix + '_values.json', 'w'), indent=4) return newarchive def observer(population, num_generations, num_evaluations, args): population.sort(reverse=True) candidate = population[0] print ('{0} evaluations'.format(num_evaluations)), ' best:', print_candidate(candidate.candidate, candidate.fitness, args) if num_evaluations % len(population) == 0 or num_evaluations < len(population) or args.get('force_viz', False): # for each turnaround of a full generation viz(candidate.candidate, args) def generator(random, args): u = [random.uniform(0, 1) for _ in range(n_params)] u = [random.gauss(0.5, 0.1) for _ in range(n_params)] return bounder(u, args) ea = inspyred.ec.DEA(prng) ea.terminator = inspyred.ec.terminators.evaluation_termination ea.archiver = solution_archiver bounder = inspyred.ec.Bounder(lower_bound=1e-10, upper_bound=1-1e-10) #bounder = inspyred.ec.Bounder(lower_bound=-20, upper_bound=20) import copy from math import log @inspyred.ec.variators.mutator def double_exponential_mutation(random, candidate, args): mut_rate = args.setdefault('mutation_rate', 0.1) mean = args.setdefault('gaussian_mean', 0.0) stdev = args.setdefault('gaussian_stdev', 1.0) scale = log(0.5) / - (stdev) bounder = args['_ec'].bounder mutant = copy.copy(candidate) for i, m in enumerate(mutant): dice = random.random() if dice < mut_rate: sign = (dice < mut_rate / 2) * 2 - 1 delta = -log(random.random()) / 
scale mutant[i] += delta * sign mutant = bounder(mutant, args) return mutant def minute_gaussian_mutation(random, candidates, args): args = dict(args) args['mutation_rate'] = 1 args['gaussian_stdev'] = 1e-6 return inspyred.ec.variators.gaussian_mutation(random, candidates, args) ea.variator = [inspyred.ec.variators.n_point_crossover, inspyred.ec.variators.gaussian_mutation, minute_gaussian_mutation] #ea.variator = [inspyred.ec.variators.n_point_crossover, double_exponential_mutation] ea.replacer = inspyred.ec.replacers.steady_state_replacement ea.observer = observer pop_size = 20 final_pop = ea.evolve(pop_size=pop_size, max_evaluations=nsteps, maximize=True, seeds=seeds, gaussian_stdev=0.01, #mutation_rate=0.3, bounder=bounder, generator=generator, evaluator=fitness, ) best = max(final_pop) seeds = [c.candidate for c in ea.archive] print 'final candidate:', best return {'start': best.candidate, 'value': best.fitness, 'seeds': seeds, 'method': 'DE'}
def de(output_basename, parameter_names, transform, loglikelihood, prior, nsteps=40000, vizfunc=None, printfunc=None, **problem): """ **Differential evolution** via `inspyred <http://inspyred.github.io/>`_ specially tuned. steady state replacement, n-point crossover, pop size 20, gaussian mutation noise 0.01 & 1e-6. stores intermediate results (can be used for resume, see seeds) :param start: start point :param seeds: list of start points :param vizfunc: callback to do visualization of current best solution :param printfunc: callback to summarize current best solution :param seed: RNG initialization (if set) """ import json import inspyred import random prng = random.Random() if 'seed' in problem: prng.seed(problem['seed']) n_params = len(parameter_names) seeds = problem.get('seeds', []) if 'start' in problem: seeds.append(problem['start']) prefix = output_basename def viz(candidate, args): if vizfunc is not None: vizfunc(candidate) def print_candidate(candidate, l, args): if printfunc is not None: printfunc(cube=candidate, loglikelihood=l) else: print l, candidate def eval_candidate(candidate): params = transform(candidate) l = loglikelihood(params) p = prior(params) if numpy.isinf(p) and p < 0: print ' prior rejection' return -1e300 if numpy.isnan(l): return -1e300 return l, p @inspyred.ec.utilities.memoize @inspyred.ec.evaluators.evaluator def fitness(candidate, args): l, p = eval_candidate(candidate) #print_candidate(candidate, (l + p), args) return (l + p) cutoff_store = 10 def solution_archiver(random, population, archive, args): psize = len(population) population.sort(reverse=True) best = population[0].fitness #print 'BEST: ', best, all_candidates = sorted(population + archive, reverse=True) all_fitness = numpy.array([c.fitness for c in all_candidates]) mask = best - all_fitness > cutoff_store / 3 if mask.sum() < 20: mask = best - all_fitness > cutoff_store newarchive = [c for i, c in enumerate(all_candidates) if i == 0 or all_fitness[i - 1] != c.fitness] print 'ARCHIVE: ', len(archive), len(newarchive) json.dump([{'candidate': [float(f) for f in c.candidate], 'fitness':c.fitness} for c in newarchive], open(prefix + '_values.json', 'w'), indent=4) return newarchive def observer(population, num_generations, num_evaluations, args): population.sort(reverse=True) candidate = population[0] print ('{0} evaluations'.format(num_evaluations)), ' best:', print_candidate(candidate.candidate, candidate.fitness, args) if num_evaluations % len(population) == 0 or num_evaluations < len(population) or args.get('force_viz', False): # for each turnaround of a full generation viz(candidate.candidate, args) def generator(random, args): u = [random.uniform(0, 1) for _ in range(n_params)] u = [random.gauss(0.5, 0.1) for _ in range(n_params)] return bounder(u, args) ea = inspyred.ec.DEA(prng) ea.terminator = inspyred.ec.terminators.evaluation_termination ea.archiver = solution_archiver bounder = inspyred.ec.Bounder(lower_bound=1e-10, upper_bound=1-1e-10) #bounder = inspyred.ec.Bounder(lower_bound=-20, upper_bound=20) import copy from math import log @inspyred.ec.variators.mutator def double_exponential_mutation(random, candidate, args): mut_rate = args.setdefault('mutation_rate', 0.1) mean = args.setdefault('gaussian_mean', 0.0) stdev = args.setdefault('gaussian_stdev', 1.0) scale = log(0.5) / - (stdev) bounder = args['_ec'].bounder mutant = copy.copy(candidate) for i, m in enumerate(mutant): dice = random.random() if dice < mut_rate: sign = (dice < mut_rate / 2) * 2 - 1 delta = -log(random.random()) / 
scale mutant[i] += delta * sign mutant = bounder(mutant, args) return mutant def minute_gaussian_mutation(random, candidates, args): args = dict(args) args['mutation_rate'] = 1 args['gaussian_stdev'] = 1e-6 return inspyred.ec.variators.gaussian_mutation(random, candidates, args) ea.variator = [inspyred.ec.variators.n_point_crossover, inspyred.ec.variators.gaussian_mutation, minute_gaussian_mutation] #ea.variator = [inspyred.ec.variators.n_point_crossover, double_exponential_mutation] ea.replacer = inspyred.ec.replacers.steady_state_replacement ea.observer = observer pop_size = 20 final_pop = ea.evolve(pop_size=pop_size, max_evaluations=nsteps, maximize=True, seeds=seeds, gaussian_stdev=0.01, #mutation_rate=0.3, bounder=bounder, generator=generator, evaluator=fitness, ) best = max(final_pop) seeds = [c.candidate for c in ea.archive] print 'final candidate:', best return {'start': best.candidate, 'value': best.fitness, 'seeds': seeds, 'method': 'DE'}
[ "**", "Differential", "evolution", "**", "via", "inspyred", "<http", ":", "//", "inspyred", ".", "github", ".", "io", "/", ">", "_", "specially", "tuned", ".", "steady", "state", "replacement", "n", "-", "point", "crossover", "pop", "size", "20", "gaussian", "mutation", "noise", "0", ".", "01", "&", "1e", "-", "6", ".", "stores", "intermediate", "results", "(", "can", "be", "used", "for", "resume", "see", "seeds", ")", ":", "param", "start", ":", "start", "point", ":", "param", "seeds", ":", "list", "of", "start", "points", ":", "param", "vizfunc", ":", "callback", "to", "do", "visualization", "of", "current", "best", "solution", ":", "param", "printfunc", ":", "callback", "to", "summarize", "current", "best", "solution", ":", "param", "seed", ":", "RNG", "initialization", "(", "if", "set", ")" ]
JohannesBuchner/jbopt
python
https://github.com/JohannesBuchner/jbopt/blob/11b721ea001625ad7820f71ff684723c71216646/jbopt/de.py#L6-L141
[ "def", "de", "(", "output_basename", ",", "parameter_names", ",", "transform", ",", "loglikelihood", ",", "prior", ",", "nsteps", "=", "40000", ",", "vizfunc", "=", "None", ",", "printfunc", "=", "None", ",", "*", "*", "problem", ")", ":", "import", "json", "import", "inspyred", "import", "random", "prng", "=", "random", ".", "Random", "(", ")", "if", "'seed'", "in", "problem", ":", "prng", ".", "seed", "(", "problem", "[", "'seed'", "]", ")", "n_params", "=", "len", "(", "parameter_names", ")", "seeds", "=", "problem", ".", "get", "(", "'seeds'", ",", "[", "]", ")", "if", "'start'", "in", "problem", ":", "seeds", ".", "append", "(", "problem", "[", "'start'", "]", ")", "prefix", "=", "output_basename", "def", "viz", "(", "candidate", ",", "args", ")", ":", "if", "vizfunc", "is", "not", "None", ":", "vizfunc", "(", "candidate", ")", "def", "print_candidate", "(", "candidate", ",", "l", ",", "args", ")", ":", "if", "printfunc", "is", "not", "None", ":", "printfunc", "(", "cube", "=", "candidate", ",", "loglikelihood", "=", "l", ")", "else", ":", "print", "l", ",", "candidate", "def", "eval_candidate", "(", "candidate", ")", ":", "params", "=", "transform", "(", "candidate", ")", "l", "=", "loglikelihood", "(", "params", ")", "p", "=", "prior", "(", "params", ")", "if", "numpy", ".", "isinf", "(", "p", ")", "and", "p", "<", "0", ":", "print", "' prior rejection'", "return", "-", "1e300", "if", "numpy", ".", "isnan", "(", "l", ")", ":", "return", "-", "1e300", "return", "l", ",", "p", "@", "inspyred", ".", "ec", ".", "utilities", ".", "memoize", "@", "inspyred", ".", "ec", ".", "evaluators", ".", "evaluator", "def", "fitness", "(", "candidate", ",", "args", ")", ":", "l", ",", "p", "=", "eval_candidate", "(", "candidate", ")", "#print_candidate(candidate, (l + p), args)", "return", "(", "l", "+", "p", ")", "cutoff_store", "=", "10", "def", "solution_archiver", "(", "random", ",", "population", ",", "archive", ",", "args", ")", ":", "psize", "=", "len", "(", "population", ")", "population", ".", "sort", "(", "reverse", "=", "True", ")", "best", "=", "population", "[", "0", "]", ".", "fitness", "#print 'BEST: ', best, ", "all_candidates", "=", "sorted", "(", "population", "+", "archive", ",", "reverse", "=", "True", ")", "all_fitness", "=", "numpy", ".", "array", "(", "[", "c", ".", "fitness", "for", "c", "in", "all_candidates", "]", ")", "mask", "=", "best", "-", "all_fitness", ">", "cutoff_store", "/", "3", "if", "mask", ".", "sum", "(", ")", "<", "20", ":", "mask", "=", "best", "-", "all_fitness", ">", "cutoff_store", "newarchive", "=", "[", "c", "for", "i", ",", "c", "in", "enumerate", "(", "all_candidates", ")", "if", "i", "==", "0", "or", "all_fitness", "[", "i", "-", "1", "]", "!=", "c", ".", "fitness", "]", "print", "'ARCHIVE: '", ",", "len", "(", "archive", ")", ",", "len", "(", "newarchive", ")", "json", ".", "dump", "(", "[", "{", "'candidate'", ":", "[", "float", "(", "f", ")", "for", "f", "in", "c", ".", "candidate", "]", ",", "'fitness'", ":", "c", ".", "fitness", "}", "for", "c", "in", "newarchive", "]", ",", "open", "(", "prefix", "+", "'_values.json'", ",", "'w'", ")", ",", "indent", "=", "4", ")", "return", "newarchive", "def", "observer", "(", "population", ",", "num_generations", ",", "num_evaluations", ",", "args", ")", ":", "population", ".", "sort", "(", "reverse", "=", "True", ")", "candidate", "=", "population", "[", "0", "]", "print", "(", "'{0} evaluations'", ".", "format", "(", "num_evaluations", ")", ")", ",", "' best:'", ",", "print_candidate", "(", "candidate", 
".", "candidate", ",", "candidate", ".", "fitness", ",", "args", ")", "if", "num_evaluations", "%", "len", "(", "population", ")", "==", "0", "or", "num_evaluations", "<", "len", "(", "population", ")", "or", "args", ".", "get", "(", "'force_viz'", ",", "False", ")", ":", "# for each turnaround of a full generation", "viz", "(", "candidate", ".", "candidate", ",", "args", ")", "def", "generator", "(", "random", ",", "args", ")", ":", "u", "=", "[", "random", ".", "uniform", "(", "0", ",", "1", ")", "for", "_", "in", "range", "(", "n_params", ")", "]", "u", "=", "[", "random", ".", "gauss", "(", "0.5", ",", "0.1", ")", "for", "_", "in", "range", "(", "n_params", ")", "]", "return", "bounder", "(", "u", ",", "args", ")", "ea", "=", "inspyred", ".", "ec", ".", "DEA", "(", "prng", ")", "ea", ".", "terminator", "=", "inspyred", ".", "ec", ".", "terminators", ".", "evaluation_termination", "ea", ".", "archiver", "=", "solution_archiver", "bounder", "=", "inspyred", ".", "ec", ".", "Bounder", "(", "lower_bound", "=", "1e-10", ",", "upper_bound", "=", "1", "-", "1e-10", ")", "#bounder = inspyred.ec.Bounder(lower_bound=-20, upper_bound=20)", "import", "copy", "from", "math", "import", "log", "@", "inspyred", ".", "ec", ".", "variators", ".", "mutator", "def", "double_exponential_mutation", "(", "random", ",", "candidate", ",", "args", ")", ":", "mut_rate", "=", "args", ".", "setdefault", "(", "'mutation_rate'", ",", "0.1", ")", "mean", "=", "args", ".", "setdefault", "(", "'gaussian_mean'", ",", "0.0", ")", "stdev", "=", "args", ".", "setdefault", "(", "'gaussian_stdev'", ",", "1.0", ")", "scale", "=", "log", "(", "0.5", ")", "/", "-", "(", "stdev", ")", "bounder", "=", "args", "[", "'_ec'", "]", ".", "bounder", "mutant", "=", "copy", ".", "copy", "(", "candidate", ")", "for", "i", ",", "m", "in", "enumerate", "(", "mutant", ")", ":", "dice", "=", "random", ".", "random", "(", ")", "if", "dice", "<", "mut_rate", ":", "sign", "=", "(", "dice", "<", "mut_rate", "/", "2", ")", "*", "2", "-", "1", "delta", "=", "-", "log", "(", "random", ".", "random", "(", ")", ")", "/", "scale", "mutant", "[", "i", "]", "+=", "delta", "*", "sign", "mutant", "=", "bounder", "(", "mutant", ",", "args", ")", "return", "mutant", "def", "minute_gaussian_mutation", "(", "random", ",", "candidates", ",", "args", ")", ":", "args", "=", "dict", "(", "args", ")", "args", "[", "'mutation_rate'", "]", "=", "1", "args", "[", "'gaussian_stdev'", "]", "=", "1e-6", "return", "inspyred", ".", "ec", ".", "variators", ".", "gaussian_mutation", "(", "random", ",", "candidates", ",", "args", ")", "ea", ".", "variator", "=", "[", "inspyred", ".", "ec", ".", "variators", ".", "n_point_crossover", ",", "inspyred", ".", "ec", ".", "variators", ".", "gaussian_mutation", ",", "minute_gaussian_mutation", "]", "#ea.variator = [inspyred.ec.variators.n_point_crossover, double_exponential_mutation]", "ea", ".", "replacer", "=", "inspyred", ".", "ec", ".", "replacers", ".", "steady_state_replacement", "ea", ".", "observer", "=", "observer", "pop_size", "=", "20", "final_pop", "=", "ea", ".", "evolve", "(", "pop_size", "=", "pop_size", ",", "max_evaluations", "=", "nsteps", ",", "maximize", "=", "True", ",", "seeds", "=", "seeds", ",", "gaussian_stdev", "=", "0.01", ",", "#mutation_rate=0.3,", "bounder", "=", "bounder", ",", "generator", "=", "generator", ",", "evaluator", "=", "fitness", ",", ")", "best", "=", "max", "(", "final_pop", ")", "seeds", "=", "[", "c", ".", "candidate", "for", "c", "in", "ea", ".", "archive", "]", "print", "'final 
candidate:'", ",", "best", "return", "{", "'start'", ":", "best", ".", "candidate", ",", "'value'", ":", "best", ".", "fitness", ",", "'seeds'", ":", "seeds", ",", "'method'", ":", "'DE'", "}" ]
11b721ea001625ad7820f71ff684723c71216646
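A hypothetical driver for the de() optimizer above: it requires inspyred and a Python 2 interpreter (the module uses print statements), and the transform/loglikelihood/prior callables below are invented toy examples rather than anything shipped with jbopt.

    # Toy driver for jbopt.de.de(); all three callables are illustrative only.
    from jbopt.de import de

    def transform(cube):        # map the unit cube to parameter space
        return [20.0 * u - 10.0 for u in cube]

    def loglikelihood(params):  # toy Gaussian log-likelihood centred on 0
        return -0.5 * sum(p ** 2 for p in params)

    def prior(params):          # improper flat prior
        return 0.0

    result = de('toyrun', ['a', 'b'], transform, loglikelihood, prior,
                nsteps=2000, seed=42)
    # result carries 'start' (best candidate), 'value' (best fitness), 'seeds'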
valid
Preprocessor.process_macros
Replace macros with content defined in the config. :param content: Markdown content :returns: Markdown content without macros
foliant/preprocessors/macros.py
def process_macros(self, content: str) -> str: '''Replace macros with content defined in the config. :param content: Markdown content :returns: Markdown content without macros ''' def _sub(macro): name = macro.group('body') params = self.get_options(macro.group('options')) return self.options['macros'].get(name, '').format_map(params) return self.pattern.sub(_sub, content)
def process_macros(self, content: str) -> str: '''Replace macros with content defined in the config. :param content: Markdown content :returns: Markdown content without macros ''' def _sub(macro): name = macro.group('body') params = self.get_options(macro.group('options')) return self.options['macros'].get(name, '').format_map(params) return self.pattern.sub(_sub, content)
[ "Replace", "macros", "with", "content", "defined", "in", "the", "config", "." ]
foliant-docs/foliantcontrib.macros
python
https://github.com/foliant-docs/foliantcontrib.macros/blob/0332dcd7c2b32be72fdf710a012096db1ee83a51/foliant/preprocessors/macros.py#L10-L24
[ "def", "process_macros", "(", "self", ",", "content", ":", "str", ")", "->", "str", ":", "def", "_sub", "(", "macro", ")", ":", "name", "=", "macro", ".", "group", "(", "'body'", ")", "params", "=", "self", ".", "get_options", "(", "macro", ".", "group", "(", "'options'", ")", ")", "return", "self", ".", "options", "[", "'macros'", "]", ".", "get", "(", "name", ",", "''", ")", ".", "format_map", "(", "params", ")", "return", "self", ".", "pattern", ".", "sub", "(", "_sub", ",", "content", ")" ]
0332dcd7c2b32be72fdf710a012096db1ee83a51
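process_macros substitutes configured macro bodies into Markdown using str.format_map. The standalone sketch below reproduces the idea with a simplified tag pattern; the regex and macro names are stand-ins, not foliant's actual ones.

    # Simplified stand-in for the preprocessor's tag pattern and options parser.
    import re

    MACROS = {'version_note': 'This build targets version {version}.'}
    PATTERN = re.compile(r'<macro name="(?P<body>\w+)" (?P<options>[^>]*)/>')

    def expand(content):
        def _sub(match):
            params = dict(re.findall(r'(\w+)="([^"]*)"', match.group('options')))
            return MACROS.get(match.group('body'), '').format_map(params)
        return PATTERN.sub(_sub, content)

    print(expand('Intro. <macro name="version_note" version="1.4"/> Outro.'))
    # Intro. This build targets version 1.4. Outro.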
valid
MarketClient._request
Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses
cbexchange/market.py
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) response = get(uri, params=kwargs.get('params', None)) return self._handle_response(response).json()
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) response = get(uri, params=kwargs.get('params', None)) return self._handle_response(response).json()
[ "Sends", "an", "HTTP", "request", "to", "the", "REST", "API", "and", "receives", "the", "requested", "data", "." ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/market.py#L23-L35
[ "def", "_request", "(", "self", ",", "method", ",", "*", "relative_path_parts", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "self", ".", "_create_api_uri", "(", "*", "relative_path_parts", ")", "response", "=", "get", "(", "uri", ",", "params", "=", "kwargs", ".", "get", "(", "'params'", ",", "None", ")", ")", "return", "self", ".", "_handle_response", "(", "response", ")", ".", "json", "(", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
MarketClient.get_historic_trades
`<https://docs.exchange.coinbase.com/#get-historic-rates>`_ :param start: either datetime.datetime or str in ISO 8601 :param end: either datetime.datetime or str in ISO 8601 :param int granularity: desired timeslice in seconds :returns: desired data
cbexchange/market.py
def get_historic_trades(self, start, end, granularity, product_id='BTC-USD'): """`<https://docs.exchange.coinbase.com/#get-historic-rates>`_ :param start: either datetime.datetime or str in ISO 8601 :param end: either datetime.datetime or str in ISO 8601 :pram int granularity: desired timeslice in seconds :returns: desired data """ params = { 'start':self._format_iso_time(start), 'end':self._format_iso_time(end), 'granularity':granularity } return self._get('products', product_id, 'candles', params=params)
def get_historic_trades(self, start, end, granularity, product_id='BTC-USD'): """`<https://docs.exchange.coinbase.com/#get-historic-rates>`_ :param start: either datetime.datetime or str in ISO 8601 :param end: either datetime.datetime or str in ISO 8601 :pram int granularity: desired timeslice in seconds :returns: desired data """ params = { 'start':self._format_iso_time(start), 'end':self._format_iso_time(end), 'granularity':granularity } return self._get('products', product_id, 'candles', params=params)
[ "<https", ":", "//", "docs", ".", "exchange", ".", "coinbase", ".", "com", "/", "#get", "-", "historic", "-", "rates", ">", "_" ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/market.py#L53-L67
[ "def", "get_historic_trades", "(", "self", ",", "start", ",", "end", ",", "granularity", ",", "product_id", "=", "'BTC-USD'", ")", ":", "params", "=", "{", "'start'", ":", "self", ".", "_format_iso_time", "(", "start", ")", ",", "'end'", ":", "self", ".", "_format_iso_time", "(", "end", ")", ",", "'granularity'", ":", "granularity", "}", "return", "self", ".", "_get", "(", "'products'", ",", "product_id", ",", "'candles'", ",", "params", "=", "params", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
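A usage sketch for get_historic_trades, assuming the package is importable as cbexchange.market and that MarketClient can be constructed without arguments; granularity is the candle width in seconds.

    # Usage sketch; import path and zero-argument constructor are assumptions.
    from datetime import datetime, timedelta
    from cbexchange.market import MarketClient

    client = MarketClient()
    end = datetime.utcnow()
    start = end - timedelta(hours=6)
    candles = client.get_historic_trades(start, end, granularity=300,
                                         product_id='BTC-USD')
    print(len(candles))  # number of 5-minute candles returned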
valid
MarketPaginationClient._request
Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses
cbexchange/market.py
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) response = get(uri, params=self._get_params(**kwargs)) self.is_initial = False self.before_cursor = response.headers.get('cb-before', None) self.after_cursor = response.headers.get('cb-after', None) return self._handle_response(response).json()
def _request(self, method, *relative_path_parts, **kwargs): """Sends an HTTP request to the REST API and receives the requested data. Additionally sets up pagination cursors. :param str method: HTTP method name :param relative_path_parts: the relative paths for the request URI :param kwargs: argument keywords :returns: requested data :raises APIError: for non-2xx responses """ uri = self._create_api_uri(*relative_path_parts) response = get(uri, params=self._get_params(**kwargs)) self.is_initial = False self.before_cursor = response.headers.get('cb-before', None) self.after_cursor = response.headers.get('cb-after', None) return self._handle_response(response).json()
[ "Sends", "an", "HTTP", "request", "to", "the", "REST", "API", "and", "receives", "the", "requested", "data", ".", "Additionally", "sets", "up", "pagination", "cursors", "." ]
agsimeonov/cbexchange
python
https://github.com/agsimeonov/cbexchange/blob/e3762f77583f89cf7b4f501ab3c7675fc7d30ab3/cbexchange/market.py#L82-L98
[ "def", "_request", "(", "self", ",", "method", ",", "*", "relative_path_parts", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "self", ".", "_create_api_uri", "(", "*", "relative_path_parts", ")", "response", "=", "get", "(", "uri", ",", "params", "=", "self", ".", "_get_params", "(", "*", "*", "kwargs", ")", ")", "self", ".", "is_initial", "=", "False", "self", ".", "before_cursor", "=", "response", ".", "headers", ".", "get", "(", "'cb-before'", ",", "None", ")", "self", ".", "after_cursor", "=", "response", ".", "headers", ".", "get", "(", "'cb-after'", ",", "None", ")", "return", "self", ".", "_handle_response", "(", "response", ")", ".", "json", "(", ")" ]
e3762f77583f89cf7b4f501ab3c7675fc7d30ab3
valid
get_unique_pathname
Return a pathname possibly with a number appended to it so that it is unique in the directory.
jaraco/path.py
def get_unique_pathname(path, root=''): """Return a pathname possibly with a number appended to it so that it is unique in the directory.""" path = os.path.join(root, path) # consider the path supplied, then the paths with numbers appended potentialPaths = itertools.chain((path,), __get_numbered_paths(path)) potentialPaths = six.moves.filterfalse(os.path.exists, potentialPaths) return next(potentialPaths)
def get_unique_pathname(path, root=''): """Return a pathname possibly with a number appended to it so that it is unique in the directory.""" path = os.path.join(root, path) # consider the path supplied, then the paths with numbers appended potentialPaths = itertools.chain((path,), __get_numbered_paths(path)) potentialPaths = six.moves.filterfalse(os.path.exists, potentialPaths) return next(potentialPaths)
[ "Return", "a", "pathname", "possibly", "with", "a", "number", "appended", "to", "it", "so", "that", "it", "is", "unique", "in", "the", "directory", "." ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L29-L36
[ "def", "get_unique_pathname", "(", "path", ",", "root", "=", "''", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "root", ",", "path", ")", "# consider the path supplied, then the paths with numbers appended\r", "potentialPaths", "=", "itertools", ".", "chain", "(", "(", "path", ",", ")", ",", "__get_numbered_paths", "(", "path", ")", ")", "potentialPaths", "=", "six", ".", "moves", ".", "filterfalse", "(", "os", ".", "path", ".", "exists", ",", "potentialPaths", ")", "return", "next", "(", "potentialPaths", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
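A usage sketch for get_unique_pathname, assuming jaraco.path is installed: when the requested name already exists, the first free numbered variant is proposed.

    # In a fresh directory containing report.txt, the helper proposes
    # 'report (1).txt' (numbers go before the extension).
    import os, tempfile
    from jaraco.path import get_unique_pathname

    root = tempfile.mkdtemp()
    open(os.path.join(root, 'report.txt'), 'w').close()
    print(get_unique_pathname('report.txt', root))  # .../report (1).txt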
valid
__get_numbered_paths
Append numbers in sequential order to the filename or folder name Numbers should be appended before the extension on a filename.
jaraco/path.py
def __get_numbered_paths(filepath): """Append numbers in sequential order to the filename or folder name Numbers should be appended before the extension on a filename.""" format = '%s (%%d)%s' % splitext_files_only(filepath) return map(lambda n: format % n, itertools.count(1))
def __get_numbered_paths(filepath): """Append numbers in sequential order to the filename or folder name Numbers should be appended before the extension on a filename.""" format = '%s (%%d)%s' % splitext_files_only(filepath) return map(lambda n: format % n, itertools.count(1))
[ "Append", "numbers", "in", "sequential", "order", "to", "the", "filename", "or", "folder", "name", "Numbers", "should", "be", "appended", "before", "the", "extension", "on", "a", "filename", "." ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L39-L43
[ "def", "__get_numbered_paths", "(", "filepath", ")", ":", "format", "=", "'%s (%%d)%s'", "%", "splitext_files_only", "(", "filepath", ")", "return", "map", "(", "lambda", "n", ":", "format", "%", "n", ",", "itertools", ".", "count", "(", "1", ")", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
valid
splitext_files_only
Custom version of splitext that doesn't perform splitext on directories
jaraco/path.py
def splitext_files_only(filepath): "Custom version of splitext that doesn't perform splitext on directories" return ( (filepath, '') if os.path.isdir(filepath) else os.path.splitext(filepath) )
def splitext_files_only(filepath): "Custom version of splitext that doesn't perform splitext on directories" return ( (filepath, '') if os.path.isdir(filepath) else os.path.splitext(filepath) )
[ "Custom", "version", "of", "splitext", "that", "doesn", "t", "perform", "splitext", "on", "directories" ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L46-L50
[ "def", "splitext_files_only", "(", "filepath", ")", ":", "return", "(", "(", "filepath", ",", "''", ")", "if", "os", ".", "path", ".", "isdir", "(", "filepath", ")", "else", "os", ".", "path", ".", "splitext", "(", "filepath", ")", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
valid
set_time
Set the modified time of a file
jaraco/path.py
def set_time(filename, mod_time): """ Set the modified time of a file """ log.debug('Setting modified time to %s', mod_time) mtime = calendar.timegm(mod_time.utctimetuple()) # utctimetuple discards microseconds, so restore it (for consistency) mtime += mod_time.microsecond / 1000000 atime = os.stat(filename).st_atime os.utime(filename, (atime, mtime))
def set_time(filename, mod_time): """ Set the modified time of a file """ log.debug('Setting modified time to %s', mod_time) mtime = calendar.timegm(mod_time.utctimetuple()) # utctimetuple discards microseconds, so restore it (for consistency) mtime += mod_time.microsecond / 1000000 atime = os.stat(filename).st_atime os.utime(filename, (atime, mtime))
[ "Set", "the", "modified", "time", "of", "a", "file" ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L53-L62
[ "def", "set_time", "(", "filename", ",", "mod_time", ")", ":", "log", ".", "debug", "(", "'Setting modified time to %s'", ",", "mod_time", ")", "mtime", "=", "calendar", ".", "timegm", "(", "mod_time", ".", "utctimetuple", "(", ")", ")", "# utctimetuple discards microseconds, so restore it (for consistency)\r", "mtime", "+=", "mod_time", ".", "microsecond", "/", "1000000", "atime", "=", "os", ".", "stat", "(", "filename", ")", ".", "st_atime", "os", ".", "utime", "(", "filename", ",", "(", "atime", ",", "mtime", ")", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
valid
get_time
Get the modified time for a file as a datetime instance
jaraco/path.py
def get_time(filename): """ Get the modified time for a file as a datetime instance """ ts = os.stat(filename).st_mtime return datetime.datetime.utcfromtimestamp(ts)
def get_time(filename): """ Get the modified time for a file as a datetime instance """ ts = os.stat(filename).st_mtime return datetime.datetime.utcfromtimestamp(ts)
[ "Get", "the", "modified", "time", "for", "a", "file", "as", "a", "datetime", "instance" ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L65-L70
[ "def", "get_time", "(", "filename", ")", ":", "ts", "=", "os", ".", "stat", "(", "filename", ")", ".", "st_mtime", "return", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "ts", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
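set_time and get_time round-trip a file's modification time through UTC. A short usage sketch, assuming jaraco.path is installed:

    # Stamp a temporary file with a known UTC mtime and read it back.
    import datetime, tempfile
    from jaraco.path import set_time, get_time

    path = tempfile.mkstemp()[1]
    set_time(path, datetime.datetime(2020, 1, 1, 12, 0, 0))
    print(get_time(path))  # 2020-01-01 12:00:00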
valid
insert_before_extension
Given a filename and some content, insert the content just before the extension. >>> insert_before_extension('pages.pdf', '-old') 'pages-old.pdf'
jaraco/path.py
def insert_before_extension(filename, content): """ Given a filename and some content, insert the content just before the extension. >>> insert_before_extension('pages.pdf', '-old') 'pages-old.pdf' """ parts = list(os.path.splitext(filename)) parts[1:1] = [content] return ''.join(parts)
def insert_before_extension(filename, content): """ Given a filename and some content, insert the content just before the extension. >>> insert_before_extension('pages.pdf', '-old') 'pages-old.pdf' """ parts = list(os.path.splitext(filename)) parts[1:1] = [content] return ''.join(parts)
[ "Given", "a", "filename", "and", "some", "content", "insert", "the", "content", "just", "before", "the", "extension", ".", ">>>", "insert_before_extension", "(", "pages", ".", "pdf", "-", "old", ")", "pages", "-", "old", ".", "pdf" ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L73-L83
[ "def", "insert_before_extension", "(", "filename", ",", "content", ")", ":", "parts", "=", "list", "(", "os", ".", "path", ".", "splitext", "(", "filename", ")", ")", "parts", "[", "1", ":", "1", "]", "=", "[", "content", "]", "return", "''", ".", "join", "(", "parts", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
valid
recursive_glob
Like iglob, but recurse directories >>> any('path.py' in result for result in recursive_glob('.', '*.py')) True >>> all(result.startswith('.') for result in recursive_glob('.', '*.py')) True >>> len(list(recursive_glob('.', '*.foo'))) 0
jaraco/path.py
def recursive_glob(root, spec): """ Like iglob, but recurse directories >>> any('path.py' in result for result in recursive_glob('.', '*.py')) True >>> all(result.startswith('.') for result in recursive_glob('.', '*.py')) True >>> len(list(recursive_glob('.', '*.foo'))) 0 """ specs = ( os.path.join(dirpath, dirname, spec) for dirpath, dirnames, filenames in os.walk(root) for dirname in dirnames ) return itertools.chain.from_iterable( glob.iglob(spec) for spec in specs )
def recursive_glob(root, spec): """ Like iglob, but recurse directories >>> any('path.py' in result for result in recursive_glob('.', '*.py')) True >>> all(result.startswith('.') for result in recursive_glob('.', '*.py')) True >>> len(list(recursive_glob('.', '*.foo'))) 0 """ specs = ( os.path.join(dirpath, dirname, spec) for dirpath, dirnames, filenames in os.walk(root) for dirname in dirnames ) return itertools.chain.from_iterable( glob.iglob(spec) for spec in specs )
[ "Like", "iglob", "but", "recurse", "directories", ">>>", "any", "(", "path", ".", "py", "in", "result", "for", "result", "in", "recursive_glob", "(", ".", "*", ".", "py", "))", "True", ">>>", "all", "(", "result", ".", "startswith", "(", ".", ")", "for", "result", "in", "recursive_glob", "(", ".", "*", ".", "py", "))", "True", ">>>", "len", "(", "list", "(", "recursive_glob", "(", ".", "*", ".", "foo", ")))", "0" ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L131-L154
[ "def", "recursive_glob", "(", "root", ",", "spec", ")", ":", "specs", "=", "(", "os", ".", "path", ".", "join", "(", "dirpath", ",", "dirname", ",", "spec", ")", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "root", ")", "for", "dirname", "in", "dirnames", ")", "return", "itertools", ".", "chain", ".", "from_iterable", "(", "glob", ".", "iglob", "(", "spec", ")", "for", "spec", "in", "specs", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
valid
encode
Encode the name for a suitable name in the given filesystem >>> encode('Test :1') 'Test _1'
jaraco/path.py
def encode(name, system='NTFS'): """ Encode the name for a suitable name in the given filesystem >>> encode('Test :1') 'Test _1' """ assert system == 'NTFS', 'unsupported filesystem' special_characters = r'<>:"/\|?*' + ''.join(map(chr, range(32))) pattern = '|'.join(map(re.escape, special_characters)) pattern = re.compile(pattern) return pattern.sub('_', name)
def encode(name, system='NTFS'): """ Encode the name for a suitable name in the given filesystem >>> encode('Test :1') 'Test _1' """ assert system == 'NTFS', 'unsupported filesystem' special_characters = r'<>:"/\|?*' + ''.join(map(chr, range(32))) pattern = '|'.join(map(re.escape, special_characters)) pattern = re.compile(pattern) return pattern.sub('_', name)
[ "Encode", "the", "name", "for", "a", "suitable", "name", "in", "the", "given", "filesystem", ">>>", "encode", "(", "Test", ":", "1", ")", "Test", "_1" ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L157-L167
[ "def", "encode", "(", "name", ",", "system", "=", "'NTFS'", ")", ":", "assert", "system", "==", "'NTFS'", ",", "'unsupported filesystem'", "special_characters", "=", "r'<>:\"/\\|?*'", "+", "''", ".", "join", "(", "map", "(", "chr", ",", "range", "(", "32", ")", ")", ")", "pattern", "=", "'|'", ".", "join", "(", "map", "(", "re", ".", "escape", ",", "special_characters", ")", ")", "pattern", "=", "re", ".", "compile", "(", "pattern", ")", "return", "pattern", ".", "sub", "(", "'_'", ",", "name", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
valid
ensure_dir_exists
wrap a function that returns a dir, making sure it exists
jaraco/path.py
def ensure_dir_exists(func): "wrap a function that returns a dir, making sure it exists" @functools.wraps(func) def make_if_not_present(): dir = func() if not os.path.isdir(dir): os.makedirs(dir) return dir return make_if_not_present
def ensure_dir_exists(func): "wrap a function that returns a dir, making sure it exists" @functools.wraps(func) def make_if_not_present(): dir = func() if not os.path.isdir(dir): os.makedirs(dir) return dir return make_if_not_present
[ "wrap", "a", "function", "that", "returns", "a", "dir", "making", "sure", "it", "exists" ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L229-L237
[ "def", "ensure_dir_exists", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "make_if_not_present", "(", ")", ":", "dir", "=", "func", "(", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "dir", ")", ":", "os", ".", "makedirs", "(", "dir", ")", "return", "dir", "return", "make_if_not_present" ]
39e4da09f325382e21b0917b1b5cd027edce8728
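ensure_dir_exists is a decorator for zero-argument functions that return a directory path. A usage sketch with a hypothetical cache_dir function:

    # The decorated function's directory is created on demand if missing.
    import os
    from jaraco.path import ensure_dir_exists

    @ensure_dir_exists
    def cache_dir():
        return os.path.expanduser('~/.cache/myapp')  # hypothetical location

    print(os.path.isdir(cache_dir()))  # True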
valid
read_chunks
Read file in chunks of size chunk_size (or smaller). If update_func is specified, call it on every chunk with the amount read.
jaraco/path.py
def read_chunks(file, chunk_size=2048, update_func=lambda x: None): """ Read file in chunks of size chunk_size (or smaller). If update_func is specified, call it on every chunk with the amount read. """ while(True): res = file.read(chunk_size) if not res: break update_func(len(res)) yield res
def read_chunks(file, chunk_size=2048, update_func=lambda x: None): """ Read file in chunks of size chunk_size (or smaller). If update_func is specified, call it on every chunk with the amount read. """ while(True): res = file.read(chunk_size) if not res: break update_func(len(res)) yield res
[ "Read", "file", "in", "chunks", "of", "size", "chunk_size", "(", "or", "smaller", ")", ".", "If", "update_func", "is", "specified", "call", "it", "on", "every", "chunk", "with", "the", "amount", "read", "." ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L240-L251
[ "def", "read_chunks", "(", "file", ",", "chunk_size", "=", "2048", ",", "update_func", "=", "lambda", "x", ":", "None", ")", ":", "while", "(", "True", ")", ":", "res", "=", "file", ".", "read", "(", "chunk_size", ")", "if", "not", "res", ":", "break", "update_func", "(", "len", "(", "res", ")", ")", "yield", "res" ]
39e4da09f325382e21b0917b1b5cd027edce8728
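read_chunks streams a file in fixed-size pieces and reports progress through update_func. A usage sketch with placeholder filenames:

    # Copy a file in 64 KiB chunks while accumulating a byte count.
    from jaraco.path import read_chunks

    copied = []
    with open('source.bin', 'rb') as src, open('dest.bin', 'wb') as dst:
        for chunk in read_chunks(src, chunk_size=65536, update_func=copied.append):
            dst.write(chunk)
    print('%d bytes copied' % sum(copied))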
valid
is_hidden
Check whether a file is presumed hidden, either because the pathname starts with dot or because the platform indicates such.
jaraco/path.py
def is_hidden(path): """ Check whether a file is presumed hidden, either because the pathname starts with dot or because the platform indicates such. """ full_path = os.path.abspath(path) name = os.path.basename(full_path) def no(path): return False platform_hidden = globals().get('is_hidden_' + platform.system(), no) return name.startswith('.') or platform_hidden(full_path)
def is_hidden(path): """ Check whether a file is presumed hidden, either because the pathname starts with dot or because the platform indicates such. """ full_path = os.path.abspath(path) name = os.path.basename(full_path) def no(path): return False platform_hidden = globals().get('is_hidden_' + platform.system(), no) return name.startswith('.') or platform_hidden(full_path)
[ "Check", "whether", "a", "file", "is", "presumed", "hidden", "either", "because", "the", "pathname", "starts", "with", "dot", "or", "because", "the", "platform", "indicates", "such", "." ]
jaraco/jaraco.path
python
https://github.com/jaraco/jaraco.path/blob/39e4da09f325382e21b0917b1b5cd027edce8728/jaraco/path.py#L254-L266
[ "def", "is_hidden", "(", "path", ")", ":", "full_path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "name", "=", "os", ".", "path", ".", "basename", "(", "full_path", ")", "def", "no", "(", "path", ")", ":", "return", "False", "platform_hidden", "=", "globals", "(", ")", ".", "get", "(", "'is_hidden_'", "+", "platform", ".", "system", "(", ")", ",", "no", ")", "return", "name", ".", "startswith", "(", "'.'", ")", "or", "platform_hidden", "(", "full_path", ")" ]
39e4da09f325382e21b0917b1b5cd027edce8728
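An illustrative sketch for is_hidden above; with no platform-specific is_hidden_<System> helper defined in the calling module, only the leading-dot convention applies.

import os
import platform

def is_hidden(path):
    full_path = os.path.abspath(path)
    name = os.path.basename(full_path)
    def no(path):
        return False
    # falls back to the dot-file check when no is_hidden_<System> helper is defined
    platform_hidden = globals().get('is_hidden_' + platform.system(), no)
    return name.startswith('.') or platform_hidden(full_path)

print(is_hidden('.bashrc'))    # True  (leading dot)
print(is_hidden('notes.txt'))  # False (no platform helper defined in this snippet)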
valid
SerialReader.age
Get closer to your EOL
libardurep/serialreader.py
def age(self): """ Get closer to your EOL """ # 0 means this composer will never decompose if self.rounds == 1: self.do_run = False elif self.rounds > 1: self.rounds -= 1
def age(self): """ Get closer to your EOL """ # 0 means this composer will never decompose if self.rounds == 1: self.do_run = False elif self.rounds > 1: self.rounds -= 1
[ "Get", "closer", "to", "your", "EOL" ]
zwischenloesung/ardu-report-lib
python
https://github.com/zwischenloesung/ardu-report-lib/blob/51bd4a07e036065aafcb1273b151bea3fdfa50fa/libardurep/serialreader.py#L42-L50
[ "def", "age", "(", "self", ")", ":", "# 0 means this composer will never decompose", "if", "self", ".", "rounds", "==", "1", ":", "self", ".", "do_run", "=", "False", "elif", "self", ".", "rounds", ">", "1", ":", "self", ".", "rounds", "-=", "1" ]
51bd4a07e036065aafcb1273b151bea3fdfa50fa
valid
SerialReader.run
Open a connection over the serial line and receive data lines
libardurep/serialreader.py
def run(self): """ Open a connection over the serial line and receive data lines """ if not self.device: return try: data = "" while (self.do_run): try: if (self.device.inWaiting() > 1): l = self.device.readline()[:-2] l = l.decode("UTF-8") if (l == "["): # start recording data = "[" elif (l == "]") and (len(data) > 4) and (data[0] == "["): # now parse the input data = data + "]" self.store.register_json(data) self.age() elif (l[0:3] == " {"): # this is a data line data = data + " " + l else: # this is a slow interface - give it some time sleep(1) # then count down.. self.age() except (UnicodeDecodeError, ValueError): # only accepting unicode: throw away the whole bunch data = "" # and count down the exit condition self.age() except serial.serialutil.SerialException: print("Could not connect to the serial line at " + self.device_name)
def run(self): """ Open a connection over the serial line and receive data lines """ if not self.device: return try: data = "" while (self.do_run): try: if (self.device.inWaiting() > 1): l = self.device.readline()[:-2] l = l.decode("UTF-8") if (l == "["): # start recording data = "[" elif (l == "]") and (len(data) > 4) and (data[0] == "["): # now parse the input data = data + "]" self.store.register_json(data) self.age() elif (l[0:3] == " {"): # this is a data line data = data + " " + l else: # this is a slow interface - give it some time sleep(1) # then count down.. self.age() except (UnicodeDecodeError, ValueError): # only accepting unicode: throw away the whole bunch data = "" # and count down the exit condition self.age() except serial.serialutil.SerialException: print("Could not connect to the serial line at " + self.device_name)
[ "Open", "a", "connection", "over", "the", "serial", "line", "and", "receive", "data", "lines" ]
zwischenloesung/ardu-report-lib
python
https://github.com/zwischenloesung/ardu-report-lib/blob/51bd4a07e036065aafcb1273b151bea3fdfa50fa/libardurep/serialreader.py#L52-L89
[ "def", "run", "(", "self", ")", ":", "if", "not", "self", ".", "device", ":", "return", "try", ":", "data", "=", "\"\"", "while", "(", "self", ".", "do_run", ")", ":", "try", ":", "if", "(", "self", ".", "device", ".", "inWaiting", "(", ")", ">", "1", ")", ":", "l", "=", "self", ".", "device", ".", "readline", "(", ")", "[", ":", "-", "2", "]", "l", "=", "l", ".", "decode", "(", "\"UTF-8\"", ")", "if", "(", "l", "==", "\"[\"", ")", ":", "# start recording", "data", "=", "\"[\"", "elif", "(", "l", "==", "\"]\"", ")", "and", "(", "len", "(", "data", ")", ">", "4", ")", "and", "(", "data", "[", "0", "]", "==", "\"[\"", ")", ":", "# now parse the input", "data", "=", "data", "+", "\"]\"", "self", ".", "store", ".", "register_json", "(", "data", ")", "self", ".", "age", "(", ")", "elif", "(", "l", "[", "0", ":", "3", "]", "==", "\" {\"", ")", ":", "# this is a data line", "data", "=", "data", "+", "\" \"", "+", "l", "else", ":", "# this is a slow interface - give it some time", "sleep", "(", "1", ")", "# then count down..", "self", ".", "age", "(", ")", "except", "(", "UnicodeDecodeError", ",", "ValueError", ")", ":", "# only accepting unicode: throw away the whole bunch", "data", "=", "\"\"", "# and count down the exit condition", "self", ".", "age", "(", ")", "except", "serial", ".", "serialutil", ".", "SerialException", ":", "print", "(", "\"Could not connect to the serial line at \"", "+", "self", ".", "device_name", ")" ]
51bd4a07e036065aafcb1273b151bea3fdfa50fa
valid
ThreadCreator.append_main_thread
create & start main thread :return: None
threads_creator/entry.py
def append_main_thread(self): """create & start main thread :return: None """ thread = MainThread(main_queue=self.main_queue, main_spider=self.main_spider, branch_spider=self.branch_spider) thread.daemon = True thread.start()
def append_main_thread(self): """create & start main thread :return: None """ thread = MainThread(main_queue=self.main_queue, main_spider=self.main_spider, branch_spider=self.branch_spider) thread.daemon = True thread.start()
[ "create", "&", "start", "main", "thread" ]
ecmadao/threads-creator
python
https://github.com/ecmadao/threads-creator/blob/f081091425d4382e5e9776c395c20e1af2332657/threads_creator/entry.py#L51-L60
[ "def", "append_main_thread", "(", "self", ")", ":", "thread", "=", "MainThread", "(", "main_queue", "=", "self", ".", "main_queue", ",", "main_spider", "=", "self", ".", "main_spider", ",", "branch_spider", "=", "self", ".", "branch_spider", ")", "thread", ".", "daemon", "=", "True", "thread", ".", "start", "(", ")" ]
f081091425d4382e5e9776c395c20e1af2332657
valid
getTextFromNode
Scans through all children of node and gathers the text. If node has non-text child-nodes then NotTextNodeError is raised.
ambient/__init__.py
def getTextFromNode(node): """ Scans through all children of node and gathers the text. If node has non-text child-nodes then NotTextNodeError is raised. """ t = "" for n in node.childNodes: if n.nodeType == n.TEXT_NODE: t += n.nodeValue else: raise NotTextNodeError return t
def getTextFromNode(node): """ Scans through all children of node and gathers the text. If node has non-text child-nodes then NotTextNodeError is raised. """ t = "" for n in node.childNodes: if n.nodeType == n.TEXT_NODE: t += n.nodeValue else: raise NotTextNodeError return t
[ "Scans", "through", "all", "children", "of", "node", "and", "gathers", "the", "text", ".", "If", "node", "has", "non", "-", "text", "child", "-", "nodes", "then", "NotTextNodeError", "is", "raised", "." ]
praekelt/python-ambient
python
https://github.com/praekelt/python-ambient/blob/392d82a63445bcc48d2adcaab2a0cf2fb90abe7b/ambient/__init__.py#L54-L66
[ "def", "getTextFromNode", "(", "node", ")", ":", "t", "=", "\"\"", "for", "n", "in", "node", ".", "childNodes", ":", "if", "n", ".", "nodeType", "==", "n", ".", "TEXT_NODE", ":", "t", "+=", "n", ".", "nodeValue", "else", ":", "raise", "NotTextNodeError", "return", "t" ]
392d82a63445bcc48d2adcaab2a0cf2fb90abe7b
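An illustrative sketch for getTextFromNode above, using a minidom element with a single text child; NotTextNodeError lives elsewhere in ambient/__init__.py, so a stand-in exception is declared here to keep the snippet self-contained.

from xml.dom.minidom import parseString

class NotTextNodeError(Exception):
    # stand-in for the exception defined elsewhere in the original module
    pass

def getTextFromNode(node):
    t = ""
    for n in node.childNodes:
        if n.nodeType == n.TEXT_NODE:
            t += n.nodeValue
        else:
            raise NotTextNodeError
    return t

doc = parseString("<credits>42</credits>")
print(getTextFromNode(doc.documentElement))  # '42'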
valid
AmbientSMS.getbalance
Get the number of credits remaining at AmbientSMS
ambient/__init__.py
def getbalance(self, url='http://services.ambientmobile.co.za/credits'): """ Get the number of credits remaining at AmbientSMS """ postXMLList = [] postXMLList.append("<api-key>%s</api-key>" % self.api_key) postXMLList.append("<password>%s</password>" % self.password) postXML = '<sms>%s</sms>' % "".join(postXMLList) result = self.curl(url, postXML) if result.get("credits", None): return result["credits"] else: raise AmbientSMSError(result["status"])
def getbalance(self, url='http://services.ambientmobile.co.za/credits'): """ Get the number of credits remaining at AmbientSMS """ postXMLList = [] postXMLList.append("<api-key>%s</api-key>" % self.api_key) postXMLList.append("<password>%s</password>" % self.password) postXML = '<sms>%s</sms>' % "".join(postXMLList) result = self.curl(url, postXML) if result.get("credits", None): return result["credits"] else: raise AmbientSMSError(result["status"])
[ "Get", "the", "number", "of", "credits", "remaining", "at", "AmbientSMS" ]
praekelt/python-ambient
python
https://github.com/praekelt/python-ambient/blob/392d82a63445bcc48d2adcaab2a0cf2fb90abe7b/ambient/__init__.py#L123-L136
[ "def", "getbalance", "(", "self", ",", "url", "=", "'http://services.ambientmobile.co.za/credits'", ")", ":", "postXMLList", "=", "[", "]", "postXMLList", ".", "append", "(", "\"<api-key>%s</api-key>\"", "%", "self", ".", "api_key", ")", "postXMLList", ".", "append", "(", "\"<password>%s</password>\"", "%", "self", ".", "password", ")", "postXML", "=", "'<sms>%s</sms>'", "%", "\"\"", ".", "join", "(", "postXMLList", ")", "result", "=", "self", ".", "curl", "(", "url", ",", "postXML", ")", "if", "result", ".", "get", "(", "\"credits\"", ",", "None", ")", ":", "return", "result", "[", "\"credits\"", "]", "else", ":", "raise", "AmbientSMSError", "(", "result", "[", "\"status\"", "]", ")" ]
392d82a63445bcc48d2adcaab2a0cf2fb90abe7b
valid
AmbientSMS.sendmsg
Send a message via the AmbientSMS API server
ambient/__init__.py
def sendmsg(self, message, recipient_mobiles=[], url='http://services.ambientmobile.co.za/sms', concatenate_message=True, message_id=str(time()).replace(".", ""), reply_path=None, allow_duplicates=True, allow_invalid_numbers=True, ): """ Send a mesage via the AmbientSMS API server """ if not recipient_mobiles or not(isinstance(recipient_mobiles, list) \ or isinstance(recipient_mobiles, tuple)): raise AmbientSMSError("Missing recipients") if not message or not len(message): raise AmbientSMSError("Missing message") postXMLList = [] postXMLList.append("<api-key>%s</api-key>" % self.api_key) postXMLList.append("<password>%s</password>" % self.password) postXMLList.append("<recipients>%s</recipients>" % \ "".join(["<mobile>%s</mobile>" % \ m for m in recipient_mobiles])) postXMLList.append("<msg>%s</msg>" % message) postXMLList.append("<concat>%s</concat>" % \ (1 if concatenate_message else 0)) postXMLList.append("<message_id>%s</message_id>" % message_id) postXMLList.append("<allow_duplicates>%s</allow_duplicates>" % \ (1 if allow_duplicates else 0)) postXMLList.append( "<allow_invalid_numbers>%s</allow_invalid_numbers>" % \ (1 if allow_invalid_numbers else 0) ) if reply_path: postXMLList.append("<reply_path>%s</reply_path>" % reply_path) postXML = '<sms>%s</sms>' % "".join(postXMLList) result = self.curl(url, postXML) status = result.get("status", None) if status and int(status) in [0, 1, 2]: return result else: raise AmbientSMSError(int(status))
def sendmsg(self, message, recipient_mobiles=[], url='http://services.ambientmobile.co.za/sms', concatenate_message=True, message_id=str(time()).replace(".", ""), reply_path=None, allow_duplicates=True, allow_invalid_numbers=True, ): """ Send a mesage via the AmbientSMS API server """ if not recipient_mobiles or not(isinstance(recipient_mobiles, list) \ or isinstance(recipient_mobiles, tuple)): raise AmbientSMSError("Missing recipients") if not message or not len(message): raise AmbientSMSError("Missing message") postXMLList = [] postXMLList.append("<api-key>%s</api-key>" % self.api_key) postXMLList.append("<password>%s</password>" % self.password) postXMLList.append("<recipients>%s</recipients>" % \ "".join(["<mobile>%s</mobile>" % \ m for m in recipient_mobiles])) postXMLList.append("<msg>%s</msg>" % message) postXMLList.append("<concat>%s</concat>" % \ (1 if concatenate_message else 0)) postXMLList.append("<message_id>%s</message_id>" % message_id) postXMLList.append("<allow_duplicates>%s</allow_duplicates>" % \ (1 if allow_duplicates else 0)) postXMLList.append( "<allow_invalid_numbers>%s</allow_invalid_numbers>" % \ (1 if allow_invalid_numbers else 0) ) if reply_path: postXMLList.append("<reply_path>%s</reply_path>" % reply_path) postXML = '<sms>%s</sms>' % "".join(postXMLList) result = self.curl(url, postXML) status = result.get("status", None) if status and int(status) in [0, 1, 2]: return result else: raise AmbientSMSError(int(status))
[ "Send", "a", "mesage", "via", "the", "AmbientSMS", "API", "server" ]
praekelt/python-ambient
python
https://github.com/praekelt/python-ambient/blob/392d82a63445bcc48d2adcaab2a0cf2fb90abe7b/ambient/__init__.py#L138-L185
[ "def", "sendmsg", "(", "self", ",", "message", ",", "recipient_mobiles", "=", "[", "]", ",", "url", "=", "'http://services.ambientmobile.co.za/sms'", ",", "concatenate_message", "=", "True", ",", "message_id", "=", "str", "(", "time", "(", ")", ")", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ",", "reply_path", "=", "None", ",", "allow_duplicates", "=", "True", ",", "allow_invalid_numbers", "=", "True", ",", ")", ":", "if", "not", "recipient_mobiles", "or", "not", "(", "isinstance", "(", "recipient_mobiles", ",", "list", ")", "or", "isinstance", "(", "recipient_mobiles", ",", "tuple", ")", ")", ":", "raise", "AmbientSMSError", "(", "\"Missing recipients\"", ")", "if", "not", "message", "or", "not", "len", "(", "message", ")", ":", "raise", "AmbientSMSError", "(", "\"Missing message\"", ")", "postXMLList", "=", "[", "]", "postXMLList", ".", "append", "(", "\"<api-key>%s</api-key>\"", "%", "self", ".", "api_key", ")", "postXMLList", ".", "append", "(", "\"<password>%s</password>\"", "%", "self", ".", "password", ")", "postXMLList", ".", "append", "(", "\"<recipients>%s</recipients>\"", "%", "\"\"", ".", "join", "(", "[", "\"<mobile>%s</mobile>\"", "%", "m", "for", "m", "in", "recipient_mobiles", "]", ")", ")", "postXMLList", ".", "append", "(", "\"<msg>%s</msg>\"", "%", "message", ")", "postXMLList", ".", "append", "(", "\"<concat>%s</concat>\"", "%", "(", "1", "if", "concatenate_message", "else", "0", ")", ")", "postXMLList", ".", "append", "(", "\"<message_id>%s</message_id>\"", "%", "message_id", ")", "postXMLList", ".", "append", "(", "\"<allow_duplicates>%s</allow_duplicates>\"", "%", "(", "1", "if", "allow_duplicates", "else", "0", ")", ")", "postXMLList", ".", "append", "(", "\"<allow_invalid_numbers>%s</allow_invalid_numbers>\"", "%", "(", "1", "if", "allow_invalid_numbers", "else", "0", ")", ")", "if", "reply_path", ":", "postXMLList", ".", "append", "(", "\"<reply_path>%s</reply_path>\"", "%", "reply_path", ")", "postXML", "=", "'<sms>%s</sms>'", "%", "\"\"", ".", "join", "(", "postXMLList", ")", "result", "=", "self", ".", "curl", "(", "url", ",", "postXML", ")", "status", "=", "result", ".", "get", "(", "\"status\"", ",", "None", ")", "if", "status", "and", "int", "(", "status", ")", "in", "[", "0", ",", "1", ",", "2", "]", ":", "return", "result", "else", ":", "raise", "AmbientSMSError", "(", "int", "(", "status", ")", ")" ]
392d82a63445bcc48d2adcaab2a0cf2fb90abe7b
valid
AmbientSMS.curl
Interface for sending web requests to the AmbientSMS API Server
ambient/__init__.py
def curl(self, url, post): """ Inteface for sending web requests to the AmbientSMS API Server """ try: req = urllib2.Request(url) req.add_header("Content-type", "application/xml") data = urllib2.urlopen(req, post.encode('utf-8')).read() except urllib2.URLError, v: raise AmbientSMSError(v) return dictFromXml(data)
def curl(self, url, post): """ Inteface for sending web requests to the AmbientSMS API Server """ try: req = urllib2.Request(url) req.add_header("Content-type", "application/xml") data = urllib2.urlopen(req, post.encode('utf-8')).read() except urllib2.URLError, v: raise AmbientSMSError(v) return dictFromXml(data)
[ "Inteface", "for", "sending", "web", "requests", "to", "the", "AmbientSMS", "API", "Server" ]
praekelt/python-ambient
python
https://github.com/praekelt/python-ambient/blob/392d82a63445bcc48d2adcaab2a0cf2fb90abe7b/ambient/__init__.py#L187-L197
[ "def", "curl", "(", "self", ",", "url", ",", "post", ")", ":", "try", ":", "req", "=", "urllib2", ".", "Request", "(", "url", ")", "req", ".", "add_header", "(", "\"Content-type\"", ",", "\"application/xml\"", ")", "data", "=", "urllib2", ".", "urlopen", "(", "req", ",", "post", ".", "encode", "(", "'utf-8'", ")", ")", ".", "read", "(", ")", "except", "urllib2", ".", "URLError", ",", "v", ":", "raise", "AmbientSMSError", "(", "v", ")", "return", "dictFromXml", "(", "data", ")" ]
392d82a63445bcc48d2adcaab2a0cf2fb90abe7b
valid
InterceptedCommand.execute
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason.
pip_services_commons/commands/InterceptedCommand.py
def execute(self, correlation_id, args): """ Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason. """ return self._intercepter.execute(_next, correlation_id, args)
def execute(self, correlation_id, args): """ Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason. """ return self._intercepter.execute(_next, correlation_id, args)
[ "Executes", "the", "command", "given", "specific", "arguments", "as", "an", "input", ".", "Args", ":", "correlation_id", ":", "a", "unique", "correlation", "/", "transaction", "id", "args", ":", "command", "arguments", "Returns", ":", "an", "execution", "result", ".", "Raises", ":", "MicroserviceError", ":", "when", "execution", "fails", "for", "whatever", "reason", "." ]
pip-services/pip-services-commons-python
python
https://github.com/pip-services/pip-services-commons-python/blob/2205b18c45c60372966c62c1f23ac4fbc31e11b3/pip_services_commons/commands/InterceptedCommand.py#L42-L55
[ "def", "execute", "(", "self", ",", "correlation_id", ",", "args", ")", ":", "return", "self", ".", "_intercepter", ".", "execute", "(", "_next", ",", "correlation_id", ",", "args", ")" ]
2205b18c45c60372966c62c1f23ac4fbc31e11b3
valid
DefaultMinifier.contents
Called for each file Must return file content Can be wrapped :type f: static_bundle.files.StaticFileResult :type text: str|unicode :rtype: str|unicode
static_bundle/minifiers.py
def contents(self, f, text): """ Called for each file Must return file content Can be wrapped :type f: static_bundle.files.StaticFileResult :type text: str|unicode :rtype: str|unicode """ text += self._read(f.abs_path) + "\r\n" return text
def contents(self, f, text): """ Called for each file Must return file content Can be wrapped :type f: static_bundle.files.StaticFileResult :type text: str|unicode :rtype: str|unicode """ text += self._read(f.abs_path) + "\r\n" return text
[ "Called", "for", "each", "file", "Must", "return", "file", "content", "Can", "be", "wrapped" ]
Rikanishu/static-bundle
python
https://github.com/Rikanishu/static-bundle/blob/2f6458cb9d9d9049b4fd829f7d6951a45d547c68/static_bundle/minifiers.py#L34-L45
[ "def", "contents", "(", "self", ",", "f", ",", "text", ")", ":", "text", "+=", "self", ".", "_read", "(", "f", ".", "abs_path", ")", "+", "\"\\r\\n\"", "return", "text" ]
2f6458cb9d9d9049b4fd829f7d6951a45d547c68
valid
is_date_type
Return True if the class is a date type.
era.py
def is_date_type(cls): """Return True if the class is a date type.""" if not isinstance(cls, type): return False return issubclass(cls, date) and not issubclass(cls, datetime)
def is_date_type(cls): """Return True if the class is a date type.""" if not isinstance(cls, type): return False return issubclass(cls, date) and not issubclass(cls, datetime)
[ "Return", "True", "if", "the", "class", "is", "a", "date", "type", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L76-L80
[ "def", "is_date_type", "(", "cls", ")", ":", "if", "not", "isinstance", "(", "cls", ",", "type", ")", ":", "return", "False", "return", "issubclass", "(", "cls", ",", "date", ")", "and", "not", "issubclass", "(", "cls", ",", "datetime", ")" ]
73994c82360e65a983c803b1182892e2138320b2
valid
to_datetime
Convert a date or time to a datetime. If when is a date then it sets the time to midnight. If when is a time it sets the date to the epoch. If when is None or a datetime it returns when. Otherwise a TypeError is raised. Returned datetimes have tzinfo set to None unless when is a datetime with tzinfo set in which case it remains the same.
era.py
def to_datetime(when): """ Convert a date or time to a datetime. If when is a date then it sets the time to midnight. If when is a time it sets the date to the epoch. If when is None or a datetime it returns when. Otherwise a TypeError is raised. Returned datetimes have tzinfo set to None unless when is a datetime with tzinfo set in which case it remains the same. """ if when is None or is_datetime(when): return when if is_time(when): return datetime.combine(epoch.date(), when) if is_date(when): return datetime.combine(when, time(0)) raise TypeError("unable to convert {} to datetime".format(when.__class__.__name__))
def to_datetime(when): """ Convert a date or time to a datetime. If when is a date then it sets the time to midnight. If when is a time it sets the date to the epoch. If when is None or a datetime it returns when. Otherwise a TypeError is raised. Returned datetimes have tzinfo set to None unless when is a datetime with tzinfo set in which case it remains the same. """ if when is None or is_datetime(when): return when if is_time(when): return datetime.combine(epoch.date(), when) if is_date(when): return datetime.combine(when, time(0)) raise TypeError("unable to convert {} to datetime".format(when.__class__.__name__))
[ "Convert", "a", "date", "or", "time", "to", "a", "datetime", ".", "If", "when", "is", "a", "date", "then", "it", "sets", "the", "time", "to", "midnight", ".", "If", "when", "is", "a", "time", "it", "sets", "the", "date", "to", "the", "epoch", ".", "If", "when", "is", "None", "or", "a", "datetime", "it", "returns", "when", ".", "Otherwise", "a", "TypeError", "is", "raised", ".", "Returned", "datetimes", "have", "tzinfo", "set", "to", "None", "unless", "when", "is", "a", "datetime", "with", "tzinfo", "set", "in", "which", "case", "it", "remains", "the", "same", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L95-L108
[ "def", "to_datetime", "(", "when", ")", ":", "if", "when", "is", "None", "or", "is_datetime", "(", "when", ")", ":", "return", "when", "if", "is_time", "(", "when", ")", ":", "return", "datetime", ".", "combine", "(", "epoch", ".", "date", "(", ")", ",", "when", ")", "if", "is_date", "(", "when", ")", ":", "return", "datetime", ".", "combine", "(", "when", ",", "time", "(", "0", ")", ")", "raise", "TypeError", "(", "\"unable to convert {} to datetime\"", ".", "format", "(", "when", ".", "__class__", ".", "__name__", ")", ")" ]
73994c82360e65a983c803b1182892e2138320b2
valid
totz
Return a date, time, or datetime converted to a datetime in the given timezone. If when is a datetime and has no timezone it is assumed to be local time. Date and time objects are also assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to a datetime.
era.py
def totz(when, tz=None): """ Return a date, time, or datetime converted to a datetime in the given timezone. If when is a datetime and has no timezone it is assumed to be local time. Date and time objects are also assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to a datetime. """ if when is None: return None when = to_datetime(when) if when.tzinfo is None: when = when.replace(tzinfo=localtz) return when.astimezone(tz or utc)
def totz(when, tz=None): """ Return a date, time, or datetime converted to a datetime in the given timezone. If when is a datetime and has no timezone it is assumed to be local time. Date and time objects are also assumed to be UTC. The tz value defaults to UTC. Raise TypeError if when cannot be converted to a datetime. """ if when is None: return None when = to_datetime(when) if when.tzinfo is None: when = when.replace(tzinfo=localtz) return when.astimezone(tz or utc)
[ "Return", "a", "date", "time", "or", "datetime", "converted", "to", "a", "datetime", "in", "the", "given", "timezone", ".", "If", "when", "is", "a", "datetime", "and", "has", "no", "timezone", "it", "is", "assumed", "to", "be", "local", "time", ".", "Date", "and", "time", "objects", "are", "also", "assumed", "to", "be", "UTC", ".", "The", "tz", "value", "defaults", "to", "UTC", ".", "Raise", "TypeError", "if", "when", "cannot", "be", "converted", "to", "a", "datetime", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L121-L133
[ "def", "totz", "(", "when", ",", "tz", "=", "None", ")", ":", "if", "when", "is", "None", ":", "return", "None", "when", "=", "to_datetime", "(", "when", ")", "if", "when", ".", "tzinfo", "is", "None", ":", "when", "=", "when", ".", "replace", "(", "tzinfo", "=", "localtz", ")", "return", "when", ".", "astimezone", "(", "tz", "or", "utc", ")" ]
73994c82360e65a983c803b1182892e2138320b2
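A usage sketch for totz above, assuming the era module from zenreach/py-era is importable; the sample values are arbitrary and the results depend on the local timezone.

from datetime import date, datetime
import era

naive = datetime(2020, 6, 1, 12, 30)   # no tzinfo, so treated as local time
print(era.totz(naive))                 # the same instant expressed in UTC
print(era.totz(date(2020, 6, 1)))      # local midnight of that date, converted to UTC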
valid
timeago
Return a datetime so much time ago. Takes the same arguments as timedelta().
era.py
def timeago(tz=None, *args, **kwargs): """Return a datetime so much time ago. Takes the same arguments as timedelta().""" return totz(datetime.now(), tz) - timedelta(*args, **kwargs)
def timeago(tz=None, *args, **kwargs): """Return a datetime so much time ago. Takes the same arguments as timedelta().""" return totz(datetime.now(), tz) - timedelta(*args, **kwargs)
[ "Return", "a", "datetime", "so", "much", "time", "ago", ".", "Takes", "the", "same", "arguments", "as", "timedelta", "()", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L156-L158
[ "def", "timeago", "(", "tz", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "totz", "(", "datetime", ".", "now", "(", ")", ",", "tz", ")", "-", "timedelta", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
73994c82360e65a983c803b1182892e2138320b2
valid
ts
Return a Unix timestamp in seconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided.
era.py
def ts(when, tz=None): """ Return a Unix timestamp in seconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided. """ if not when: return None when = totz(when, tz) return calendar.timegm(when.timetuple())
def ts(when, tz=None): """ Return a Unix timestamp in seconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided. """ if not when: return None when = totz(when, tz) return calendar.timegm(when.timetuple())
[ "Return", "a", "Unix", "timestamp", "in", "seconds", "for", "the", "provided", "datetime", ".", "The", "totz", "function", "is", "called", "on", "the", "datetime", "to", "convert", "it", "to", "the", "provided", "timezone", ".", "It", "will", "be", "converted", "to", "UTC", "if", "no", "timezone", "is", "provided", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L161-L170
[ "def", "ts", "(", "when", ",", "tz", "=", "None", ")", ":", "if", "not", "when", ":", "return", "None", "when", "=", "totz", "(", "when", ",", "tz", ")", "return", "calendar", ".", "timegm", "(", "when", ".", "timetuple", "(", ")", ")" ]
73994c82360e65a983c803b1182892e2138320b2
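A usage sketch for ts above, assuming era is importable; a timezone-aware input makes the expected value easy to check by hand.

from datetime import datetime, timezone
import era

when = datetime(1970, 1, 2, tzinfo=timezone.utc)
print(era.ts(when))  # 86400 -- exactly one day after the Unix epoch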
valid
tsms
Return a Unix timestamp in milliseconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided.
era.py
def tsms(when, tz=None): """ Return a Unix timestamp in milliseconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided. """ if not when: return None when = totz(when, tz) return calendar.timegm(when.timetuple()) * 1000 + int(round(when.microsecond / 1000.0))
def tsms(when, tz=None): """ Return a Unix timestamp in milliseconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided. """ if not when: return None when = totz(when, tz) return calendar.timegm(when.timetuple()) * 1000 + int(round(when.microsecond / 1000.0))
[ "Return", "a", "Unix", "timestamp", "in", "milliseconds", "for", "the", "provided", "datetime", ".", "The", "totz", "function", "is", "called", "on", "the", "datetime", "to", "convert", "it", "to", "the", "provided", "timezone", ".", "It", "will", "be", "converted", "to", "UTC", "if", "no", "timezone", "is", "provided", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L173-L182
[ "def", "tsms", "(", "when", ",", "tz", "=", "None", ")", ":", "if", "not", "when", ":", "return", "None", "when", "=", "totz", "(", "when", ",", "tz", ")", "return", "calendar", ".", "timegm", "(", "when", ".", "timetuple", "(", ")", ")", "*", "1000", "+", "int", "(", "round", "(", "when", ".", "microsecond", "/", "1000.0", ")", ")" ]
73994c82360e65a983c803b1182892e2138320b2
valid
fromts
Return the datetime representation of the provided Unix timestamp. By default the timestamp is interpreted as UTC. If tzin is set it will be interpreted in that timezone instead. By default the output datetime will have UTC time. If tzout is set it will be converted to that timezone instead.
era.py
def fromts(ts, tzin=None, tzout=None): """ Return the datetime representation of the provided Unix timestamp. By defaults the timestamp is interpreted as UTC. If tzin is set it will be interpreted as this timestamp instead. By default the output datetime will have UTC time. If tzout is set it will be converted in this timezone instead. """ if ts is None: return None when = datetime.utcfromtimestamp(ts).replace(tzinfo=tzin or utc) return totz(when, tzout)
def fromts(ts, tzin=None, tzout=None): """ Return the datetime representation of the provided Unix timestamp. By defaults the timestamp is interpreted as UTC. If tzin is set it will be interpreted as this timestamp instead. By default the output datetime will have UTC time. If tzout is set it will be converted in this timezone instead. """ if ts is None: return None when = datetime.utcfromtimestamp(ts).replace(tzinfo=tzin or utc) return totz(when, tzout)
[ "Return", "the", "datetime", "representation", "of", "the", "provided", "Unix", "timestamp", ".", "By", "defaults", "the", "timestamp", "is", "interpreted", "as", "UTC", ".", "If", "tzin", "is", "set", "it", "will", "be", "interpreted", "as", "this", "timestamp", "instead", ".", "By", "default", "the", "output", "datetime", "will", "have", "UTC", "time", ".", "If", "tzout", "is", "set", "it", "will", "be", "converted", "in", "this", "timezone", "instead", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L185-L195
[ "def", "fromts", "(", "ts", ",", "tzin", "=", "None", ",", "tzout", "=", "None", ")", ":", "if", "ts", "is", "None", ":", "return", "None", "when", "=", "datetime", ".", "utcfromtimestamp", "(", "ts", ")", ".", "replace", "(", "tzinfo", "=", "tzin", "or", "utc", ")", "return", "totz", "(", "when", ",", "tzout", ")" ]
73994c82360e65a983c803b1182892e2138320b2
valid
fromtsms
Return the Unix timestamp in milliseconds as a datetime object. If tz is set it will be converted to the requested timezone otherwise it defaults to UTC.
era.py
def fromtsms(ts, tzin=None, tzout=None): """ Return the Unix timestamp in milliseconds as a datetime object. If tz is set it will be converted to the requested timezone otherwise it defaults to UTC. """ if ts is None: return None when = datetime.utcfromtimestamp(ts / 1000).replace(microsecond=ts % 1000 * 1000) when = when.replace(tzinfo=tzin or utc) return totz(when, tzout)
def fromtsms(ts, tzin=None, tzout=None): """ Return the Unix timestamp in milliseconds as a datetime object. If tz is set it will be converted to the requested timezone otherwise it defaults to UTC. """ if ts is None: return None when = datetime.utcfromtimestamp(ts / 1000).replace(microsecond=ts % 1000 * 1000) when = when.replace(tzinfo=tzin or utc) return totz(when, tzout)
[ "Return", "the", "Unix", "timestamp", "in", "milliseconds", "as", "a", "datetime", "object", ".", "If", "tz", "is", "set", "it", "will", "be", "converted", "to", "the", "requested", "timezone", "otherwise", "it", "defaults", "to", "UTC", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L198-L207
[ "def", "fromtsms", "(", "ts", ",", "tzin", "=", "None", ",", "tzout", "=", "None", ")", ":", "if", "ts", "is", "None", ":", "return", "None", "when", "=", "datetime", ".", "utcfromtimestamp", "(", "ts", "/", "1000", ")", ".", "replace", "(", "microsecond", "=", "ts", "%", "1000", "*", "1000", ")", "when", "=", "when", ".", "replace", "(", "tzinfo", "=", "tzin", "or", "utc", ")", "return", "totz", "(", "when", ",", "tzout", ")" ]
73994c82360e65a983c803b1182892e2138320b2
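A round-trip sketch for tsms/fromtsms above, assuming era is importable; the two functions should agree to the millisecond.

from datetime import datetime, timezone
import era

when = datetime(2021, 3, 4, 5, 6, 7, 891000, tzinfo=timezone.utc)
ms = era.tsms(when)
print(ms % 1000)         # 891 -- millisecond precision survives
print(era.fromtsms(ms))  # 2021-03-04 05:06:07.891000+00:00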
valid
truncate
Return the datetime truncated to the precision of the provided unit.
era.py
def truncate(when, unit, week_start=mon): """Return the datetime truncated to the precision of the provided unit.""" if is_datetime(when): if unit == millisecond: return when.replace(microsecond=int(round(when.microsecond / 1000.0)) * 1000) elif unit == second: return when.replace(microsecond=0) elif unit == minute: return when.replace(second=0, microsecond=0) elif unit == hour: return when.replace(minute=0, second=0, microsecond=0) elif unit == day: return when.replace(hour=0, minute=0, second=0, microsecond=0) elif unit == week: weekday = prevweekday(when, week_start) return when.replace(year=weekday.year, month=weekday.month, day=weekday.day, hour=0, minute=0, second=0, microsecond=0) elif unit == month: return when.replace(day=1, hour=0, minute=0, second=0, microsecond=0) elif unit == year: return when.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) elif is_date(when): if unit == week: return prevweekday(when, week_start) elif unit == month: return when.replace(day=1) elif unit == year: return when.replace(month=1, day=1) elif is_time(when): if unit == millisecond: return when.replace(microsecond=int(when.microsecond / 1000.0) * 1000) elif unit == second: return when.replace(microsecond=0) elif unit == minute: return when.replace(second=0, microsecond=0) return when
def truncate(when, unit, week_start=mon): """Return the datetime truncated to the precision of the provided unit.""" if is_datetime(when): if unit == millisecond: return when.replace(microsecond=int(round(when.microsecond / 1000.0)) * 1000) elif unit == second: return when.replace(microsecond=0) elif unit == minute: return when.replace(second=0, microsecond=0) elif unit == hour: return when.replace(minute=0, second=0, microsecond=0) elif unit == day: return when.replace(hour=0, minute=0, second=0, microsecond=0) elif unit == week: weekday = prevweekday(when, week_start) return when.replace(year=weekday.year, month=weekday.month, day=weekday.day, hour=0, minute=0, second=0, microsecond=0) elif unit == month: return when.replace(day=1, hour=0, minute=0, second=0, microsecond=0) elif unit == year: return when.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) elif is_date(when): if unit == week: return prevweekday(when, week_start) elif unit == month: return when.replace(day=1) elif unit == year: return when.replace(month=1, day=1) elif is_time(when): if unit == millisecond: return when.replace(microsecond=int(when.microsecond / 1000.0) * 1000) elif unit == second: return when.replace(microsecond=0) elif unit == minute: return when.replace(second=0, microsecond=0) return when
[ "Return", "the", "datetime", "truncated", "to", "the", "precision", "of", "the", "provided", "unit", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L210-L245
[ "def", "truncate", "(", "when", ",", "unit", ",", "week_start", "=", "mon", ")", ":", "if", "is_datetime", "(", "when", ")", ":", "if", "unit", "==", "millisecond", ":", "return", "when", ".", "replace", "(", "microsecond", "=", "int", "(", "round", "(", "when", ".", "microsecond", "/", "1000.0", ")", ")", "*", "1000", ")", "elif", "unit", "==", "second", ":", "return", "when", ".", "replace", "(", "microsecond", "=", "0", ")", "elif", "unit", "==", "minute", ":", "return", "when", ".", "replace", "(", "second", "=", "0", ",", "microsecond", "=", "0", ")", "elif", "unit", "==", "hour", ":", "return", "when", ".", "replace", "(", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "elif", "unit", "==", "day", ":", "return", "when", ".", "replace", "(", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "elif", "unit", "==", "week", ":", "weekday", "=", "prevweekday", "(", "when", ",", "week_start", ")", "return", "when", ".", "replace", "(", "year", "=", "weekday", ".", "year", ",", "month", "=", "weekday", ".", "month", ",", "day", "=", "weekday", ".", "day", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "elif", "unit", "==", "month", ":", "return", "when", ".", "replace", "(", "day", "=", "1", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "elif", "unit", "==", "year", ":", "return", "when", ".", "replace", "(", "month", "=", "1", ",", "day", "=", "1", ",", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "elif", "is_date", "(", "when", ")", ":", "if", "unit", "==", "week", ":", "return", "prevweekday", "(", "when", ",", "week_start", ")", "elif", "unit", "==", "month", ":", "return", "when", ".", "replace", "(", "day", "=", "1", ")", "elif", "unit", "==", "year", ":", "return", "when", ".", "replace", "(", "month", "=", "1", ",", "day", "=", "1", ")", "elif", "is_time", "(", "when", ")", ":", "if", "unit", "==", "millisecond", ":", "return", "when", ".", "replace", "(", "microsecond", "=", "int", "(", "when", ".", "microsecond", "/", "1000.0", ")", "*", "1000", ")", "elif", "unit", "==", "second", ":", "return", "when", ".", "replace", "(", "microsecond", "=", "0", ")", "elif", "unit", "==", "minute", ":", "return", "when", ".", "replace", "(", "second", "=", "0", ",", "microsecond", "=", "0", ")", "return", "when" ]
73994c82360e65a983c803b1182892e2138320b2
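A usage sketch for truncate above, assuming era is importable; hour, month and week are the module-level unit constants the function compares against.

from datetime import datetime
import era

when = datetime(2021, 3, 4, 5, 6, 7, 891234)
print(era.truncate(when, era.hour))   # 2021-03-04 05:00:00
print(era.truncate(when, era.month))  # 2021-03-01 00:00:00
print(era.truncate(when, era.week))   # 2021-03-01 00:00:00 (weeks start on Monday by default)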
valid
weekday
Return the date for the day of this week.
era.py
def weekday(when, weekday, start=mon): """Return the date for the day of this week.""" if isinstance(when, datetime): when = when.date() today = when.weekday() delta = weekday - today if weekday < start and today >= start: delta += 7 elif weekday >= start and today < start: delta -= 7 return when + timedelta(days=delta)
def weekday(when, weekday, start=mon): """Return the date for the day of this week.""" if isinstance(when, datetime): when = when.date() today = when.weekday() delta = weekday - today if weekday < start and today >= start: delta += 7 elif weekday >= start and today < start: delta -= 7 return when + timedelta(days=delta)
[ "Return", "the", "date", "for", "the", "day", "of", "this", "week", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L248-L259
[ "def", "weekday", "(", "when", ",", "weekday", ",", "start", "=", "mon", ")", ":", "if", "isinstance", "(", "when", ",", "datetime", ")", ":", "when", "=", "when", ".", "date", "(", ")", "today", "=", "when", ".", "weekday", "(", ")", "delta", "=", "weekday", "-", "today", "if", "weekday", "<", "start", "and", "today", ">=", "start", ":", "delta", "+=", "7", "elif", "weekday", ">=", "start", "and", "today", "<", "start", ":", "delta", "-=", "7", "return", "when", "+", "timedelta", "(", "days", "=", "delta", ")" ]
73994c82360e65a983c803b1182892e2138320b2
valid
prevweekday
Return the date for the most recent day of the week. If inclusive is True (the default) today may count as the weekday we're looking for.
era.py
def prevweekday(when, weekday, inclusive=True): """ Return the date for the most recent day of the week. If inclusive is True (the default) today may count as the weekday we're looking for. """ if isinstance(when, datetime): when = when.date() delta = weekday - when.weekday() if (inclusive and delta > 0) or (not inclusive and delta >= 0): delta -= 7 return when + timedelta(days=delta)
def prevweekday(when, weekday, inclusive=True): """ Return the date for the most recent day of the week. If inclusive is True (the default) today may count as the weekday we're looking for. """ if isinstance(when, datetime): when = when.date() delta = weekday - when.weekday() if (inclusive and delta > 0) or (not inclusive and delta >= 0): delta -= 7 return when + timedelta(days=delta)
[ "Return", "the", "date", "for", "the", "most", "recent", "day", "of", "the", "week", ".", "If", "inclusive", "is", "True", "(", "the", "default", ")", "today", "may", "count", "as", "the", "weekday", "we", "re", "looking", "for", "." ]
zenreach/py-era
python
https://github.com/zenreach/py-era/blob/73994c82360e65a983c803b1182892e2138320b2/era.py#L262-L272
[ "def", "prevweekday", "(", "when", ",", "weekday", ",", "inclusive", "=", "True", ")", ":", "if", "isinstance", "(", "when", ",", "datetime", ")", ":", "when", "=", "when", ".", "date", "(", ")", "delta", "=", "weekday", "-", "when", ".", "weekday", "(", ")", "if", "(", "inclusive", "and", "delta", ">", "0", ")", "or", "(", "not", "inclusive", "and", "delta", ">=", "0", ")", ":", "delta", "-=", "7", "return", "when", "+", "timedelta", "(", "days", "=", "delta", ")" ]
73994c82360e65a983c803b1182892e2138320b2
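A sketch of weekday/prevweekday above, assuming era is importable; era.mon is the module's Monday constant.

from datetime import date
import era

monday = date(2021, 3, 1)
print(era.prevweekday(monday, era.mon))                   # 2021-03-01 (inclusive by default)
print(era.prevweekday(monday, era.mon, inclusive=False))  # 2021-02-22 (the Monday before)
print(era.weekday(date(2021, 3, 4), era.mon))             # 2021-03-01 (Monday of that week)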
valid
opt_normalizations
**optimization algorithm for scale variables (positive value of unknown magnitude)** Each parameter is a normalization of a feature, and its value is sought. The parameters are handled in order (assumed to be independent), but a second round can be run. Various magnitudes of the normalization are tried. If the normalization converges to zero, the largest value yielding a comparable value is used. Optimizes each normalization parameter in rough steps using multiples of 3 of start point to find reasonable starting values for another algorithm. parameters, minimization function, parameter space definition [(lo, hi) for i in params] :param abandon_threshold: if in one direction the function increases by this much over the best value, abort search in this direction :param noimprovement_threshold: when decreasing the normalization, if the function increases by less than this amount, abort search in this direction :param disp: verbosity
jbopt/independent.py
def opt_normalizations(params, func, limits, abandon_threshold=100, noimprovement_threshold=1e-3, disp=0): """ **optimization algorithm for scale variables (positive value of unknown magnitude)** Each parameter is a normalization of a feature, and its value is sought. The parameters are handled in order (assumed to be independent), but a second round can be run. Various magnitudes of the normalization are tried. If the normalization converges to zero, the largest value yielding a comparable value is used. Optimizes each normalization parameter in rough steps using multiples of 3 of start point to find reasonable starting values for another algorithm. parameters, minimization function, parameter space definition [(lo, hi) for i in params] :param abandon_threshold: if in one direction the function increases by this much over the best value, abort search in this direction :param noimprovement_threshold: when decreasing the normalization, if the function increases by less than this amount, abort search in this direction :param disp: verbosity """ newparams = numpy.copy(params) lower = [lo for lo, hi in limits] upper = [hi for lo, hi in limits] for i, p in enumerate(params): startval = p beststat = func(newparams) bestval = startval if disp > 0: print '\t\tstart val = %e: %e' % (startval, beststat) go_up = True go_down = True # go up and down in multiples of 3 # once that is done, refine in multiples of 1.1 for n in list(3.**numpy.arange(1, 20)) + [None] + list(1.1**numpy.arange(1, 13)): if n is None: startval = bestval if disp > 0: print '\t\trefining from %e' % (startval) go_up = True go_down = True continue if go_up and startval * n > upper[i]: if disp > 0: print '\t\thit upper border (%e * %e > %e)' % (startval, n, upper[i]) go_up = False if go_down and startval / n < lower[i]: if disp > 0: print '\t\thit lower border (%e / %e > %e)' % (startval, n, lower[i]) go_down = False if go_up: if disp > 1: print '\t\ttrying %e ^' % (startval * n) newparams[i] = startval * n newstat = func(newparams) if disp > 1: print '\t\tval = %e: %e' % (newparams[i], newstat) if newstat <= beststat: bestval = newparams[i] beststat = newstat if disp > 0: print '\t\t\timprovement: %e' % newparams[i] if newstat > beststat + abandon_threshold: go_up = False if go_down: if disp > 1: print '\t\ttrying %e v' % (startval / n) newparams[i] = startval / n newstat = func(newparams) if disp > 1: print '\t\tval = %e: %e' % (newparams[i], newstat) if newstat + noimprovement_threshold < beststat: # avoid zeros in normalizations bestval = newparams[i] beststat = newstat if disp > 0: print '\t\t\timprovement: %e' % newparams[i] if newstat > beststat + abandon_threshold: go_down = False newparams[i] = bestval print '\tnew normalization of %d: %e' % (i, newparams[i]) print 'optimization done, reached %.3f' % (beststat) return newparams
def opt_normalizations(params, func, limits, abandon_threshold=100, noimprovement_threshold=1e-3, disp=0): """ **optimization algorithm for scale variables (positive value of unknown magnitude)** Each parameter is a normalization of a feature, and its value is sought. The parameters are handled in order (assumed to be independent), but a second round can be run. Various magnitudes of the normalization are tried. If the normalization converges to zero, the largest value yielding a comparable value is used. Optimizes each normalization parameter in rough steps using multiples of 3 of start point to find reasonable starting values for another algorithm. parameters, minimization function, parameter space definition [(lo, hi) for i in params] :param abandon_threshold: if in one direction the function increases by this much over the best value, abort search in this direction :param noimprovement_threshold: when decreasing the normalization, if the function increases by less than this amount, abort search in this direction :param disp: verbosity """ newparams = numpy.copy(params) lower = [lo for lo, hi in limits] upper = [hi for lo, hi in limits] for i, p in enumerate(params): startval = p beststat = func(newparams) bestval = startval if disp > 0: print '\t\tstart val = %e: %e' % (startval, beststat) go_up = True go_down = True # go up and down in multiples of 3 # once that is done, refine in multiples of 1.1 for n in list(3.**numpy.arange(1, 20)) + [None] + list(1.1**numpy.arange(1, 13)): if n is None: startval = bestval if disp > 0: print '\t\trefining from %e' % (startval) go_up = True go_down = True continue if go_up and startval * n > upper[i]: if disp > 0: print '\t\thit upper border (%e * %e > %e)' % (startval, n, upper[i]) go_up = False if go_down and startval / n < lower[i]: if disp > 0: print '\t\thit lower border (%e / %e > %e)' % (startval, n, lower[i]) go_down = False if go_up: if disp > 1: print '\t\ttrying %e ^' % (startval * n) newparams[i] = startval * n newstat = func(newparams) if disp > 1: print '\t\tval = %e: %e' % (newparams[i], newstat) if newstat <= beststat: bestval = newparams[i] beststat = newstat if disp > 0: print '\t\t\timprovement: %e' % newparams[i] if newstat > beststat + abandon_threshold: go_up = False if go_down: if disp > 1: print '\t\ttrying %e v' % (startval / n) newparams[i] = startval / n newstat = func(newparams) if disp > 1: print '\t\tval = %e: %e' % (newparams[i], newstat) if newstat + noimprovement_threshold < beststat: # avoid zeros in normalizations bestval = newparams[i] beststat = newstat if disp > 0: print '\t\t\timprovement: %e' % newparams[i] if newstat > beststat + abandon_threshold: go_down = False newparams[i] = bestval print '\tnew normalization of %d: %e' % (i, newparams[i]) print 'optimization done, reached %.3f' % (beststat) return newparams
[ "**", "optimization", "algorithm", "for", "scale", "variables", "(", "positive", "value", "of", "unknown", "magnitude", ")", "**", "Each", "parameter", "is", "a", "normalization", "of", "a", "feature", "and", "its", "value", "is", "sought", ".", "The", "parameters", "are", "handled", "in", "order", "(", "assumed", "to", "be", "independent", ")", "but", "a", "second", "round", "can", "be", "run", ".", "Various", "magnitudes", "of", "the", "normalization", "are", "tried", ".", "If", "the", "normalization", "converges", "to", "zero", "the", "largest", "value", "yielding", "a", "comparable", "value", "is", "used", "." ]
JohannesBuchner/jbopt
python
https://github.com/JohannesBuchner/jbopt/blob/11b721ea001625ad7820f71ff684723c71216646/jbopt/independent.py#L10-L86
[ "def", "opt_normalizations", "(", "params", ",", "func", ",", "limits", ",", "abandon_threshold", "=", "100", ",", "noimprovement_threshold", "=", "1e-3", ",", "disp", "=", "0", ")", ":", "newparams", "=", "numpy", ".", "copy", "(", "params", ")", "lower", "=", "[", "lo", "for", "lo", ",", "hi", "in", "limits", "]", "upper", "=", "[", "hi", "for", "lo", ",", "hi", "in", "limits", "]", "for", "i", ",", "p", "in", "enumerate", "(", "params", ")", ":", "startval", "=", "p", "beststat", "=", "func", "(", "newparams", ")", "bestval", "=", "startval", "if", "disp", ">", "0", ":", "print", "'\\t\\tstart val = %e: %e'", "%", "(", "startval", ",", "beststat", ")", "go_up", "=", "True", "go_down", "=", "True", "# go up and down in multiples of 3", "# once that is done, refine in multiples of 1.1", "for", "n", "in", "list", "(", "3.", "**", "numpy", ".", "arange", "(", "1", ",", "20", ")", ")", "+", "[", "None", "]", "+", "list", "(", "1.1", "**", "numpy", ".", "arange", "(", "1", ",", "13", ")", ")", ":", "if", "n", "is", "None", ":", "startval", "=", "bestval", "if", "disp", ">", "0", ":", "print", "'\\t\\trefining from %e'", "%", "(", "startval", ")", "go_up", "=", "True", "go_down", "=", "True", "continue", "if", "go_up", "and", "startval", "*", "n", ">", "upper", "[", "i", "]", ":", "if", "disp", ">", "0", ":", "print", "'\\t\\thit upper border (%e * %e > %e)'", "%", "(", "startval", ",", "n", ",", "upper", "[", "i", "]", ")", "go_up", "=", "False", "if", "go_down", "and", "startval", "/", "n", "<", "lower", "[", "i", "]", ":", "if", "disp", ">", "0", ":", "print", "'\\t\\thit lower border (%e / %e > %e)'", "%", "(", "startval", ",", "n", ",", "lower", "[", "i", "]", ")", "go_down", "=", "False", "if", "go_up", ":", "if", "disp", ">", "1", ":", "print", "'\\t\\ttrying %e ^'", "%", "(", "startval", "*", "n", ")", "newparams", "[", "i", "]", "=", "startval", "*", "n", "newstat", "=", "func", "(", "newparams", ")", "if", "disp", ">", "1", ":", "print", "'\\t\\tval = %e: %e'", "%", "(", "newparams", "[", "i", "]", ",", "newstat", ")", "if", "newstat", "<=", "beststat", ":", "bestval", "=", "newparams", "[", "i", "]", "beststat", "=", "newstat", "if", "disp", ">", "0", ":", "print", "'\\t\\t\\timprovement: %e'", "%", "newparams", "[", "i", "]", "if", "newstat", ">", "beststat", "+", "abandon_threshold", ":", "go_up", "=", "False", "if", "go_down", ":", "if", "disp", ">", "1", ":", "print", "'\\t\\ttrying %e v'", "%", "(", "startval", "/", "n", ")", "newparams", "[", "i", "]", "=", "startval", "/", "n", "newstat", "=", "func", "(", "newparams", ")", "if", "disp", ">", "1", ":", "print", "'\\t\\tval = %e: %e'", "%", "(", "newparams", "[", "i", "]", ",", "newstat", ")", "if", "newstat", "+", "noimprovement_threshold", "<", "beststat", ":", "# avoid zeros in normalizations", "bestval", "=", "newparams", "[", "i", "]", "beststat", "=", "newstat", "if", "disp", ">", "0", ":", "print", "'\\t\\t\\timprovement: %e'", "%", "newparams", "[", "i", "]", "if", "newstat", ">", "beststat", "+", "abandon_threshold", ":", "go_down", "=", "False", "newparams", "[", "i", "]", "=", "bestval", "print", "'\\tnew normalization of %d: %e'", "%", "(", "i", ",", "newparams", "[", "i", "]", ")", "print", "'optimization done, reached %.3f'", "%", "(", "beststat", ")", "return", "newparams" ]
11b721ea001625ad7820f71ff684723c71216646
valid
opt_grid
see :func:`optimize1d.optimize`, considers each parameter in order :param ftol: difference in values at which the function can be considered flat :param compute_errors: compute standard deviation of gaussian around optimum
jbopt/independent.py
def opt_grid(params, func, limits, ftol=0.01, disp=0, compute_errors=True): """ see :func:`optimize1d.optimize`, considers each parameter in order :param ftol: difference in values at which the function can be considered flat :param compute_errors: compute standard deviation of gaussian around optimum """ caches = [[] for p in params] newparams = numpy.copy(params) errors = [[] for p in params] for i, p in enumerate(params): cache = [] def func1(x0): newparams[i] = x0 v = func(newparams) cache.append([x0, v]) return v lo, hi = limits[i] bestval = optimize(func1, x0=p, cons=[lambda x: x - lo, lambda x: hi - x], ftol=ftol, disp=disp - 1) beststat = func1(bestval) if compute_errors: errors[i] = cache2errors(func1, cache, disp=disp - 1) newparams[i] = bestval caches[i] = cache if disp > 0: if compute_errors: print '\tnew value of %d: %e [%e .. %e] yielded %e' % (i, bestval, errors[i][0], errors[i][1], beststat) else: print '\tnew value of %d: %e yielded %e' % (i, bestval, beststat) beststat = func(newparams) if disp > 0: print 'optimization done, reached %.3f' % (beststat) if compute_errors: return newparams, errors else: return newparams
def opt_grid(params, func, limits, ftol=0.01, disp=0, compute_errors=True): """ see :func:`optimize1d.optimize`, considers each parameter in order :param ftol: difference in values at which the function can be considered flat :param compute_errors: compute standard deviation of gaussian around optimum """ caches = [[] for p in params] newparams = numpy.copy(params) errors = [[] for p in params] for i, p in enumerate(params): cache = [] def func1(x0): newparams[i] = x0 v = func(newparams) cache.append([x0, v]) return v lo, hi = limits[i] bestval = optimize(func1, x0=p, cons=[lambda x: x - lo, lambda x: hi - x], ftol=ftol, disp=disp - 1) beststat = func1(bestval) if compute_errors: errors[i] = cache2errors(func1, cache, disp=disp - 1) newparams[i] = bestval caches[i] = cache if disp > 0: if compute_errors: print '\tnew value of %d: %e [%e .. %e] yielded %e' % (i, bestval, errors[i][0], errors[i][1], beststat) else: print '\tnew value of %d: %e yielded %e' % (i, bestval, beststat) beststat = func(newparams) if disp > 0: print 'optimization done, reached %.3f' % (beststat) if compute_errors: return newparams, errors else: return newparams
[ "see", ":", "func", ":", "optimize1d", ".", "optimize", "considers", "each", "parameter", "in", "order", ":", "param", "ftol", ":", "difference", "in", "values", "at", "which", "the", "function", "can", "be", "considered", "flat", ":", "param", "compute_errors", ":", "compute", "standard", "deviation", "of", "gaussian", "around", "optimum" ]
JohannesBuchner/jbopt
python
https://github.com/JohannesBuchner/jbopt/blob/11b721ea001625ad7820f71ff684723c71216646/jbopt/independent.py#L90-L131
[ "def", "opt_grid", "(", "params", ",", "func", ",", "limits", ",", "ftol", "=", "0.01", ",", "disp", "=", "0", ",", "compute_errors", "=", "True", ")", ":", "caches", "=", "[", "[", "]", "for", "p", "in", "params", "]", "newparams", "=", "numpy", ".", "copy", "(", "params", ")", "errors", "=", "[", "[", "]", "for", "p", "in", "params", "]", "for", "i", ",", "p", "in", "enumerate", "(", "params", ")", ":", "cache", "=", "[", "]", "def", "func1", "(", "x0", ")", ":", "newparams", "[", "i", "]", "=", "x0", "v", "=", "func", "(", "newparams", ")", "cache", ".", "append", "(", "[", "x0", ",", "v", "]", ")", "return", "v", "lo", ",", "hi", "=", "limits", "[", "i", "]", "bestval", "=", "optimize", "(", "func1", ",", "x0", "=", "p", ",", "cons", "=", "[", "lambda", "x", ":", "x", "-", "lo", ",", "lambda", "x", ":", "hi", "-", "x", "]", ",", "ftol", "=", "ftol", ",", "disp", "=", "disp", "-", "1", ")", "beststat", "=", "func1", "(", "bestval", ")", "if", "compute_errors", ":", "errors", "[", "i", "]", "=", "cache2errors", "(", "func1", ",", "cache", ",", "disp", "=", "disp", "-", "1", ")", "newparams", "[", "i", "]", "=", "bestval", "caches", "[", "i", "]", "=", "cache", "if", "disp", ">", "0", ":", "if", "compute_errors", ":", "print", "'\\tnew value of %d: %e [%e .. %e] yielded %e'", "%", "(", "i", ",", "bestval", ",", "errors", "[", "i", "]", "[", "0", "]", ",", "errors", "[", "i", "]", "[", "1", "]", ",", "beststat", ")", "else", ":", "print", "'\\tnew value of %d: %e yielded %e'", "%", "(", "i", ",", "bestval", ",", "beststat", ")", "beststat", "=", "func", "(", "newparams", ")", "if", "disp", ">", "0", ":", "print", "'optimization done, reached %.3f'", "%", "(", "beststat", ")", "if", "compute_errors", ":", "return", "newparams", ",", "errors", "else", ":", "return", "newparams" ]
11b721ea001625ad7820f71ff684723c71216646
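
A minimal usage sketch for opt_grid as recorded above, assuming the jbopt.independent import path from the record's path field and a Python 2 interpreter; the quadratic objective and limits are illustrative only.

    import numpy
    from jbopt.independent import opt_grid  # import path assumed from the record's path field

    def chi2(params):
        # toy objective with its minimum at (1.0, 2.0)
        return (params[0] - 1.0) ** 2 + (params[1] - 2.0) ** 2

    start = numpy.array([0.5, 0.5])
    limits = [(0.0, 10.0), (0.0, 10.0)]
    # with compute_errors=True (the default) both the optimum and the error estimates are returned
    best, errors = opt_grid(start, chi2, limits, ftol=0.01, disp=0)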
valid
opt_grid_parallel
parallelized version of :func:`opt_grid`
jbopt/independent.py
def opt_grid_parallel(params, func, limits, ftol=0.01, disp=0, compute_errors=True): """ parallelized version of :func:`opt_grid` """ import multiprocessing def spawn(f): def fun(q_in,q_out): while True: i,x = q_in.get() if i == None: break q_out.put((i,f(x))) return fun def parmap(f, X, nprocs = multiprocessing.cpu_count()): q_in = multiprocessing.Queue(1) q_out = multiprocessing.Queue() proc = [multiprocessing.Process(target=spawn(f),args=(q_in,q_out)) for _ in range(nprocs)] for p in proc: p.daemon = True p.start() sent = [q_in.put((i,x)) for i,x in enumerate(X)] [q_in.put((None,None)) for _ in range(nprocs)] res = [q_out.get() for _ in range(len(sent))] [p.join() for p in proc] return [x for i,x in sorted(res)] nthreads = multiprocessing.cpu_count() caches = [[] for p in params] newparams = numpy.copy(params) errors = [[] for p in params] indices = range(0, len(params), nthreads) k = 0 while k < len(params): j = min(len(params), k + nthreads * 2) def run1d((i, curparams, curlimits)): cache = [] def func1(x0): curparams[i] = x0 v = func(curparams) cache.append([x0, v]) return v lo, hi = curlimits bestval = optimize(func1, x0=p, cons=[lambda x: x - lo, lambda x: hi - x], ftol=ftol, disp=disp - 1) beststat = func1(bestval) if compute_errors: errors = cache2errors(func1, cache, disp=disp - 1) return bestval, beststat, errors, cache return bestval, beststat, cache results = parmap(run1d, [(i, numpy.copy(newparams), limits[i]) for i in range(k, j)]) for i, r in enumerate(results): if compute_errors: v, s, e, c = r if disp > 0: print '\tnew value of %d: %e [%e .. %e] yielded %e' % (i + k, v, e[0], e[1], s) else: v, s, c = r e = [] if disp > 0: print '\tnew value of %d: %e yielded %e' % (i + k, v, s) newparams[i + k] = v caches[i + k] = c errors[i + k] = e k = j beststat = func(newparams) if disp > 0: print 'optimization done, reached %e' % (beststat) if compute_errors: return newparams, errors else: return newparams
def opt_grid_parallel(params, func, limits, ftol=0.01, disp=0, compute_errors=True): """ parallelized version of :func:`opt_grid` """ import multiprocessing def spawn(f): def fun(q_in,q_out): while True: i,x = q_in.get() if i == None: break q_out.put((i,f(x))) return fun def parmap(f, X, nprocs = multiprocessing.cpu_count()): q_in = multiprocessing.Queue(1) q_out = multiprocessing.Queue() proc = [multiprocessing.Process(target=spawn(f),args=(q_in,q_out)) for _ in range(nprocs)] for p in proc: p.daemon = True p.start() sent = [q_in.put((i,x)) for i,x in enumerate(X)] [q_in.put((None,None)) for _ in range(nprocs)] res = [q_out.get() for _ in range(len(sent))] [p.join() for p in proc] return [x for i,x in sorted(res)] nthreads = multiprocessing.cpu_count() caches = [[] for p in params] newparams = numpy.copy(params) errors = [[] for p in params] indices = range(0, len(params), nthreads) k = 0 while k < len(params): j = min(len(params), k + nthreads * 2) def run1d((i, curparams, curlimits)): cache = [] def func1(x0): curparams[i] = x0 v = func(curparams) cache.append([x0, v]) return v lo, hi = curlimits bestval = optimize(func1, x0=p, cons=[lambda x: x - lo, lambda x: hi - x], ftol=ftol, disp=disp - 1) beststat = func1(bestval) if compute_errors: errors = cache2errors(func1, cache, disp=disp - 1) return bestval, beststat, errors, cache return bestval, beststat, cache results = parmap(run1d, [(i, numpy.copy(newparams), limits[i]) for i in range(k, j)]) for i, r in enumerate(results): if compute_errors: v, s, e, c = r if disp > 0: print '\tnew value of %d: %e [%e .. %e] yielded %e' % (i + k, v, e[0], e[1], s) else: v, s, c = r e = [] if disp > 0: print '\tnew value of %d: %e yielded %e' % (i + k, v, s) newparams[i + k] = v caches[i + k] = c errors[i + k] = e k = j beststat = func(newparams) if disp > 0: print 'optimization done, reached %e' % (beststat) if compute_errors: return newparams, errors else: return newparams
[ "parallelized", "version", "of", ":", "func", ":", "opt_grid" ]
JohannesBuchner/jbopt
python
https://github.com/JohannesBuchner/jbopt/blob/11b721ea001625ad7820f71ff684723c71216646/jbopt/independent.py#L133-L214
[ "def", "opt_grid_parallel", "(", "params", ",", "func", ",", "limits", ",", "ftol", "=", "0.01", ",", "disp", "=", "0", ",", "compute_errors", "=", "True", ")", ":", "import", "multiprocessing", "def", "spawn", "(", "f", ")", ":", "def", "fun", "(", "q_in", ",", "q_out", ")", ":", "while", "True", ":", "i", ",", "x", "=", "q_in", ".", "get", "(", ")", "if", "i", "==", "None", ":", "break", "q_out", ".", "put", "(", "(", "i", ",", "f", "(", "x", ")", ")", ")", "return", "fun", "def", "parmap", "(", "f", ",", "X", ",", "nprocs", "=", "multiprocessing", ".", "cpu_count", "(", ")", ")", ":", "q_in", "=", "multiprocessing", ".", "Queue", "(", "1", ")", "q_out", "=", "multiprocessing", ".", "Queue", "(", ")", "proc", "=", "[", "multiprocessing", ".", "Process", "(", "target", "=", "spawn", "(", "f", ")", ",", "args", "=", "(", "q_in", ",", "q_out", ")", ")", "for", "_", "in", "range", "(", "nprocs", ")", "]", "for", "p", "in", "proc", ":", "p", ".", "daemon", "=", "True", "p", ".", "start", "(", ")", "sent", "=", "[", "q_in", ".", "put", "(", "(", "i", ",", "x", ")", ")", "for", "i", ",", "x", "in", "enumerate", "(", "X", ")", "]", "[", "q_in", ".", "put", "(", "(", "None", ",", "None", ")", ")", "for", "_", "in", "range", "(", "nprocs", ")", "]", "res", "=", "[", "q_out", ".", "get", "(", ")", "for", "_", "in", "range", "(", "len", "(", "sent", ")", ")", "]", "[", "p", ".", "join", "(", ")", "for", "p", "in", "proc", "]", "return", "[", "x", "for", "i", ",", "x", "in", "sorted", "(", "res", ")", "]", "nthreads", "=", "multiprocessing", ".", "cpu_count", "(", ")", "caches", "=", "[", "[", "]", "for", "p", "in", "params", "]", "newparams", "=", "numpy", ".", "copy", "(", "params", ")", "errors", "=", "[", "[", "]", "for", "p", "in", "params", "]", "indices", "=", "range", "(", "0", ",", "len", "(", "params", ")", ",", "nthreads", ")", "k", "=", "0", "while", "k", "<", "len", "(", "params", ")", ":", "j", "=", "min", "(", "len", "(", "params", ")", ",", "k", "+", "nthreads", "*", "2", ")", "def", "run1d", "(", "(", "i", ",", "curparams", ",", "curlimits", ")", ")", ":", "cache", "=", "[", "]", "def", "func1", "(", "x0", ")", ":", "curparams", "[", "i", "]", "=", "x0", "v", "=", "func", "(", "curparams", ")", "cache", ".", "append", "(", "[", "x0", ",", "v", "]", ")", "return", "v", "lo", ",", "hi", "=", "curlimits", "bestval", "=", "optimize", "(", "func1", ",", "x0", "=", "p", ",", "cons", "=", "[", "lambda", "x", ":", "x", "-", "lo", ",", "lambda", "x", ":", "hi", "-", "x", "]", ",", "ftol", "=", "ftol", ",", "disp", "=", "disp", "-", "1", ")", "beststat", "=", "func1", "(", "bestval", ")", "if", "compute_errors", ":", "errors", "=", "cache2errors", "(", "func1", ",", "cache", ",", "disp", "=", "disp", "-", "1", ")", "return", "bestval", ",", "beststat", ",", "errors", ",", "cache", "return", "bestval", ",", "beststat", ",", "cache", "results", "=", "parmap", "(", "run1d", ",", "[", "(", "i", ",", "numpy", ".", "copy", "(", "newparams", ")", ",", "limits", "[", "i", "]", ")", "for", "i", "in", "range", "(", "k", ",", "j", ")", "]", ")", "for", "i", ",", "r", "in", "enumerate", "(", "results", ")", ":", "if", "compute_errors", ":", "v", ",", "s", ",", "e", ",", "c", "=", "r", "if", "disp", ">", "0", ":", "print", "'\\tnew value of %d: %e [%e .. 
%e] yielded %e'", "%", "(", "i", "+", "k", ",", "v", ",", "e", "[", "0", "]", ",", "e", "[", "1", "]", ",", "s", ")", "else", ":", "v", ",", "s", ",", "c", "=", "r", "e", "=", "[", "]", "if", "disp", ">", "0", ":", "print", "'\\tnew value of %d: %e yielded %e'", "%", "(", "i", "+", "k", ",", "v", ",", "s", ")", "newparams", "[", "i", "+", "k", "]", "=", "v", "caches", "[", "i", "+", "k", "]", "=", "c", "errors", "[", "i", "+", "k", "]", "=", "e", "k", "=", "j", "beststat", "=", "func", "(", "newparams", ")", "if", "disp", ">", "0", ":", "print", "'optimization done, reached %e'", "%", "(", "beststat", ")", "if", "compute_errors", ":", "return", "newparams", ",", "errors", "else", ":", "return", "newparams" ]
11b721ea001625ad7820f71ff684723c71216646
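
A usage sketch for opt_grid_parallel with the same assumptions as the opt_grid example above; it additionally assumes a fork-capable platform, since the recorded implementation runs multiprocessing workers around local closures. The objective and limits are illustrative only.

    import numpy
    from jbopt.independent import opt_grid_parallel  # import path assumed from the record's path field

    def chi2(params):
        # toy objective with its minimum at (1, 2, 3, 4)
        return float(((params - numpy.arange(1.0, 5.0)) ** 2).sum())

    start = numpy.zeros(4) + 0.5
    limits = [(0.0, 10.0)] * 4
    best, errors = opt_grid_parallel(start, chi2, limits, ftol=0.01, disp=0)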
valid
_GetNativeEolStyle
Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the current platform.
zerotk/easyfs/_easyfs.py
def _GetNativeEolStyle(platform=sys.platform): ''' Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the current platform. ''' _NATIVE_EOL_STYLE_MAP = { 'win32' : EOL_STYLE_WINDOWS, 'linux2' : EOL_STYLE_UNIX, 'linux' : EOL_STYLE_UNIX, 'darwin' : EOL_STYLE_MAC, } result = _NATIVE_EOL_STYLE_MAP.get(platform) if result is None: from ._exceptions import UnknownPlatformError raise UnknownPlatformError(platform) return result
def _GetNativeEolStyle(platform=sys.platform): ''' Internal function that determines EOL_STYLE_NATIVE constant with the proper value for the current platform. ''' _NATIVE_EOL_STYLE_MAP = { 'win32' : EOL_STYLE_WINDOWS, 'linux2' : EOL_STYLE_UNIX, 'linux' : EOL_STYLE_UNIX, 'darwin' : EOL_STYLE_MAC, } result = _NATIVE_EOL_STYLE_MAP.get(platform) if result is None: from ._exceptions import UnknownPlatformError raise UnknownPlatformError(platform) return result
[ "Internal", "function", "that", "determines", "EOL_STYLE_NATIVE", "constant", "with", "the", "proper", "value", "for", "the", "current", "platform", "." ]
zerotk/easyfs
python
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L29-L46
[ "def", "_GetNativeEolStyle", "(", "platform", "=", "sys", ".", "platform", ")", ":", "_NATIVE_EOL_STYLE_MAP", "=", "{", "'win32'", ":", "EOL_STYLE_WINDOWS", ",", "'linux2'", ":", "EOL_STYLE_UNIX", ",", "'linux'", ":", "EOL_STYLE_UNIX", ",", "'darwin'", ":", "EOL_STYLE_MAC", ",", "}", "result", "=", "_NATIVE_EOL_STYLE_MAP", ".", "get", "(", "platform", ")", "if", "result", "is", "None", ":", "from", ".", "_exceptions", "import", "UnknownPlatformError", "raise", "UnknownPlatformError", "(", "platform", ")", "return", "result" ]
140923db51fb91d5a5847ad17412e8bce51ba3da
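
A short sketch exercising _GetNativeEolStyle from the record above; the EOL_STYLE_* constants and UnknownPlatformError are the names referenced by the recorded code, while the direct import from zerotk.easyfs._easyfs and the sample platform strings are assumptions.

    from zerotk.easyfs._easyfs import _GetNativeEolStyle, EOL_STYLE_UNIX, EOL_STYLE_WINDOWS

    assert _GetNativeEolStyle('linux') == EOL_STYLE_UNIX
    assert _GetNativeEolStyle('win32') == EOL_STYLE_WINDOWS
    try:
        _GetNativeEolStyle('amiga')          # unrecognised platform string
    except Exception as error:
        print(type(error).__name__)          # UnknownPlatformError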
valid
Cwd
Context manager for current directory (uses with_statement) e.g.: # working on some directory with Cwd('/home/new_dir'): # working on new_dir # working on some directory again :param unicode directory: Target directory to enter
zerotk/easyfs/_easyfs.py
def Cwd(directory): ''' Context manager for current directory (uses with_statement) e.g.: # working on some directory with Cwd('/home/new_dir'): # working on new_dir # working on some directory again :param unicode directory: Target directory to enter ''' old_directory = six.moves.getcwd() if directory is not None: os.chdir(directory) try: yield directory finally: os.chdir(old_directory)
def Cwd(directory): ''' Context manager for current directory (uses with_statement) e.g.: # working on some directory with Cwd('/home/new_dir'): # working on new_dir # working on some directory again :param unicode directory: Target directory to enter ''' old_directory = six.moves.getcwd() if directory is not None: os.chdir(directory) try: yield directory finally: os.chdir(old_directory)
[ "Context", "manager", "for", "current", "directory", "(", "uses", "with_statement", ")" ]
zerotk/easyfs
python
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L70-L90
[ "def", "Cwd", "(", "directory", ")", ":", "old_directory", "=", "six", ".", "moves", ".", "getcwd", "(", ")", "if", "directory", "is", "not", "None", ":", "os", ".", "chdir", "(", "directory", ")", "try", ":", "yield", "directory", "finally", ":", "os", ".", "chdir", "(", "old_directory", ")" ]
140923db51fb91d5a5847ad17412e8bce51ba3da
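
A usage sketch for the Cwd context manager recorded above, assuming the function is re-exported from the zerotk.easyfs package (it is defined in zerotk/easyfs/_easyfs.py); the temporary directory is only for illustration.

    import os
    import tempfile
    from zerotk.easyfs import Cwd   # assumed public re-export

    workdir = tempfile.mkdtemp()
    with Cwd(workdir):
        print(os.getcwd())          # now inside workdir
    print(os.getcwd())              # previous working directory restored on exit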
valid
NormalizePath
Normalizes a path maintaining the final slashes. Some environment variables need the final slash in order to work. Ex. The SOURCES_DIR set by subversion must end with a slash because of the way it is used in the Visual Studio projects. :param unicode path: The path to normalize. :rtype: unicode :returns: Normalized path
zerotk/easyfs/_easyfs.py
def NormalizePath(path): ''' Normalizes a path maintaining the final slashes. Some environment variables need the final slash in order to work. Ex. The SOURCES_DIR set by subversion must end with a slash because of the way it is used in the Visual Studio projects. :param unicode path: The path to normalize. :rtype: unicode :returns: Normalized path ''' if path.endswith('/') or path.endswith('\\'): slash = os.path.sep else: slash = '' return os.path.normpath(path) + slash
def NormalizePath(path): ''' Normalizes a path maintaining the final slashes. Some environment variables need the final slash in order to work. Ex. The SOURCES_DIR set by subversion must end with a slash because of the way it is used in the Visual Studio projects. :param unicode path: The path to normalize. :rtype: unicode :returns: Normalized path ''' if path.endswith('/') or path.endswith('\\'): slash = os.path.sep else: slash = '' return os.path.normpath(path) + slash
[ "Normalizes", "a", "path", "maintaining", "the", "final", "slashes", "." ]
zerotk/easyfs
python
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L97-L117
[ "def", "NormalizePath", "(", "path", ")", ":", "if", "path", ".", "endswith", "(", "'/'", ")", "or", "path", ".", "endswith", "(", "'\\\\'", ")", ":", "slash", "=", "os", ".", "path", ".", "sep", "else", ":", "slash", "=", "''", "return", "os", ".", "path", ".", "normpath", "(", "path", ")", "+", "slash" ]
140923db51fb91d5a5847ad17412e8bce51ba3da
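
A sketch of NormalizePath behaviour as described by the record's docstring, assuming the public re-export from zerotk.easyfs; the sample paths are illustrative.

    from zerotk.easyfs import NormalizePath   # assumed public re-export

    print(NormalizePath('a//b/../c'))    # 'a/c' on POSIX ('a\\c' on Windows)
    print(NormalizePath('a//b/../c/'))   # 'a/c/' - the final slash is preserved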
valid
CanonicalPath
Returns a version of a path that is unique. Given two paths path1 and path2: CanonicalPath(path1) == CanonicalPath(path2) if and only if they represent the same file on the host OS. Takes account of case, slashes and relative paths. :param unicode path: The original path. :rtype: unicode :returns: The unique path.
zerotk/easyfs/_easyfs.py
def CanonicalPath(path): ''' Returns a version of a path that is unique. Given two paths path1 and path2: CanonicalPath(path1) == CanonicalPath(path2) if and only if they represent the same file on the host OS. Takes account of case, slashes and relative paths. :param unicode path: The original path. :rtype: unicode :returns: The unique path. ''' path = os.path.normpath(path) path = os.path.abspath(path) path = os.path.normcase(path) return path
def CanonicalPath(path): ''' Returns a version of a path that is unique. Given two paths path1 and path2: CanonicalPath(path1) == CanonicalPath(path2) if and only if they represent the same file on the host OS. Takes account of case, slashes and relative paths. :param unicode path: The original path. :rtype: unicode :returns: The unique path. ''' path = os.path.normpath(path) path = os.path.abspath(path) path = os.path.normcase(path) return path
[ "Returns", "a", "version", "of", "a", "path", "that", "is", "unique", "." ]
zerotk/easyfs
python
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L123-L142
[ "def", "CanonicalPath", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "normpath", "(", "path", ")", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "path", "=", "os", ".", "path", ".", "normcase", "(", "path", ")", "return", "path" ]
140923db51fb91d5a5847ad17412e8bce51ba3da
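
A sketch of CanonicalPath, assuming the public re-export from zerotk.easyfs; the file name is illustrative and need not exist, since the recorded function only normalizes the string.

    import os
    from zerotk.easyfs import CanonicalPath   # assumed public re-export

    a = CanonicalPath('./docs/../README.txt')
    b = CanonicalPath(os.path.join(os.getcwd(), 'README.txt'))
    assert a == b   # both spellings collapse to the same canonical path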
valid
StandardizePath
Replaces all slashes and backslashes with the target separator StandardPath: We are defining that the standard-path is the one with only back-slashes in it, either on Windows or any other platform. :param bool strip: If True, removes additional slashes from the end of the path.
zerotk/easyfs/_easyfs.py
def StandardizePath(path, strip=False): ''' Replaces all slashes and backslashes with the target separator StandardPath: We are defining that the standard-path is the one with only back-slashes in it, either on Windows or any other platform. :param bool strip: If True, removes additional slashes from the end of the path. ''' path = path.replace(SEPARATOR_WINDOWS, SEPARATOR_UNIX) if strip: path = path.rstrip(SEPARATOR_UNIX) return path
def StandardizePath(path, strip=False): ''' Replaces all slashes and backslashes with the target separator StandardPath: We are defining that the standard-path is the one with only back-slashes in it, either on Windows or any other platform. :param bool strip: If True, removes additional slashes from the end of the path. ''' path = path.replace(SEPARATOR_WINDOWS, SEPARATOR_UNIX) if strip: path = path.rstrip(SEPARATOR_UNIX) return path
[ "Replaces", "all", "slashes", "and", "backslashes", "with", "the", "target", "separator" ]
zerotk/easyfs
python
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L148-L162
[ "def", "StandardizePath", "(", "path", ",", "strip", "=", "False", ")", ":", "path", "=", "path", ".", "replace", "(", "SEPARATOR_WINDOWS", ",", "SEPARATOR_UNIX", ")", "if", "strip", ":", "path", "=", "path", ".", "rstrip", "(", "SEPARATOR_UNIX", ")", "return", "path" ]
140923db51fb91d5a5847ad17412e8bce51ba3da
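
A sketch of StandardizePath, assuming the public re-export from zerotk.easyfs; the sample Windows-style path is illustrative.

    from zerotk.easyfs import StandardizePath   # assumed public re-export

    print(StandardizePath('c:\\temp\\file.txt'))       # 'c:/temp/file.txt'
    print(StandardizePath('c:\\temp\\', strip=True))   # 'c:/temp'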
valid
NormStandardPath
Normalizes a standard path (posixpath.normpath) maintaining any slashes at the end of the path. Normalize: Removes any local references in the path "/../" StandardPath: We are defining that the standard-path is the one with only back-slashes in it, either on Windows or any other platform.
zerotk/easyfs/_easyfs.py
def NormStandardPath(path): ''' Normalizes a standard path (posixpath.normpath) maintaining any slashes at the end of the path. Normalize: Removes any local references in the path "/../" StandardPath: We are defining that the standard-path is the one with only back-slashes in it, either on Windows or any other platform. ''' import posixpath if path.endswith('/'): slash = '/' else: slash = '' return posixpath.normpath(path) + slash
def NormStandardPath(path): ''' Normalizes a standard path (posixpath.normpath) maintaining any slashes at the end of the path. Normalize: Removes any local references in the path "/../" StandardPath: We are defining that the standard-path is the one with only back-slashes in it, either on Windows or any other platform. ''' import posixpath if path.endswith('/'): slash = '/' else: slash = '' return posixpath.normpath(path) + slash
[ "Normalizes", "a", "standard", "path", "(", "posixpath", ".", "normpath", ")", "maintaining", "any", "slashes", "at", "the", "end", "of", "the", "path", "." ]
zerotk/easyfs
python
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L169-L185
[ "def", "NormStandardPath", "(", "path", ")", ":", "import", "posixpath", "if", "path", ".", "endswith", "(", "'/'", ")", ":", "slash", "=", "'/'", "else", ":", "slash", "=", "''", "return", "posixpath", ".", "normpath", "(", "path", ")", "+", "slash" ]
140923db51fb91d5a5847ad17412e8bce51ba3da
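
A sketch of NormStandardPath, assuming the public re-export from zerotk.easyfs; the sample paths are illustrative.

    from zerotk.easyfs import NormStandardPath   # assumed public re-export

    print(NormStandardPath('a/b/../c/'))   # 'a/c/' - local references removed, trailing slash kept
    print(NormStandardPath('a/./b'))       # 'a/b'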
valid
CreateMD5
Creates a md5 file from a source file (contents are the md5 hash of source file) :param unicode source_filename: Path to source file :type target_filename: unicode or None :param target_filename: Name of the target file with the md5 contents If None, defaults to source_filename + '.md5'
zerotk/easyfs/_easyfs.py
def CreateMD5(source_filename, target_filename=None): ''' Creates a md5 file from a source file (contents are the md5 hash of source file) :param unicode source_filename: Path to source file :type target_filename: unicode or None :param target_filename: Name of the target file with the md5 contents If None, defaults to source_filename + '.md5' ''' if target_filename is None: target_filename = source_filename + '.md5' from six.moves.urllib.parse import urlparse source_url = urlparse(source_filename) # Obtain MD5 hex if _UrlIsLocal(source_url): # If using a local file, we can give Md5Hex the filename md5_contents = Md5Hex(filename=source_filename) else: # Md5Hex can't handle remote files, we open it and pray we won't run out of memory. md5_contents = Md5Hex(contents=GetFileContents(source_filename, binary=True)) # Write MD5 hash to a file CreateFile(target_filename, md5_contents)
def CreateMD5(source_filename, target_filename=None): ''' Creates a md5 file from a source file (contents are the md5 hash of source file) :param unicode source_filename: Path to source file :type target_filename: unicode or None :param target_filename: Name of the target file with the md5 contents If None, defaults to source_filename + '.md5' ''' if target_filename is None: target_filename = source_filename + '.md5' from six.moves.urllib.parse import urlparse source_url = urlparse(source_filename) # Obtain MD5 hex if _UrlIsLocal(source_url): # If using a local file, we can give Md5Hex the filename md5_contents = Md5Hex(filename=source_filename) else: # Md5Hex can't handle remote files, we open it and pray we won't run out of memory. md5_contents = Md5Hex(contents=GetFileContents(source_filename, binary=True)) # Write MD5 hash to a file CreateFile(target_filename, md5_contents)
[ "Creates", "a", "md5", "file", "from", "a", "source", "file", "(", "contents", "are", "the", "md5", "hash", "of", "source", "file", ")" ]
zerotk/easyfs
python
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L192-L220
[ "def", "CreateMD5", "(", "source_filename", ",", "target_filename", "=", "None", ")", ":", "if", "target_filename", "is", "None", ":", "target_filename", "=", "source_filename", "+", "'.md5'", "from", "six", ".", "moves", ".", "urllib", ".", "parse", "import", "urlparse", "source_url", "=", "urlparse", "(", "source_filename", ")", "# Obtain MD5 hex", "if", "_UrlIsLocal", "(", "source_url", ")", ":", "# If using a local file, we can give Md5Hex the filename", "md5_contents", "=", "Md5Hex", "(", "filename", "=", "source_filename", ")", "else", ":", "# Md5Hex can't handle remote files, we open it and pray we won't run out of memory.", "md5_contents", "=", "Md5Hex", "(", "contents", "=", "GetFileContents", "(", "source_filename", ",", "binary", "=", "True", ")", ")", "# Write MD5 hash to a file", "CreateFile", "(", "target_filename", ",", "md5_contents", ")" ]
140923db51fb91d5a5847ad17412e8bce51ba3da
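
A usage sketch for CreateMD5, assuming that CreateFile, CreateMD5 and GetFileContents (all referenced by the recorded code) are re-exported from zerotk.easyfs; the file name and contents are illustrative.

    from zerotk.easyfs import CreateFile, CreateMD5, GetFileContents   # assumed public re-exports

    CreateFile('example.txt', 'hello world')
    CreateMD5('example.txt')                       # writes example.txt.md5 next to the source file
    print(GetFileContents('example.txt.md5'))      # hex md5 digest of example.txt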
valid
CopyFile
Copy a file from source to target. :param source_filename: @see _DoCopyFile :param target_filename: @see _DoCopyFile :param bool md5_check: If True, checks md5 files (of both source and target files), if they match, skip this copy and return MD5_SKIP Md5 files are assumed to be {source, target} + '.md5' If any file is missing (source, target or md5), the copy will always be made. :param copy_symlink: @see _DoCopyFile :raises FileAlreadyExistsError: If target_filename already exists, and override is False :raises NotImplementedProtocol: If file protocol is not accepted Protocols allowed are: source_filename: local, ftp, http target_filename: local, ftp :rtype: None | MD5_SKIP :returns: MD5_SKIP if the file was not copied because there was a matching .md5 file .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
zerotk/easyfs/_easyfs.py
def CopyFile(source_filename, target_filename, override=True, md5_check=False, copy_symlink=True): ''' Copy a file from source to target. :param source_filename: @see _DoCopyFile :param target_filename: @see _DoCopyFile :param bool md5_check: If True, checks md5 files (of both source and target files), if they match, skip this copy and return MD5_SKIP Md5 files are assumed to be {source, target} + '.md5' If any file is missing (source, target or md5), the copy will always be made. :param copy_symlink: @see _DoCopyFile :raises FileAlreadyExistsError: If target_filename already exists, and override is False :raises NotImplementedProtocol: If file protocol is not accepted Protocols allowed are: source_filename: local, ftp, http target_filename: local, ftp :rtype: None | MD5_SKIP :returns: MD5_SKIP if the file was not copied because there was a matching .md5 file .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information ''' from ._exceptions import FileNotFoundError # Check override if not override and Exists(target_filename): from ._exceptions import FileAlreadyExistsError raise FileAlreadyExistsError(target_filename) # Don't do md5 check for md5 files themselves. md5_check = md5_check and not target_filename.endswith('.md5') # If we enabled md5 checks, ignore copy of files that haven't changed their md5 contents. if md5_check: source_md5_filename = source_filename + '.md5' target_md5_filename = target_filename + '.md5' try: source_md5_contents = GetFileContents(source_md5_filename) except FileNotFoundError: source_md5_contents = None try: target_md5_contents = GetFileContents(target_md5_filename) except FileNotFoundError: target_md5_contents = None if source_md5_contents is not None and \ source_md5_contents == target_md5_contents and \ Exists(target_filename): return MD5_SKIP # Copy source file _DoCopyFile(source_filename, target_filename, copy_symlink=copy_symlink) # If we have a source_md5, but no target_md5, create the target_md5 file if md5_check and source_md5_contents is not None and source_md5_contents != target_md5_contents: CreateFile(target_md5_filename, source_md5_contents)
def CopyFile(source_filename, target_filename, override=True, md5_check=False, copy_symlink=True): ''' Copy a file from source to target. :param source_filename: @see _DoCopyFile :param target_filename: @see _DoCopyFile :param bool md5_check: If True, checks md5 files (of both source and target files), if they match, skip this copy and return MD5_SKIP Md5 files are assumed to be {source, target} + '.md5' If any file is missing (source, target or md5), the copy will always be made. :param copy_symlink: @see _DoCopyFile :raises FileAlreadyExistsError: If target_filename already exists, and override is False :raises NotImplementedProtocol: If file protocol is not accepted Protocols allowed are: source_filename: local, ftp, http target_filename: local, ftp :rtype: None | MD5_SKIP :returns: MD5_SKIP if the file was not copied because there was a matching .md5 file .. seealso:: FTP LIMITATIONS at this module's doc for performance issues information ''' from ._exceptions import FileNotFoundError # Check override if not override and Exists(target_filename): from ._exceptions import FileAlreadyExistsError raise FileAlreadyExistsError(target_filename) # Don't do md5 check for md5 files themselves. md5_check = md5_check and not target_filename.endswith('.md5') # If we enabled md5 checks, ignore copy of files that haven't changed their md5 contents. if md5_check: source_md5_filename = source_filename + '.md5' target_md5_filename = target_filename + '.md5' try: source_md5_contents = GetFileContents(source_md5_filename) except FileNotFoundError: source_md5_contents = None try: target_md5_contents = GetFileContents(target_md5_filename) except FileNotFoundError: target_md5_contents = None if source_md5_contents is not None and \ source_md5_contents == target_md5_contents and \ Exists(target_filename): return MD5_SKIP # Copy source file _DoCopyFile(source_filename, target_filename, copy_symlink=copy_symlink) # If we have a source_md5, but no target_md5, create the target_md5 file if md5_check and source_md5_contents is not None and source_md5_contents != target_md5_contents: CreateFile(target_md5_filename, source_md5_contents)
[ "Copy", "a", "file", "from", "source", "to", "target", "." ]
zerotk/easyfs
python
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L228-L299
[ "def", "CopyFile", "(", "source_filename", ",", "target_filename", ",", "override", "=", "True", ",", "md5_check", "=", "False", ",", "copy_symlink", "=", "True", ")", ":", "from", ".", "_exceptions", "import", "FileNotFoundError", "# Check override", "if", "not", "override", "and", "Exists", "(", "target_filename", ")", ":", "from", ".", "_exceptions", "import", "FileAlreadyExistsError", "raise", "FileAlreadyExistsError", "(", "target_filename", ")", "# Don't do md5 check for md5 files themselves.", "md5_check", "=", "md5_check", "and", "not", "target_filename", ".", "endswith", "(", "'.md5'", ")", "# If we enabled md5 checks, ignore copy of files that haven't changed their md5 contents.", "if", "md5_check", ":", "source_md5_filename", "=", "source_filename", "+", "'.md5'", "target_md5_filename", "=", "target_filename", "+", "'.md5'", "try", ":", "source_md5_contents", "=", "GetFileContents", "(", "source_md5_filename", ")", "except", "FileNotFoundError", ":", "source_md5_contents", "=", "None", "try", ":", "target_md5_contents", "=", "GetFileContents", "(", "target_md5_filename", ")", "except", "FileNotFoundError", ":", "target_md5_contents", "=", "None", "if", "source_md5_contents", "is", "not", "None", "and", "source_md5_contents", "==", "target_md5_contents", "and", "Exists", "(", "target_filename", ")", ":", "return", "MD5_SKIP", "# Copy source file", "_DoCopyFile", "(", "source_filename", ",", "target_filename", ",", "copy_symlink", "=", "copy_symlink", ")", "# If we have a source_md5, but no target_md5, create the target_md5 file", "if", "md5_check", "and", "source_md5_contents", "is", "not", "None", "and", "source_md5_contents", "!=", "target_md5_contents", ":", "CreateFile", "(", "target_md5_filename", ",", "source_md5_contents", ")" ]
140923db51fb91d5a5847ad17412e8bce51ba3da