:rtype: list(list(str))
def _get_webpages(self):
    """:rtype: list(list(str))"""
    urls = []
    for child in self.vcard.getChildren():
        if child.name == "URL":
            urls.append(child.value)
    return sorted(urls)
:returns: the contact's anniversary or None if not available
:rtype: datetime.datetime or str
def get_anniversary(self):
    """:returns: the contact's anniversary or None if not available
    :rtype: datetime.datetime or str
    """
    # vcard 4.0 could contain a single text value
    try:
        if self.vcard.anniversary.params.get("VALUE")[0] == "text":
            return self.vcard.anniversary.value
    except (AttributeError, IndexError, TypeError):
        pass
    # else try to convert to a datetime object
    try:
        return helpers.string_to_date(self.vcard.anniversary.value)
    except (AttributeError, ValueError):
        # vcard 3.0: x-anniversary (private object)
        try:
            return helpers.string_to_date(self.vcard.x_anniversary.value)
        except (AttributeError, ValueError):
            pass
    return None
:returns: the contact's birthday or None if not available
:rtype: datetime.datetime or str
def get_birthday(self):
    """:returns: the contact's birthday or None if not available
    :rtype: datetime.datetime or str
    """
    # vcard 4.0 could contain a single text value
    try:
        if self.vcard.bday.params.get("VALUE")[0] == "text":
            return self.vcard.bday.value
    except (AttributeError, IndexError, TypeError):
        pass
    # else try to convert to a datetime object
    try:
        return helpers.string_to_date(self.vcard.bday.value)
    except (AttributeError, ValueError):
        pass
    return None
get list of types for phone number, email or post address

:param object: vcard class object
:type object: vobject.vCard
:param default_type: use if the object contains no type
:type default_type: str
:returns: list of type labels
:rtype: list(str)
def _get_types_for_vcard_object(self, object, default_type):
    """get list of types for phone number, email or post address

    :param object: vcard class object
    :type object: vobject.vCard
    :param default_type: use if the object contains no type
    :type default_type: str
    :returns: list of type labels
    :rtype: list(str)
    """
    type_list = []
    # try to find label group for custom value type
    if object.group:
        for label in self.vcard.getChildren():
            if label.name == "X-ABLABEL" and label.group == object.group:
                custom_type = label.value.strip()
                if custom_type:
                    type_list.append(custom_type)
    # then load type from params dict
    standard_types = object.params.get("TYPE")
    if standard_types is not None:
        if not isinstance(standard_types, list):
            standard_types = [standard_types]
        for type in standard_types:
            type = type.strip()
            if type and type.lower() != "pref":
                if not type.lower().startswith("x-"):
                    type_list.append(type)
                elif type[2:].lower() not in [x.lower() for x in type_list]:
                    # add x-custom type in case it's not already added by
                    # custom label for loop above but strip x- before
                    type_list.append(type[2:])
    # try to get pref parameter from vcard version 4.0
    try:
        type_list.append("pref=%d" % int(object.params.get("PREF")[0]))
    except (IndexError, TypeError, ValueError):
        # else try to determine, if type params contain pref attribute
        try:
            for x in object.params.get("TYPE"):
                if x.lower() == "pref" and "pref" not in type_list:
                    type_list.append("pref")
        except TypeError:
            pass
    # return type_list or default type
    if type_list:
        return type_list
    return [default_type]
Parse type value of phone numbers, email and post addresses.
def _parse_type_value(types, value, supported_types):
    """Parse type value of phone numbers, email and post addresses.

    :param types: list of type values
    :type types: list(str)
    :param value: the corresponding label, required for more verbose
        exceptions
    :type value: str
    :param supported_types: all allowed standard types
    :type supported_types: list(str)
    :returns: tuple of standard and custom types and pref integer
    :rtype: tuple(list(str), list(str), int)
    """
    custom_types = []
    standard_types = []
    pref = 0
    for type in types:
        type = type.strip()
        if type:
            if type.lower() in supported_types:
                standard_types.append(type)
            elif type.lower() == "pref":
                pref += 1
            elif re.match(r"^pref=\d{1,2}$", type.lower()):
                pref += int(type.split("=")[1])
            else:
                if type.lower().startswith("x-"):
                    custom_types.append(type[2:])
                    standard_types.append(type)
                else:
                    custom_types.append(type)
                    standard_types.append("X-{}".format(type))
    return (standard_types, custom_types, pref)
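A small usage sketch of the parsing above, with made-up values: a supported type is kept as-is, an "X-" prefixed type is kept and also recorded as a custom type with the prefix stripped, and "pref" markers are counted.

# hypothetical call: "home" is supported, "X-foo" is custom, "pref" is counted
_parse_type_value(["home", "X-foo", "pref"], "+49 123 456", ["home", "work"])
# -> (["home", "X-foo"], ["foo"], 1)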
converts list to string recursively so that nested lists are supported
def list_to_string(input, delimiter):
    """converts list to string recursively so that nested lists are supported

    :param input: a list of strings and lists of strings (and so on recursive)
    :type input: list
    :param delimiter: the delimiter to use when joining the items
    :type delimiter: str
    :returns: the recursively joined list
    :rtype: str
    """
    if isinstance(input, list):
        return delimiter.join(
            list_to_string(item, delimiter) for item in input)
    return input
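A quick illustration of the recursive join (values are made up):

list_to_string(["work", ["home", "mobile"]], ", ")  # -> "work, home, mobile"
list_to_string("cell", ", ")                        # -> "cell" (non-list input passes through)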
Convert string to date object.
def string_to_date(input):
    """Convert string to date object.

    :param input: the date string to parse
    :type input: str
    :returns: the parsed datetime object
    :rtype: datetime.datetime
    """
    # try date formats --mmdd, --mm-dd, yyyymmdd, yyyy-mm-dd and datetime
    # formats yyyymmddThhmmss, yyyy-mm-ddThh:mm:ss, yyyymmddThhmmssZ,
    # yyyy-mm-ddThh:mm:ssZ.
    for format_string in ("--%m%d", "--%m-%d", "%Y%m%d", "%Y-%m-%d",
                          "%Y%m%dT%H%M%S", "%Y-%m-%dT%H:%M:%S",
                          "%Y%m%dT%H%M%SZ", "%Y-%m-%dT%H:%M:%SZ"):
        try:
            return datetime.strptime(input, format_string)
        except ValueError:
            pass
    # try datetime formats yyyymmddThhmmsstz and yyyy-mm-ddThh:mm:sstz where
    # tz may look like -06:00.
    for format_string in ("%Y%m%dT%H%M%S%z", "%Y-%m-%dT%H:%M:%S%z"):
        try:
            return datetime.strptime(''.join(input.rsplit(":", 1)),
                                     format_string)
        except ValueError:
            pass
    raise ValueError
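For illustration, a few inputs the format list above accepts (example values only):

string_to_date("19850401")                   # datetime(1985, 4, 1, 0, 0)
string_to_date("--04-01")                    # year-less vCard date; strptime fills in year 1900
string_to_date("1985-04-01T12:30:00")        # datetime(1985, 4, 1, 12, 30)
string_to_date("1985-04-01T12:30:00-06:00")  # timezone-aware datetime via the %z formats
string_to_date("not a date")                 # raises ValueError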
converts a value list into yaml syntax

:param name: name of object (example: phone)
:type name: str
:param value: object contents
:type value: str, list(str), list(list(str))
:param indentation: indent all by number of spaces
:type indentation: int
:param indexOfColon: use to position : at the name string (-1 for no space)
:type indexOfColon: int
:param show_multi_line_character: option to hide "|"
:type show_multi_line_character: boolean
:returns: yaml formatted string array of name, value pair
:rtype: list(str)
def convert_to_yaml(
        name, value, indentation, indexOfColon, show_multi_line_character):
    """converts a value list into yaml syntax

    :param name: name of object (example: phone)
    :type name: str
    :param value: object contents
    :type value: str, list(str), list(list(str))
    :param indentation: indent all by number of spaces
    :type indentation: int
    :param indexOfColon: use to position : at the name string
        (-1 for no space)
    :type indexOfColon: int
    :param show_multi_line_character: option to hide "|"
    :type show_multi_line_character: boolean
    :returns: yaml formatted string array of name, value pair
    :rtype: list(str)
    """
    strings = []
    if isinstance(value, list):
        # special case for single item lists:
        if len(value) == 1 and isinstance(value[0], str):
            # value = ["string"] should not be converted to
            #   name:
            #   - string
            # but to "name: string" instead
            value = value[0]
        elif len(value) == 1 and isinstance(value[0], list) \
                and len(value[0]) == 1 and isinstance(value[0][0], str):
            # same applies to value = [["string"]]
            value = value[0][0]
    if isinstance(value, str):
        strings.append("%s%s%s: %s" % (
            ' ' * indentation, name, ' ' * (indexOfColon-len(name)),
            indent_multiline_string(value, indentation+4,
                                    show_multi_line_character)))
    elif isinstance(value, list):
        strings.append("%s%s%s: " % (
            ' ' * indentation, name, ' ' * (indexOfColon-len(name))))
        for outer in value:
            # special case for single item sublists
            if isinstance(outer, list) and len(outer) == 1 \
                    and isinstance(outer[0], str):
                # outer = ["string"] should not be converted to
                #   -
                #   - string
                # but to "- string" instead
                outer = outer[0]
            if isinstance(outer, str):
                strings.append("%s- %s" % (
                    ' ' * (indentation+4), indent_multiline_string(
                        outer, indentation+8, show_multi_line_character)))
            elif isinstance(outer, list):
                strings.append("%s- " % (' ' * (indentation+4)))
                for inner in outer:
                    if isinstance(inner, str):
                        strings.append("%s- %s" % (
                            ' ' * (indentation+8), indent_multiline_string(
                                inner, indentation+12,
                                show_multi_line_character)))
    return strings
converts user input into vcard compatible data structures

:param name: object name, only required for error messages
:type name: str
:param value: user input
:type value: str or list(str)
:param allowed_object_type: set the accepted return type for vcard attribute
:type allowed_object_type: enum of type ObjectType
:returns: cleaned user input, ready for vcard or a ValueError
:rtype: str or list(str)
def convert_to_vcard(name, value, allowed_object_type):
    """converts user input into vcard compatible data structures

    :param name: object name, only required for error messages
    :type name: str
    :param value: user input
    :type value: str or list(str)
    :param allowed_object_type: set the accepted return type for vcard
        attribute
    :type allowed_object_type: enum of type ObjectType
    :returns: cleaned user input, ready for vcard or a ValueError
    :rtype: str or list(str)
    """
    if isinstance(value, str):
        if allowed_object_type == ObjectType.list_with_strings:
            raise ValueError(
                "Error: " + name + " must not contain a single string.")
        else:
            return value.strip()
    elif isinstance(value, list):
        if allowed_object_type == ObjectType.string:
            raise ValueError(
                "Error: " + name + " must not contain a list.")
        else:
            for entry in value:
                if not isinstance(entry, str):
                    raise ValueError(
                        "Error: " + name + " must not contain a nested list")
            # filter out empty list items and strip leading and trailing space
            return [x.strip() for x in value if x]
    else:
        if allowed_object_type == ObjectType.string:
            raise ValueError(
                "Error: " + name + " must be a string.")
        elif allowed_object_type == ObjectType.list_with_strings:
            raise ValueError(
                "Error: " + name + " must be a list with strings.")
        else:
            raise ValueError(
                "Error: " + name + " must be a string or a list with strings.")
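A brief sketch of the accepted inputs, using the ObjectType values referenced above (example data only):

convert_to_vcard("nickname", "  Joe  ", ObjectType.string)      # -> "Joe"
convert_to_vcard("categories", ["a ", "", " b"],
                 ObjectType.list_with_strings)                  # -> ["a", "b"]
convert_to_vcard("nickname", ["Joe"], ObjectType.string)        # raises ValueError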
Calculate the minimum length of initial substrings of uid1 and uid2 for them to be different.
def _compare_uids(uid1, uid2):
    """Calculate the minimum length of initial substrings of uid1 and uid2
    for them to be different.

    :param uid1: first uid to compare
    :type uid1: str
    :param uid2: second uid to compare
    :type uid2: str
    :returns: the length of the shortest unequal initial substrings
    :rtype: int
    """
    sum = 0
    for char1, char2 in zip(uid1, uid2):
        if char1 == char2:
            sum += 1
        else:
            break
    return sum
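In other words, the return value is the length of the common prefix; a worked example with made-up UIDs:

_compare_uids("abcdef", "abcxyz")  # -> 3 ("abc" is shared, the 4th characters differ)
_compare_uids("abc", "abcdef")     # -> 3 (zip stops at the shorter uid)
_compare_uids("abc", "xyz")        # -> 0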
Search in all fields for contacts matching query.
def _search_all(self, query):
    """Search in all fields for contacts matching query.

    :param query: the query to search for
    :type query: str
    :yields: all found contacts
    :rtype: generator(carddav_object.CarddavObject)
    """
    regexp = re.compile(query, re.IGNORECASE | re.DOTALL)
    for contact in self.contacts.values():
        # search in all contact fields
        contact_details = contact.print_vcard()
        if regexp.search(contact_details) is not None:
            yield contact
        else:
            # find phone numbers with special chars like /
            clean_contact_details = re.sub("[^a-zA-Z0-9\n]", "",
                                           contact_details)
            if regexp.search(clean_contact_details) is not None \
                    and len(re.sub("\D", "", query)) >= 3:
                yield contact
Search in the name field for contacts matching query.
def _search_names(self, query):
    """Search in the name field for contacts matching query.

    :param query: the query to search for
    :type query: str
    :yields: all found contacts
    :rtype: generator(carddav_object.CarddavObject)
    """
    regexp = re.compile(query, re.IGNORECASE | re.DOTALL)
    for contact in self.contacts.values():
        # only search in contact name
        if regexp.search(contact.get_full_name()) is not None:
            yield contact
Search for contacts with a matching uid.
def _search_uid(self, query):
    """Search for contacts with a matching uid.

    :param query: the query to search for
    :type query: str
    :yields: all found contacts
    :rtype: generator(carddav_object.CarddavObject)
    """
    try:
        # First we treat the argument as a full UID and try to match it
        # exactly.
        yield self.contacts[query]
    except KeyError:
        # If that failed we look for all contacts whose UID starts with
        # the given query.
        for uid in self.contacts:
            if uid.startswith(query):
                yield self.contacts[uid]
Search this address book for contacts matching the query.
def search(self, query, method="all"):
    """Search this address book for contacts matching the query.

    The method can be one of "all", "name" and "uid".  The backend for this
    address book might be load()ed if needed.

    :param query: the query to search for
    :type query: str
    :param method: the type of fields to use when searching
    :type method: str
    :returns: all found contacts
    :rtype: list(carddav_object.CarddavObject)
    """
    logging.debug('address book %s, searching with %s', self.name, query)
    if not self._loaded:
        self.load(query)
    if method == "all":
        search_function = self._search_all
    elif method == "name":
        search_function = self._search_names
    elif method == "uid":
        search_function = self._search_uid
    else:
        raise ValueError('Only the search methods "all", "name" and "uid" '
                         'are supported.')
    return list(search_function(query))
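A usage sketch against an already constructed address book object (the variable name is hypothetical):

# abook is an AddressBook instance populated from a Vdir
for contact in abook.search("Smith", method="name"):
    print(contact.get_full_name())
abook.search("Smith", method="unknown")  # raises ValueError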
Create a dictionary of shortened UIDs for all contacts.
def get_short_uid_dict(self, query=None):
    """Create a dictionary of shortened UIDs for all contacts.

    All arguments are only used if the address book is not yet initialized
    and will just be handed to self.load().

    :param query: see self.load()
    :type query: str
    :returns: the contacts mapped by the shortest unique prefix of their UID
    :rtype: dict(str: CarddavObject)
    """
    if self._short_uids is None:
        if not self._loaded:
            self.load(query)
        if not self.contacts:
            self._short_uids = {}
        elif len(self.contacts) == 1:
            self._short_uids = {uid[0:1]: contact
                                for uid, contact in self.contacts.items()}
        else:
            self._short_uids = {}
            sorted_uids = sorted(self.contacts)
            # Prepare for the loop; the first and last items are handled
            # separately.
            item0, item1 = sorted_uids[:2]
            same1 = self._compare_uids(item0, item1)
            self._short_uids[item0[:same1 + 1]] = self.contacts[item0]
            for item_new in sorted_uids[2:]:
                # shift the items and the common prefix length one further
                item0, item1 = item1, item_new
                same0, same1 = same1, self._compare_uids(item0, item1)
                # compute the final prefix length for item1
                same = max(same0, same1)
                self._short_uids[item0[:same + 1]] = self.contacts[item0]
            # Save the last item.
            self._short_uids[item1[:same1 + 1]] = self.contacts[item1]
    return self._short_uids
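A worked example of the prefix computation above, assuming three contacts with the hypothetical UIDs "abcd", "abxy" and "zzzz":

# sorted UIDs: abcd, abxy, zzzz
# abcd vs abxy share 2 characters -> "abc" is the shortest unique prefix of abcd
# abxy needs max(2, 0) + 1 = 3 characters -> "abx"
# zzzz shares nothing with its neighbour -> "z" suffices
# resulting dict: {"abc": <abcd>, "abx": <abxy>, "z": <zzzz>}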
Get the shortened UID for the given UID.
def get_short_uid(self, uid):
    """Get the shortened UID for the given UID.

    :param uid: the full UID to shorten
    :type uid: str
    :returns: the shortened uid or the empty string
    :rtype: str
    """
    if uid:
        short_uids = self.get_short_uid_dict()
        for length_of_uid in range(len(uid), 0, -1):
            if short_uids.get(uid[:length_of_uid]) is not None:
                return uid[:length_of_uid]
    return ""
Find all vcard files inside this address book.
def _find_vcard_files(self, search=None, search_in_source_files=False):
    """Find all vcard files inside this address book.

    If a search string is given, only files whose contents match it will
    be returned.

    :param search: a regular expression to limit the results
    :type search: str
    :param search_in_source_files: apply search regexp directly on the .vcf
        files to speed up parsing (less accurate)
    :type search_in_source_files: bool
    :returns: the paths of the vcard files
    :rtype: generator
    """
    files = glob.glob(os.path.join(self.path, "*.vcf"))
    if search and search_in_source_files:
        for filename in files:
            with open(filename, "r") as filehandle:
                if re.search(search, filehandle.read(),
                             re.IGNORECASE | re.DOTALL):
                    yield filename
    else:
        yield from files
Load all vcard files in this address book from disk.
def load(self, query=None, search_in_source_files=False):
    """Load all vcard files in this address book from disk.

    If a search string is given, only files whose contents match it will
    be loaded.

    :param query: a regular expression to limit the results
    :type query: str
    :param search_in_source_files: apply search regexp directly on the .vcf
        files to speed up parsing (less accurate)
    :type search_in_source_files: bool
    :returns: the number of successfully loaded cards and the number of
        errors
    :rtype: int, int
    :throws: AddressBookParseError
    """
    if self._loaded:
        return
    logging.debug('Loading Vdir %s with query %s', self.name, query)
    errors = 0
    for filename in self._find_vcard_files(
            search=query, search_in_source_files=search_in_source_files):
        try:
            card = CarddavObject.from_file(self, filename,
                                           self._private_objects,
                                           self._localize_dates)
        except (IOError, vobject.base.ParseError) as err:
            verb = "open" if isinstance(err, IOError) else "parse"
            logging.debug("Error: Could not %s file %s\n%s", verb, filename,
                          err)
            if self._skip:
                errors += 1
            else:
                # FIXME: This should throw an appropriate exception and the
                # sys.exit should be called somewhere closer to the command
                # line parsing.
                logging.error(
                    "The vcard file %s of address book %s could not be "
                    "parsed\nUse --debug for more information or "
                    "--skip-unparsable to proceed", filename, self.name)
                sys.exit(2)
        else:
            uid = card.get_uid()
            if not uid:
                logging.warning("Card %s from address book %s has no UID "
                                "and will not be available.", card,
                                self.name)
            elif uid in self.contacts:
                logging.warning(
                    "Card %s and %s from address book %s have the same "
                    "UID. The former will not be available.", card,
                    self.contacts[uid], self.name)
            else:
                self.contacts[uid] = card
    self._loaded = True
    if errors:
        logging.warning(
            "%d of %d vCard files of address book %s could not be parsed.",
            errors, len(self.contacts) + errors, self)
    logging.debug('Loaded %s contacts from address book %s.',
                  len(self.contacts), self.name)
Get one of the backing address books by its name.
def get_abook(self, name):
    """Get one of the backing address books by its name.

    :param name: the name of the address book to get
    :type name: str
    :returns: the matching address book or None
    :rtype: AddressBook or NoneType
    """
    for abook in self._abooks:
        if abook.name == name:
            return abook
This function is used in the sys command (when the user wants to find a specific syscall).
def get_table(self, arch, pattern, colored=False, verbose=False):
    '''
    This function is used in the sys command (when the user wants to find a
    specific syscall).

    :param arch: architecture for the syscall table
    :param pattern: search pattern
    :param verbose: flag for verbose output
    :return: a printable table of matched syscalls, or None
    '''
    rawtable = self.search(arch, pattern)
    if len(rawtable) == 0:
        return None
    used_hd = self.__fetch_used_headers(rawtable, verbose)
    table = [self.__make_colored_row(used_hd, 'yellow,bold', upper=True)
             if colored else used_hd]
    for command in rawtable:
        cur_tb_field = []
        for hd in used_hd:
            value = command[hd]
            cur_tb_field.append(self.__make_colored_field(value, hd,
                                                          verbose=verbose))
        table.append(cur_tb_field)
    return DoubleTable(table)
Initialize the dictionary of architectures for assembling via keystone
def avail_archs(self):
    '''Initialize the dictionary of architectures for assembling via keystone'''
    return {
        ARM32: (KS_ARCH_ARM, KS_MODE_ARM),
        ARM64: (KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN),
        ARM_TB: (KS_ARCH_ARM, KS_MODE_THUMB),
        HEXAGON: (KS_ARCH_HEXAGON, KS_MODE_BIG_ENDIAN),
        MIPS32: (KS_ARCH_MIPS, KS_MODE_MIPS32),
        MIPS64: (KS_ARCH_MIPS, KS_MODE_MIPS64),
        PPC32: (KS_ARCH_PPC, KS_MODE_PPC32),
        PPC64: (KS_ARCH_PPC, KS_MODE_PPC64),
        SPARC32: (KS_ARCH_SPARC, KS_MODE_SPARC32),
        SPARC64: (KS_ARCH_SPARC, KS_MODE_SPARC64),
        SYSTEMZ: (KS_ARCH_SYSTEMZ, KS_MODE_BIG_ENDIAN),
        X86_16: (KS_ARCH_X86, KS_MODE_16),
        X86_32: (KS_ARCH_X86, KS_MODE_32),
        X86_64: (KS_ARCH_X86, KS_MODE_64),
    }
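For context, a minimal sketch of how one of these (arch, mode) tuples is typically consumed with the keystone bindings; the instruction string is just an example:

from keystone import Ks, KS_ARCH_ARM, KS_MODE_ARM

arch, mode = KS_ARCH_ARM, KS_MODE_ARM    # the ARM32 entry above
ks = Ks(arch, mode)
encoding, count = ks.asm("mov r0, #1")   # assemble one instruction into bytes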
Initialize the dictionary of architectures for disassembling via capstone
def avail_archs(self):
    '''Initialize the dictionary of architectures for disassembling via capstone'''
    return {
        ARM32: (CS_ARCH_ARM, CS_MODE_ARM),
        ARM64: (CS_ARCH_ARM64, CS_MODE_LITTLE_ENDIAN),
        ARM_TB: (CS_ARCH_ARM, CS_MODE_THUMB),
        MIPS32: (CS_ARCH_MIPS, CS_MODE_MIPS32),
        MIPS64: (CS_ARCH_MIPS, CS_MODE_MIPS64),
        SPARC32: (CS_ARCH_SPARC, CS_MODE_BIG_ENDIAN),
        SPARC64: (CS_ARCH_SPARC, CS_MODE_V9),
        SYSTEMZ: (CS_ARCH_SYSZ, CS_MODE_BIG_ENDIAN),
        X86_16: (CS_ARCH_X86, CS_MODE_16),
        X86_32: (CS_ARCH_X86, CS_MODE_32),
        X86_64: (CS_ARCH_X86, CS_MODE_64),
    }
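Similarly, a minimal sketch of a disassembly round-trip with the capstone bindings (the byte string is an example, xor rax, rax):

from capstone import Cs, CS_ARCH_X86, CS_MODE_64

md = Cs(CS_ARCH_X86, CS_MODE_64)                 # the X86_64 entry above
for insn in md.disasm(b"\x48\x31\xc0", 0x1000):  # disassemble at address 0x1000
    print(insn.mnemonic, insn.op_str)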
An `inspect.getargspec` with a relaxed sanity check to support Cython.
def getargspec_permissive(func):
    """
    An `inspect.getargspec` with a relaxed sanity check to support Cython.

    Motivation: A Cython-compiled function is *not* an instance of Python's
    types.FunctionType.  That is the sanity check the standard Py2 library
    uses in `inspect.getargspec()`.  So, an exception is raised when calling
    `argh.dispatch_command(cythonCompiledFunc)`.  However, the CyFunctions
    do have perfectly usable `.func_code` and `.func_defaults` which is all
    `inspect.getargspec` needs.

    This function just copies `inspect.getargspec()` from the standard
    library but relaxes the test to a more duck-typing one of having both
    `.func_code` and `.func_defaults` attributes.
    """
    if inspect.ismethod(func):
        func = func.im_func
    # Py2 Stdlib uses isfunction(func) which is too strict for
    # Cython-compiled functions though such have perfectly usable
    # func_code, func_defaults.
    if not (hasattr(func, "func_code") and hasattr(func, "func_defaults")):
        raise TypeError('{!r} missing func_code or func_defaults'.format(func))
    args, varargs, varkw = inspect.getargs(func.func_code)
    return inspect.ArgSpec(args, varargs, varkw, func.func_defaults)
Parses given list of arguments using given parser, calls the relevant function and prints the result.
def dispatch(parser, argv=None, add_help_command=True, completion=True,
             pre_call=None, output_file=sys.stdout, errors_file=sys.stderr,
             raw_output=False, namespace=None, skip_unknown_args=False):
    """
    Parses given list of arguments using given parser, calls the relevant
    function and prints the result.

    The target function should expect one positional argument: the
    :class:`argparse.Namespace` object.  However, if the function is
    decorated with :func:`~argh.decorators.plain_signature`, the positional
    and named arguments from the namespace object are passed to the function
    instead of the object itself.

    :param parser: the ArgumentParser instance.
    :param argv: a list of strings representing the arguments.  If `None`,
        ``sys.argv`` is used instead.  Default is `None`.
    :param add_help_command: if `True`, converts first positional argument
        "help" to a keyword argument so that ``help foo`` becomes
        ``foo --help`` and displays usage information for "foo".  Default is
        `True`.
    :param output_file: A file-like object for output.  If `None`, the
        resulting lines are collected and returned as a string.  Default is
        ``sys.stdout``.
    :param errors_file: Same as `output_file` but for ``sys.stderr``.
    :param raw_output: If `True`, results are written to the output file
        raw, without adding whitespaces or newlines between yielded strings.
        Default is `False`.
    :param completion: If `True`, shell tab completion is enabled.  Default
        is `True`.  (You will also need to install it.)  See
        :mod:`argh.completion`.
    :param skip_unknown_args: If `True`, unknown arguments do not cause an
        error (`ArgumentParser.parse_known_args` is used).
    :param namespace: An `argparse.Namespace`-like object.  By default an
        :class:`ArghNamespace` object is used.  Please note that support for
        combined default and nested functions may be broken if a different
        type of object is forced.

    By default the exceptions are not wrapped and will propagate.  The only
    exception that is always wrapped is
    :class:`~argh.exceptions.CommandError` which is interpreted as an
    expected event so the traceback is hidden.  You can also mark arbitrary
    exceptions as "wrappable" by using the
    :func:`~argh.decorators.wrap_errors` decorator.
    """
    if completion:
        autocomplete(parser)

    if argv is None:
        argv = sys.argv[1:]

    if add_help_command:
        if argv and argv[0] == 'help':
            argv.pop(0)
            argv.append('--help')

    if skip_unknown_args:
        parse_args = parser.parse_known_args
    else:
        parse_args = parser.parse_args

    if not namespace:
        namespace = ArghNamespace()

    # this will raise SystemExit if parsing fails
    namespace_obj = parse_args(argv, namespace=namespace)

    function = _get_function_from_namespace_obj(namespace_obj)

    if function:
        lines = _execute_command(function, namespace_obj, errors_file,
                                 pre_call=pre_call)
    else:
        # no commands declared, can't dispatch; display help message
        lines = [parser.format_usage()]

    if output_file is None:
        # user wants a string; we create an internal temporary file-like
        # object and will return its contents as a string
        if sys.version_info < (3, 0):
            f = compat.BytesIO()
        else:
            f = compat.StringIO()
    else:
        # normally this is stdout; can be any file
        f = output_file

    for line in lines:
        # print the line as soon as it is generated to ensure that it is
        # displayed to the user before anything else happens, e.g.
        # raw_input() is called
        io.dump(line, f)
        if not raw_output:
            # in most cases user wants one message per line
            io.dump('\n', f)

    if output_file is None:
        # user wanted a string; return contents of our temporary
        # file-like object
        f.seek(0)
        return f.read()
Assumes that `function` is a callable. Tries different approaches to call it (with `namespace_obj` or with ordinary signature). Yields the results line by line.
def _execute_command(function, namespace_obj, errors_file, pre_call=None):
    """
    Assumes that `function` is a callable.  Tries different approaches
    to call it (with `namespace_obj` or with ordinary signature).
    Yields the results line by line.

    If :class:`~argh.exceptions.CommandError` is raised, its message is
    appended to the results (i.e. yielded by the generator as a string).
    All other exceptions propagate unless marked as wrappable by
    :func:`wrap_errors`.
    """
    if pre_call:  # XXX undocumented because I'm unsure if it's OK
        # Actually used in real projects:
        # * https://google.com/search?q=argh+dispatch+pre_call
        # * https://github.com/neithere/argh/issues/63
        pre_call(namespace_obj)

    # the function is nested to catch certain exceptions (see below)
    def _call():
        # Actually call the function
        if getattr(function, ATTR_EXPECTS_NAMESPACE_OBJECT, False):
            result = function(namespace_obj)
        else:
            # namespace -> dictionary
            _flat_key = lambda key: key.replace('-', '_')
            all_input = dict((_flat_key(k), v)
                             for k, v in vars(namespace_obj).items())

            # filter the namespace variables so that only those expected
            # by the actual function will pass
            spec = get_arg_spec(function)

            positional = [all_input[k] for k in spec.args]
            kwonly = getattr(spec, 'kwonlyargs', [])
            keywords = dict((k, all_input[k]) for k in kwonly)

            # *args
            if spec.varargs:
                positional += getattr(namespace_obj, spec.varargs)

            # **kwargs
            varkw = getattr(spec, 'varkw', getattr(spec, 'keywords', []))
            if varkw:
                not_kwargs = [DEST_FUNCTION] + spec.args + [spec.varargs] + kwonly
                for k in vars(namespace_obj):
                    if k.startswith('_') or k in not_kwargs:
                        continue
                    keywords[k] = getattr(namespace_obj, k)

            result = function(*positional, **keywords)

        # Yield the results
        if isinstance(result, (GeneratorType, list, tuple)):
            # yield each line ASAP, convert CommandError message to a line
            for line in result:
                yield line
        else:
            # yield non-empty non-iterable result as a single line
            if result is not None:
                yield result

    wrappable_exceptions = [CommandError]
    wrappable_exceptions += getattr(function, ATTR_WRAPPED_EXCEPTIONS, [])

    try:
        result = _call()
        for line in result:
            yield line
    except tuple(wrappable_exceptions) as e:
        processor = getattr(function, ATTR_WRAPPED_EXCEPTIONS_PROCESSOR,
                            lambda e: '{0.__class__.__name__}: {0}'.format(e))
        errors_file.write(compat.text_type(processor(e)))
        errors_file.write('\n')
A wrapper for :func:`dispatch` that creates a one-command parser. Uses :attr:`PARSER_FORMATTER`.
def dispatch_command(function, *args, **kwargs):
    """
    A wrapper for :func:`dispatch` that creates a one-command parser.
    Uses :attr:`PARSER_FORMATTER`.

    This::

        dispatch_command(foo)

    ...is a shortcut for::

        parser = ArgumentParser()
        set_default_command(parser, foo)
        dispatch(parser)

    This function can be also used as a decorator.
    """
    parser = argparse.ArgumentParser(formatter_class=PARSER_FORMATTER)
    set_default_command(parser, function)
    dispatch(parser, *args, **kwargs)
A wrapper for :func:`dispatch` that creates a parser, adds commands to the parser and dispatches them. Uses :attr:`PARSER_FORMATTER`.
def dispatch_commands(functions, *args, **kwargs):
    """
    A wrapper for :func:`dispatch` that creates a parser, adds commands to
    the parser and dispatches them.  Uses :attr:`PARSER_FORMATTER`.

    This::

        dispatch_commands([foo, bar])

    ...is a shortcut for::

        parser = ArgumentParser()
        add_commands(parser, [foo, bar])
        dispatch(parser)
    """
    parser = argparse.ArgumentParser(formatter_class=PARSER_FORMATTER)
    add_commands(parser, functions)
    dispatch(parser, *args, **kwargs)
Prompts user for input. Correctly handles prompt message encoding.
def safe_input(prompt):
    """
    Prompts user for input.  Correctly handles prompt message encoding.
    """
    if sys.version_info < (3, 0):
        if isinstance(prompt, compat.text_type):
            # Python 2.x: unicode → bytes
            encoding = locale.getpreferredencoding() or 'utf-8'
            prompt = prompt.encode(encoding)
    else:
        if not isinstance(prompt, compat.text_type):
            # Python 3.x: bytes → unicode
            prompt = prompt.decode()
    return _input(prompt)
Encodes given value so it can be written to given file object.
def encode_output(value, output_file):
    """
    Encodes given value so it can be written to given file object.

    Value may be Unicode, binary string or any other data type.

    The exact behaviour depends on the Python version:

    Python 3.x
        `sys.stdout` is a `_io.TextIOWrapper` instance that accepts `str`
        (unicode) and breaks on `bytes`.  It is OK to simply assume that
        everything is Unicode unless special handling is introduced in the
        client code.  Thus, no additional processing is performed.

    Python 2.x
        `sys.stdout` is a file-like object that accepts `str` (bytes) and
        breaks when `unicode` is passed to `sys.stdout.write()`.  We can
        expect both Unicode and bytes.  They need to be encoded so as to
        match the file object encoding.  The output is binary if the object
        doesn't explicitly require Unicode.
    """
    if sys.version_info > (3, 0):
        # Python 3: whatever → unicode
        return compat.text_type(value)
    else:
        # Python 2: handle special cases
        stream_encoding = getattr(output_file, 'encoding', None)
        if stream_encoding:
            if stream_encoding.upper() == 'UTF-8':
                return compat.text_type(value)
            else:
                return value.encode(stream_encoding, 'ignore')
        else:
            # no explicit encoding requirements; force binary
            if isinstance(value, compat.text_type):
                # unicode → binary
                return value.encode('utf-8')
            else:
                return str(value)
Writes given line to given output file. See :func:`encode_output` for details.
def dump(raw_data, output_file):
    """
    Writes given line to given output file.
    See :func:`encode_output` for details.
    """
    data = encode_output(raw_data, output_file)
    output_file.write(data)
Adds support for shell completion via argcomplete_ by patching given `argparse.ArgumentParser` (sub)class.
def autocomplete(parser):
    """
    Adds support for shell completion via argcomplete_ by patching given
    `argparse.ArgumentParser` (sub)class.

    If completion is not enabled, logs a debug-level message.
    """
    if COMPLETION_ENABLED:
        argcomplete.autocomplete(parser)
    elif 'bash' in os.getenv('SHELL', ''):
        logger.debug('Bash completion not available. Install argcomplete.')
Wrapper for :meth:`argparse.ArgumentParser.parse_args`. If `namespace` is not defined, :class:`argh.dispatching.ArghNamespace` is used. This is required for functions to be properly used as commands.
def parse_args(self, args=None, namespace=None):
    """
    Wrapper for :meth:`argparse.ArgumentParser.parse_args`.  If `namespace`
    is not defined, :class:`argh.dispatching.ArghNamespace` is used.  This
    is required for functions to be properly used as commands.
    """
    namespace = namespace or ArghNamespace()
    return super(ArghParser, self).parse_args(args, namespace)
This method is copied verbatim from ArgumentDefaultsHelpFormatter with a couple of lines added just before the end. Reason: we need to `repr()` default values instead of simply inserting them as is. This helps notice, for example, an empty string as the default value; moreover, it prevents breaking argparse due to logical quirks inside of its formatters.
def _expand_help(self, action):
    """
    This method is copied verbatim from ArgumentDefaultsHelpFormatter with
    a couple of lines added just before the end.  Reason: we need to
    `repr()` default values instead of simply inserting them as is.  This
    helps notice, for example, an empty string as the default value;
    moreover, it prevents breaking argparse due to logical quirks inside of
    its formatters.

    Ideally this could be achieved by simply defining
    :attr:`DEFAULT_ARGUMENT_TEMPLATE` as ``{default!r}`` but unfortunately
    argparse only supports the old printf syntax.
    """
    params = dict(vars(action), prog=self._prog)
    for name in list(params):
        if params[name] is argparse.SUPPRESS:
            del params[name]
    for name in list(params):
        if hasattr(params[name], '__name__'):
            params[name] = params[name].__name__
    if params.get('choices') is not None:
        choices_str = ', '.join([str(c) for c in params['choices']])
        params['choices'] = choices_str

    # XXX this is added in Argh vs. argparse.ArgumentDefaultsHelpFormatter
    # (avoiding empty strings, otherwise Argparse would die with
    # an IndexError in _format_action)
    #
    if 'default' in params:
        if params['default'] is None:
            params['default'] = '-'
        else:
            params['default'] = repr(params['default'])
    #
    # /

    return self._get_help_string(action) % params
Adds types, actions, etc. to given argument specification. For example, ``default=3`` implies ``type=int``.
def _guess(kwargs):
    """
    Adds types, actions, etc. to given argument specification.
    For example, ``default=3`` implies ``type=int``.

    :param arg: a :class:`argh.utils.Arg` instance
    """
    guessed = {}

    # Parser actions that accept argument 'type'
    TYPE_AWARE_ACTIONS = 'store', 'append'

    # guess type/action from default value
    value = kwargs.get('default')
    if value is not None:
        if isinstance(value, bool):
            if kwargs.get('action') is None:
                # infer action from default value
                guessed['action'] = 'store_false' if value else 'store_true'
        elif kwargs.get('type') is None:
            # infer type from default value
            # (make sure that action handler supports this keyword)
            if kwargs.get('action', 'store') in TYPE_AWARE_ACTIONS:
                guessed['type'] = type(value)

    # guess type from choices (first item)
    if kwargs.get('choices') and 'type' not in list(guessed) + list(kwargs):
        guessed['type'] = type(kwargs['choices'][0])

    return dict(kwargs, **guessed)
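A couple of concrete inputs and the guesses they produce (values are illustrative):

_guess({'default': 3})                     # -> {'default': 3, 'type': int}
_guess({'default': False})                 # -> {'default': False, 'action': 'store_true'}
_guess({'choices': ['a', 'b']})            # -> {'choices': ['a', 'b'], 'type': str}
_guess({'default': 3, 'action': 'count'})  # unchanged: 'count' does not accept a type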
Sets default command (i.e. a function) for given parser.
def set_default_command(parser, function):
    """
    Sets default command (i.e. a function) for given parser.

    If `parser.description` is empty and the function has a docstring,
    it is used as the description.

    .. note::

       An attempt to set default command to a parser which already has
       subparsers (e.g. added with :func:`~argh.assembling.add_commands`)
       results in a `AssemblingError`.

    .. note::

       If there are both explicitly declared arguments (e.g. via
       :func:`~argh.decorators.arg`) and ones inferred from the function
       signature (e.g. via :func:`~argh.decorators.command`), declared ones
       will be merged into inferred ones.  If an argument does not conform
       function signature, `AssemblingError` is raised.

    .. note::

       If the parser was created with ``add_help=True`` (which is by
       default), option name ``-h`` is silently removed from any argument.
    """
    if parser._subparsers:
        _require_support_for_default_command_with_subparsers()

    spec = get_arg_spec(function)

    declared_args = getattr(function, ATTR_ARGS, [])
    inferred_args = list(_get_args_from_signature(function))

    if inferred_args and declared_args:
        # We've got a mixture of declared and inferred arguments
        #
        # a mapping of "dest" strings to argument declarations.
        #
        # * a "dest" string is a normalized form of argument name, i.e.:
        #
        #     '-f', '--foo' → 'foo'
        #     'foo-bar'     → 'foo_bar'
        #
        # * argument declaration is a dictionary representing an argument;
        #   it is obtained either from _get_args_from_signature() or from
        #   an @arg decorator (as is).
        #
        dests = OrderedDict()

        for argspec in inferred_args:
            dest = _get_parser_param_kwargs(parser, argspec)['dest']
            dests[dest] = argspec

        for declared_kw in declared_args:
            # an argument is declared via decorator
            dest = _get_dest(parser, declared_kw)
            if dest in dests:
                # the argument is already known from function signature
                #
                # now make sure that this declared arg conforms to the
                # function signature and therefore only refines an
                # inferred arg:
                #
                #   @arg('my-foo')    maps to  func(my_foo)
                #   @arg('--my-bar')  maps to  func(my_bar=...)

                # either both arguments are positional or both are optional
                decl_positional = _is_positional(declared_kw['option_strings'])
                infr_positional = _is_positional(dests[dest]['option_strings'])
                if decl_positional != infr_positional:
                    kinds = {True: 'positional', False: 'optional'}
                    raise AssemblingError(
                        '{func}: argument "{dest}" declared as {kind_i} '
                        '(in function signature) and {kind_d} (via decorator)'
                        .format(
                            func=function.__name__,
                            dest=dest,
                            kind_i=kinds[infr_positional],
                            kind_d=kinds[decl_positional],
                        ))

                # merge explicit argument declaration into the inferred one
                # (e.g. `help=...`)
                dests[dest].update(**declared_kw)
            else:
                # the argument is not in function signature
                varkw = getattr(spec, 'varkw', getattr(spec, 'keywords', []))
                if varkw:
                    # function accepts **kwargs; the argument goes into it
                    dests[dest] = declared_kw
                else:
                    # there's no way we can map the argument declaration
                    # to function signature
                    xs = (dests[x]['option_strings'] for x in dests)
                    raise AssemblingError(
                        '{func}: argument {flags} does not fit '
                        'function signature: {sig}'.format(
                            flags=', '.join(declared_kw['option_strings']),
                            func=function.__name__,
                            sig=', '.join('/'.join(x) for x in xs)))

        # pack the modified data back into a list
        inferred_args = dests.values()

    command_args = inferred_args or declared_args

    # add types, actions, etc. (e.g. default=3 implies type=int)
    command_args = [_guess(x) for x in command_args]

    for draft in command_args:
        draft = draft.copy()
        if 'help' not in draft:
            draft.update(help=DEFAULT_ARGUMENT_TEMPLATE)
        dest_or_opt_strings = draft.pop('option_strings')
        if parser.add_help and '-h' in dest_or_opt_strings:
            dest_or_opt_strings = [x for x in dest_or_opt_strings
                                   if x != '-h']
        completer = draft.pop('completer', None)
        try:
            action = parser.add_argument(*dest_or_opt_strings, **draft)
            if COMPLETION_ENABLED and completer:
                action.completer = completer
        except Exception as e:
            raise type(e)('{func}: cannot add arg {args}: {msg}'.format(
                args='/'.join(dest_or_opt_strings), func=function.__name__,
                msg=e))

    if function.__doc__ and not parser.description:
        parser.description = function.__doc__
    parser.set_defaults(**{
        DEST_FUNCTION: function,
    })
Adds given functions as commands to given parser.
def add_commands(parser, functions, namespace=None, namespace_kwargs=None,
                 func_kwargs=None,
                 # deprecated args:
                 title=None, description=None, help=None):
    """
    Adds given functions as commands to given parser.

    :param parser: an :class:`argparse.ArgumentParser` instance.
    :param functions: a list of functions.  A subparser is created for each
        of them.  If the function is decorated with
        :func:`~argh.decorators.arg`, the arguments are passed to
        :class:`argparse.ArgumentParser.add_argument`.  See also
        :func:`~argh.dispatching.dispatch` for requirements concerning
        function signatures.  The command name is inferred from the function
        name.  Note that the underscores in the name are replaced with
        hyphens, i.e. function name "foo_bar" becomes command name
        "foo-bar".
    :param namespace: an optional string representing the group of commands.
        For example, if a command named "hello" is added without the
        namespace, it will be available as "prog.py hello"; if the namespace
        is specified as "greet", then the command will be accessible as
        "prog.py greet hello".  The namespace itself is not callable, so
        "prog.py greet" will fail and only display a help message.
    :param func_kwargs: a `dict` of keyword arguments to be passed to each
        nested ArgumentParser instance created per command (i.e. per
        function).  Members of this dictionary have the highest priority,
        so a function's docstring is overridden by a `help` in
        `func_kwargs` (if present).
    :param namespace_kwargs: a `dict` of keyword arguments to be passed to
        the nested ArgumentParser instance under given `namespace`.

    Deprecated params that should be moved into `namespace_kwargs`:

    :param title: passed to :meth:`argparse.ArgumentParser.add_subparsers`
        as `title`.

        .. deprecated:: 0.26.0
           Please use `namespace_kwargs` instead.

    :param description: passed to
        :meth:`argparse.ArgumentParser.add_subparsers` as `description`.

        .. deprecated:: 0.26.0
           Please use `namespace_kwargs` instead.

    :param help: passed to :meth:`argparse.ArgumentParser.add_subparsers`
        as `help`.

        .. deprecated:: 0.26.0
           Please use `namespace_kwargs` instead.

    .. note::

       This function modifies the parser object.  Generally side effects
       are bad practice but we don't seem to have any choice as
       ArgumentParser is pretty opaque.  You may prefer
       :class:`~argh.helpers.ArghParser.add_commands` for a bit more
       predictable API.

    .. note::

       An attempt to add commands to a parser which already has a default
       function (e.g. added with
       :func:`~argh.assembling.set_default_command`) results in
       `AssemblingError`.
    """
    # FIXME "namespace" is a correct name but it clashes with the
    # "namespace" that represents arguments (argparse.Namespace and our
    # ArghNamespace).  We should rename the argument here.
    if DEST_FUNCTION in parser._defaults:
        _require_support_for_default_command_with_subparsers()

    namespace_kwargs = namespace_kwargs or {}

    # FIXME remove this by 1.0
    #
    if title:
        warnings.warn('argument `title` is deprecated in add_commands(),'
                      ' use `parser_kwargs` instead', DeprecationWarning)
        namespace_kwargs['description'] = title

    if help:
        warnings.warn('argument `help` is deprecated in add_commands(),'
                      ' use `parser_kwargs` instead', DeprecationWarning)
        namespace_kwargs['help'] = help

    if description:
        warnings.warn('argument `description` is deprecated in '
                      'add_commands(), use `parser_kwargs` instead',
                      DeprecationWarning)
        namespace_kwargs['description'] = description
    #
    # /

    subparsers_action = get_subparsers(parser, create=True)

    if namespace:
        # Make a nested parser and init a deeper _SubParsersAction under it.

        # Create a named group of commands.  It will be listed along with
        # root-level commands in ``app.py --help``; in that context its
        # `title` can be used as a short description on the right side of
        # its name.  Normally `title` is shown above the list of commands
        # in ``app.py my-namespace --help``.
        subsubparser_kw = {
            'help': namespace_kwargs.get('title'),
        }
        subsubparser = subparsers_action.add_parser(namespace,
                                                    **subsubparser_kw)
        subparsers_action = subsubparser.add_subparsers(**namespace_kwargs)
    else:
        assert not namespace_kwargs, ('`parser_kwargs` only makes sense '
                                      'with `namespace`.')

    for func in functions:
        cmd_name, func_parser_kwargs = _extract_command_meta_from_func(func)

        # override any computed kwargs by manually supplied ones
        if func_kwargs:
            func_parser_kwargs.update(func_kwargs)

        # create and set up the parser for this command
        command_parser = subparsers_action.add_parser(cmd_name,
                                                      **func_parser_kwargs)
        set_default_command(command_parser, func)
A wrapper for :func:`add_commands`.
def add_subcommands(parser, namespace, functions, **namespace_kwargs):
    """
    A wrapper for :func:`add_commands`.

    These examples are equivalent::

        add_commands(parser, [get, put], namespace='db', namespace_kwargs={
            'title': 'database commands',
            'help': 'CRUD for our silly database'
        })

        add_subcommands(parser, 'db', [get, put],
                        title='database commands',
                        help='CRUD for our silly database')
    """
    add_commands(parser, functions, namespace=namespace,
                 namespace_kwargs=namespace_kwargs)
Returns the :class:`argparse._SubParsersAction` instance for given :class:`ArgumentParser` instance as would have been returned by :meth:`ArgumentParser.add_subparsers`. The problem with the latter is that it only works once and raises an exception on the second attempt, and the public API seems to lack a method to get *existing* subparsers.
def get_subparsers(parser, create=False):
    """
    Returns the :class:`argparse._SubParsersAction` instance for given
    :class:`ArgumentParser` instance as would have been returned by
    :meth:`ArgumentParser.add_subparsers`.  The problem with the latter is
    that it only works once and raises an exception on the second attempt,
    and the public API seems to lack a method to get *existing* subparsers.

    :param create: If `True`, creates the subparser if it does not exist.
        Default is `False`.
    """
    # note that ArgumentParser._subparsers is *not* what is returned by
    # ArgumentParser.add_subparsers().
    if parser._subparsers:
        actions = [a for a in parser._actions
                   if isinstance(a, argparse._SubParsersAction)]
        assert len(actions) == 1
        return actions[0]
    else:
        if create:
            return parser.add_subparsers()
Returns argument specification for given function. Omits special arguments of instance methods (`self`) and static methods (usually `cls` or something like this).
def get_arg_spec(function):
    """
    Returns argument specification for given function.  Omits special
    arguments of instance methods (`self`) and static methods (usually
    `cls` or something like this).
    """
    while hasattr(function, '__wrapped__'):
        function = function.__wrapped__
    spec = compat.getargspec(function)
    if inspect.ismethod(function):
        spec = spec._replace(args=spec.args[1:])
    return spec
Sets given string as command name instead of the function name. The string is used verbatim without further processing.
def named(new_name):
    """
    Sets given string as command name instead of the function name.
    The string is used verbatim without further processing.

    Usage::

        @named('load')
        def do_load_some_stuff_and_keep_the_original_function_name(args):
            ...

    The resulting command will be available only as ``load``.
    To add aliases without renaming the command, check :func:`aliases`.

    .. versionadded:: 0.19
    """
    def wrapper(func):
        setattr(func, ATTR_NAME, new_name)
        return func
    return wrapper
Defines alternative command name(s) for given function (along with its original name). Usage::
def aliases(*names):
    """
    Defines alternative command name(s) for given function (along with its
    original name). Usage::

        @aliases('co', 'check')
        def checkout(args):
            ...

    The resulting command will be available as ``checkout``, ``check`` and
    ``co``.

    .. note::

       This decorator only works with a recent version of argparse (see
       `Python issue 9324`_ and `Python rev 4c0426`_).  Such version ships
       with **Python 3.2+** and may be available in other environments as a
       separate package.  Argh does not issue warnings and simply ignores
       aliases if they are not supported.  See
       :attr:`~argh.assembling.SUPPORTS_ALIASES`.

       .. _Python issue 9324: http://bugs.python.org/issue9324
       .. _Python rev 4c0426: http://hg.python.org/cpython/rev/4c0426261148/

    .. versionadded:: 0.19
    """
    def wrapper(func):
        setattr(func, ATTR_ALIASES, names)
        return func
    return wrapper
Declares an argument for given function. Does not register the function anywhere, nor does it modify the function in any way.
def arg(*args, **kwargs):
    """
    Declares an argument for given function.  Does not register the function
    anywhere, nor does it modify the function in any way.

    The signature of the decorator matches that of
    :meth:`argparse.ArgumentParser.add_argument`, only some keywords are not
    required if they can be easily guessed (e.g. you don't have to specify
    type or action when an `int` or `bool` default value is supplied).

    Typical use cases:

    - In combination with :func:`expects_obj` (which is not recommended);
    - in combination with ordinary function signatures to add details that
      cannot be expressed with that syntax (e.g. help message).

    Usage::

        from argh import arg

        @arg('path', help='path to the file to load')
        @arg('--format', choices=['yaml','json'])
        @arg('-v', '--verbosity', choices=range(0,3), default=2)
        def load(path, something=None, format='json', dry_run=False,
                 verbosity=1):
            loaders = {'json': json.load, 'yaml': yaml.load}
            loader = loaders[args.format]
            data = loader(args.path)
            if not args.dry_run:
                if verbosity < 1:
                    print('saving to the database')
                put_to_database(data)

    In this example:

    - `path` declaration is extended with `help`;
    - `format` declaration is extended with `choices`;
    - `dry_run` declaration is not duplicated;
    - `verbosity` is extended with `choices` and the default value is
      overridden.  (If both function signature and `@arg` define a default
      value for an argument, `@arg` wins.)

    .. note::

       It is recommended to avoid using this decorator unless there's no
       way to tune the argument's behaviour or presentation using ordinary
       function signatures.  Readability counts, don't repeat yourself.
    """
    def wrapper(func):
        declared_args = getattr(func, ATTR_ARGS, [])
        # The innermost decorator is called first but appears last in the
        # code.  We need to preserve the expected order of positional
        # arguments, so the outermost decorator inserts its value before
        # the innermost's:
        declared_args.insert(0, dict(option_strings=args, **kwargs))
        setattr(func, ATTR_ARGS, declared_args)
        return func
    return wrapper
Decorator. Wraps given exceptions into :class:`~argh.exceptions.CommandError`. Usage::
def wrap_errors(errors=None, processor=None, *args):
    """
    Decorator.  Wraps given exceptions into
    :class:`~argh.exceptions.CommandError`.  Usage::

        @wrap_errors([AssertionError])
        def foo(x=None, y=None):
            assert x or y, 'x or y must be specified'

    If the assertion fails, its message will be correctly printed and the
    stack hidden.  This helps to avoid boilerplate code.

    :param errors: A list of exception classes to catch.
    :param processor: A callable that expects the exception object and
        returns a string.  For example, this renders all wrapped errors in
        red colour::

            from termcolor import colored

            def failure(err):
                return colored(str(err), 'red')

            @wrap_errors(processor=failure)
            def my_command(...):
                ...
    """
    def wrapper(func):
        if errors:
            setattr(func, ATTR_WRAPPED_EXCEPTIONS, errors)
        if processor:
            setattr(func, ATTR_WRAPPED_EXCEPTIONS_PROCESSOR, processor)
        return func
    return wrapper
A shortcut for typical confirmation prompt.
def confirm(action, default=None, skip=False):
    """
    A shortcut for typical confirmation prompt.

    :param action: a string describing the action, e.g. "Apply changes".
        A question mark will be appended.
    :param default: `bool` or `None`.  Determines what happens when user
        hits :kbd:`Enter` without typing in a choice.  If `True`, default
        choice is "yes".  If `False`, it is "no".  If `None`, the prompt
        keeps reappearing until user types in a choice (not necessarily
        acceptable) or until the number of iteration reaches the limit.
        Default is `None`.
    :param skip: `bool`; if `True`, no interactive prompt is used and
        default choice is returned (useful for batch mode).  Default is
        `False`.

    Usage::

        def delete(key, silent=False):
            item = db.get(Item, args.key)
            if confirm('Delete '+item.title, default=True, skip=silent):
                item.delete()
                print('Item deleted.')
            else:
                print('Operation cancelled.')

    Returns `None` on `KeyboardInterrupt` event.
    """
    MAX_ITERATIONS = 3
    if skip:
        return default
    else:
        defaults = {
            None: ('y', 'n'),
            True: ('Y', 'n'),
            False: ('y', 'N'),
        }
        y, n = defaults[default]
        prompt = text_type('{action}? ({y}/{n})').format(**locals())
        choice = None
        try:
            if default is None:
                cnt = 1
                while not choice and cnt < MAX_ITERATIONS:
                    choice = safe_input(prompt)
                    cnt += 1
            else:
                choice = safe_input(prompt)
        except KeyboardInterrupt:
            return None
    if choice in ('yes', 'y', 'Y'):
        return True
    if choice in ('no', 'n', 'N'):
        return False
    if default is not None:
        return default
    return None
Select the provided column names from the model, do not return an entity, do not involve the rom session, just get the raw and/or processed column data from Redis.
def select(self, *column_names, **kwargs):
    '''
    Select the provided column names from the model, do not return an
    entity, do not involve the rom session, just get the raw and/or
    processed column data from Redis.

    Keyword-only arguments:

        * *include_pk=False* - whether to include the primary key in the
          returned data (we need to get this in some cases, so we fetch it
          anyway; if you want it, we can return it to you - just be careful
          with the namedtuple option - see the warning below)
        * *decode=True* - whether to take a pass through normal data
          decoding in the model (will not return an entity/model)
        * *ff=_dict_data_factory* - the type of data to return from the
          select after all filters/limits/order_by are applied

    .. warning:: If ``include_pk = True`` and if you don't provide the
        primary key column, it will be appended to your list of columns.

    .. note:: if you want to provide a new factory function for the
        returned data, it must be of the form (below is the actual dict
        factory function)::

            def _dict_data_factory(columns):
                _dict = dict
                _zip = zip
                def make(data):
                    # do whatever you need to turn your tuple of columns
                    # plus your list of data into whatever you want:
                    return _dict(_zip(columns, data))
                return make

    Available factory functions:

        * *``rom.query._dict_data_factory``* - default
        * *``rom.query._list_data_factory``* - lowest overhead, as the
          ``data`` passed in above is a list that you can do anything to
        * *``rom.query._tuple_data_factory``* - when you want tuples instead
        * *``rom.query._namedtuple_data_factory``* - get namedtuples, see
          warning below

    .. warning:: If you use the ``_namedtuple_data_factory``, and your
        columns include underscore prefixes, they will be stripped.  If this
        results in a name collision, you *will* get an exception.  If you
        want different behavior, write your own 20 line factory function
        that does exactly what you want, and pass it; they are really easy!
    '''
    include_pk = kwargs.pop('include_pk', False)
    decode = kwargs.pop('decode', True)
    ff = kwargs.pop('ff', _dict_data_factory)

    if isinstance(column_names[0], (list, tuple)):
        column_names = column_names[0]
    if not column_names:
        raise QueryError(
            "Must provide at least one column to query for raw data")
    if len(set(column_names)) != len(column_names):
        raise QueryError("Column names must be unique")
    missing = [c for c in column_names if c not in self._model._columns]
    if missing:
        raise QueryError("No such columns known: %r" % (missing,))

    remove_last = False
    if self._model._pkey not in column_names:
        column_names += (self._model._pkey,)
        remove_last = not include_pk

    return self.replace(select=(column_names, decode, remove_last, ff))
Copy the Query object, optionally replacing the filters, order_by, or limit information on the copy. This is mostly an internal detail that you can ignore.
def replace(self, **kwargs):
    '''
    Copy the Query object, optionally replacing the filters, order_by, or
    limit information on the copy.  This is mostly an internal detail that
    you can ignore.
    '''
    data = {
        'model': self._model,
        'filters': self._filters,
        'order_by': self._order_by,
        'limit': self._limit,
        'select': self._select,
    }
    data.update(**kwargs)
    return Query(**data)
Only columns/attributes that have been specified as having an index with the ``index=True`` option on the column definition can be filtered with this method. Prefix, suffix, and pattern match filters must be provided using the ``.startswith()``, ``.endswith()``, and the ``.like()`` methods on the query object, respectively. Geo location queries should be performed using the ``.near()`` method.
def filter(self, **kwargs):
    '''
    Only columns/attributes that have been specified as having an index
    with the ``index=True`` option on the column definition can be filtered
    with this method.  Prefix, suffix, and pattern match filters must be
    provided using the ``.startswith()``, ``.endswith()``, and the
    ``.like()`` methods on the query object, respectively.  Geo location
    queries should be performed using the ``.near()`` method.

    Filters should be of the form::

        # for numeric ranges, use None for open-ended ranges
        attribute=(min, max)

        # you can also query for equality by passing a single number
        attribute=value

        # for string searches, passing a plain string will require that
        # string to be in the index as a literal
        attribute=string

        # to perform an 'or' query on strings, you can pass a list of
        # strings
        attribute=[string1, string2]

    As an example, the following will return entities that have both
    ``hello`` and ``world`` in the ``String`` column ``scol`` and has a
    ``Numeric`` column ``ncol`` with value between 2 and 10 (including the
    endpoints)::

        results = MyModel.query \\
            .filter(scol='hello') \\
            .filter(scol='world') \\
            .filter(ncol=(2, 10)) \\
            .all()

    If you only want to match a single value as part of your range query,
    you can pass an integer, float, or Decimal object by itself, similar
    to the ``Model.get_by()`` method::

        results = MyModel.query \\
            .filter(ncol=5) \\
            .execute()

    .. note:: Trying to use a range query `attribute=(min, max)` on indexed
        string columns won't return any results.

    .. note:: This method only filters columns that have been defined with
        ``index=True``.
    '''
    cur_filters = list(self._filters)
    for attr, value in kwargs.items():
        value = self._check(attr, value, which='filter')
        if isinstance(value, NUMERIC_TYPES):
            # for simple numeric equality filters
            value = (value, value)

        if isinstance(value, six.string_types):
            cur_filters.append('%s:%s' % (attr, value))

        elif six.PY3 and isinstance(value, bytes):
            cur_filters.append('%s:%s' % (attr, value.decode('latin-1')))

        elif isinstance(value, tuple):
            if value is NOT_NULL:
                from .columns import OneToOne, ManyToOne
                ctype = type(self._model._columns[attr])
                if not issubclass(ctype, (OneToOne, ManyToOne)):
                    raise QueryError(
                        "Can only query for non-null column values "
                        "on OneToOne or ManyToOne columns, %r is of "
                        "type %r" % (attr, ctype))
            if len(value) != 2:
                raise QueryError(
                    "Numeric ranges require 2 endpoints, you provided %s "
                    "with %r" % (len(value), value))
            tt = []
            for v in value:
                if isinstance(v, date):
                    v = dt2ts(v)
                if isinstance(v, dtime):
                    v = t2ts(v)
                tt.append(v)
            value = tt
            cur_filters.append((attr, value[0], value[1]))

        elif isinstance(value, list) and value:
            cur_filters.append(['%s:%s' % (attr, _ts(v)) for v in value])

        else:
            raise QueryError(
                "Sorry, we don't know how to filter %r by %r" % (attr, value))

    return self.replace(filters=tuple(cur_filters))
When provided with keyword arguments of the form col=prefix, this will limit the entities returned to those that have a word with the provided prefix in the specified column(s). This requires that the prefix=True option was provided during column definition.
def startswith(self, **kwargs): ''' When provided with keyword arguments of the form ``col=prefix``, this will limit the entities returned to those that have a word with the provided prefix in the specified column(s). This requires that the ``prefix=True`` option was provided during column definition. Usage:: User.query.startswith(email='user@').execute() ''' new = [] for k, v in kwargs.items(): v = self._check(k, v, 'startswith') new.append(Prefix(k, v)) return self.replace(filters=self._filters+tuple(new))
When provided with keyword arguments of the form col=suffix, this will limit the entities returned to those that have a word with the provided suffix in the specified column(s). This requires that the suffix=True option was provided during column definition.
def endswith(self, **kwargs): ''' When provided with keyword arguments of the form ``col=suffix``, this will limit the entities returned to those that have a word with the provided suffix in the specified column(s). This requires that the ``suffix=True`` option was provided during column definition. Usage:: User.query.endswith(email='@gmail.com').execute() ''' new = [] for k, v in kwargs.items(): v = self._check(k, v, 'endswith') new.append(Suffix(k, v[::-1])) return self.replace(filters=self._filters+tuple(new))
When provided with keyword arguments of the form col=pattern, this will limit the entities returned to those that include the provided pattern. Note that 'like' queries require that the prefix=True option was provided as part of the column definition.
def like(self, **kwargs):
    '''
    When provided with keyword arguments of the form ``col=pattern``, this
    will limit the entities returned to those that include the provided
    pattern. Note that 'like' queries require that the ``prefix=True``
    option was provided as part of the column definition.

    Patterns allow for 4 wildcard characters, whose semantics are as
    follows:

        * *?* - will match 0 or 1 of any character
        * *\** - will match 0 or more of any character
        * *+* - will match 1 or more of any character
        * *!* - will match exactly 1 of any character

    As an example, imagine that you have enabled the required prefix
    matching on your ``User.email`` column. And let's say that you want to
    find everyone with an email address that contains the name 'frank'
    before the ``@`` sign. You can use the following pattern to discover
    those users.

        * *\*frank\*@*

    .. note:: Like queries implicitly start at the beginning of strings
        checked, so if you want to match a pattern that doesn't start at
        the beginning of a string, you should prefix it with one of the
        wildcard characters (like ``*`` as we did with the 'frank'
        pattern).
    '''
    new = []
    for k, v in kwargs.items():
        v = self._check(k, v, 'like')
        new.append(Pattern(k, v))
    return self.replace(filters=self._filters+tuple(new))
When provided with a column name, will sort the results of your query.
def order_by(self, column): ''' When provided with a column name, will sort the results of your query:: # returns all users, ordered by the created_at column in # descending order User.query.order_by('-created_at').execute() ''' cname = column.lstrip('-') col = self._check(cname) if type(col).__name__ in ('String', 'Text', 'Json') and col._keygen.__name__ not in _STRING_SORT_KEYGENS: warnings.warn("You are trying to order by a non-numeric column %r. " "Unless you have provided your own keygen or are using " "one of the sortable keygens: (%s), this probably won't " "work the way you expect it to."%(cname, STRING_SORT_KEYGENS_STR), stacklevel=2) return self.replace(order_by=column)
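A brief usage sketch combining filtering, ordering, and a limit; the User model and its indexed numeric created_at column are assumptions for illustration, not part of the source:

import time

# hypothetical model/column names; created_at is assumed to be numeric and indexed
recent_users = User.query \
    .filter(created_at=(time.time() - 86400, None)) \
    .order_by('-created_at') \
    .limit(0, 10) \
    .execute()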
Will return the total count of the objects that match the specified filters.
def count(self): ''' Will return the total count of the objects that match the specified filters.:: # counts the number of users created in the last 24 hours User.query.filter(created_at=(time.time()-86400, time.time())).count() ''' filters = self._filters if self._order_by: filters += (self._order_by.lstrip('-'),) if not filters: # We can actually count entities here... size = _connect(self._model).hlen(self._model._namespace + '::') limit = self._limit or (0, 2**64) size = max(size - max(limit[0], 0), 0) return min(size, limit[1]) return self._model._gindex.count(_connect(self._model), filters)
Iterate over the results of your query instead of getting them all with .all(). Will only perform a single query. If you expect that your processing will take more than 30 seconds to process 100 items, you should pass timeout and pagesize to reflect an appropriate timeout and page size to fetch at once.
def iter_result(self, timeout=30, pagesize=100, no_hscan=False): ''' Iterate over the results of your query instead of getting them all with `.all()`. Will only perform a single query. If you expect that your processing will take more than 30 seconds to process 100 items, you should pass `timeout` and `pagesize` to reflect an appropriate timeout and page size to fetch at once. Usage:: for user in User.query.endswith(email='@gmail.com').iter_result(): # do something with user ... ''' if not self._filters and not self._order_by: if self._model._columns[self._model._pkey]._index: return self._iter_all_pkey() conn = _connect(self._model) version = list(map(int, conn.info()['redis_version'].split('.')[:2])) if version >= [2,8] and not no_hscan: return self._iter_all_hscan() return self._iter_all() return self._iter_results(timeout, pagesize)
This will execute the query, returning the key where a ZSET of your results will be stored for pagination, further operations, etc.
def cached_result(self, timeout): ''' This will execute the query, returning the key where a ZSET of your results will be stored for pagination, further operations, etc. The timeout must be a positive integer number of seconds for which to set the expiration time on the key (this is to ensure that any cached query results are eventually deleted, unless you make the explicit step to use the PERSIST command). .. note:: Limit clauses are ignored and not passed. Usage:: ukey = User.query.endswith(email='@gmail.com').cached_result(30) for i in xrange(0, conn.zcard(ukey), 100): # refresh the expiration conn.expire(ukey, 30) users = User.get(conn.zrange(ukey, i, i+99)) ... ''' if not (self._filters or self._order_by): raise QueryError("You are missing filter or order criteria") timeout = int(timeout) if timeout < 1: raise QueryError("You must specify a timeout >= 1, you gave %r"%timeout) return self._model._gindex.search( _connect(self._model), self._filters, self._order_by, timeout=timeout)
Returns only the first result from the query, if any.
def first(self): ''' Returns only the first result from the query, if any. ''' lim = [0, 1] if self._limit: lim[0] = self._limit[0] if not self._filters and not self._order_by: for ent in self: return ent return None ids = self.limit(*lim)._search() if ids: return self._model.get(ids[0]) return None
Will delete the entities that match at the time the query is executed.
def delete(self, blocksize=100): ''' Will delete the entities that match at the time the query is executed. Used like:: MyModel.query.filter(email=...).delete() MyModel.query.endswith(email='@host.com').delete() .. warning:: can't be used on models on either side of a ``OneToMany``, ``ManyToOne``, or ``OneToOne`` relationship. ''' from .columns import MODELS_REFERENCED if not self._model._no_fk or self._model._namespace in MODELS_REFERENCED: raise QueryError("Can't delete entities of models with foreign key relationships") de = [] i = 0 for result in self.iter_result(pagesize=blocksize): de.append(result) i += 1 if i >= blocksize: session.delete(de) # one round-trip to delete "chunk" items del de[:] i = 0 if de: session.delete(de)
This function handles all on_delete semantics defined on OneToMany columns.
def _on_delete(ent): ''' This function handles all on_delete semantics defined on OneToMany columns. This function only exists because 'cascade' is *very* hard to get right. ''' seen_d = set([ent._pk]) to_delete = [ent] seen_s = set() to_save = [] def _set_default(ent, attr, de=NULL): pk = ent._pk if pk in seen_d: # going to be deleted, don't need to modify return col = ent.__class__._columns[attr] de = de if de is not NULL else col._default if de in (None, NULL): setattr(ent, attr, None) elif callable(col._default): setattr(ent, attr, col._default()) else: setattr(ent, attr, col._default) if pk not in seen_s: seen_s.add(pk) to_save.append(ent) for self in to_delete: for tbl, attr, action in MODELS_REFERENCED.get(self._namespace, ()): if action == 'no action': continue refs = MODELS[tbl].get_by(**{attr: self.id}) if not refs: continue if action == 'restrict': # raise the exception here for a better traceback raise _restrict(self, attr, refs) elif action == 'set null': for ref in refs: _set_default(ref, attr, None) continue elif action == 'set default': for ref in refs: _set_default(ref, attr) continue # otherwise col._on_delete == 'cascade' for ent in (refs if isinstance(refs, list) else [refs]): if ent._pk not in seen_d: seen_d.add(ent._pk) to_delete.append(ent) # If we got here, then to_delete includes all items to delete. Let's delete # them! for self in to_delete: self.delete(skip_on_delete_i_really_mean_it=SKIP_ON_DELETE) for self in to_save: # Careful not to resurrect deleted entities if self._pk not in seen_d: self.save()
Performs the actual prefix, suffix, and pattern match operations.
def redis_prefix_lua(conn, dest, index, prefix, is_first, pattern=None): ''' Performs the actual prefix, suffix, and pattern match operations. ''' tkey = '%s:%s'%(index.partition(':')[0], uuid.uuid4()) start, end = _start_end(prefix) return _redis_prefix_lua(conn, [dest, tkey, index], [start, end, pattern or prefix, int(pattern is not None), int(bool(is_first))] )
Estimates the total work necessary to calculate the prefix match over the given index with the provided prefix.
def estimate_work_lua(conn, index, prefix): ''' Estimates the total work necessary to calculate the prefix match over the given index with the provided prefix. ''' if index.endswith(':idx'): args = [] if not prefix else list(prefix) if args: args[0] = '-inf' if args[0] is None else repr(float(args[0])) args[1] = 'inf' if args[1] is None else repr(float(args[1])) return _estimate_work_lua(conn, [index], args, force_eval=True) elif index.endswith(':geo'): return _estimate_work_lua(conn, [index], filter(None, [prefix]), force_eval=True) start, end = _start_end(prefix) return _estimate_work_lua(conn, [index], [start, '(' + end], force_eval=True)
Search for model ids that match the provided filters.
def search(self, conn, filters, order_by, offset=None, count=None, timeout=None): ''' Search for model ids that match the provided filters. Arguments: * *filters* - A list of filters that apply to the search of one of the following two forms: 1. ``'column:string'`` - a plain string will match a word in a text search on the column .. note:: Read the documentation about the ``Query`` object for what is actually passed during text search 2. ``('column', min, max)`` - a numeric column range search, between min and max (inclusive by default) .. note:: Read the documentation about the ``Query`` object for information about open-ended ranges 3. ``['column:string1', 'column:string2']`` - will match any of the provided words in a text search on the column 4. ``Prefix('column', 'prefix')`` - will match prefixes of words in a text search on the column 5. ``Suffix('column', 'suffix')`` - will match suffixes of words in a text search on the column 6. ``Pattern('column', 'pattern')`` - will match patterns over words in a text search on the column * *order_by* - A string that names the numeric column by which to sort the results by. Prefixing with '-' will return results in descending order .. note:: While you can technically pass a non-numeric index as an *order_by* clause, the results will basically be to order the results by string comparison of the ids (10 will come before 2). .. note:: If you omit the ``order_by`` argument, results will be ordered by the last filter. If the last filter was a text filter, see the previous note. If the last filter was numeric, then results will be ordered by that result. * *offset* - A numeric starting offset for results * *count* - The maximum number of results to return from the query ''' # prepare the filters pipe, intersect, temp_id = self._prepare(conn, filters) # handle ordering if order_by: reverse = order_by and order_by.startswith('-') order_clause = '%s:%s:idx'%(self.namespace, order_by.lstrip('-')) intersect(temp_id, {temp_id:0, order_clause: -1 if reverse else 1}) # handle returning the temporary result key if timeout is not None: pipe.expire(temp_id, timeout) pipe.execute() return temp_id offset = offset if offset is not None else 0 end = (offset + count - 1) if count and count > 0 else -1 pipe.zrange(temp_id, offset, end) pipe.delete(temp_id) return pipe.execute()[-2]
Returns the count of the items that match the provided filters.
def count(self, conn, filters): ''' Returns the count of the items that match the provided filters. For the meaning of what the ``filters`` argument means, see the ``.search()`` method docs. ''' pipe, intersect, temp_id = self._prepare(conn, filters) pipe.zcard(temp_id) pipe.delete(temp_id) return pipe.execute()[-2]
Tries to get the _conn attribute from a model. Barring that, gets the global default connection using other methods.
def _connect(obj): ''' Tries to get the _conn attribute from a model. Barring that, gets the global default connection using other methods. ''' from .columns import MODELS if isinstance(obj, MODELS['Model']): obj = obj.__class__ if hasattr(obj, '_conn'): return obj._conn if hasattr(obj, 'CONN'): return obj.CONN return get_connection()
This is a basic full-text index keygen function. Words are lowercased, split by whitespace, and stripped of punctuation from both ends before an inverted index is created for term searching.
def FULL_TEXT(val): ''' This is a basic full-text index keygen function. Words are lowercased, split by whitespace, and stripped of punctuation from both ends before an inverted index is created for term searching. ''' if isinstance(val, float): val = repr(val) elif val in (None, ''): return None elif not isinstance(val, six.string_types): if six.PY3 and isinstance(val, bytes): val = val.decode('latin-1') else: val = str(val) r = sorted(set([x for x in [s.lower().strip(string.punctuation) for s in val.split()] if x])) if not isinstance(val, str): # unicode on py2k return [s.encode('utf-8') for s in r] return r
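For illustration, the inverted-index terms this keygen produces for a short sentence (output is de-duplicated and sorted, as in the implementation):

FULL_TEXT("The quick, brown FOX -- jumps!")
# -> ['brown', 'fox', 'jumps', 'quick', 'the']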
This is a basic case-sensitive "sorted order" index keygen function for strings. This will return a value that is suitable to be used for ordering by a 7-byte prefix of a string (that is, 7 characters from a byte-string, and 1.75-7 characters from a unicode string, depending on character -> encoding length).
def SIMPLE(val): ''' This is a basic case-sensitive "sorted order" index keygen function for strings. This will return a value that is suitable to be used for ordering by a 7-byte prefix of a string (that is 7 characters from a byte-string, and 1.75-7 characters from a unicode string, depending on character -> encoding length). .. warning:: Case sensitivity is based on the (encoded) byte prefixes of the strings/text being indexed, so ordering *may be different* than a native comparison ordering (especially if an order is different based on characters past the 7th encoded byte). ''' if not val: return None if not isinstance(val, six.string_types): if six.PY3 and isinstance(val, bytes): val = val.decode('latin-1') else: val = str(val) return {'': _prefix_score(val)}
This is a basic "equality" index keygen, primarily meant to be used for exact-match queries such as Model.query.filter(col='value').
def IDENTITY(val):
    '''
    This is a basic "equality" index keygen, primarily meant to be used
    for things like::

        Model.query.filter(col='value')

    Where ``FULL_TEXT`` would transform a sentence like "A Simple Sentence"
    into an inverted index searchable by the words "a", "simple", and/or
    "sentence", ``IDENTITY`` will only be searchable by the original full
    sentence with the same capitalization - "A Simple Sentence".

    See ``IDENTITY_CI`` for the same function, only case-insensitive.
    '''
    if not val:
        return None
    if not isinstance(val, six.string_types_ex):
        val = str(val)
    return [val]
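To make the contrast with FULL_TEXT concrete, here is what each keygen produces for the sentence used in the docstring:

FULL_TEXT("A Simple Sentence")   # -> ['a', 'sentence', 'simple']
IDENTITY("A Simple Sentence")    # -> ['A Simple Sentence']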
This utility function will iterate over all entities of a provided model, refreshing their indices. This is primarily useful after adding an index on a column.
def refresh_indices(model, block_size=100): ''' This utility function will iterate over all entities of a provided model, refreshing their indices. This is primarily useful after adding an index on a column. Arguments: * *model* - the model whose entities you want to reindex * *block_size* - the maximum number of entities you want to fetch from Redis at a time, defaulting to 100 This function will yield its progression through re-indexing all of your entities. Example use:: for progress, total in refresh_indices(MyModel, block_size=200): print "%s of %s"%(progress, total) .. note:: This uses the session object to handle index refresh via calls to ``.commit()``. If you have any outstanding entities known in the session, they will be committed. ''' conn = _connect(model) max_id = int(conn.get('%s:%s:'%(model._namespace, model._pkey)) or '0') block_size = max(block_size, 10) for i in range(1, max_id+1, block_size): # fetches entities, keeping a record in the session models = model.get(list(range(i, i+block_size))) models # for pyflakes # re-save un-modified data, resulting in index-only updates session.commit(all=True) yield min(i+block_size, max_id), max_id
This utility function will clean out old index data that was accidentally left during item deletion in rom versions <= 0.27.0. You should run this after you have upgraded all of your clients to version 0.28.0 or later.
def clean_old_index(model, block_size=100, **kwargs): ''' This utility function will clean out old index data that was accidentally left during item deletion in rom versions <= 0.27.0 . You should run this after you have upgraded all of your clients to version 0.28.0 or later. Arguments: * *model* - the model whose entities you want to reindex * *block_size* - the maximum number of items to check at a time defaulting to 100 This function will yield its progression through re-checking all of the data that could be left over. Example use:: for progress, total in clean_old_index(MyModel, block_size=200): print "%s of %s"%(progress, total) ''' conn = _connect(model) version = list(map(int, conn.info()['redis_version'].split('.')[:2])) has_hscan = version >= [2, 8] pipe = conn.pipeline(True) prefix = '%s:'%model._namespace index = prefix + ':' block_size = max(block_size, 10) force_hscan = kwargs.get('force_hscan', False) if (has_hscan or force_hscan) and force_hscan is not None: max_id = conn.hlen(index) cursor = None scanned = 0 while cursor != b'0': cursor, remove = _scan_index_lua(conn, [index, prefix], [cursor or '0', block_size, 0, 0]) if remove: _clean_index_lua(conn, [model._namespace], remove) scanned += block_size if scanned > max_id: max_id = scanned + 1 yield scanned, max_id # need to scan over unique indexes :/ for uniq in chain(model._unique, model._cunique): name = uniq if isinstance(uniq, six.string_types) else ':'.join(uniq) idx = prefix + name + ':uidx' cursor = None while cursor != b'0': cursor, remove = _scan_index_lua(conn, [idx, prefix], [cursor or '0', block_size, 1, 0]) if remove: conn.hdel(idx, *remove) scanned += block_size if scanned > max_id: max_id = scanned + 1 yield scanned, max_id else: if model._unique or model._cunique: if has_hscan: warnings.warn("You have disabled the use of HSCAN to clean up indexes, this will prevent unique index cleanup", stacklevel=2) else: warnings.warn("Unique indexes cannot be cleaned up in Redis versions prior to 2.8", stacklevel=2) max_id = int(conn.get('%s%s:'%(prefix, model._pkey)) or '0') for i in range(1, max_id+1, block_size): ids = list(range(i, min(i+block_size, max_id+1))) for id in ids: pipe.exists(prefix + str(id)) pipe.hexists(index, id) result = iter(pipe.execute()) remove = [id for id, ent, ind in zip(ids, result, result) if ind and not ent] if remove: _clean_index_lua(conn, [model._namespace], remove) yield min(i+block_size, max_id-1), max_id yield max_id, max_id
This utility function will print the progress of a passed iterator job as started by refresh_indices() and clean_old_index().
def show_progress(job): ''' This utility function will print the progress of a passed iterator job as started by ``refresh_indices()`` and ``clean_old_index()``. Usage example:: class RomTest(Model): pass for i in xrange(1000): RomTest().save() util.show_progress(util.clean_old_index(RomTest)) ''' start = time.time() last_print = 0 last_line = 0 for prog, total in chain(job, [(1, 1)]): # Only print a line when we start, finish, or every .1 seconds if (time.time() - last_print) > .1 or prog >= total: delta = (time.time() - start) or .0001 line = "%.1f%% complete, %.1f seconds elapsed, %.1f seconds remaining"%( 100. * prog / (total or 1), delta, total * delta / (prog or 1) - delta) length = len(line) # pad the line out with spaces just in case our line got shorter line += max(last_line - length, 0) * ' ' print(line, end="\r") last_line = length last_print = time.time() print()
Borrowed/modified from my book, Redis in Action: https://github.com/josiahcarlson/redis-in-action/blob/master/python/ch11_listing_source.py
def _script_load(script): ''' Borrowed/modified from my book, Redis in Action: https://github.com/josiahcarlson/redis-in-action/blob/master/python/ch11_listing_source.py Used for Lua scripting support when writing against Redis 2.6+ to allow for multiple unique columns per model. ''' script = script.encode('utf-8') if isinstance(script, six.text_type) else script sha = [None, sha1(script).hexdigest()] def call(conn, keys=[], args=[], force_eval=False): keys = tuple(keys) args = tuple(args) if not force_eval: if not sha[0]: try: # executing the script implicitly loads it return conn.execute_command( 'EVAL', script, len(keys), *(keys + args)) finally: # thread safe by re-using the GIL ;) del sha[:-1] try: return conn.execute_command( "EVALSHA", sha[0], len(keys), *(keys+args)) except redis.exceptions.ResponseError as msg: if not any(msg.args[0].startswith(nsm) for nsm in NO_SCRIPT_MESSAGES): raise return conn.execute_command( "EVAL", script, len(keys), *(keys+args)) return call
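A minimal sketch of how such a wrapper is typically used; the Lua script below is illustrative only and is not one of rom's internal scripts:

# illustrative: read a counter, then increment it, in one atomic script
_get_and_bump = _script_load('''
local v = redis.call('GET', KEYS[1]) or '0'
redis.call('INCR', KEYS[1])
return v
''')

# later, called with a connection plus KEYS and ARGV lists:
#   old_value = _get_and_bump(conn, keys=['counter:key'], args=[])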
Useful when you want exclusive access to an entity across all writers.
def EntityLock(entity, acquire_timeout, lock_timeout): ''' Useful when you want exclusive access to an entity across all writers.:: # example import rom class Document(rom.Model): owner = rom.ManyToOne('User', on_delete='restrict') ... def change_owner(document, new_owner): with rom.util.EntityLock(document, 5, 90): document.owner = new_owner document.save() ''' return Lock(entity._connection, entity._pk, acquire_timeout, lock_timeout)
Adds an entity to the session.
def add(self, obj): ''' Adds an entity to the session. ''' if self.null_session: return self._init() pk = obj._pk if not pk.endswith(':None'): self.known[pk] = obj self.wknown[pk] = obj
Forgets about an entity (automatically called when an entity is deleted). Call this to ensure that an entity that you've modified is not automatically saved on session.commit().
def forget(self, obj): ''' Forgets about an entity (automatically called when an entity is deleted). Call this to ensure that an entity that you've modified is not automatically saved on ``session.commit()`` . ''' self._init() self.known.pop(obj._pk, None) self.wknown.pop(obj._pk, None)
Fetches an entity from the session based on primary key.
def get(self, pk): ''' Fetches an entity from the session based on primary key. ''' self._init() return self.known.get(pk) or self.wknown.get(pk)
Call .save() on all modified entities in the session. Use when you want to flush changes to Redis, but don't want to lose your local session cache.
def flush(self, full=False, all=False, force=False): ''' Call ``.save()`` on all modified entities in the session. Use when you want to flush changes to Redis, but don't want to lose your local session cache. See the ``.commit()`` method for arguments and their meanings. ''' self._init() return self.save(*self.known.values(), full=full, all=all, force=force)
Call .save() on all modified entities in the session. Also forgets all known entities in the session, so this should only be called at the end of a request.
def commit(self, full=False, all=False, force=False):
    '''
    Call ``.save()`` on all modified entities in the session. Also forgets
    all known entities in the session, so this should only be called at
    the end of a request.

    Arguments:

        * *full* - pass ``True`` to force save full entities, not only
          changes
        * *all* - pass ``True`` to save all entities known, not only those
          entities that have been modified.
        * *force* - pass ``True`` to force-save all entities known,
          ignoring DataRaceError and EntityDeletedError exceptions
    '''
    changes = self.flush(full, all, force)
    self.known = {}
    return changes
This method is an alternate API for saving many entities (possibly not tracked by the session).
def save(self, *objects, **kwargs): ''' This method is an alternate API for saving many entities (possibly not tracked by the session). You can call:: session.save(obj) session.save(obj1, obj2, ...) session.save([obj1, obj2, ...]) And the entities will be flushed to Redis. You can pass the keyword arguments ``full``, ``all``, and ``force`` with the same meaning and semantics as the ``.commit()`` method. ''' from rom import Model full = kwargs.get('full') all = kwargs.get('all') force = kwargs.get('force') changes = 0 items = deque() items.extend(objects) while items: o = items.popleft() if isinstance(o, (list, tuple)): items.extendleft(reversed(o)) elif isinstance(o, Model): if not o._deleted and (all or o._modified): changes += o.save(full, force) else: raise ORMError( "Cannot save an object that is not an instance of a Model (you provided %r)"%( o,)) return changes
This method offers the ability to delete multiple entities in a single round trip to Redis (assuming your models are all stored on the same server).
def delete(self, *objects, **kwargs): ''' This method offers the ability to delete multiple entities in a single round trip to Redis (assuming your models are all stored on the same server). You can call:: session.delete(obj) session.delete(obj1, obj2, ...) session.delete([obj1, obj2, ...]) The keyword argument ``force=True`` can be provided, which can force the deletion of an entitiy again, even if we believe it to already be deleted. If ``force=True``, we won't re-call the object's ``_before_delete()`` method, but we will re-call ``_after_delete()``. .. note:: Objects are automatically dropped from the session after delete for the sake of cache coherency. ''' force = kwargs.get('force') from .model import Model, SKIP_ON_DELETE flat = [] items = deque() items.extend(objects) types = set() # flatten what was passed in, more or less arbitrarily deep while items: o = items.popleft() if isinstance(o, (list, tuple)): items.extendleft(reversed(o)) elif isinstance(o, Model): if force or not o._deleted: flat.append(o) types.add(type(o)) # make sure we can bulk delete everything we've been requested to from .columns import MODELS_REFERENCED for t in types: if not t._no_fk or t._namespace in MODELS_REFERENCED: raise ORMError("Can't bulk delete entities of models with foreign key relationships") c2p = {} for o in flat: # prepare delete if not o._deleted: o._before_delete() # make sure we've got connections c = o._connection if c not in c2p: c2p[c] = c.pipeline() # use our existing delete, and pass through a pipeline :P o.delete(_conn=c2p[c], skip_on_delete_i_really_mean_it=SKIP_ON_DELETE) # actually delete the data in Redis for p in c2p.values(): p.execute() # remove the objects from the session forget = self.forget for o in flat: if o._deleted == 1: o._after_delete() o._deleted = 2 forget(o)
This method is an alternate API for refreshing many entities (possibly not tracked by the session).
def refresh(self, *objects, **kwargs): ''' This method is an alternate API for refreshing many entities (possibly not tracked by the session). You can call:: session.refresh(obj) session.refresh(obj1, obj2, ...) session.refresh([obj1, obj2, ...]) And all provided entities will be reloaded from Redis. To force reloading for modified entities, you can pass ``force=True``. ''' self._init() from rom import Model force = kwargs.get('force') for o in objects: if isinstance(o, (list, tuple)): self.refresh(*o, force=force) elif isinstance(o, Model): if not o._new: o.refresh(force=force) else: # all objects are re-added to the session after refresh, # except for deleted entities... self.add(o) else: raise ORMError( "Cannot refresh an object that is not an instance of a Model (you provided %r)"%( o,))
This method is an alternate API for refreshing all entities tracked by the session.
def refresh_all(self, *objects, **kwargs): ''' This method is an alternate API for refreshing all entities tracked by the session. You can call:: session.refresh_all() session.refresh_all(force=True) And all entities known by the session will be reloaded from Redis. To force reloading for modified entities, you can pass ``force=True``. ''' self.refresh(*self.known.values(), force=kwargs.get('force'))
Actually write data to Redis. This is an internal detail. Please don't call me directly.
def redis_writer_lua(conn, pkey, namespace, id, unique, udelete, delete, data, keys, scored, prefix, suffix, geo, old_data, is_delete): ''' ... Actually write data to Redis. This is an internal detail. Please don't call me directly. ''' ldata = [] for pair in data.items(): ldata.extend(pair) for item in prefix: item.append(_prefix_score(item[-1])) for item in suffix: item.append(_prefix_score(item[-1])) data = [json.dumps(x, default=_fix_bytes) for x in (unique, udelete, delete, ldata, keys, scored, prefix, suffix, geo, is_delete, old_data)] result = _redis_writer_lua(conn, [], [namespace, id] + data) if isinstance(conn, _Pipeline): # we're in a pipelined write situation, don't parse the pipeline :P return if six.PY3: result = result.decode() result = json.loads(result) if 'unique' in result: result = result['unique'] raise UniqueKeyViolation( "Value %r for %s:%s:uidx not distinct (failed for pk=%s)"%( unique[result], namespace, result, id), namespace, id) if 'race' in result: result = result['race'] if pkey in result: raise EntityDeletedError( "Entity %s:%s deleted by another writer; use .save(force=True) to re-save"%( namespace, id), namespace, id) raise DataRaceError( "%s:%s Column(s) %r updated by another writer, write aborted!"%( namespace, id, result), namespace, id)
Saves the current entity to Redis. Will only save changed data by default, but you can force a full save by passing full=True.
def save(self, full=False, force=False): ''' Saves the current entity to Redis. Will only save changed data by default, but you can force a full save by passing ``full=True``. If the underlying entity was deleted and you want to re-save the entity, you can pass ``force=True`` to force a full re-save of the entity. ''' # handle the pre-commit hooks was_new = self._new if was_new: self._before_insert() else: self._before_update() new = self.to_dict() ret, data = self._apply_changes( self._last, new, full or self._new or force, is_new=self._new or force) self._last = data self._new = False self._modified = False self._deleted = False # handle the post-commit hooks if was_new: self._after_insert() else: self._after_update() return ret
Deletes the entity immediately. Also performs any on_delete operations specified as part of column definitions.
def delete(self, **kwargs): ''' Deletes the entity immediately. Also performs any on_delete operations specified as part of column definitions. ''' if kwargs.get('skip_on_delete_i_really_mean_it') is not SKIP_ON_DELETE: # handle the pre-commit hook self._before_delete() # handle any foreign key references + cascade options _on_delete(self) session.forget(self) self._apply_changes(self._last, {}, delete=True, _conn=kwargs.get('_conn')) self._modified = True self._deleted = True # handle the post-commit hooks if kwargs.get('skip_on_delete_i_really_mean_it') is not SKIP_ON_DELETE: self._after_delete()
Creates a shallow copy of the given entity (any entities that can be retrieved from a OneToMany relationship will not be copied).
def copy(self): ''' Creates a shallow copy of the given entity (any entities that can be retrieved from a OneToMany relationship will not be copied). ''' x = self.to_dict() x.pop(self._pkey) return self.__class__(**x)
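For example (the Note model here is hypothetical):

note = Note.get(7)
draft = note.copy()   # same column values, but no primary key yet
draft.save()          # persisted as a brand new entity with its own id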
Will fetch one or more entities of this type from the session or Redis.
def get(cls, ids): ''' Will fetch one or more entities of this type from the session or Redis. Used like:: MyModel.get(5) MyModel.get([1, 6, 2, 4]) Passing a list or a tuple will return multiple entities, in the same order that the ids were passed. ''' conn = _connect(cls) # prepare the ids single = not isinstance(ids, (list, tuple, set, frozenset)) if single: ids = [ids] pks = ['%s:%s'%(cls._namespace, id) for id in map(int, ids)] # get from the session, if possible out = list(map(session.get, pks)) # if we couldn't get an instance from the session, load from Redis if None in out: pipe = conn.pipeline(True) idxs = [] # Fetch missing data for i, data in enumerate(out): if data is None: idxs.append(i) pipe.hgetall(pks[i]) # Update output list for i, data in zip(idxs, pipe.execute()): if data: if six.PY3: data = dict((k.decode(), v.decode()) for k, v in data.items()) out[i] = cls(_loading=True, **data) # Get rid of missing models out = [x for x in out if x] if single: return out[0] if out else None return out
This method offers a simple query method for fetching entities of this type via attribute numeric ranges (such columns must be indexed), or via unique columns.
def get_by(cls, **kwargs): ''' This method offers a simple query method for fetching entities of this type via attribute numeric ranges (such columns must be ``indexed``), or via ``unique`` columns. Some examples:: user = User.get_by(email_address='user@domain.com') # gets up to 25 users created in the last 24 hours users = User.get_by( created_at=(time.time()-86400, time.time()), _limit=(0, 25)) Optional keyword-only arguments: * *_limit* - A 2-tuple of (offset, count) that can be used to paginate or otherwise limit results returned by a numeric range query * *_numeric* - An optional boolean defaulting to False that forces the use of a numeric index for ``.get_by(col=val)`` queries even when ``col`` has an existing unique index If you would like to make queries against multiple columns or with multiple criteria, look into the Model.query class property. .. note:: rom will attempt to use a unique index first, then a numeric index if there was no unique index. You can explicitly tell rom to only use the numeric index by using ``.get_by(..., _numeric=True)``. .. note:: Ranged queries with `get_by(col=(start, end))` will only work with columns that use a numeric index. ''' conn = _connect(cls) model = cls._namespace # handle limits and query requirements _limit = kwargs.pop('_limit', ()) if _limit and len(_limit) != 2: raise QueryError("Limit must include both 'offset' and 'count' parameters") elif _limit and not all(isinstance(x, six.integer_types) for x in _limit): raise QueryError("Limit arguments must both be integers") if len(kwargs) != 1: raise QueryError("We can only fetch object(s) by exactly one attribute, you provided %s"%(len(kwargs),)) _numeric = bool(kwargs.pop('_numeric', None)) for attr, value in kwargs.items(): plain_attr = attr.partition(':')[0] if isinstance(value, tuple) and len(value) != 2: raise QueryError("Range queries must include exactly two endpoints") # handle unique index lookups if attr in cls._unique and (plain_attr not in cls._index or not _numeric): if isinstance(value, tuple): raise QueryError("Cannot query a unique index with a range of values") single = not isinstance(value, list) if single: value = [value] qvalues = list(map(cls._columns[attr]._to_redis, value)) ids = [x for x in conn.hmget('%s:%s:uidx'%(model, attr), qvalues) if x] if not ids: return None if single else [] return cls.get(ids[0] if single else ids) if plain_attr not in cls._index: raise QueryError("Cannot query on a column without an index") if isinstance(value, NUMERIC_TYPES) and not isinstance(value, bool): value = (value, value) if isinstance(value, tuple): # this is a numeric range query, we'll just pull it directly args = list(value) for i, a in enumerate(args): # Handle the ranges where None is -inf on the left and inf # on the right when used in the context of a range tuple. args[i] = ('-inf', 'inf')[i] if a is None else cls._columns[attr]._to_redis(a) if _limit: args.extend(_limit) ids = conn.zrangebyscore('%s:%s:idx'%(model, attr), *args) if not ids: return [] return cls.get(ids) # defer other index lookups to the query object query = cls.query.filter(**{attr: value}) if _limit: query = query.limit(*_limit) return query.all()
Updates multiple attributes in a model. If args are provided, this method will assign attributes in the order returned by list(self._columns) until one or both are exhausted.
def update(self, *args, **kwargs): ''' Updates multiple attributes in a model. If ``args`` are provided, this method will assign attributes in the order returned by ``list(self._columns)`` until one or both are exhausted. If ``kwargs`` are provided, this method will assign attributes to the names provided, after ``args`` have been processed. ''' sa = setattr for a, v in zip(self._columns, args): sa(self, a, v) for a, v in kwargs.items(): sa(self, a, v) return self
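A short usage sketch; the User model and its column names are assumptions for illustration:

user = User.get(1)
# assign several attributes by name, then persist the changes
user.update(email='new@example.com', name='New Name').save()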
Replacement for pickle.dump() using _LokyPickler.
def dump(obj, file, reducers=None, protocol=None): '''Replacement for pickle.dump() using _LokyPickler.''' global _LokyPickler _LokyPickler(file, reducers=reducers, protocol=protocol).dump(obj)
Attach a reducer function to a given type in the dispatch table.
def register(cls, type, reduce_func): """Attach a reducer function to a given type in the dispatch table.""" if sys.version_info < (3,): # Python 2 pickler dispatching is not explicitly customizable. # Let us use a closure to workaround this limitation. def dispatcher(cls, obj): reduced = reduce_func(obj) cls.save_reduce(obj=obj, *reduced) cls.dispatch_table[type] = dispatcher else: cls.dispatch_table[type] = reduce_func
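A minimal sketch of registering a reducer for a custom type, assuming the pickler class above exposes register as a classmethod; the Point class and reduce_point function are hypothetical:

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

def reduce_point(p):
    # return (callable, args) so the pickler can rebuild the object
    return (Point, (p.x, p.y))

_LokyPickler.register(Point, reduce_point)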
Construct or retrieve a semaphore with the given name
def _sem_open(name, value=None): """ Construct or retrieve a semaphore with the given name If value is None, try to retrieve an existing named semaphore. Else create a new semaphore with the given value """ if value is None: handle = pthread.sem_open(ctypes.c_char_p(name), 0) else: handle = pthread.sem_open(ctypes.c_char_p(name), SEM_OFLAG, SEM_PERM, ctypes.c_int(value)) if handle == SEM_FAILURE: e = ctypes.get_errno() if e == errno.EEXIST: raise FileExistsError("a semaphore named %s already exists" % name) elif e == errno.ENOENT: raise FileNotFoundError('cannot find semaphore named %s' % name) elif e == errno.ENOSYS: raise NotImplementedError('No semaphore implementation on this ' 'system') else: raiseFromErrno() return handle
Return the number of CPUs the current process can use.
def cpu_count(): """Return the number of CPUs the current process can use. The returned number of CPUs accounts for: * the number of CPUs in the system, as given by ``multiprocessing.cpu_count``; * the CPU affinity settings of the current process (available with Python 3.4+ on some Unix systems); * CFS scheduler CPU bandwidth limit (available on Linux only, typically set by docker and similar container orchestration systems); * the value of the LOKY_MAX_CPU_COUNT environment variable if defined. and is given as the minimum of these constraints. It is also always larger or equal to 1. """ import math try: cpu_count_mp = mp.cpu_count() except NotImplementedError: cpu_count_mp = 1 # Number of available CPUs given affinity settings cpu_count_affinity = cpu_count_mp if hasattr(os, 'sched_getaffinity'): try: cpu_count_affinity = len(os.sched_getaffinity(0)) except NotImplementedError: pass # CFS scheduler CPU bandwidth limit # available in Linux since 2.6 kernel cpu_count_cfs = cpu_count_mp cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us" if os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname): with open(cfs_quota_fname, 'r') as fh: cfs_quota_us = int(fh.read()) with open(cfs_period_fname, 'r') as fh: cfs_period_us = int(fh.read()) if cfs_quota_us > 0 and cfs_period_us > 0: # Make sure this quantity is an int as math.ceil returns a # float in python2.7. (See issue #165) cpu_count_cfs = int(math.ceil(cfs_quota_us / cfs_period_us)) # User defined soft-limit passed as an loky specific environment variable. cpu_count_loky = int(os.environ.get('LOKY_MAX_CPU_COUNT', cpu_count_mp)) aggregate_cpu_count = min(cpu_count_mp, cpu_count_affinity, cpu_count_cfs, cpu_count_loky) return max(aggregate_cpu_count, 1)
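For example, the loky-specific environment variable acts as a soft upper bound; a sketch (the returned value is still the minimum of all the constraints listed above, and never below 1):

import os

os.environ['LOKY_MAX_CPU_COUNT'] = '2'   # set before calling cpu_count()
n = cpu_count()                          # at most 2, and always >= 1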
Returns a queue object
def Queue(self, maxsize=0, reducers=None): '''Returns a queue object''' from .queues import Queue return Queue(maxsize, reducers=reducers, ctx=self.get_context())
Returns a queue object
def SimpleQueue(self, reducers=None): '''Returns a queue object''' from .queues import SimpleQueue return SimpleQueue(reducers=reducers, ctx=self.get_context())
Iterates over zip()ed iterables in chunks.
def _get_chunks(chunksize, *iterables): """Iterates over zip()ed iterables in chunks. """ if sys.version_info < (3, 3): it = itertools.izip(*iterables) else: it = zip(*iterables) while True: chunk = tuple(itertools.islice(it, chunksize)) if not chunk: return yield chunk
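A small illustration of the chunking behaviour:

list(_get_chunks(2, [1, 2, 3, 4, 5], ['a', 'b', 'c', 'd', 'e']))
# -> [((1, 'a'), (2, 'b')), ((3, 'c'), (4, 'd')), ((5, 'e'),)]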
Safely send back the given result or exception
def _sendback_result(result_queue, work_id, result=None, exception=None): """Safely send back the given result or exception""" try: result_queue.put(_ResultItem(work_id, result=result, exception=exception)) except BaseException as e: exc = _ExceptionWithTraceback(e) result_queue.put(_ResultItem(work_id, exception=exc))
Evaluates calls from call_queue and places the results in result_queue.
def _process_worker(call_queue, result_queue, initializer, initargs, processes_management_lock, timeout, worker_exit_lock, current_depth): """Evaluates calls from call_queue and places the results in result_queue. This worker is run in a separate process. Args: call_queue: A ctx.Queue of _CallItems that will be read and evaluated by the worker. result_queue: A ctx.Queue of _ResultItems that will written to by the worker. initializer: A callable initializer, or None initargs: A tuple of args for the initializer process_management_lock: A ctx.Lock avoiding worker timeout while some workers are being spawned. timeout: maximum time to wait for a new item in the call_queue. If that time is expired, the worker will shutdown. worker_exit_lock: Lock to avoid flagging the executor as broken on workers timeout. current_depth: Nested parallelism level, to avoid infinite spawning. """ if initializer is not None: try: initializer(*initargs) except BaseException: _base.LOGGER.critical('Exception in initializer:', exc_info=True) # The parent will notice that the process stopped and # mark the pool broken return # set the global _CURRENT_DEPTH mechanism to limit recursive call global _CURRENT_DEPTH _CURRENT_DEPTH = current_depth _process_reference_size = None _last_memory_leak_check = None pid = os.getpid() mp.util.debug('Worker started with timeout=%s' % timeout) while True: try: call_item = call_queue.get(block=True, timeout=timeout) if call_item is None: mp.util.info("Shutting down worker on sentinel") except queue.Empty: mp.util.info("Shutting down worker after timeout %0.3fs" % timeout) if processes_management_lock.acquire(block=False): processes_management_lock.release() call_item = None else: mp.util.info("Could not acquire processes_management_lock") continue except BaseException as e: previous_tb = traceback.format_exc() try: result_queue.put(_RemoteTraceback(previous_tb)) except BaseException: # If we cannot format correctly the exception, at least print # the traceback. print(previous_tb) sys.exit(1) if call_item is None: # Notify queue management thread about clean worker shutdown result_queue.put(pid) with worker_exit_lock: return try: r = call_item() except BaseException as e: exc = _ExceptionWithTraceback(e) result_queue.put(_ResultItem(call_item.work_id, exception=exc)) else: _sendback_result(result_queue, call_item.work_id, result=r) del r # Free the resource as soon as possible, to avoid holding onto # open files or shared memory that is not needed anymore del call_item if _USE_PSUTIL: if _process_reference_size is None: # Make reference measurement after the first call _process_reference_size = _get_memory_usage(pid, force_gc=True) _last_memory_leak_check = time() continue if time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY: mem_usage = _get_memory_usage(pid) _last_memory_leak_check = time() if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE: # Memory usage stays within bounds: everything is fine. continue # Check again memory usage; this time take the measurement # after a forced garbage collection to break any reference # cycles. mem_usage = _get_memory_usage(pid, force_gc=True) _last_memory_leak_check = time() if mem_usage - _process_reference_size < _MAX_MEMORY_LEAK_SIZE: # The GC managed to free the memory: everything is fine. continue # The process is leaking memory: let the master process # know that we need to start a new worker. 
mp.util.info("Memory leak detected: shutting down worker") result_queue.put(pid) with worker_exit_lock: return else: # if psutil is not installed, trigger gc.collect events # regularly to limit potential memory leaks due to reference cycles if ((_last_memory_leak_check is None) or (time() - _last_memory_leak_check > _MEMORY_LEAK_CHECK_DELAY)): gc.collect() _last_memory_leak_check = time()
Fills call_queue with _WorkItems from pending_work_items.
def _add_call_item_to_queue(pending_work_items, running_work_items, work_ids, call_queue): """Fills call_queue with _WorkItems from pending_work_items. This function never blocks. Args: pending_work_items: A dict mapping work ids to _WorkItems e.g. {5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids are consumed and the corresponding _WorkItems from pending_work_items are transformed into _CallItems and put in call_queue. call_queue: A ctx.Queue that will be filled with _CallItems derived from _WorkItems. """ while True: if call_queue.full(): return try: work_id = work_ids.get(block=False) except queue.Empty: return else: work_item = pending_work_items[work_id] if work_item.future.set_running_or_notify_cancel(): running_work_items += [work_id] call_queue.put(_CallItem(work_id, work_item.fn, work_item.args, work_item.kwargs), block=True) else: del pending_work_items[work_id] continue
Manages the communication between this process and the worker processes.
def _queue_management_worker(executor_reference, executor_flags, processes, pending_work_items, running_work_items, work_ids_queue, call_queue, result_queue, thread_wakeup, processes_management_lock): """Manages the communication between this process and the worker processes. This function is run in a local thread. Args: executor_reference: A weakref.ref to the ProcessPoolExecutor that owns this thread. Used to determine if the ProcessPoolExecutor has been garbage collected and that this function can exit. executor_flags: A ExecutorFlags holding internal states of the ProcessPoolExecutor. It permits to know if the executor is broken even the object has been gc. process: A list of the ctx.Process instances used as workers. pending_work_items: A dict mapping work ids to _WorkItems e.g. {5: <_WorkItem...>, 6: <_WorkItem...>, ...} work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). call_queue: A ctx.Queue that will be filled with _CallItems derived from _WorkItems for processing by the process workers. result_queue: A ctx.SimpleQueue of _ResultItems generated by the process workers. thread_wakeup: A _ThreadWakeup to allow waking up the queue_manager_thread from the main Thread and avoid deadlocks caused by permanently locked queues. """ executor = None def is_shutting_down(): # No more work items can be added if: # - The interpreter is shutting down OR # - The executor that own this worker is not broken AND # * The executor that owns this worker has been collected OR # * The executor that owns this worker has been shutdown. # If the executor is broken, it should be detected in the next loop. return (_global_shutdown or ((executor is None or executor_flags.shutdown) and not executor_flags.broken)) def shutdown_all_workers(): mp.util.debug("queue management thread shutting down") executor_flags.flag_as_shutting_down() # Create a list to avoid RuntimeError due to concurrent modification of # processes. nb_children_alive is thus an upper bound. Also release the # processes' _worker_exit_lock to accelerate the shutdown procedure, as # there is no need for hand-shake here. with processes_management_lock: n_children_alive = 0 for p in list(processes.values()): p._worker_exit_lock.release() n_children_alive += 1 n_children_to_stop = n_children_alive n_sentinels_sent = 0 # Send the right number of sentinels, to make sure all children are # properly terminated. while n_sentinels_sent < n_children_to_stop and n_children_alive > 0: for i in range(n_children_to_stop - n_sentinels_sent): try: call_queue.put_nowait(None) n_sentinels_sent += 1 except Full: break with processes_management_lock: n_children_alive = sum( p.is_alive() for p in list(processes.values()) ) # Release the queue's resources as soon as possible. Flag the feeder # thread for clean exit to avoid having the crash detection thread flag # the Executor as broken during the shutdown. This is safe as either: # * We don't need to communicate with the workers anymore # * There is nothing left in the Queue buffer except None sentinels mp.util.debug("closing call_queue") call_queue.close() mp.util.debug("joining processes") # If .join() is not called on the created processes then # some ctx.Queue methods may deadlock on Mac OS X. 
while processes: _, p = processes.popitem() p.join() mp.util.debug("queue management thread clean shutdown of worker " "processes: {}".format(list(processes))) result_reader = result_queue._reader wakeup_reader = thread_wakeup._reader readers = [result_reader, wakeup_reader] while True: _add_call_item_to_queue(pending_work_items, running_work_items, work_ids_queue, call_queue) # Wait for a result to be ready in the result_queue while checking # that all worker processes are still running, or for a wake up # signal send. The wake up signals come either from new tasks being # submitted, from the executor being shutdown/gc-ed, or from the # shutdown of the python interpreter. worker_sentinels = [p.sentinel for p in list(processes.values())] ready = wait(readers + worker_sentinels) broken = ("A worker process managed by the executor was unexpectedly " "terminated. This could be caused by a segmentation fault " "while calling the function or by an excessive memory usage " "causing the Operating System to kill the worker.", None, TerminatedWorkerError) if result_reader in ready: try: result_item = result_reader.recv() broken = None if isinstance(result_item, _RemoteTraceback): broken = ("A task has failed to un-serialize. Please " "ensure that the arguments of the function are " "all picklable.", result_item.tb, BrokenProcessPool) except BaseException as e: tb = getattr(e, "__traceback__", None) if tb is None: _, _, tb = sys.exc_info() broken = ("A result has failed to un-serialize. Please " "ensure that the objects returned by the function " "are always picklable.", traceback.format_exception(type(e), e, tb), BrokenProcessPool) elif wakeup_reader in ready: broken = None result_item = None thread_wakeup.clear() if broken is not None: msg, cause_tb, exc_type = broken if (issubclass(exc_type, TerminatedWorkerError) and (sys.platform != "win32")): # In Windows, introspecting terminated workers exitcodes seems # unstable, therefore they are not appended in the exception # message. msg += " The exit codes of the workers are {}".format( get_exitcodes_terminated_worker(processes)) bpe = exc_type(msg) if cause_tb is not None: bpe = set_cause(bpe, _RemoteTraceback( "\n'''\n{}'''".format(''.join(cause_tb)))) # Mark the process pool broken so that submits fail right now. executor_flags.flag_as_broken(bpe) # All futures in flight must be marked failed for work_id, work_item in pending_work_items.items(): work_item.future.set_exception(bpe) # Delete references to object. See issue16284 del work_item pending_work_items.clear() # Terminate remaining workers forcibly: the queues or their # locks may be in a dirty state and block forever. while processes: _, p = processes.popitem() mp.util.debug('terminate process {}'.format(p.name)) try: recursive_terminate(p) except ProcessLookupError: # pragma: no cover pass shutdown_all_workers() return if isinstance(result_item, int): # Clean shutdown of a worker using its PID, either on request # by the executor.shutdown method or by the timeout of the worker # itself: we should not mark the executor as broken. with processes_management_lock: p = processes.pop(result_item, None) # p can be None is the executor is concurrently shutting down. if p is not None: p._worker_exit_lock.release() p.join() del p # Make sure the executor have the right number of worker, even if a # worker timeout while some jobs were submitted. If some work is # pending or there is less processes than running items, we need to # start a new Process and raise a warning. 
n_pending = len(pending_work_items) n_running = len(running_work_items) if (n_pending - n_running > 0 or n_running > len(processes)): executor = executor_reference() if (executor is not None and len(processes) < executor._max_workers): warnings.warn( "A worker stopped while some jobs were given to the " "executor. This can be caused by a too short worker " "timeout or by a memory leak.", UserWarning ) executor._adjust_process_count() executor = None elif result_item is not None: work_item = pending_work_items.pop(result_item.work_id, None) # work_item can be None if another process terminated if work_item is not None: if result_item.exception: work_item.future.set_exception(result_item.exception) else: work_item.future.set_result(result_item.result) # Delete references to object. See issue16284 del work_item running_work_items.remove(result_item.work_id) # Delete reference to result_item del result_item # Check whether we should start shutting down. executor = executor_reference() # No more work items can be added if: # - The interpreter is shutting down OR # - The executor that owns this worker has been collected OR # - The executor that owns this worker has been shutdown. if is_shutting_down(): # bpo-33097: Make sure that the executor is flagged as shutting # down even if it is shutdown by the interpreter exiting. with executor_flags.shutdown_lock: executor_flags.shutdown = True if executor_flags.kill_workers: while pending_work_items: _, work_item = pending_work_items.popitem() work_item.future.set_exception(ShutdownExecutorError( "The Executor was shutdown before this job could " "complete.")) del work_item # Terminate remaining workers forcibly: the queues or their # locks may be in a dirty state and block forever. while processes: _, p = processes.popitem() recursive_terminate(p) shutdown_all_workers() return # Since no new work items can be added, it is safe to shutdown # this thread if there are no pending work items. if not pending_work_items: shutdown_all_workers() return elif executor_flags.broken: return executor = None
Ensures all workers and the management thread are running.
def _ensure_executor_running(self): """ensures all workers and management thread are running """ with self._processes_management_lock: if len(self._processes) != self._max_workers: self._adjust_process_count() self._start_queue_management_thread()
Returns an iterator equivalent to map(fn, iter).
def map(self, fn, *iterables, **kwargs): """Returns an iterator equivalent to map(fn, iter). Args: fn: A callable that will take as many arguments as there are passed iterables. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. chunksize: If greater than one, the iterables will be chopped into chunks of size chunksize and submitted to the process pool. If set to one, the items in the list will be sent one at a time. Returns: An iterator equivalent to: map(func, *iterables) but the calls may be evaluated out-of-order. Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. Exception: If fn(*args) raises for any values. """ timeout = kwargs.get('timeout', None) chunksize = kwargs.get('chunksize', 1) if chunksize < 1: raise ValueError("chunksize must be >= 1.") results = super(ProcessPoolExecutor, self).map( partial(_process_chunk, fn), _get_chunks(chunksize, *iterables), timeout=timeout) return _chain_from_iterable_of_lists(results)
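A minimal usage sketch (assuming this is the loky ProcessPoolExecutor); chunksize > 1 amortizes inter-process communication when there are many small tasks:

with ProcessPoolExecutor(max_workers=2) as executor:
    squares = list(executor.map(pow, range(10), [2] * 10, chunksize=5))
# squares == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]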
Wrapper for non-picklable objects to use cloudpickle to serialize them.
def wrap_non_picklable_objects(obj, keep_wrapper=True): """Wrapper for non-picklable object to use cloudpickle to serialize them. Note that this wrapper tends to slow down the serialization process as it is done with cloudpickle which is typically slower compared to pickle. The proper way to solve serialization issues is to avoid defining functions and objects in the main scripts and to implement __reduce__ functions for complex classes. """ if not cloudpickle: raise ImportError("could not import cloudpickle. Please install " "cloudpickle to allow extended serialization. " "(`pip install cloudpickle`).") # If obj is a class, create a CloudpickledClassWrapper which instantiates # the object internally and wrap it directly in a CloudpickledObjectWrapper if inspect.isclass(obj): class CloudpickledClassWrapper(CloudpickledObjectWrapper): def __init__(self, *args, **kwargs): self._obj = obj(*args, **kwargs) self._keep_wrapper = keep_wrapper CloudpickledClassWrapper.__name__ = obj.__name__ return CloudpickledClassWrapper # If obj is an instance of a class, just wrap it in a regular # CloudpickledObjectWrapper return _wrap_non_picklable_objects(obj, keep_wrapper=keep_wrapper)
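A sketch of the intended use, assuming cloudpickle is installed and that the returned wrapper forwards calls to the wrapped object; the nested add function below would not be picklable by the standard pickler:

def make_adder(n):
    @wrap_non_picklable_objects
    def add(x):
        return x + n
    return add

add_three = make_adder(3)   # can now be shipped to worker processes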