Return dictionary of transformed results, with keys being output names. Returns None if execution result isn't a success.
def transformed_values(self): '''Return dictionary of transformed results, with keys being output names. Returns None if execution result isn't a success. Reconstructs the pipeline context to materialize values. ''' if self.success and self.transforms: with self.reconstruct_context() as context: values = { result.step_output_data.output_name: self._get_value( context, result.step_output_data ) for result in self.transforms if result.is_successful_output } return values else: return None
Returns transformed value either for DEFAULT_OUTPUT or for the output given as output_name. Returns None if execution result isn't a success.
def transformed_value(self, output_name=DEFAULT_OUTPUT): '''Returns transformed value either for DEFAULT_OUTPUT or for the output given as output_name. Returns None if execution result isn't a success. Reconstructs the pipeline context to materialize value. ''' check.str_param(output_name, 'output_name') if not self.solid.definition.has_output(output_name): raise DagsterInvariantViolationError( '{output_name} not defined in solid {solid}'.format( output_name=output_name, solid=self.solid.name ) ) if self.success: for result in self.transforms: if ( result.is_successful_output and result.step_output_data.output_name == output_name ): with self.reconstruct_context() as context: value = self._get_value(context, result.step_output_data) return value raise DagsterInvariantViolationError( ( 'Did not find result {output_name} in solid {self.solid.name} ' 'execution result' ).format(output_name=output_name, self=self) ) else: return None
Returns the failing step's data that happened during this solid's execution, if any.
def failure_data(self): '''Returns the failing step's data that happened during this solid's execution, if any''' for result in itertools.chain( self.input_expectations, self.output_expectations, self.transforms ): if result.event_type == DagsterEventType.STEP_FAILURE: return result.step_failure_data
A :py:class:`Dict` with a name, allowing it to be referenced by that name.
def NamedDict(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES): ''' A :py:class:`Dict` with a name allowing it to be referenced by that name. ''' check_user_facing_fields_dict(fields, 'NamedDict named "{}"'.format(name)) class _NamedDict(_ConfigComposite): def __init__(self): super(_NamedDict, self).__init__( key=name, name=name, fields=fields, description=description, type_attributes=type_attributes, ) return _NamedDict
Schema for configuration data with string keys and typed values via :py:class:`Field`.
def Dict(fields): ''' Schema for configuration data with string keys and typed values via :py:class:`Field` . Args: fields (Dict[str, Field]) ''' check_user_facing_fields_dict(fields, 'Dict') class _Dict(_ConfigComposite): def __init__(self): key = 'Dict.' + str(DictCounter.get_next_count()) super(_Dict, self).__init__( name=None, key=key, fields=fields, description='A configuration dictionary with typed fields', type_attributes=ConfigTypeAttributes(is_builtin=True), ) return _Dict
A permissive dict will permit the user to partially specify the permitted fields. Any fields that are specified and passed in will be type checked. Other fields will be allowed but will be ignored by the type checker.
def PermissiveDict(fields=None): '''A permissive dict will permit the user to partially specify the permitted fields. Any fields that are specified and passed in will be type checked. Other fields will be allowed, but will be ignored by the type checker. ''' if fields: check_user_facing_fields_dict(fields, 'PermissiveDict') class _PermissiveDict(_ConfigComposite): def __init__(self): key = 'PermissiveDict.' + str(DictCounter.get_next_count()) super(_PermissiveDict, self).__init__( name=None, key=key, fields=fields or dict(), description='A configuration dictionary with typed fields', type_attributes=ConfigTypeAttributes(is_builtin=True), ) @property def is_permissive_composite(self): return True return _PermissiveDict
Selectors are used when you want to be able to present several different options to the user but force them to select one. For example, it would not make much sense to allow them to say that a single input should be sourced from a csv and a parquet file: they must choose.
def Selector(fields): '''Selectors are used when you want to be able present several different options to the user but force them to select one. For example, it would not make much sense to allow them to say that a single input should be sourced from a csv and a parquet file: They must choose. Note that in other type systems this might be called an "input union." Args: fields (Dict[str, Field]): ''' check_user_facing_fields_dict(fields, 'Selector') class _Selector(_ConfigSelector): def __init__(self): key = 'Selector.' + str(DictCounter.get_next_count()) super(_Selector, self).__init__( key=key, name=None, fields=fields, # description='A configuration dictionary with typed fields', type_attributes=ConfigTypeAttributes(is_builtin=True), ) return _Selector
A :py:class:`Selector` with a name, allowing it to be referenced by that name. Args: name (str), fields (Dict[str, Field]).
def NamedSelector(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES): ''' A :py:class`Selector` with a name, allowing it to be referenced by that name. Args: name (str): fields (Dict[str, Field]) ''' check.str_param(name, 'name') check_user_facing_fields_dict(fields, 'NamedSelector named "{}"'.format(name)) class _NamedSelector(_ConfigSelector): def __init__(self): super(_NamedSelector, self).__init__( key=name, name=name, fields=fields, description=description, type_attributes=type_attributes, ) return _NamedSelector
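The factories above (NamedDict, Dict, PermissiveDict, Selector, NamedSelector) all share one pattern: a function that closes over its arguments and returns a freshly built class. A minimal, self-contained sketch of that pattern, with no dagster imports and purely illustrative names (make_named_dict and its fields are made up for this example):

def make_named_dict(name, fields, description=None):
    # Return a new class that remembers the arguments it was built with,
    # mirroring how NamedDict builds and returns _NamedDict.
    class _NamedDict:
        def __init__(self):
            self.key = name
            self.name = name
            self.fields = fields
            self.description = description
    _NamedDict.__name__ = '_NamedDict_' + name
    return _NamedDict

MyConfig = make_named_dict('MyConfig', {'path': str, 'retries': int})
instance = MyConfig()
print(instance.name, instance.fields)  # MyConfig {'path': <class 'str'>, 'retries': <class 'int'>}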
Datasets must be of the form "project.dataset" or "dataset".
def _is_valid_dataset(config_value): '''Datasets must be of form "project.dataset" or "dataset" ''' return re.match( # regex matches: project.dataset -- OR -- dataset r'^' + RE_PROJECT + r'\.' + RE_DS_TABLE + r'$|^' + RE_DS_TABLE + r'$', config_value, )
Tables must be of the form "project.dataset.table" or "dataset.table".
def _is_valid_table(config_value): '''Tables must be of form "project.dataset.table" or "dataset.table" ''' return re.match( r'^' + RE_PROJECT # project + r'\.' # . + RE_DS_TABLE # dataset + r'\.' # . + RE_DS_TABLE # table + r'$|^' # -- OR -- + RE_DS_TABLE # dataset + r'\.' # . + RE_DS_TABLE # table + r'$', config_value, )
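A self-contained sketch of how the table regex above behaves. RE_PROJECT and RE_DS_TABLE are stand-in patterns here, since the real constants live elsewhere in the module:

import re

RE_PROJECT = r'[\w\-]+'   # stand-in for the module's real project pattern
RE_DS_TABLE = r'\w+'      # stand-in for the module's real dataset/table pattern

def _is_valid_table(config_value):
    # project.dataset.table -- OR -- dataset.table
    return re.match(
        r'^' + RE_PROJECT + r'\.' + RE_DS_TABLE + r'\.' + RE_DS_TABLE
        + r'$|^' + RE_DS_TABLE + r'\.' + RE_DS_TABLE + r'$',
        config_value,
    )

assert _is_valid_table('my-project.analytics.events')
assert _is_valid_table('analytics.events')
assert not _is_valid_table('events')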
Execute the user-specified transform for the solid. Wrap in an error boundary and do all relevant logging and metrics tracking.
def _execute_core_transform(transform_context, inputs): ''' Execute the user-specified transform for the solid. Wrap in an error boundary and do all relevant logging and metrics tracking ''' check.inst_param(transform_context, 'transform_context', SystemTransformExecutionContext) check.dict_param(inputs, 'inputs', key_type=str) step = transform_context.step solid = step.solid transform_context.log.debug( 'Executing core transform for solid {solid}.'.format(solid=solid.name) ) all_results = [] for step_output in _yield_transform_results(transform_context, inputs): yield step_output if isinstance(step_output, StepOutputValue): all_results.append(step_output) if len(all_results) != len(solid.definition.output_defs): emitted_result_names = {r.output_name for r in all_results} solid_output_names = {output_def.name for output_def in solid.definition.output_defs} omitted_outputs = solid_output_names.difference(emitted_result_names) transform_context.log.info( 'Solid {solid} did not fire outputs {outputs}'.format( solid=solid.name, outputs=repr(omitted_outputs) ) )
Decorator version of as_dagster_type. See documentation for :py:func:`as_dagster_type`.
def dagster_type( name=None, description=None, input_schema=None, output_schema=None, serialization_strategy=None, storage_plugins=None, ): ''' Decorator version of as_dagster_type. See documentation for :py:func:`as_dagster_type` . ''' def _with_args(bare_cls): check.type_param(bare_cls, 'bare_cls') new_name = name if name else bare_cls.__name__ return _decorate_as_dagster_type( bare_cls=bare_cls, key=new_name, name=new_name, description=description, input_schema=input_schema, output_schema=output_schema, serialization_strategy=serialization_strategy, storage_plugins=storage_plugins, ) # check for no args, no parens case if callable(name): klass = name new_name = klass.__name__ return _decorate_as_dagster_type( bare_cls=klass, key=new_name, name=new_name, description=None ) return _with_args
Takes a Python class and creates a type for it in the Dagster domain.
def as_dagster_type( existing_type, name=None, description=None, input_schema=None, output_schema=None, serialization_strategy=None, storage_plugins=None, ): ''' Takes a python cls and creates a type for it in the Dagster domain. Args: existing_type (cls) The python type you want to project in to the Dagster type system. name (Optional[str]): description (Optiona[str]): input_schema (Optional[InputSchema]): An instance of a class that inherits from :py:class:`InputSchema` that can map config data to a value of this type. output_schema (Optiona[OutputSchema]): An instance of a class that inherits from :py:class:`OutputSchema` that can map config data to persisting values of this type. serialization_strategy (Optional[SerializationStrategy]): The default behavior for how to serialize this value for persisting between execution steps. storage_plugins (Optional[Dict[RunStorageMode, TypeStoragePlugin]]): Storage type specific overrides for the serialization strategy. This allows for storage specific optimzations such as effecient distributed storage on S3. ''' check.type_param(existing_type, 'existing_type') check.opt_str_param(name, 'name') check.opt_str_param(description, 'description') check.opt_inst_param(input_schema, 'input_schema', InputSchema) check.opt_inst_param(output_schema, 'output_schema', OutputSchema) check.opt_inst_param(serialization_strategy, 'serialization_strategy', SerializationStrategy) storage_plugins = check.opt_dict_param(storage_plugins, 'storage_plugins') if serialization_strategy is None: serialization_strategy = PickleSerializationStrategy() name = existing_type.__name__ if name is None else name return _decorate_as_dagster_type( existing_type, key=name, name=name, description=description, input_schema=input_schema, output_schema=output_schema, serialization_strategy=serialization_strategy, storage_plugins=storage_plugins, )
A decorator for creating a resource. The decorated function will be used as the resource_fn in a ResourceDefinition.
def resource(config_field=None, description=None): '''A decorator for creating a resource. The decorated function will be used as the resource_fn in a ResourceDefinition. ''' # This case is for when decorator is used bare, without arguments. # E.g. @resource versus @resource() if callable(config_field): return ResourceDefinition(resource_fn=config_field) def _wrap(resource_fn): return ResourceDefinition(resource_fn, config_field, description) return _wrap
See https://bit.ly/2OpksJC for the source of the subprocess stdout/stderr capture pattern in this function.
def run_spark_subprocess(cmd, logger): """See https://bit.ly/2OpksJC for source of the subprocess stdout/stderr capture pattern in this function. """ # Spark sometimes logs in log4j format. In those cases, we detect and parse. # Example log line from Spark that this is intended to match: # 2019-03-27 16:00:19 INFO ContextHandler:781 - Started o.s.j.s.ServletContextHandler... log4j_regex = r'^(\d{4}\-\d{2}\-\d{2} \d{2}:\d{2}:\d{2}) ([A-Z]{3,5})(.*?)$' def reader(pipe, pipe_name, p, msg_queue): try: with pipe: while p.poll() is None: for line in pipe.readlines(): match = re.match(log4j_regex, line) if match: line = match.groups()[2] msg_queue.put((pipe_name, line)) finally: # Use None as sentinel for done state, detected by iter() below msg_queue.put(None) p = subprocess.Popen( ' '.join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, universal_newlines=True, shell=True, ) q = queue.Queue() Thread(target=reader, args=[p.stdout, 'stdout', p, q]).start() Thread(target=reader, args=[p.stderr, 'stderr', p, q]).start() for _ in range(2): # There will be two None sentinels, one for each stream for pipe_name, line in iter(q.get, None): if pipe_name == 'stdout': logger.info(line) elif pipe_name == 'stderr': logger.error(line) p.wait() return p.returncode
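A smaller, self-contained sketch of the same capture pattern: one reader thread per stream feeding a shared queue, with None sentinels marking when each stream is finished. It drops the Spark log4j parsing and simply prints, so the threading shape is easier to see; run_and_capture is an illustrative name, not part of the module:

import subprocess
import queue
from threading import Thread

def run_and_capture(cmd):
    def reader(pipe, name, q):
        try:
            with pipe:
                for line in pipe:              # blocks until the stream closes
                    q.put((name, line.rstrip('\n')))
        finally:
            q.put(None)                        # sentinel: this stream is done

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         universal_newlines=True)
    q = queue.Queue()
    Thread(target=reader, args=[p.stdout, 'stdout', q]).start()
    Thread(target=reader, args=[p.stderr, 'stderr', q]).start()
    for _ in range(2):                         # two sentinels, one per stream
        for name, line in iter(q.get, None):
            print('[%s] %s' % (name, line))
    return p.wait()

# run_and_capture(['python', '--version'])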
For each key-value pair in the Spark conf, we need to pass it to the CLI in the format --conf "key=value".
def parse_spark_config(spark_conf): '''For each key-value pair in spark conf, we need to pass to CLI in format: --conf "key=value" ''' spark_conf_list = flatten_dict(spark_conf) return list( itertools.chain.from_iterable([('--conf', '{}={}'.format(*c)) for c in spark_conf_list]) )
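flatten_dict is defined elsewhere in the module; the sketch below supplies a plausible stand-in (joining nested keys with dots, which is an assumption about its behavior) so the whole transformation can be run end to end:

import itertools

def flatten_dict(d, prefix=''):
    # Stand-in: turn nested dicts into a list of ('dotted.key', value) pairs.
    items = []
    for k, v in d.items():
        key = prefix + '.' + k if prefix else k
        if isinstance(v, dict):
            items.extend(flatten_dict(v, key))
        else:
            items.append((key, v))
    return items

def parse_spark_config(spark_conf):
    spark_conf_list = flatten_dict(spark_conf)
    return list(
        itertools.chain.from_iterable([('--conf', '{}={}'.format(*c)) for c in spark_conf_list])
    )

conf = {'spark': {'app': {'name': 'demo'}, 'executor': {'memory': '4g'}}}
print(parse_spark_config(conf))
# ['--conf', 'spark.app.name=demo', '--conf', 'spark.executor.memory=4g']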
A SystemNamedDict object is simply a NamedDict intended for internal (dagster) use.
def SystemNamedDict(name, fields, description=None): '''A SystemNamedDict object is simply a NamedDict intended for internal (dagster) use. ''' return NamedDict(name, fields, description, ConfigTypeAttributes(is_system_config=True))
Events API v2 enables you to add PagerDuty's advanced event and incident management functionality to any system that can make an outbound HTTP connection.
def EventV2_create( self, summary, source, severity, event_action='trigger', dedup_key=None, timestamp=None, component=None, group=None, event_class=None, custom_details=None, ): '''Events API v2 enables you to add PagerDuty's advanced event and incident management functionality to any system that can make an outbound HTTP connection. Arguments: summary {string} -- A high-level, text summary message of the event. Will be used to construct an alert's description. Example: "PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host 'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN" source {string} -- Specific human-readable unique identifier, such as a hostname, for the system having the problem. Examples: "prod05.theseus.acme-widgets.com" "171.26.23.22" "aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003" "9c09acd49a25" severity {string} -- How impacted the affected system is. Displayed to users in lists and influences the priority of any created incidents. Must be one of {info, warning, error, critical} Keyword Arguments: event_action {str} -- There are three types of events that PagerDuty recognizes, and are used to represent different types of activity in your monitored systems. (default: 'trigger') * trigger: When PagerDuty receives a trigger event, it will either open a new alert, or add a new trigger log entry to an existing alert, depending on the provided dedup_key. Your monitoring tools should send PagerDuty a trigger when a new problem has been detected. You may send additional triggers when a previously detected problem has occurred again. * acknowledge: acknowledge events cause the referenced incident to enter the acknowledged state. While an incident is acknowledged, it won't generate any additional notifications, even if it receives new trigger events. Your monitoring tools should send PagerDuty an acknowledge event when they know someone is presently working on the problem. * resolve: resolve events cause the referenced incident to enter the resolved state. Once an incident is resolved, it won't generate any additional notifications. New trigger events with the same dedup_key as a resolved incident won't re-open the incident. Instead, a new incident will be created. Your monitoring tools should send PagerDuty a resolve event when the problem that caused the initial trigger event has been fixed. dedup_key {string} -- Deduplication key for correlating triggers and resolves. The maximum permitted length of this property is 255 characters. timestamp {string} -- Timestamp (ISO 8601). When the upstream system detected / created the event. This is useful if a system batches or holds events before sending them to PagerDuty. Optional - Will be auto-generated by PagerDuty if not provided. Example: 2015-07-17T08:42:58.315+0000 component {string} -- The part or component of the affected system that is broken. Examples: "keepalive" "webping" "mysql" "wqueue" group {string} -- A cluster or grouping of sources. For example, sources β€œprod-datapipe-02” and β€œprod-datapipe-03” might both be part of β€œprod-datapipe” Examples: "prod-datapipe" "www" "web_stack" event_class {string} -- The class/type of the event. Examples: "High CPU" "Latency" "500 Error" custom_details {Dict[str, str]} -- Additional details about the event and affected system. 
Example: {"ping time": "1500ms", "load avg": 0.75 } ''' data = { 'routing_key': self.routing_key, 'event_action': event_action, 'payload': {'summary': summary, 'source': source, 'severity': severity}, } if dedup_key is not None: data['dedup_key'] = dedup_key if timestamp is not None: data['payload']['timestamp'] = timestamp if component is not None: data['payload']['component'] = component if group is not None: data['payload']['group'] = group if event_class is not None: data['payload']['class'] = event_class if custom_details is not None: data['payload']['custom_details'] = custom_details return pypd.EventV2.create(data=data)
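The method ultimately hands a single dict to pypd.EventV2.create. A runnable sketch of just the payload-building step, with a hypothetical helper name and no network call:

def build_event_v2_payload(routing_key, summary, source, severity,
                           event_action='trigger', dedup_key=None, **optional):
    # Mirrors the dict the method above assembles before calling pypd.
    data = {
        'routing_key': routing_key,
        'event_action': event_action,
        'payload': {'summary': summary, 'source': source, 'severity': severity},
    }
    if dedup_key is not None:
        data['dedup_key'] = dedup_key
    for key in ('timestamp', 'component', 'group', 'class', 'custom_details'):
        if optional.get(key) is not None:
            data['payload'][key] = optional[key]
    return data

print(build_event_v2_payload(
    'ROUTING_KEY', "Host 'acme-andromeda-sv1-c40' is DOWN", '179.21.24.50', 'critical',
    dedup_key='host-down-179.21.24.50', component='webping', group='prod-datapipe',
))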
Groups execution steps by solid in topological order of the solids.
def coalesce_execution_steps(execution_plan): '''Groups execution steps by solid, in topological order of the solids.''' solid_order = _coalesce_solid_order(execution_plan) steps = defaultdict(list) for solid_name, solid_steps in itertools.groupby( execution_plan.topological_steps(), lambda x: x.solid_name ): steps[solid_name] += list(solid_steps) return OrderedDict([(solid_name, steps[solid_name]) for solid_name in solid_order])
Default method to acquire database connection parameters.
def get_connection_params(self): """ Default method to acquire database connection parameters. Sets connection parameters to match settings.py, and sets default values to blank fields. """ valid_settings = { 'NAME': 'name', 'HOST': 'host', 'PORT': 'port', 'USER': 'username', 'PASSWORD': 'password', 'AUTH_SOURCE': 'authSource', 'AUTH_MECHANISM': 'authMechanism', 'ENFORCE_SCHEMA': 'enforce_schema', 'REPLICASET': 'replicaset', 'SSL': 'ssl', 'SSL_CERTFILE': 'ssl_certfile', 'SSL_CA_CERTS': 'ssl_ca_certs', 'READ_PREFERENCE': 'read_preference' } connection_params = { 'name': 'djongo_test', 'enforce_schema': True } for setting_name, kwarg in valid_settings.items(): try: setting = self.settings_dict[setting_name] except KeyError: continue if setting or setting is False: connection_params[kwarg] = setting return connection_params
Receives a dictionary connection_params to setup a connection to the database.
def get_new_connection(self, connection_params): """ Receives a dictionary connection_params to setup a connection to the database. Dictionary correct setup is made through the get_connection_params method. TODO: This needs to be made more generic to accept other MongoClient parameters. """ name = connection_params.pop('name') es = connection_params.pop('enforce_schema') connection_params['document_class'] = OrderedDict # connection_params['tz_aware'] = True # To prevent leaving unclosed connections behind, # client_conn must be closed before a new connection # is created. if self.client_connection is not None: self.client_connection.close() self.client_connection = Database.connect(**connection_params) database = self.client_connection[name] self.djongo_connection = DjongoClient(database, es) return self.client_connection[name]
Returns an active connection cursor to the database.
def create_cursor(self, name=None): """ Returns an active connection cursor to the database. """ return Cursor(self.client_connection, self.connection, self.djongo_connection)
Closes the client connection to the database.
def _close(self): """ Closes the client connection to the database. """ if self.connection: with self.wrap_database_errors: self.connection.client.close()
Builds an instance of model from the model_dict.
def make_mdl(model, model_dict): """ Builds an instance of model from the model_dict. """ for field_name in model_dict: field = model._meta.get_field(field_name) model_dict[field_name] = field.to_python(model_dict[field_name]) return model(**model_dict)
Overrides the standard to_python method from Django models to allow correct translation of a Mongo array to a Python list.
def to_python(self, value): """ Overrides standard to_python method from django models to allow correct translation of Mongo array to a python list. """ if value is None: return value assert isinstance(value, list) ret = [] for mdl_dict in value: if isinstance(mdl_dict, self.model_container): ret.append(mdl_dict) continue mdl = make_mdl(self.model_container, mdl_dict) ret.append(mdl) return ret
Returns the formfield for the array.
def formfield(self, **kwargs): """ Returns the formfield for the array. """ defaults = { 'form_class': ArrayFormField, 'model_container': self.model_container, 'model_form_class': self.model_form_class, 'name': self.attname, 'mdl_form_kw_l': self.model_form_kwargs_l } defaults.update(kwargs) return super().formfield(**defaults)
Overrides Django's default to_python to allow correct translation to an instance.
def to_python(self, value): """ Overrides Django's default to_python to allow correct translation to instance. """ if value is None or isinstance(value, self.model_container): return value assert isinstance(value, dict) instance = make_mdl(self.model_container, value) return instance
Filter the queryset for the instance this manager is bound to.
def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset = queryset.filter(**self.core_filters) return queryset
Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], assuming uniform distribution of set sizes within the interval.
def _compute_nfp_uniform(l, u, cum_counts, sizes): """Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], assuming uniform distribution of set sizes within the interval. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. cum_counts: the complete cummulative distribution of set sizes. sizes: the complete domain of set sizes. Return (float): the expected number of false positives. """ if l > u: raise ValueError("l must be less or equal to u") if l == 0: n = cum_counts[u] else: n = cum_counts[u]-cum_counts[l-1] return n * float(sizes[u] - sizes[l]) / float(2*sizes[u])
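A small worked example reusing _compute_nfp_uniform as defined above: with six sets whose sizes span 1 through 4, approximating every set in the interval by the upper-bound size gives n * (sizes[u] - sizes[l]) / (2 * sizes[u]) expected false positives. The numbers below are illustrative:

import numpy as np

sizes = np.array([1, 2, 3, 4])          # domain of set sizes
counts = np.array([2, 1, 1, 2])         # how many sets have each size
cum_counts = np.cumsum(counts)          # [2, 3, 4, 6]

# All 6 sets fall in [l=0, u=3] (sizes 1..4): 6 * (4 - 1) / (2 * 4) = 2.25
print(_compute_nfp_uniform(0, 3, cum_counts, sizes))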
Computes the matrix of expected false positives for all possible sub-intervals of the complete domain of set sizes, assuming uniform distribution of set sizes within each sub-interval.
def _compute_nfps_uniform(cum_counts, sizes): """Computes the matrix of expected false positives for all possible sub-intervals of the complete domain of set sizes, assuming uniform distribution of set_sizes within each sub-intervals. Args: cum_counts: the complete cummulative distribution of set sizes. sizes: the complete domain of set sizes. Return (np.array): the 2-D array of expected number of false positives for every pair of [l, u] interval, where l is axis-0 and u is axis-1. """ nfps = np.zeros((len(sizes), len(sizes))) # All u an l are inclusive bounds for intervals. # Compute p = 1, the NFPs for l in range(len(sizes)): for u in range(l, len(sizes)): nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes) return nfps
Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], using the real set size distribution.
def _compute_nfp_real(l, u, counts, sizes): """Computes the expected number of false positives caused by using u to approximate set sizes in the interval [l, u], using the real set size distribution. Args: l: the lower bound on set sizes. u: the upper bound on set sizes. counts: the complete distribution of set sizes. sizes: the complete domain of set sizes. Return (float): the expected number of false positives. """ if l > u: raise ValueError("l must be less or equal to u") return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1])
Computes the matrix of expected false positives for all possible sub-intervals of the complete domain of set sizes.
def _compute_nfps_real(counts, sizes): """Computes the matrix of expected false positives for all possible sub-intervals of the complete domain of set sizes. Args: counts: the complete distribution of set sizes. sizes: the complete domain of set sizes. Return (np.array): the 2-D array of expected number of false positives for every pair of [l, u] interval, where l is axis-0 and u is axis-1. """ nfps = np.zeros((len(sizes), len(sizes))) # All u an l are inclusive bounds for intervals. # Compute p = 1, the NFPs for l in range(len(sizes)): for u in range(l, len(sizes)): nfps[l, u] = _compute_nfp_real(l, u, counts, sizes) return nfps
Computes the optimal partitions given the size distributions and computed number of expected false positives for all sub-intervals.
def _compute_best_partitions(num_part, sizes, nfps): """Computes the optimal partitions given the size distributions and computed number of expected false positives for all sub-intervals. Args: num_part (int): The number of partitions to create. sizes (numpy.array): The complete domain of set sizes in sorted order. nfps (numpy.array): The computed number of expected false positives for all sub-intervals; axis-0 is for the indexes of lower bounds and axis-1 is for the indexes of upper bounds. Returns: partitions (list): list of lower and upper bounds of set sizes for all partitions. total_nfps (float): total number of expected false positives from all partitions. cost (numpy.array): a N x p-1 matrix of the computed optimal NFPs for all sub-problems given upper bound set size and number of partitions. """ if num_part < 2: raise ValueError("num_part cannot be less than 2") if num_part > len(sizes): raise ValueError("num_part cannot be greater than the domain size of " "all set sizes") # If number of partitions is 2, then simply find the upper bound # of the first partition. if num_part == 2: total_nfps, u = min((nfps[0, u1]+nfps[u1+1, len(sizes)-1], u1) for u1 in range(0, len(sizes)-1)) return [(sizes[0], sizes[u]), (sizes[u+1], sizes[-1]),], \ total_nfps, None # Initialize subproblem total NFPs. cost = np.zeros((len(sizes), num_part-2)) # Note: p is the number of partitions in the subproblem. # p2i translates the number of partition into the index in the matrix. p2i = lambda p : p - 2 # Compute p >= 2 until before p = num_part. for p in range(2, num_part): # Compute best partition for subproblems with increasing # max index u, starting from the smallest possible u given the p. # The smallest possible u can be considered as the max index that # generates p partitions each with only one size. for u in range(p-1, len(sizes)): if p == 2: cost[u, p2i(p)] = min(nfps[0, u1]+nfps[u1+1,u] for u1 in range(u)) else: cost[u, p2i(p)] = min(cost[u1, p2i(p-1)] + nfps[u1+1, u] for u1 in range((p-1)-1, u)) p = num_part # Find the optimal upper bound index of the 2nd right-most partition given # the number of partitions (p). total_nfps, u = min((cost[u1, p2i(p-1)]+nfps[u1+1, len(sizes)-1], u1) for u1 in range((p-1)-1, len(sizes)-1)) partitions = [(sizes[u+1], sizes[-1]),] p -= 1 # Back track to find the best partitions. while p > 1: # Find the optimal upper bound index of the 2nd right-most partition # givne the number of partitions (p) and upper bound index (u) in this # sub-problem. _, u1_best = min((cost[u1, p2i(p)]+nfps[u1+1, u], u1) for u1 in range((p-1)-1, u)) partitions.insert(0, (sizes[u1_best+1], sizes[u])) u = u1_best p -= 1 partitions.insert(0, (sizes[0], sizes[u])) return [partitions, total_nfps, cost]
Compute the optimal partitions given a distribution of set sizes.
def optimal_partitions(sizes, counts, num_part): """Compute the optimal partitions given a distribution of set sizes. Args: sizes (numpy.array): The complete domain of set sizes in ascending order. counts (numpy.array): The frequencies of all set sizes in the same order as `sizes`. num_part (int): The number of partitions to create. Returns: list: A list of partitions in the form of `(lower, upper)` tuples, where `lower` and `upper` are lower and upper bound (inclusive) set sizes of each partition. """ if num_part < 2: return [(sizes[0], sizes[-1])] if num_part >= len(sizes): partitions = [(x, x) for x in sizes] return partitions nfps = _compute_nfps_real(counts, sizes) partitions, _, _ = _compute_best_partitions(num_part, sizes, nfps) return partitions
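Putting the pieces above together on a hypothetical size distribution (the sizes and counts below are illustrative; all function names come from this module):

import numpy as np

sizes = np.array([1, 2, 3, 5, 8, 13, 21, 34])      # ascending domain of set sizes
counts = np.array([50, 40, 30, 20, 10, 5, 2, 1])   # frequency of each size

partitions = optimal_partitions(sizes, counts, num_part=3)
print(partitions)   # three (lower, upper) inclusive bounds covering 1 through 34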
Estimate the Jaccard similarity (resemblance) between this b-bit MinHash and the other.
def jaccard(self, other): ''' Estimate the Jaccard similarity (resemblance) between this b-bit MinHash and the other. ''' if self.b != other.b: raise ValueError("Cannot compare two b-bit MinHashes with different\ b values") if self.seed != other.seed: raise ValueError("Cannot compare two b-bit MinHashes with different\ set of permutations") intersection = np.count_nonzero(self.hashvalues==other.hashvalues) raw_est = float(intersection) / float(self.hashvalues.size) a1 = self._calc_a(self.r, self.b) a2 = self._calc_a(other.r, other.b) c1, c2 = self._calc_c(a1, a2, self.r, other.r) return (raw_est - c1) / (1 - c2)
Compute the function A(r, b).
def _calc_a(self, r, b): ''' Compute the function A(r, b) ''' if r == 0.0: # Find the limit of A(r, b) as r -> 0. return 1.0 / (1 << b) return r * (1 - r) ** (2 ** b - 1) / (1 - (1 - r) ** (2 ** b))
Compute the functions C1 and C2
def _calc_c(self, a1, a2, r1, r2): ''' Compute the functions C1 and C2 ''' if r1 == 0.0 and r2 == 0.0: # Find the limits of C1 and C2 as r1 -> 0 and r2 -> 0 # Since the b-value must be the same and r1 = r2, # we have A1(r1, b1) = A2(r2, b2) = A, # then the limits for both C1 and C2 are A. return a1, a2 div = 1 / (r1 + r2) c1 = (a1 * r2 + a2 * r1) * div c2 = (a1 * r1 + a2 * r2) * div return c1, c2
Initialize the slots of the LeanMinHash.
def _initialize_slots(self, seed, hashvalues): '''Initialize the slots of the LeanMinHash. Args: seed (int): The random seed controls the set of random permutation functions generated for this LeanMinHash. hashvalues: The hash values is the internal state of the LeanMinHash. ''' self.seed = seed self.hashvalues = self._parse_hashvalues(hashvalues)
Compute the byte size after serialization.
def bytesize(self, byteorder='@'): '''Compute the byte size after serialization. Args: byteorder (str, optional): This is byte order of the serialized data. Use one of the `byte order characters <https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_: ``@``, ``=``, ``<``, ``>``, and ``!``. Default is ``@`` -- the native order. Returns: int: Size in number of bytes after serialization. ''' # Use 8 bytes to store the seed integer seed_size = struct.calcsize(byteorder+'q') # Use 4 bytes to store the number of hash values length_size = struct.calcsize(byteorder+'i') # Use 4 bytes to store each hash value as we are using the lower 32 bit hashvalue_size = struct.calcsize(byteorder+'I') return seed_size + length_size + len(self) * hashvalue_size
Serialize this lean MinHash and store the result in an allocated buffer.
def serialize(self, buf, byteorder='@'): ''' Serialize this lean MinHash and store the result in an allocated buffer. Args: buf (buffer): `buf` must implement the `buffer`_ interface. One such example is the built-in `bytearray`_ class. byteorder (str, optional): This is byte order of the serialized data. Use one of the `byte order characters <https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_: ``@``, ``=``, ``<``, ``>``, and ``!``. Default is ``@`` -- the native order. This is preferred over using `pickle`_ if the serialized lean MinHash needs to be used by another program in a different programming language. The serialization schema: 1. The first 8 bytes is the seed integer 2. The next 4 bytes is the number of hash values 3. The rest is the serialized hash values, each uses 4 bytes Example: To serialize a single lean MinHash into a `bytearray`_ buffer. .. code-block:: python buf = bytearray(lean_minhash.bytesize()) lean_minhash.serialize(buf) To serialize multiple lean MinHash into a `bytearray`_ buffer. .. code-block:: python # assuming lean_minhashs is a list of LeanMinHash with the same size size = lean_minhashs[0].bytesize() buf = bytearray(size*len(lean_minhashs)) for i, lean_minhash in enumerate(lean_minhashs): lean_minhash.serialize(buf[i*size:]) .. _`buffer`: https://docs.python.org/3/c-api/buffer.html .. _`bytearray`: https://docs.python.org/3.6/library/functions.html#bytearray .. _`byteorder`: https://docs.python.org/3/library/struct.html ''' if len(buf) < self.bytesize(): raise ValueError("The buffer does not have enough space\ for holding this MinHash.") fmt = "%sqi%dI" % (byteorder, len(self)) struct.pack_into(fmt, buf, 0, self.seed, len(self), *self.hashvalues)
Deserialize a lean MinHash from a buffer.
def deserialize(cls, buf, byteorder='@'): ''' Deserialize a lean MinHash from a buffer. Args: buf (buffer): `buf` must implement the `buffer`_ interface. One such example is the built-in `bytearray`_ class. byteorder (str. optional): This is byte order of the serialized data. Use one of the `byte order characters <https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_: ``@``, ``=``, ``<``, ``>``, and ``!``. Default is ``@`` -- the native order. Return: datasketch.LeanMinHash: The deserialized lean MinHash Example: To deserialize a lean MinHash from a buffer. .. code-block:: python lean_minhash = LeanMinHash.deserialize(buf) ''' fmt_seed_size = "%sqi" % byteorder fmt_hash = byteorder + "%dI" try: seed, num_perm = struct.unpack_from(fmt_seed_size, buf, 0) except TypeError: seed, num_perm = struct.unpack_from(fmt_seed_size, buffer(buf), 0) offset = struct.calcsize(fmt_seed_size) try: hashvalues = struct.unpack_from(fmt_hash % num_perm, buf, offset) except TypeError: hashvalues = struct.unpack_from(fmt_hash % num_perm, buffer(buf), offset) lmh = object.__new__(LeanMinHash) lmh._initialize_slots(seed, hashvalues) return lmh
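A round-trip sketch using the datasketch package these methods come from, assuming a version where LeanMinHash can be built directly from a MinHash:

from datasketch import MinHash, LeanMinHash

m = MinHash(num_perm=128)
for token in [b'cat', b'dog', b'fish']:
    m.update(token)

lean = LeanMinHash(m)
buf = bytearray(lean.bytesize())     # allocate exactly enough space
lean.serialize(buf)

restored = LeanMinHash.deserialize(buf)
assert restored == lean              # same seed and hash values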
Update this MinHash with a new value. The value will be hashed using the hash function specified by the hashfunc argument in the constructor.
def update(self, b): '''Update this MinHash with a new value. The value will be hashed using the hash function specified by the `hashfunc` argument in the constructor. Args: b: The value to be hashed using the hash function specified. Example: To update with a new string value (using the default SHA1 hash function, which requires bytes as input): .. code-block:: python minhash = Minhash() minhash.update("new value".encode('utf-8')) We can also use a different hash function, for example, `pyfarmhash`: .. code-block:: python import farmhash def _hash_32(b): return farmhash.hash32(b) minhash = MinHash(hashfunc=_hash_32) minhash.update("new value") ''' hv = self.hashfunc(b) a, b = self.permutations phv = np.bitwise_and((a * hv + b) % _mersenne_prime, np.uint64(_max_hash)) self.hashvalues = np.minimum(phv, self.hashvalues)
Estimate the Jaccard similarity (resemblance) between the sets represented by this MinHash and the other.
def jaccard(self, other): '''Estimate the `Jaccard similarity`_ (resemblance) between the sets represented by this MinHash and the other. Args: other (datasketch.MinHash): The other MinHash. Returns: float: The Jaccard similarity, which is between 0.0 and 1.0. ''' if other.seed != self.seed: raise ValueError("Cannot compute Jaccard given MinHash with\ different seeds") if len(self) != len(other): raise ValueError("Cannot compute Jaccard given MinHash with\ different numbers of permutation functions") return np.float(np.count_nonzero(self.hashvalues==other.hashvalues)) /\ np.float(len(self))
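A quick usage sketch of update and jaccard via the datasketch MinHash class they belong to; the true Jaccard of the two token sets below is 4/7, roughly 0.57, and the estimate should land nearby:

from datasketch import MinHash

m1, m2 = MinHash(num_perm=128), MinHash(num_perm=128)
for w in 'estimating jaccard similarity with minhash'.split():
    m1.update(w.encode('utf-8'))
for w in 'estimating jaccard similarity using minhash sketches'.split():
    m2.update(w.encode('utf-8'))

print(m1.jaccard(m2))   # noisy estimate of 4/7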
Estimate the cardinality count based on the technique described in `this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_.
def count(self): '''Estimate the cardinality count based on the technique described in `this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_. Returns: int: The estimated cardinality of the set represented by this MinHash. ''' k = len(self) return np.float(k) / np.sum(self.hashvalues / np.float(_max_hash)) - 1.0
Merge the other MinHash with this one, making this one the union of both.
def merge(self, other): '''Merge the other MinHash with this one, making this one the union of both. Args: other (datasketch.MinHash): The other MinHash. ''' if other.seed != self.seed: raise ValueError("Cannot merge MinHash with\ different seeds") if len(self) != len(other): raise ValueError("Cannot merge MinHash with\ different numbers of permutation functions") self.hashvalues = np.minimum(other.hashvalues, self.hashvalues)
:returns: datasketch.MinHash -- A copy of this MinHash by exporting its state.
def copy(self): ''' :returns: datasketch.MinHash -- A copy of this MinHash by exporting its state. ''' return MinHash(seed=self.seed, hashfunc=self.hashfunc, hashvalues=self.digest(), permutations=self.permutations)
Create a MinHash which is the union of the MinHash objects passed as arguments.
def union(cls, *mhs): '''Create a MinHash which is the union of the MinHash objects passed as arguments. Args: *mhs: The MinHash objects to be united. The argument list length is variable, but must be at least 2. Returns: datasketch.MinHash: A new union MinHash. ''' if len(mhs) < 2: raise ValueError("Cannot union less than 2 MinHash") num_perm = len(mhs[0]) seed = mhs[0].seed if any((seed != m.seed or num_perm != len(m)) for m in mhs): raise ValueError("The unioning MinHash must have the\ same seed and number of permutation functions") hashvalues = np.minimum.reduce([m.hashvalues for m in mhs]) permutations = mhs[0].permutations return cls(num_perm=num_perm, seed=seed, hashvalues=hashvalues, permutations=permutations)
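Usage sketch for union together with count (both documented above), again via datasketch; token contents are illustrative:

from datasketch import MinHash

m1, m2 = MinHash(num_perm=128), MinHash(num_perm=128)
for t in [b'a', b'b', b'c']:
    m1.update(t)
for t in [b'c', b'd']:
    m2.update(t)

u = MinHash.union(m1, m2)
print(u.count())        # approximate cardinality of {a, b, c, d}
print(u.jaccard(m1))    # roughly 3/4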
Compute the false positive probability given the containment threshold. xq is the ratio of x/q.
def _false_positive_probability(threshold, b, r, xq): ''' Compute the false positive probability given the containment threshold. xq is the ratio of x/q. ''' _probability = lambda t : 1 - (1 - (t/(1 + xq - t))**float(r))**float(b) if xq >= threshold: a, err = integrate(_probability, 0.0, threshold) return a a, err = integrate(_probability, 0.0, xq) return a
Compute the optimal parameters that minimize the weighted sum of the probabilities of false positive and false negative. xq is the ratio of x/q.
def _optimal_param(threshold, num_perm, max_r, xq, false_positive_weight, false_negative_weight): ''' Compute the optimal parameters that minimizes the weighted sum of probabilities of false positive and false negative. xq is the ratio of x/q. ''' min_error = float("inf") opt = (0, 0) for b in range(1, num_perm+1): for r in range(1, max_r+1): if b*r > num_perm: continue fp = _false_positive_probability(threshold, b, r, xq) fn = _false_negative_probability(threshold, b, r, xq) error = fp*false_positive_weight + fn*false_negative_weight if error < min_error: min_error = error opt = (b, r) return opt
Index all sets given their keys, MinHashes, and sizes. It can be called only once after the index is created.
def index(self, entries): ''' Index all sets given their keys, MinHashes, and sizes. It can be called only once after the index is created. Args: entries (`iterable` of `tuple`): An iterable of tuples, each must be in the form of `(key, minhash, size)`, where `key` is the unique identifier of a set, `minhash` is the MinHash of the set, and `size` is the size or number of unique items in the set. Note: `size` must be positive. ''' if not self.is_empty(): raise ValueError("Cannot call index again on a non-empty index") if not isinstance(entries, list): queue = deque([]) for key, minhash, size in entries: if size <= 0: raise ValueError("Set size must be positive") queue.append((key, minhash, size)) entries = list(queue) if len(entries) == 0: raise ValueError("entries is empty") # Create optimal partitions. sizes, counts = np.array(sorted( Counter(e[2] for e in entries).most_common())).T partitions = optimal_partitions(sizes, counts, len(self.indexes)) for i, (lower, upper) in enumerate(partitions): self.lowers[i], self.uppers[i] = lower, upper # Insert into partitions. entries.sort(key=lambda e : e[2]) curr_part = 0 for key, minhash, size in entries: if size > self.uppers[curr_part]: curr_part += 1 for r in self.indexes[curr_part]: self.indexes[curr_part][r].insert(key, minhash)
Given the MinHash and size of the query set, retrieve keys that reference sets whose containment with respect to the query set is greater than the threshold.
def query(self, minhash, size): ''' Giving the MinHash and size of the query set, retrieve keys that references sets with containment with respect to the query set greater than the threshold. Args: minhash (datasketch.MinHash): The MinHash of the query set. size (int): The size (number of unique items) of the query set. Returns: `iterator` of keys. ''' for i, index in enumerate(self.indexes): u = self.uppers[i] if u is None: continue b, r = self._get_optimal_param(u, size) for key in index[r]._query_b(minhash, b): yield key
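End-to-end sketch of index and query on datasketch's MinHashLSHEnsemble, the class these methods belong to; set contents and the make_minhash helper are illustrative:

from datasketch import MinHash, MinHashLSHEnsemble

def make_minhash(items, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for x in items:
        m.update(x.encode('utf-8'))
    return m

sets = {'s1': {'a', 'b', 'c'}, 's2': {'a', 'b', 'c', 'd', 'e'}, 's3': {'x', 'y'}}
ensemble = MinHashLSHEnsemble(threshold=0.8, num_perm=128, num_part=16)
ensemble.index((key, make_minhash(s), len(s)) for key, s in sets.items())

query = {'a', 'b', 'c'}
# s1 and s2 both fully contain the query, so both should be returned.
print(list(ensemble.query(make_minhash(query), len(query))))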
Returns: bool: True if the index is empty.
def is_empty(self): ''' Returns: bool: Check if the index is empty. ''' return all(all(index[r].is_empty() for r in index) for index in self.indexes)
Estimate the weighted Jaccard similarity between the multi-sets represented by this weighted MinHash and the other. Args: other (datasketch.WeightedMinHash): The other weighted MinHash.
def jaccard(self, other): '''Estimate the `weighted Jaccard similarity`_ between the multi-sets represented by this weighted MinHash and the other. Args: other (datasketch.WeightedMinHash): The other weighted MinHash. Returns: float: The weighted Jaccard similarity between 0.0 and 1.0. .. _`weighted Jaccard similarity`: http://mathoverflow.net/questions/123339/weighted-jaccard-similarity ''' if other.seed != self.seed: raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\ different seeds") if len(self) != len(other): raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\ different numbers of hash values") # Check how many pairs of (k, t) hashvalues are equal intersection = 0 for this, that in zip(self.hashvalues, other.hashvalues): if np.array_equal(this, that): intersection += 1 return float(intersection) / float(len(self))
Create a new weighted MinHash given a weighted Jaccard vector. Each dimension is an integer frequency of the corresponding element in the multi-set represented by the vector.
def minhash(self, v): '''Create a new weighted MinHash given a weighted Jaccard vector. Each dimension is an integer frequency of the corresponding element in the multi-set represented by the vector. Args: v (numpy.array): The Jaccard vector. ''' if not isinstance(v, collections.Iterable): raise TypeError("Input vector must be an iterable") if not len(v) == self.dim: raise ValueError("Input dimension mismatch, expecting %d" % self.dim) if not isinstance(v, np.ndarray): v = np.array(v, dtype=np.float32) elif v.dtype != np.float32: v = v.astype(np.float32) hashvalues = np.zeros((self.sample_size, 2), dtype=np.int) vzeros = (v == 0) if vzeros.all(): raise ValueError("Input is all zeros") v[vzeros] = np.nan vlog = np.log(v) for i in range(self.sample_size): t = np.floor((vlog / self.rs[i]) + self.betas[i]) ln_y = (t - self.betas[i]) * self.rs[i] ln_a = self.ln_cs[i] - ln_y - self.rs[i] k = np.nanargmin(ln_a) hashvalues[i][0], hashvalues[i][1] = k, int(t[k]) return WeightedMinHash(self.seed, hashvalues)
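Usage sketch with datasketch's WeightedMinHashGenerator, which hosts the method above; the true weighted Jaccard of the two illustrative vectors below is sum(min)/sum(max) = 12/16 = 0.75:

import numpy as np
from datasketch import WeightedMinHashGenerator

gen = WeightedMinHashGenerator(dim=6, sample_size=256, seed=12)
v1 = np.array([1, 3, 4, 5, 0, 2], dtype=np.float32)
v2 = np.array([2, 3, 2, 5, 0, 1], dtype=np.float32)

wm1, wm2 = gen.minhash(v1), gen.minhash(v2)
print(wm1.jaccard(wm2))   # noisy estimate of 0.75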
Insert a key to the index, together with a MinHash (or weighted MinHash) of the set referenced by the key.
def insert(self, key, minhash, check_duplication=True): ''' Insert a key to the index, together with a MinHash (or weighted MinHash) of the set referenced by the key. :param str key: The identifier of the set. :param datasketch.MinHash minhash: The MinHash of the set. :param bool check_duplication: To avoid duplicate keys in the storage (`default=True`). It's recommended to not change the default, but if you want to avoid the overhead during insert you can set `check_duplication = False`. ''' self._insert(key, minhash, check_duplication=check_duplication, buffer=False)
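Insert-then-query sketch for datasketch's MinHashLSH (insert is documented above, query lives in the same class); the documents are illustrative:

from datasketch import MinHash, MinHashLSH

lsh = MinHashLSH(threshold=0.5, num_perm=128)
docs = {'doc1': ['cat', 'dog', 'fish'],
        'doc2': ['cat', 'dog', 'bird'],
        'doc3': ['car', 'bus']}
for key, tokens in docs.items():
    m = MinHash(num_perm=128)
    for t in tokens:
        m.update(t.encode('utf-8'))
    lsh.insert(key, m)

q = MinHash(num_perm=128)
for t in ['cat', 'dog', 'fish']:
    q.update(t.encode('utf-8'))
print(lsh.query(q))   # candidate keys with estimated Jaccard >= 0.5, e.g. doc1 (and often doc2)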
Remove the key from the index.
def remove(self, key): ''' Remove the key from the index. Args: key (hashable): The unique identifier of a set. ''' if self.prepickle: key = pickle.dumps(key) if key not in self.keys: raise ValueError("The given key does not exist") for H, hashtable in zip(self.keys[key], self.hashtables): hashtable.remove_val(H, key) if not hashtable.get(H): hashtable.remove(H) self.keys.remove(key)
Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above) restricted to the list of keys given.
def get_subset_counts(self, *keys): ''' Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above) restricted to the list of keys given. Args: keys (hashable) : the keys for which to get the bucket allocation counts ''' if self.prepickle: key_set = [pickle.dumps(key) for key in set(keys)] else: key_set = list(set(keys)) hashtables = [unordered_storage({'type': 'dict'}) for _ in range(self.b)] Hss = self.keys.getmany(*key_set) for key, Hs in zip(key_set, Hss): for H, hashtable in zip(Hs, hashtables): hashtable.insert(H, key) return [hashtable.itemcounts() for hashtable in hashtables]
Update the HyperLogLog with a new data value in bytes. The value will be hashed using the hash function specified by the hashfunc argument in the constructor.
def update(self, b): ''' Update the HyperLogLog with a new data value in bytes. The value will be hashed using the hash function specified by the `hashfunc` argument in the constructor. Args: b: The value to be hashed using the hash function specified. Example: To update with a new string value (using the default SHA1 hash function, which requires bytes as input): .. code-block:: python hll = HyperLogLog() hll.update("new value".encode('utf-8')) We can also use a different hash function, for example, `pyfarmhash`: .. code-block:: python import farmhash def _hash_32(b): return farmhash.hash32(b) hll = HyperLogLog(hashfunc=_hash_32) hll.update("new value") ''' # Digest the hash object to get the hash value hv = self.hashfunc(b) # Get the index of the register using the first p bits of the hash reg_index = hv & (self.m - 1) # Get the rest of the hash bits = hv >> self.p # Update the register self.reg[reg_index] = max(self.reg[reg_index], self._get_rank(bits))
Estimate the cardinality of the data values seen so far.
def count(self): ''' Estimate the cardinality of the data values seen so far. Returns: int: The estimated cardinality. ''' # Use HyperLogLog estimation function e = self.alpha * float(self.m ** 2) / np.sum(2.0**(-self.reg)) # Small range correction if e <= (5.0 / 2.0) * self.m: num_zero = self.m - np.count_nonzero(self.reg) return self._linearcounting(num_zero) # Normal range, no correction if e <= (1.0 / 30.0) * (1 << 32): return e # Large range correction return self._largerange_correction(e)
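Usage sketch for update and count on datasketch's HyperLogLog; with p=12 the relative error is around 1.6%, so the estimate should be close to 10000:

from datasketch import HyperLogLog

hll = HyperLogLog(p=12)
for i in range(10000):
    hll.update(str(i).encode('utf-8'))

print(hll.count())   # roughly 10000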
Merge the other HyperLogLog with this one, making this the union of the two.
def merge(self, other): ''' Merge the other HyperLogLog with this one, making this the union of the two. Args: other (datasketch.HyperLogLog): ''' if self.m != other.m or self.p != other.p: raise ValueError("Cannot merge HyperLogLog with different\ precisions.") self.reg = np.maximum(self.reg, other.reg)
Reset the current HyperLogLog to empty.
def clear(self): ''' Reset the current HyperLogLog to empty. ''' self.reg = np.zeros((self.m,), dtype=np.int8)
Computes the average precision at k.
def apk(actual, predicted, k=10): """ Computes the average precision at k. This function computes the average prescision at k between two lists of items. Parameters ---------- actual : list A list of elements that are to be predicted (order doesn't matter) predicted : list A list of predicted elements (order does matter) k : int, optional The maximum number of predicted elements Returns ------- score : double The average precision at k over the input lists """ if len(predicted)>k: predicted = predicted[:k] score = 0.0 num_hits = 0.0 for i,p in enumerate(predicted): if p in actual and p not in predicted[:i]: num_hits += 1.0 score += num_hits / (i+1.0) if len(actual) == 0: return 0.0 return score / min(len(actual), k)
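A worked example using apk as defined above: hits at ranks 1 and 3 contribute precisions 1/1 and 2/3, and the sum is divided by min(len(actual), k) = 3.

actual = ['a', 'b', 'd']
predicted = ['a', 'c', 'b', 'e']

print(apk(actual, predicted, k=10))   # (1.0 + 2/3) / 3 = 0.555...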
Computes the mean average precision at k.
def mapk(actual, predicted, k=10): """ Computes the mean average precision at k. This function computes the mean average prescision at k between two lists of lists of items. Parameters ---------- actual : list A list of lists of elements that are to be predicted (order doesn't matter in the lists) predicted : list A list of lists of predicted elements (order matters in the lists) k : int, optional The maximum number of predicted elements Returns ------- score : double The mean average precision at k over the input lists """ return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)])
Add a unique key, together with a MinHash (or weighted MinHash) of the set referenced by the key.
def add(self, key, minhash): ''' Add a unique key, together with a MinHash (or weighted MinHash) of the set referenced by the key. Note: The key won't be searchbale until the :func:`datasketch.MinHashLSHForest.index` method is called. Args: key (hashable): The unique identifier of the set. minhash (datasketch.MinHash): The MinHash of the set. ''' if len(minhash) < self.k*self.l: raise ValueError("The num_perm of MinHash out of range") if key in self.keys: raise ValueError("The given key has already been added") self.keys[key] = [self._H(minhash.hashvalues[start:end]) for start, end in self.hashranges] for H, hashtable in zip(self.keys[key], self.hashtables): hashtable[H].append(key)
Index all the keys added so far and make them searchable.
def index(self): ''' Index all the keys added so far and make them searchable. ''' for i, hashtable in enumerate(self.hashtables): self.sorted_hashtables[i] = [H for H in hashtable.keys()] self.sorted_hashtables[i].sort()
Return the approximate top-k keys that have the highest Jaccard similarities to the query set.
def query(self, minhash, k): ''' Return the approximate top-k keys that have the highest Jaccard similarities to the query set. Args: minhash (datasketch.MinHash): The MinHash of the query set. k (int): The maximum number of keys to return. Returns: `list` of at most k keys. ''' if k <= 0: raise ValueError("k must be positive") if len(minhash) < self.k*self.l: raise ValueError("The num_perm of MinHash out of range") results = set() r = self.k while r > 0: for key in self._query(minhash, r, self.l): results.add(key) if len(results) >= k: return list(results) r -= 1 return list(results)
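Top-k query sketch for datasketch's MinHashLSHForest, which hosts add, index, and query above; note that index() must be called before query(). The documents are illustrative:

from datasketch import MinHash, MinHashLSHForest

forest = MinHashLSHForest(num_perm=128)
for key, words in {'d1': ['a', 'b', 'c'],
                   'd2': ['a', 'b', 'd'],
                   'd3': ['x', 'y', 'z']}.items():
    m = MinHash(num_perm=128)
    for w in words:
        m.update(w.encode('utf-8'))
    forest.add(key, m)
forest.index()                        # keys are not searchable until this is called

q = MinHash(num_perm=128)
for w in ['a', 'b', 'c']:
    q.update(w.encode('utf-8'))
print(forest.query(q, 2))             # approximate top-2, likely d1 and d2 in some order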
https://golang.org/src/sort/search.go?s=2247:2287#L49
def _binary_search(self, n, func): ''' https://golang.org/src/sort/search.go?s=2247:2287#L49 ''' i, j = 0, n while i < j: h = int(i + (j - i) / 2) if not func(h): i = h + 1 else: j = h return i
Clean up client resources and disconnect from AsyncMinHashLSH storage.
async def close(self): """ Cleanup client resources and disconnect from AsyncMinHashLSH storage. """ async with self._lock: for t in self.hashtables: await t.close() if self.keys is not None: await self.keys.close() self._initialized = False
See :class:`datasketch.MinHashLSH`.
async def query(self, minhash): """ see :class:`datasketch.MinHashLSH`. """ if len(minhash) != self.h: raise ValueError("Expecting minhash with length %d, " "got %d" % (self.h, len(minhash))) fs = (hashtable.get(self._H(minhash.hashvalues[start:end])) for (start, end), hashtable in zip(self.hashranges, self.hashtables)) candidates = frozenset(chain.from_iterable(await asyncio.gather(*fs))) return list(candidates)
See :class:`datasketch.MinHashLSH`.
async def get_counts(self): """ see :class:`datasketch.MinHashLSH`. """ fs = (hashtable.itemcounts() for hashtable in self.hashtables) return await asyncio.gather(*fs)
Return ordered storage system based on the specified config.
def ordered_storage(config, name=None): '''Return ordered storage system based on the specified config. The canonical example of such a storage container is ``defaultdict(list)``. Thus, the return value of this method contains keys and values. The values are ordered lists with the last added item at the end. Args: config (dict): Defines the configurations for the storage. For in-memory storage, the config ``{'type': 'dict'}`` will suffice. For Redis storage, the type should be ``'redis'`` and the configurations for the Redis database should be supplied under the key ``'redis'``. These parameters should be in a form suitable for `redis.Redis`. The parameters may alternatively contain references to environment variables, in which case literal configuration values should be replaced by dicts of the form:: {'env': 'REDIS_HOSTNAME', 'default': 'localhost'} For a full example, see :ref:`minhash_lsh_at_scale` name (bytes, optional): A reference name for this storage container. For dict-type containers, this is ignored. For Redis containers, this name is used to prefix keys pertaining to this storage container within the database. ''' tp = config['type'] if tp == 'dict': return DictListStorage(config) if tp == 'redis': return RedisListStorage(config, name=name)
Return an unordered storage system based on the specified config.
def unordered_storage(config, name=None): '''Return an unordered storage system based on the specified config. The canonical example of such a storage container is ``defaultdict(set)``. Thus, the return value of this method contains keys and values. The values are unordered sets. Args: config (dict): Defines the configurations for the storage. For in-memory storage, the config ``{'type': 'dict'}`` will suffice. For Redis storage, the type should be ``'redis'`` and the configurations for the Redis database should be supplied under the key ``'redis'``. These parameters should be in a form suitable for `redis.Redis`. The parameters may alternatively contain references to environment variables, in which case literal configuration values should be replaced by dicts of the form:: {'env': 'REDIS_HOSTNAME', 'default': 'localhost'} For a full example, see :ref:`minhash_lsh_at_scale` name (bytes, optional): A reference name for this storage container. For dict-type containers, this is ignored. For Redis containers, this name is used to prefix keys pertaining to this storage container within the database. ''' tp = config['type'] if tp == 'dict': return DictSetStorage(config) if tp == 'redis': return RedisSetStorage(config, name=name)
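A quick in-memory usage sketch, assuming the dict-backed container returned above exposes insert, get, and itemcounts as in datasketch's storage module:

store = unordered_storage({'type': 'dict'})
store.insert('bucket-1', 'key-a')
store.insert('bucket-1', 'key-b')
store.insert('bucket-1', 'key-a')   # values are a set, so duplicates collapse

print(store.get('bucket-1'))        # {'key-a', 'key-b'}
print(store.itemcounts())           # {'bucket-1': 2}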
Returns a dict where the keys are the keys of the container. The values are the *lengths* of the value sequences stored in this container.
def itemcounts(self, **kwargs): '''Returns a dict where the keys are the keys of the container. The values are the *lengths* of the value sequences stored in this container. ''' return {k: len(v) for k, v in self._dict.items()}
:param adapter: allauth.socialaccount Adapter subclass. Usually OAuthAdapter or Auth2Adapter. :param app: allauth.socialaccount.SocialApp instance. :param token: allauth.socialaccount.SocialToken instance. :param response: Provider's response for OAuth1. Not used in the :returns: A populated instance of the allauth.socialaccount.SocialLoginView instance.
def get_social_login(self, adapter, app, token, response): """ :param adapter: allauth.socialaccount Adapter subclass. Usually OAuthAdapter or Auth2Adapter :param app: `allauth.socialaccount.SocialApp` instance :param token: `allauth.socialaccount.SocialToken` instance :param response: Provider's response for OAuth1. Not used in the :returns: A populated instance of the `allauth.socialaccount.SocialLoginView` instance """ request = self._get_request() social_login = adapter.complete_login(request, app, token, response=response) social_login.token = token return social_login
Required to allow using custom USER_DETAILS_SERIALIZER in JWTSerializer. Defining it here to avoid circular imports
def get_user(self, obj): """ Required to allow using custom USER_DETAILS_SERIALIZER in JWTSerializer. Defining it here to avoid circular imports """ rest_auth_serializers = getattr(settings, 'REST_AUTH_SERIALIZERS', {}) JWTUserDetailsSerializer = import_callable( rest_auth_serializers.get('USER_DETAILS_SERIALIZER', UserDetailsSerializer) ) user_data = JWTUserDetailsSerializer(obj['user'], context=self.context).data return user_data
Set the social login process state to connect rather than login. Refer to the implementation of get_social_login in the base class and to the allauth.socialaccount.helpers module complete_social_login function.
def get_social_login(self, *args, **kwargs): """ Set the social login process state to connect rather than login Refer to the implementation of get_social_login in base class and to the allauth.socialaccount.helpers module complete_social_login function. """ social_login = super(SocialConnectMixin, self).get_social_login(*args, **kwargs) social_login.state['process'] = AuthProcess.CONNECT return social_login
Select the correct text from the Japanese number, reading and alternatives.
def select_text(text, reading=False, prefer=None): """Select the correct text from the Japanese number, reading and alternatives""" # select kanji number or kana reading if reading: text = text[1] else: text = text[0] # select the preferred one or the first one from multiple alternatives if not isinstance(text, strtype): common = set(text) & set(prefer or set()) if len(common) == 1: text = common.pop() else: text = text[0] return text
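Two illustrative calls to select_text; the number tuple below is a hypothetical stand-in for whatever structure the Japanese converter builds internally.

# Kanji form by default, kana reading with reading=True.
select_text(("七", ("なな", "しち")))                               # -> '七'
select_text(("七", ("なな", "しち")), reading=True)                 # -> 'なな' (first alternative)
# `prefer` picks a specific alternative when it matches exactly one of them.
select_text(("七", ("なな", "しち")), reading=True, prefer={"しち"})  # -> 'しち'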
Merge lpair < rpair while applying semi-irregular rendaku rules.
def rendaku_merge_pairs(lpair, rpair):
    """Merge lpair < rpair while applying semi-irregular rendaku rules"""
    ltext, lnum = lpair
    rtext, rnum = rpair
    if lnum > rnum:
        raise ValueError
    if rpair == ("ひゃく", 100):
        if lpair == ("さん", 3):
            rtext = "びゃく"
        elif lpair == ("ろく", 6):
            ltext = "ろっ"
            rtext = "ぴゃく"
        elif lpair == ("はち", 8):
            ltext = "はっ"
            rtext = "ぴゃく"
    elif rpair == ("せん", 1000):
        if lpair == ("さん", 3):
            rtext = "ぜん"
        elif lpair == ("はち", 8):
            ltext = "はっ"
    elif rpair == ("ちょう", 10**12):
        if lpair == ("いち", 1):
            ltext = "いっ"
        elif lpair == ("はち", 8):
            ltext = "はっ"
        elif lpair == ("じゅう", 10):
            ltext = "じゅっ"
    elif rpair == ("けい", 10**16):
        if lpair == ("いち", 1):
            ltext = "いっ"
        elif lpair == ("ろく", 6):
            ltext = "ろっ"
        elif lpair == ("はち", 8):
            ltext = "はっ"
        elif lpair == ("じゅう", 10):
            ltext = "じゅっ"
        elif lpair == ("ひゃく", 100):
            ltext = "ひゃっ"
    return ("%s%s" % (ltext, rtext), lnum * rnum)
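A quick sanity check of the rules above, assuming the standard kana readings used in the pairs:

print(rendaku_merge_pairs(("さん", 3), ("ひゃく", 100)))  # ('さんびゃく', 300): the hundred counter voices
print(rendaku_merge_pairs(("はち", 8), ("せん", 1000)))   # ('はっせん', 8000): the leading numeral geminates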
Starting here, it groups the number by three from the tail: '1234567' -> (('1',), ('234',), ('567',)). :param number: str :rtype: tuple
def split_by_3(self, number): """ starting here, it groups the number by three from the tail '1234567' -> (('1',),('234',),('567',)) :param number:str :rtype:tuple """ blocks = () length = len(number) if length < 3: blocks += ((number,),) else: len_of_first_block = length % 3 if len_of_first_block > 0: first_block = number[0:len_of_first_block], blocks += first_block, for i in range(len_of_first_block, length, 3): next_block = (number[i:i + 3],), blocks += next_block return blocks
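For orientation, the grouping behaviour on two inputs (assuming `converter` is an instance of the class that defines this method):

converter.split_by_3('42')        # -> (('42',),)
converter.split_by_3('1234567')   # -> (('1',), ('234',), ('567',))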
It adds the list of spellings to the blocks: (('1',), ('034',)) -> (('1', ['satu']), ('034', ['tiga', 'puluh', 'empat'])). :param blocks: tuple :rtype: tuple
def spell(self, blocks): """ it adds the list of spelling to the blocks ( ('1',),('034',)) -> (('1',['satu']),('234',['tiga', 'puluh', 'empat']) ) :param blocks: tuple :rtype: tuple """ word_blocks = () first_block = blocks[0] if len(first_block[0]) == 1: if first_block[0] == '0': spelling = ['nol'] else: spelling = self.BASE[int(first_block[0])] elif len(first_block[0]) == 2: spelling = self.puluh(first_block[0]) else: spelling = ( self.ratus(first_block[0][0]) + self.puluh(first_block[0][1:3]) ) word_blocks += (first_block[0], spelling), for block in blocks[1:]: spelling = self.ratus(block[0][0]) + self.puluh(block[0][1:3]) block += spelling, word_blocks += block, return word_blocks
Join the words by first joining the lists in the tuple. :param word_blocks: tuple :rtype: str
def join(self, word_blocks, float_part): """ join the words by first join lists in the tuple :param word_blocks: tuple :rtype: str """ word_list = [] length = len(word_blocks) - 1 first_block = word_blocks[0], start = 0 if length == 1 and first_block[0][0] == '1': word_list += ['seribu'] start = 1 for i in range(start, length + 1, 1): word_list += word_blocks[i][1] if not word_blocks[i][1]: continue if i == length: break word_list += [self.TENS_TO[(length - i) * 3]] return ' '.join(word_list) + float_part
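The three helpers above are meant to be chained. The sketch below assumes `converter` is an instance of the Indonesian spelling class that also provides the BASE, ratus and puluh helpers referenced in spell().

blocks = converter.split_by_3('1034')         # (('1',), ('034',))
words = converter.spell(blocks)               # (('1', ['satu']), ('034', ['tiga', 'puluh', 'empat']))
print(converter.join(words, float_part=''))   # 'seribu tiga puluh empat'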
Args: val: Numeric value. currency (str): Currency code. cents (bool): Verbose cents. separator (str): Cent separator. adjective (bool): Prefix currency name with adjective. Returns: str: Formatted string.
def to_currency(self, val, currency='EUR', cents=True, separator=',', adjective=False): """ Args: val: Numeric value currency (str): Currency code cents (bool): Verbose cents separator (str): Cent separator adjective (bool): Prefix currency name with adjective Returns: str: Formatted string """ left, right, is_negative = parse_currency_parts(val) try: cr1, cr2 = self.CURRENCY_FORMS[currency] except KeyError: raise NotImplementedError( 'Currency code "%s" not implemented for "%s"' % (currency, self.__class__.__name__)) if adjective and currency in self.CURRENCY_ADJECTIVES: cr1 = prefix_currency(self.CURRENCY_ADJECTIVES[currency], cr1) minus_str = "%s " % self.negword if is_negative else "" cents_str = self._cents_verbose(right, currency) \ if cents else self._cents_terse(right, currency) return u'%s%s %s%s %s %s' % ( minus_str, self.to_cardinal(left), self.pluralize(left, cr1), separator, cents_str, self.pluralize(right, cr2) )
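A hedged usage sketch: the converter instance and the exact wording depend on the concrete language class and its CURRENCY_FORMS table, so the outputs below are indicative only.

# Assuming `n2w` is a language instance whose CURRENCY_FORMS defines 'EUR':
n2w.to_currency(3.50, currency='EUR')    # e.g. 'three euro, fifty cents'
n2w.to_currency(-3.50, currency='EUR')   # same, prefixed with the language's negative word
n2w.to_currency(1.00, currency='XYZ')    # raises NotImplementedError (unknown currency code)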
Parse scoped selector.
def parse_scoped_selector(scoped_selector):
  """Parse scoped selector."""
  # Convert a macro (%scope/name) to (scope/name/macro.value).
  if scoped_selector[0] == '%':
    if scoped_selector.endswith('.value'):
      err_str = '{} is invalid cannot use % and end with .value'
      raise ValueError(err_str.format(scoped_selector))
    scoped_selector = scoped_selector[1:] + '/macro.value'
  scope_selector_list = scoped_selector.rsplit('/', 1)
  scope = ''.join(scope_selector_list[:-1])
  selector = scope_selector_list[-1]
  return scope, selector
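Two representative inputs for the function above:

parse_scoped_selector('outer/inner/my_function')
# -> ('outer/inner', 'my_function')
parse_scoped_selector('%train/learning_rate')
# -> ('train/learning_rate', 'macro.value')  (macros resolve to their .value selector)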
Parse a single statement.
def parse_statement(self): """Parse a single statement. Returns: Either a `BindingStatement`, `ImportStatement`, `IncludeStatement`, or `None` if no more statements can be parsed (EOF reached). """ self._skip_whitespace_and_comments() if self._current_token.kind == tokenize.ENDMARKER: return None # Save off location, but ignore char_num for any statement-level errors. stmt_loc = self._current_location(ignore_char_num=True) binding_key_or_keyword = self._parse_selector() statement = None if self._current_token.value != '=': if binding_key_or_keyword == 'import': module = self._parse_selector(scoped=False) statement = ImportStatement(module, stmt_loc) elif binding_key_or_keyword == 'include': str_loc = self._current_location() success, filename = self._maybe_parse_basic_type() if not success or not isinstance(filename, str): self._raise_syntax_error('Expected file path as string.', str_loc) statement = IncludeStatement(filename, stmt_loc) else: self._raise_syntax_error("Expected '='.") else: # We saw an '='. self._advance_one_token() value = self.parse_value() scope, selector, arg_name = parse_binding_key(binding_key_or_keyword) statement = BindingStatement(scope, selector, arg_name, value, stmt_loc) assert statement, 'Internal parsing error.' if (self._current_token.kind != tokenize.NEWLINE and self._current_token.kind != tokenize.ENDMARKER): self._raise_syntax_error('Expected newline.') elif self._current_token.kind == tokenize.NEWLINE: self._advance_one_token() return statement
Parse a single literal value.
def parse_value(self): """Parse a single literal value. Returns: The parsed value. """ parsers = [ self._maybe_parse_container, self._maybe_parse_basic_type, self._maybe_parse_configurable_reference, self._maybe_parse_macro ] for parser in parsers: success, value = parser() if success: return value self._raise_syntax_error('Unable to parse value.')
Advances to next line.
def advance_one_line(self): """Advances to next line.""" current_line = self._current_token.line_number while current_line == self._current_token.line_number: self._current_token = ConfigParser.Token(*next(self._token_generator))
Parse a (possibly scoped) selector.
def _parse_selector(self, scoped=True, allow_periods_in_scope=False): """Parse a (possibly scoped) selector. A selector is a sequence of one or more valid Python-style identifiers separated by periods (see also `SelectorMap`). A scoped selector is a selector that may be preceded by scope names (separated by slashes). Args: scoped: Whether scopes are allowed. allow_periods_in_scope: Whether to allow period characters in the scope names preceding the selector. Returns: The parsed selector (as a string). Raises: SyntaxError: If the scope or selector is malformatted. """ if self._current_token.kind != tokenize.NAME: self._raise_syntax_error('Unexpected token.') begin_line_num = self._current_token.begin[0] begin_char_num = self._current_token.begin[1] end_char_num = self._current_token.end[1] line = self._current_token.line selector_parts = [] # This accepts an alternating sequence of NAME and '/' or '.' tokens. step_parity = 0 while (step_parity == 0 and self._current_token.kind == tokenize.NAME or step_parity == 1 and self._current_token.value in ('/', '.')): selector_parts.append(self._current_token.value) step_parity = not step_parity end_char_num = self._current_token.end[1] self._advance_one_token() self._skip_whitespace_and_comments() # Due to tokenization, most whitespace has been stripped already. To prevent # whitespace inside the scoped selector, we verify that it matches an # untokenized version of the selector obtained from the first through last # character positions of the consumed tokens in the line being parsed. scoped_selector = ''.join(selector_parts) untokenized_scoped_selector = line[begin_char_num:end_char_num] # Also check that it's properly formatted (e.g., no consecutive slashes). scope_re = IDENTIFIER_RE if allow_periods_in_scope: scope_re = MODULE_RE selector_re = MODULE_RE scope_parts = scoped_selector.split('/') valid_format = all(scope_re.match(scope) for scope in scope_parts[:-1]) valid_format &= bool(selector_re.match(scope_parts[-1])) valid_format &= bool(scoped or len(scope_parts) == 1) if untokenized_scoped_selector != scoped_selector or not valid_format: location = (self._filename, begin_line_num, begin_char_num + 1, line) self._raise_syntax_error('Malformatted scope or selector.', location) return scoped_selector
Try to parse a container type (dict, list, or tuple).
def _maybe_parse_container(self): """Try to parse a container type (dict, list, or tuple).""" bracket_types = { '{': ('}', dict, self._parse_dict_item), '(': (')', tuple, self.parse_value), '[': (']', list, self.parse_value) } if self._current_token.value in bracket_types: open_bracket = self._current_token.value close_bracket, type_fn, parse_item = bracket_types[open_bracket] self._advance() values = [] saw_comma = False while self._current_token.value != close_bracket: values.append(parse_item()) if self._current_token.value == ',': saw_comma = True self._advance() elif self._current_token.value != close_bracket: self._raise_syntax_error("Expected ',' or '%s'." % close_bracket) # If it's just a single value enclosed in parentheses without a trailing # comma, it's not a tuple, so just grab the value. if type_fn is tuple and len(values) == 1 and not saw_comma: type_fn = lambda x: x[0] self._advance() return True, type_fn(values) return False, None
Try to parse a basic type (str, bool, number).
def _maybe_parse_basic_type(self): """Try to parse a basic type (str, bool, number).""" token_value = '' # Allow a leading dash to handle negative numbers. if self._current_token.value == '-': token_value += self._current_token.value self._advance() basic_type_tokens = [tokenize.NAME, tokenize.NUMBER, tokenize.STRING] continue_parsing = self._current_token.kind in basic_type_tokens if not continue_parsing: return False, None while continue_parsing: token_value += self._current_token.value try: value = ast.literal_eval(token_value) except Exception as e: # pylint: disable=broad-except err_str = "{}\n Failed to parse token '{}'" self._raise_syntax_error(err_str.format(e, token_value)) was_string = self._current_token.kind == tokenize.STRING self._advance() is_string = self._current_token.kind == tokenize.STRING continue_parsing = was_string and is_string return True, value
Try to parse a configurable reference (@[scope/name/]fn_name[()]).
def _maybe_parse_configurable_reference(self): """Try to parse a configurable reference (@[scope/name/]fn_name[()]).""" if self._current_token.value != '@': return False, None location = self._current_location() self._advance_one_token() scoped_name = self._parse_selector(allow_periods_in_scope=True) evaluate = False if self._current_token.value == '(': evaluate = True self._advance() if self._current_token.value != ')': self._raise_syntax_error("Expected ')'.") self._advance_one_token() self._skip_whitespace_and_comments() with utils.try_with_location(location): reference = self._delegate.configurable_reference(scoped_name, evaluate) return True, reference
Try to parse a macro (%scope/name).
def _maybe_parse_macro(self): """Try to parse an macro (%scope/name).""" if self._current_token.value != '%': return False, None location = self._current_location() self._advance_one_token() scoped_name = self._parse_selector(allow_periods_in_scope=True) with utils.try_with_location(location): macro = self._delegate.macro(scoped_name) return True, macro
Reraises exception, appending message to its string representation.
def augment_exception_message_and_reraise(exception, message): """Reraises `exception`, appending `message` to its string representation.""" class ExceptionProxy(type(exception)): """Acts as a proxy for an exception with an augmented message.""" __module__ = type(exception).__module__ def __init__(self): pass def __getattr__(self, attr_name): return getattr(exception, attr_name) def __str__(self): return str(exception) + message ExceptionProxy.__name__ = type(exception).__name__ proxy = ExceptionProxy() if six.PY3: ExceptionProxy.__qualname__ = type(exception).__qualname__ six.raise_from(proxy.with_traceback(exception.__traceback__), None) else: six.reraise(proxy, None, sys.exc_info()[2])
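A minimal sketch of how this helper is typically called from inside an except block; the appended text is illustrative.

try:
    int('not-a-number')
except ValueError as e:
    # Re-raises a ValueError whose str() ends with the extra context below.
    augment_exception_message_and_reraise(e, "\n  In binding for 'learning_rate'.")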
Convert an operative config string to markdown format.
def _markdownify_operative_config_str(self, string): """Convert an operative config string to markdown format.""" # TODO: Total hack below. Implement more principled formatting. def process(line): """Convert a single line to markdown format.""" if not line.startswith('#'): return ' ' + line line = line[2:] if line.startswith('===='): return '' if line.startswith('None'): return ' # None.' if line.endswith(':'): return '#### ' + line return line output_lines = [] for line in string.splitlines(): procd_line = process(line) if procd_line is not None: output_lines.append(procd_line) return '\n'.join(output_lines)
Writes out Gin's operative config, and maybe adds a summary of it.
def after_create_session(self, session=None, coord=None): """Writes out Gin's operative config, and maybe adds a summary of it.""" config_str = config.operative_config_str() if not tf.gfile.IsDirectory(self._output_dir): tf.gfile.MakeDirs(self._output_dir) global_step_val = 0 if session is not None: global_step = tf.train.get_global_step() if global_step is not None: global_step_val = session.run(global_step) filename = '%s-%s.gin' % (self._base_name, global_step_val) config_path = os.path.join(self._output_dir, filename) with tf.gfile.GFile(config_path, 'w') as f: f.write(config_str) if self._summarize_config: md_config_str = self._markdownify_operative_config_str(config_str) summary_metadata = summary_pb2.SummaryMetadata() summary_metadata.plugin_data.plugin_name = 'text' summary_metadata.plugin_data.content = b'{}' text_tensor = tf.make_tensor_proto(md_config_str) summary = summary_pb2.Summary() summary.value.add( tag='gin/' + self._base_name, tensor=text_tensor, metadata=summary_metadata) if not self._summary_writer: # Creating the FileWriter also creates the events file, so it should be # done here (where it is most likely to only occur on chief workers), as # opposed to in the constructor. self._summary_writer = tf.summary.FileWriterCache.get(self._output_dir) self._summary_writer.add_summary(summary, global_step_val) self._summary_writer.flush()
Find the first __init__ or __new__ method in the given class's MRO.
def _find_class_construction_fn(cls): """Find the first __init__ or __new__ method in the given class's MRO.""" for base in type.mro(cls): if '__init__' in base.__dict__: return base.__init__ if '__new__' in base.__dict__: return base.__new__
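A small illustration of the MRO walk above:

class A(object):
    pass

class B(A):
    def __init__(self):
        super(B, self).__init__()

print(_find_class_construction_fn(B))  # B's own __init__ (found in B.__dict__)
print(_find_class_construction_fn(A))  # object.__init__ (first hit while walking the MRO)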
Make sure fn can be wrapped cleanly by functools.wraps.
def _ensure_wrappability(fn): """Make sure `fn` can be wrapped cleanly by functools.wraps.""" # Handle "wrapped_descriptor" and "method-wrapper" types. if isinstance(fn, (type(object.__init__), type(object.__call__))): # pylint: disable=unnecessary-lambda wrappable_fn = lambda *args, **kwargs: fn(*args, **kwargs) wrappable_fn.__name__ = fn.__name__ wrappable_fn.__doc__ = fn.__doc__ wrappable_fn.__module__ = '' # These types have no __module__, sigh. wrappable_fn.__wrapped__ = fn return wrappable_fn # Otherwise we're good to go... return fn
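A sketch of what the helper does with a slot wrapper versus a plain function:

# Slot wrappers such as object.__init__ are swapped for a plain forwarding
# function that functools.wraps can copy metadata from cleanly.
wrappable = _ensure_wrappability(object.__init__)
print(wrappable.__name__)                        # '__init__'
print(wrappable.__wrapped__ is object.__init__)  # True

# Ordinary functions pass through unchanged.
def regular(x):
    return x

print(_ensure_wrappability(regular) is regular)  # True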
Decorate a function or class with the given decorator.
def _decorate_fn_or_cls(decorator, fn_or_cls, subclass=False): """Decorate a function or class with the given decorator. When `fn_or_cls` is a function, applies `decorator` to the function and returns the (decorated) result. When `fn_or_cls` is a class and the `subclass` parameter is `False`, this will replace `fn_or_cls.__init__` with the result of applying `decorator` to it. When `fn_or_cls` is a class and `subclass` is `True`, this will subclass the class, but with `__init__` defined to be the result of applying `decorator` to `fn_or_cls.__init__`. The decorated class has metadata (docstring, name, and module information) copied over from `fn_or_cls`. The goal is to provide a decorated class the behaves as much like the original as possible, without modifying it (for example, inspection operations using `isinstance` or `issubclass` should behave the same way as on the original class). Args: decorator: The decorator to use. fn_or_cls: The function or class to decorate. subclass: Whether to decorate classes by subclassing. This argument is ignored if `fn_or_cls` is not a class. Returns: The decorated function or class. """ if not inspect.isclass(fn_or_cls): return decorator(_ensure_wrappability(fn_or_cls)) construction_fn = _find_class_construction_fn(fn_or_cls) if subclass: class DecoratedClass(fn_or_cls): __doc__ = fn_or_cls.__doc__ __module__ = fn_or_cls.__module__ DecoratedClass.__name__ = fn_or_cls.__name__ if six.PY3: DecoratedClass.__qualname__ = fn_or_cls.__qualname__ cls = DecoratedClass else: cls = fn_or_cls decorated_fn = decorator(_ensure_wrappability(construction_fn)) if construction_fn.__name__ == '__new__': decorated_fn = staticmethod(decorated_fn) setattr(cls, construction_fn.__name__, decorated_fn) return cls
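A sketch of the two class-decoration modes, using a trivial pass-through decorator defined just for illustration:

def noisy(fn):
    def wrapper(*args, **kwargs):
        print('calling', getattr(fn, '__name__', fn))
        return fn(*args, **kwargs)
    return wrapper

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

# subclass=True returns an identically named subclass and leaves Point untouched;
# subclass=False would instead replace Point.__init__ in place.
DecoratedPoint = _decorate_fn_or_cls(noisy, Point, subclass=True)
p = DecoratedPoint(1, 2)        # prints: calling __init__
print(isinstance(p, Point))     # True
print(DecoratedPoint.__name__)  # 'Point'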
Checks whether selector should be skipped (if unknown).
def _should_skip(selector, skip_unknown): """Checks whether `selector` should be skipped (if unknown).""" _validate_skip_unknown(skip_unknown) if _REGISTRY.matching_selectors(selector): return False # Never skip known configurables. if isinstance(skip_unknown, (list, tuple, set)): return selector in skip_unknown return skip_unknown
Returns value in a format parseable by parse_value, or None.
def _format_value(value):
  """Returns `value` in a format parseable by `parse_value`, or `None`.

  Simply put, this function ensures that when it returns a string value, the
  following will hold:

      parse_value(_format_value(value)) == value

  Args:
    value: The value to format.

  Returns:
    A string representation of `value` when `value` is literally representable,
    or `None`.
  """
  literal = repr(value)
  try:
    if parse_value(literal) == value:
      return literal
  except SyntaxError:
    pass
  return None
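Two illustrative calls, assuming parse_value from the same module accepts the literal forms shown:

_format_value([1, 'two', 3.0])   # -> "[1, 'two', 3.0]"  (repr round-trips through parse_value)
_format_value(object())          # -> None  (repr of an arbitrary object is not parseable back)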
Clears the global configuration.
def clear_config(clear_constants=False): """Clears the global configuration. This clears any parameter values set by `bind_parameter` or `parse_config`, as well as the set of dynamically imported modules. It does not remove any configurable functions or classes from the registry of configurables. Args: clear_constants: Whether to clear constants created by `constant`. Defaults to False. """ _set_config_is_locked(False) _CONFIG.clear() _SINGLETONS.clear() if clear_constants: _CONSTANTS.clear() else: saved_constants = _CONSTANTS.copy() _CONSTANTS.clear() # Clear then redefine constants (re-adding bindings). for name, value in six.iteritems(saved_constants): constant(name, value) _IMPORTED_MODULES.clear() _OPERATIVE_CONFIG.clear()
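A short usage note, hedged because it assumes the function is exposed on the public gin API as usual: a typical test teardown clears bindings while keeping registered configurables.

clear_config()                      # drop bindings and imported-module records; keep constants
clear_config(clear_constants=True)  # additionally drop values registered via constant()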