Gets a re-labeled clone of this expression.
def relabeled_clone(self, relabels):
    """Gets a re-labeled clone of this expression."""
    return self.__class__(
        relabels.get(self.alias, self.alias),
        self.target,
        self.hstore_key,
        self.output_field
    )
Resolves the expression into a :see:HStoreColumn expression.
def resolve_expression(self, *args, **kwargs) -> HStoreColumn:
    """Resolves the expression into a :see:HStoreColumn expression."""
    original_expression = super().resolve_expression(*args, **kwargs)

    expression = HStoreColumn(
        original_expression.alias,
        original_expression.target,
        self.key
    )

    return expression
Compiles this expression into SQL.
def as_sql(self, compiler, connection):
    """Compiles this expression into SQL."""
    sql, params = super().as_sql(compiler, connection)
    return 'EXTRACT(epoch FROM {})'.format(sql), params
Renames the aliases for the specified annotations:
def rename_annotations(self, annotations) -> None:
    """Renames the aliases for the specified annotations:

        .annotate(myfield=F('somestuf__myfield'))
        .rename_annotations(myfield='field')

    Arguments:
        annotations: The annotations to rename. Mapping the old name to the new name.
    """
    for old_name, new_name in annotations.items():
        annotation = self.annotations.get(old_name)

        if not annotation:
            raise SuspiciousOperation((
                'Cannot rename annotation "{old_name}" to "{new_name}", because there'
                ' is no annotation named "{old_name}".'
            ).format(old_name=old_name, new_name=new_name))

        self._annotations = OrderedDict(
            [(new_name, v) if k == old_name else (k, v)
             for k, v in self._annotations.items()])

        if django.VERSION < (2, 0):
            self.set_annotation_mask(
                (new_name if v == old_name else v
                 for v in (self.annotation_select_mask or [])))
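A minimal usage sketch, assuming a model MyModel whose query set class exposes the method above; the model and field names are made up:

from django.db.models import F

# Hypothetical model with an hstore field 'title': annotate under a
# temporary alias, then rename the annotation to the real field name.
queryset = (
    MyModel.objects
    .annotate(title_en=F('title__en'))
    .rename_annotations(title_en='title')
)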
Adds an extra condition to an existing JOIN.
def add_join_conditions(self, conditions: Dict[str, Any]) -> None: """Adds an extra condition to an existing JOIN. This allows you to for example do: INNER JOIN othertable ON (mytable.id = othertable.other_id AND [extra conditions]) This does not work if nothing else in your query doesn't already generate the initial join in the first place. """ alias = self.get_initial_alias() opts = self.get_meta() for name, value in conditions.items(): parts = name.split(LOOKUP_SEP) join_info = self.setup_joins(parts, opts, alias, allow_many=True) self.trim_joins(join_info[1], join_info[3], join_info[4]) target_table = join_info[3][-1] field = join_info[1][-1] join = self.alias_map.get(target_table) if not join: raise SuspiciousOperation(( 'Cannot add an extra join condition for "%s", there\'s no' ' existing join to add it to.' ) % target_table) # convert the Join object into a ConditionalJoin object, which # allows us to add the extra condition if not isinstance(join, ConditionalJoin): self.alias_map[target_table] = ConditionalJoin.from_join(join) join = self.alias_map[target_table] join.add_condition(field, value)
Adds the given (model) fields to the select set. The field names are added in the order specified.
def add_fields(self, field_names: List[str], allow_m2m: bool=True) -> bool: """ Adds the given (model) fields to the select set. The field names are added in the order specified. This overrides the base class's add_fields method. This is called by the .values() or .values_list() method of the query set. It instructs the ORM to only select certain values. A lot of processing is neccesarry because it can be used to easily do joins. For example, `my_fk__name` pulls in the `name` field in foreign key `my_fk`. In our case, we want to be able to do `title__en`, where `title` is a HStoreField and `en` a key. This doesn't really involve a join. We iterate over the specified field names and filter out the ones that refer to HStoreField and compile it into an expression which is added to the list of to be selected fields using `self.add_select`. """ alias = self.get_initial_alias() opts = self.get_meta() cols = [] for name in field_names: parts = name.split(LOOKUP_SEP) # it cannot be a special hstore thing if there's no __ in it if len(parts) > 1: column_name, hstore_key = parts[:2] is_hstore, field = self._is_hstore_field(column_name) if is_hstore: cols.append( HStoreColumn(self.model._meta.db_table or self.model.name, field, hstore_key) ) continue join_info = self.setup_joins(parts, opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info[1], join_info[3], join_info[4] ) for target in targets: cols.append(target.get_col(final_alias)) if cols: self.set_select(cols)
Gets whether the field with the specified name is a HStoreField.
def _is_hstore_field(self, field_name: str) -> Tuple[bool, Optional[models.Field]]:
    """Gets whether the field with the specified name is a HStoreField.

    Returns:
        A tuple of a boolean indicating whether the field with the
        specified name is a HStoreField, and the field instance.
    """
    field_instance = None

    for field in self.model._meta.local_concrete_fields:
        if field.name == field_name or field.column == field_name:
            field_instance = field
            break

    return isinstance(field_instance, HStoreField), field_instance
Sets the values to be used in this query.
def values(self, objs: List, insert_fields: List, update_fields: List=[]):
    """Sets the values to be used in this query.

    Insert fields are fields that are definitely going to be inserted, and
    if an existing row is found, are going to be overwritten with the
    specified value.

    Update fields are fields that should be overwritten in case an update
    takes place rather than an insert. If we're dealing with a INSERT,
    these will not be used.

    Arguments:
        objs: The objects to apply this query to.
        insert_fields: The fields to use in the INSERT statement
        update_fields: The fields to only use in the UPDATE statement.
    """
    self.insert_values(insert_fields, objs, raw=False)
    self.update_fields = update_fields
Ran when a new model is created.
def create_model(self, model):
    """Ran when a new model is created."""
    for field in model._meta.local_fields:
        if not isinstance(field, HStoreField):
            continue

        self.add_field(model, field)
Ran when a model is being deleted.
def delete_model(self, model):
    """Ran when a model is being deleted."""
    for field in model._meta.local_fields:
        if not isinstance(field, HStoreField):
            continue

        self.remove_field(model, field)
Ran when the name of a model is changed.
def alter_db_table(self, model, old_db_table, new_db_table):
    """Ran when the name of a model is changed."""
    for field in model._meta.local_fields:
        if not isinstance(field, HStoreField):
            continue

        for key in self._iterate_required_keys(field):
            self._rename_hstore_required(
                old_db_table, new_db_table, field, field, key
            )
Ran when a field is added to a model.
def add_field(self, model, field):
    """Ran when a field is added to a model."""
    for key in self._iterate_required_keys(field):
        self._create_hstore_required(
            model._meta.db_table, field, key
        )
Ran when a field is removed from a model.
def remove_field(self, model, field):
    """Ran when a field is removed from a model."""
    for key in self._iterate_required_keys(field):
        self._drop_hstore_required(
            model._meta.db_table, field, key
        )
Ran when the configuration on a field changed.
def alter_field(self, model, old_field, new_field, strict=False): """Ran when the configuration on a field changed.""" is_old_field_hstore = isinstance(old_field, HStoreField) is_new_field_hstore = isinstance(new_field, HStoreField) if not is_old_field_hstore and not is_new_field_hstore: return old_required = getattr(old_field, 'required', []) or [] new_required = getattr(new_field, 'required', []) or [] # handle field renames before moving on if str(old_field.column) != str(new_field.column): for key in self._iterate_required_keys(old_field): self._rename_hstore_required( model._meta.db_table, model._meta.db_table, old_field, new_field, key ) # drop the constraints for keys that have been removed for key in old_required: if key not in new_required: self._drop_hstore_required( model._meta.db_table, old_field, key ) # create new constraints for keys that have been added for key in new_required: if key not in old_required: self._create_hstore_required( model._meta.db_table, new_field, key )
Creates a REQUIRED CONSTRAINT for the specified hstore key.
def _create_hstore_required(self, table_name, field, key):
    """Creates a REQUIRED CONSTRAINT for the specified hstore key."""
    name = self._required_constraint_name(table_name, field, key)

    sql = self.sql_hstore_required_create.format(
        name=self.quote_name(name),
        table=self.quote_name(table_name),
        field=self.quote_name(field.column),
        key=key
    )

    self.execute(sql)
Renames an existing REQUIRED CONSTRAINT for the specified hstore key.
def _rename_hstore_required(self, old_table_name, new_table_name, old_field, new_field, key):
    """Renames an existing REQUIRED CONSTRAINT for the specified hstore key."""
    old_name = self._required_constraint_name(old_table_name, old_field, key)
    new_name = self._required_constraint_name(new_table_name, new_field, key)

    sql = self.sql_hstore_required_rename.format(
        table=self.quote_name(new_table_name),
        old_name=self.quote_name(old_name),
        new_name=self.quote_name(new_name)
    )

    self.execute(sql)
Drops a REQUIRED CONSTRAINT for the specified hstore key.
def _drop_hstore_required(self, table_name, field, key):
    """Drops a REQUIRED CONSTRAINT for the specified hstore key."""
    name = self._required_constraint_name(table_name, field, key)

    sql = self.sql_hstore_required_drop.format(
        table=self.quote_name(table_name),
        name=self.quote_name(name)
    )

    self.execute(sql)
Gets the name for a CONSTRAINT that applies to a single hstore key.
def _required_constraint_name(table: str, field, key):
    """Gets the name for a CONSTRAINT that applies to a single hstore key.

    Arguments:
        table: The name of the table the field is a part of.
        field: The hstore field to create a UNIQUE INDEX for.
        key: The name of the hstore key to create the name for.

    Returns:
        The name for the UNIQUE index.
    """
    return '{table}_{field}_required_{postfix}'.format(
        table=table,
        field=field.column,
        postfix=key
    )
Creates the actual SQL used when applying the migration.
def create_sql(self, model, schema_editor, using=''):
    """Creates the actual SQL used when applying the migration."""
    if django.VERSION >= (2, 0):
        statement = super().create_sql(model, schema_editor, using)
        statement.template = self.sql_create_index
        statement.parts['condition'] = self.condition
        return statement
    else:
        sql_create_index = self.sql_create_index
        sql_parameters = {
            **Index.get_sql_create_template_values(self, model, schema_editor, using),
            'condition': self.condition
        }
        return sql_create_index % sql_parameters
Serializes the :see:ConditionalUniqueIndex for the migrations file.
def deconstruct(self):
    """Serializes the :see:ConditionalUniqueIndex for the migrations file."""
    path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
    path = path.replace('django.db.models.indexes', 'django.db.models')
    return path, (), {'fields': self.fields, 'name': self.name, 'condition': self.condition}
Creates a custom setup.py command.
def create_command(text, commands):
    """Creates a custom setup.py command."""
    class CustomCommand(BaseCommand):
        description = text

        def run(self):
            for cmd in commands:
                subprocess.check_call(cmd)

    return CustomCommand
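A sketch of how this factory could be wired into setup.py, assuming BaseCommand is a setuptools Command subclass that drives run(); the command names and shell commands below are made up:

from setuptools import setup

setup(
    name='example-package',
    cmdclass={
        # each inner list is handed to subprocess.check_call
        'lint': create_command('Runs the linter', [['flake8', '.']]),
        'test': create_command('Runs the test suite', [['pytest']]),
    },
)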
Gets the base class for the custom database back-end.
def _get_backend_base():
    """Gets the base class for the custom database back-end.

    This should be the Django PostgreSQL back-end. However, some people are
    already using a custom back-end from another package. We are nice people
    and expose an option that allows them to configure the back-end we base upon.

    As long as the specified base eventually also has the PostgreSQL back-end
    as a base, then everything should work as intended.
    """
    base_class_name = getattr(
        settings,
        'POSTGRES_EXTRA_DB_BACKEND_BASE',
        'django.db.backends.postgresql'
    )

    base_class_module = importlib.import_module(base_class_name + '.base')
    base_class = getattr(base_class_module, 'DatabaseWrapper', None)

    if not base_class:
        raise ImproperlyConfigured((
            '\'%s\' is not a valid database back-end.'
            ' The module does not define a DatabaseWrapper class.'
            ' Check the value of POSTGRES_EXTRA_DB_BACKEND_BASE.'
        ) % base_class_name)

    # the original check was inverted and used isinstance on a class;
    # the base must derive from the PostgreSQL back-end's DatabaseWrapper
    if not issubclass(base_class, Psycopg2DatabaseWrapper):
        raise ImproperlyConfigured((
            '\'%s\' is not a valid database back-end.'
            ' It does not inherit from the PostgreSQL back-end.'
            ' Check the value of POSTGRES_EXTRA_DB_BACKEND_BASE.'
        ) % base_class_name)

    return base_class
Ran when a new model is created.
def create_model(self, model):
    """Ran when a new model is created."""
    super().create_model(model)

    for mixin in self.post_processing_mixins:
        mixin.create_model(model)
Ran when a model is being deleted.
def delete_model(self, model):
    """Ran when a model is being deleted."""
    for mixin in self.post_processing_mixins:
        mixin.delete_model(model)

    super().delete_model(model)
Ran when the name of a model is changed.
def alter_db_table(self, model, old_db_table, new_db_table):
    """Ran when the name of a model is changed."""
    super(SchemaEditor, self).alter_db_table(
        model, old_db_table, new_db_table
    )

    for mixin in self.post_processing_mixins:
        mixin.alter_db_table(
            model, old_db_table, new_db_table
        )
Ran when a field is added to a model.
def add_field(self, model, field):
    """Ran when a field is added to a model."""
    super(SchemaEditor, self).add_field(model, field)

    for mixin in self.post_processing_mixins:
        mixin.add_field(model, field)
Ran when a field is removed from a model.
def remove_field(self, model, field):
    """Ran when a field is removed from a model."""
    for mixin in self.post_processing_mixins:
        mixin.remove_field(model, field)

    super(SchemaEditor, self).remove_field(model, field)
Ran when the configuration on a field changed.
def alter_field(self, model, old_field, new_field, strict=False):
    """Ran when the configuration on a field changed."""
    super(SchemaEditor, self).alter_field(
        model, old_field, new_field, strict
    )

    for mixin in self.post_processing_mixins:
        mixin.alter_field(
            model, old_field, new_field, strict
        )
Ran to prepare the configured database.
def prepare_database(self):
    """Ran to prepare the configured database.

    This is where we enable the `hstore` extension if it wasn't enabled yet."""
    super().prepare_database()

    with self.cursor() as cursor:
        try:
            cursor.execute('CREATE EXTENSION IF NOT EXISTS hstore')
        except ProgrammingError:
            # permission denied
            logger.warning(
                'Failed to create "hstore" extension. '
                'Tables with hstore columns may fail to migrate. '
                'If hstore is needed, make sure you are connected '
                'to the database as a superuser '
                'or add the extension manually.',
                exc_info=True)
Override the base class so it doesn't cast all values to strings.
def get_prep_value(self, value):
    """Override the base class so it doesn't cast all values to strings.

    psqlextra supports expressions in hstore fields, so casting all
    values to strings is a bad idea."""
    value = Field.get_prep_value(self, value)

    if isinstance(value, dict):
        prep_value = {}
        for key, val in value.items():
            if isinstance(val, Expression):
                prep_value[key] = val
            elif val is not None:
                prep_value[key] = str(val)
            else:
                prep_value[key] = val

        value = prep_value

    if isinstance(value, list):
        value = [str(item) for item in value]

    return value
Gets the values to pass to :see:__init__ when re-creating this object.
def deconstruct(self):
    """Gets the values to pass to :see:__init__ when re-creating this object."""
    name, path, args, kwargs = super(HStoreField, self).deconstruct()

    if self.uniqueness is not None:
        kwargs['uniqueness'] = self.uniqueness

    if self.required is not None:
        kwargs['required'] = self.required

    return name, path, args, kwargs
Extra prep on query values by converting dictionaries into :see:HStoreValue expressions.
def _prepare_query_values(self):
    """Extra prep on query values by converting dictionaries into
    :see:HStoreValue expressions.

    This allows putting expressions in a dictionary. The :see:HStoreValue
    will take care of resolving the expressions inside the dictionary."""
    new_query_values = []

    for field, model, val in self.query.values:
        if isinstance(val, dict):
            val = HStoreValue(val)

        new_query_values.append((field, model, val))

    self.query.values = new_query_values
Builds the RETURNING part of the query.
def _form_returning(self):
    """Builds the RETURNING part of the query."""
    qn = self.connection.ops.quote_name
    return ' RETURNING %s' % qn(self.query.model._meta.pk.attname)
Builds the SQL INSERT statement.
def as_sql(self, return_id=False):
    """Builds the SQL INSERT statement."""
    queries = [
        self._rewrite_insert(sql, params, return_id)
        for sql, params in super().as_sql()
    ]

    return queries
Rewrites a formed SQL INSERT query to include the ON CONFLICT clause.
def _rewrite_insert(self, sql, params, return_id=False):
    """Rewrites a formed SQL INSERT query to include the ON CONFLICT clause.

    Arguments:
        sql: The SQL INSERT query to rewrite.
        params: The parameters passed to the query.
        returning: What to put in the `RETURNING` clause of the resulting query.

    Returns:
        A tuple of the rewritten SQL query and new params.
    """
    returning = self.qn(self.query.model._meta.pk.attname) if return_id else '*'

    if self.query.conflict_action.value == 'UPDATE':
        return self._rewrite_insert_update(sql, params, returning)
    elif self.query.conflict_action.value == 'NOTHING':
        return self._rewrite_insert_nothing(sql, params, returning)

    raise SuspiciousOperation((
        '%s is not a valid conflict action, specify '
        'ConflictAction.UPDATE or ConflictAction.NOTHING.'
    ) % str(self.query.conflict_action))
Rewrites a formed SQL INSERT query to include the ON CONFLICT DO UPDATE clause.
def _rewrite_insert_update(self, sql, params, returning):
    """Rewrites a formed SQL INSERT query to include the ON CONFLICT DO UPDATE clause."""
    update_columns = ', '.join([
        '{0} = EXCLUDED.{0}'.format(self.qn(field.column))
        for field in self.query.update_fields
    ])

    # build the conflict target, the columns to watch
    # for conflicts
    conflict_target = self._build_conflict_target()

    index_predicate = self.query.index_predicate

    sql_template = (
        '{insert} ON CONFLICT {conflict_target} DO UPDATE '
        'SET {update_columns} RETURNING {returning}'
    )

    if index_predicate:
        sql_template = (
            '{insert} ON CONFLICT {conflict_target} WHERE {index_predicate} DO UPDATE '
            'SET {update_columns} RETURNING {returning}'
        )

    return (
        sql_template.format(
            insert=sql,
            conflict_target=conflict_target,
            update_columns=update_columns,
            returning=returning,
            index_predicate=index_predicate,
        ),
        params
    )
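For orientation, a sketch of what the rewritten statement would look like for a hypothetical table "mymodel" with conflict target ("id") and a single update field "name"; the INSERT prefix is whatever the base compiler produced, and values are still bound through params:

expected_sql = (
    'INSERT INTO "mymodel" ("id", "name") VALUES (%s, %s) '
    'ON CONFLICT ("id") DO UPDATE SET "name" = EXCLUDED."name" '
    'RETURNING *'
)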
Rewrites a formed SQL INSERT query to include the ON CONFLICT DO NOTHING clause.
def _rewrite_insert_nothing(self, sql, params, returning):
    """Rewrites a formed SQL INSERT query to include the ON CONFLICT DO NOTHING clause."""
    # build the conflict target, the columns to watch
    # for conflicts
    conflict_target = self._build_conflict_target()

    where_clause = ' AND '.join([
        '{0} = %s'.format(self._format_field_name(field_name))
        for field_name in self.query.conflict_target
    ])

    where_clause_params = [
        self._format_field_value(field_name)
        for field_name in self.query.conflict_target
    ]

    params = params + tuple(where_clause_params)

    # this looks complicated, and it is, but it is for a reason... a normal
    # ON CONFLICT DO NOTHING doesn't return anything if the row already exists
    # so we do DO UPDATE instead that never executes to lock the row, and then
    # select from the table in case we're dealing with an existing row..
    return (
        (
            'WITH insdata AS ('
            '{insert} ON CONFLICT {conflict_target} DO UPDATE'
            ' SET {pk_column} = NULL WHERE FALSE RETURNING {returning})'
            ' SELECT * FROM insdata UNION ALL'
            ' SELECT {returning} FROM {table} WHERE {where_clause} LIMIT 1;'
        ).format(
            insert=sql,
            conflict_target=conflict_target,
            pk_column=self.qn(self.query.model._meta.pk.column),
            returning=returning,
            table=self.query.objs[0]._meta.db_table,
            where_clause=where_clause
        ),
        params
    )
Builds the conflict_target for the ON CONFLICT clause.
def _build_conflict_target(self):
    """Builds the `conflict_target` for the ON CONFLICT clause."""
    conflict_target = []

    if not isinstance(self.query.conflict_target, list):
        raise SuspiciousOperation((
            '%s is not a valid conflict target, specify '
            'a list of column names, or tuples with column '
            'names and hstore key.'
        ) % str(self.query.conflict_target))

    def _assert_valid_field(field_name):
        field_name = self._normalize_field_name(field_name)
        if self._get_model_field(field_name):
            return

        raise SuspiciousOperation((
            '%s is not a valid conflict target, specify '
            'a list of column names, or tuples with column '
            'names and hstore key.'
        ) % str(field_name))

    for field_name in self.query.conflict_target:
        _assert_valid_field(field_name)

        # special handling for hstore keys
        if isinstance(field_name, tuple):
            conflict_target.append(
                '(%s->\'%s\')' % (
                    self._format_field_name(field_name),
                    field_name[1]
                )
            )
        else:
            conflict_target.append(
                self._format_field_name(field_name))

    return '(%s)' % ','.join(conflict_target)
Gets the field on a model with the specified name.
def _get_model_field(self, name: str):
    """Gets the field on a model with the specified name.

    Arguments:
        name: The name of the field to look for. This can be both the actual
            field name, or the name of the column, both will work :)

    Returns:
        The field with the specified name or None if no such field exists.
    """
    field_name = self._normalize_field_name(name)

    # 'pk' has special meaning and always refers to the primary
    # key of a model, we have to respect this de-facto standard behaviour
    if field_name == 'pk' and self.query.model._meta.pk:
        return self.query.model._meta.pk

    for field in self.query.model._meta.local_concrete_fields:
        if field.name == field_name or field.column == field_name:
            return field

    return None
Formats a field's name for usage in SQL.
def _format_field_name(self, field_name) -> str:
    """Formats a field's name for usage in SQL.

    Arguments:
        field_name: The field name to format.

    Returns:
        The specified field name formatted for usage in SQL.
    """
    field = self._get_model_field(field_name)
    return self.qn(field.column)
Formats a field's value for usage in SQL.
def _format_field_value(self, field_name) -> str:
    """Formats a field's value for usage in SQL.

    Arguments:
        field_name: The name of the field to format the value of.

    Returns:
        The field's value formatted for usage in SQL.
    """
    field_name = self._normalize_field_name(field_name)
    field = self._get_model_field(field_name)

    return SQLInsertCompiler.prepare_value(
        self,
        field,
        # Note: this deliberately doesn't use `pre_save_val` as we don't
        # want things like auto_now on DateTimeField (etc.) to change the
        # value. We rely on pre_save having already been done by the
        # underlying compiler so that things like FileField have already had
        # the opportunity to save out their data.
        getattr(self.query.objs[0], field.attname)
    )
Normalizes a field name into a string by extracting the field name if it was specified as a reference to a HStore key (as a tuple).
def _normalize_field_name(self, field_name) -> str:
    """Normalizes a field name into a string by extracting the field name
    if it was specified as a reference to a HStore key (as a tuple).

    Arguments:
        field_name: The field name to normalize.

    Returns:
        The normalized field name.
    """
    if isinstance(field_name, tuple):
        field_name, _ = field_name

    return field_name
Ran when the name of a model is changed.
def alter_db_table(self, model, old_db_table, new_db_table):
    """Ran when the name of a model is changed."""
    for field in model._meta.local_fields:
        if not isinstance(field, HStoreField):
            continue

        for keys in self._iterate_uniqueness_keys(field):
            self._rename_hstore_unique(
                old_db_table, new_db_table, field, field, keys
            )
Ran when a field is added to a model.
def add_field(self, model, field):
    """Ran when a field is added to a model."""
    for keys in self._iterate_uniqueness_keys(field):
        self._create_hstore_unique(
            model, field, keys
        )
Ran when a field is removed from a model.
def remove_field(self, model, field):
    """Ran when a field is removed from a model."""
    for keys in self._iterate_uniqueness_keys(field):
        self._drop_hstore_unique(
            model, field, keys
        )
Ran when the configuration on a field changed.
def alter_field(self, model, old_field, new_field, strict=False): """Ran when the configuration on a field changed.""" is_old_field_hstore = isinstance(old_field, HStoreField) is_new_field_hstore = isinstance(new_field, HStoreField) if not is_old_field_hstore and not is_new_field_hstore: return old_uniqueness = getattr(old_field, 'uniqueness', []) or [] new_uniqueness = getattr(new_field, 'uniqueness', []) or [] # handle field renames before moving on if str(old_field.column) != str(new_field.column): for keys in self._iterate_uniqueness_keys(old_field): self._rename_hstore_unique( model._meta.db_table, model._meta.db_table, old_field, new_field, keys ) # drop the indexes for keys that have been removed for keys in old_uniqueness: if keys not in new_uniqueness: self._drop_hstore_unique( model, old_field, self._compose_keys(keys) ) # create new indexes for keys that have been added for keys in new_uniqueness: if keys not in old_uniqueness: self._create_hstore_unique( model, new_field, self._compose_keys(keys) )
Creates a UNIQUE constraint for the specified hstore keys.
def _create_hstore_unique(self, model, field, keys):
    """Creates a UNIQUE constraint for the specified hstore keys."""
    name = self._unique_constraint_name(model._meta.db_table, field, keys)

    columns = [
        '(%s->\'%s\')' % (field.column, key)
        for key in keys
    ]

    sql = self.sql_hstore_unique_create.format(
        name=self.quote_name(name),
        table=self.quote_name(model._meta.db_table),
        columns=','.join(columns)
    )

    self.execute(sql)
Renames an existing UNIQUE constraint for the specified hstore keys.
def _rename_hstore_unique(self, old_table_name, new_table_name, old_field, new_field, keys):
    """Renames an existing UNIQUE constraint for the specified hstore keys."""
    old_name = self._unique_constraint_name(old_table_name, old_field, keys)
    new_name = self._unique_constraint_name(new_table_name, new_field, keys)

    sql = self.sql_hstore_unique_rename.format(
        old_name=self.quote_name(old_name),
        new_name=self.quote_name(new_name)
    )

    self.execute(sql)
Drops a UNIQUE constraint for the specified hstore keys.
def _drop_hstore_unique(self, model, field, keys):
    """Drops a UNIQUE constraint for the specified hstore keys."""
    name = self._unique_constraint_name(model._meta.db_table, field, keys)

    sql = self.sql_hstore_unique_drop.format(name=self.quote_name(name))
    self.execute(sql)
Gets the name for a UNIQUE INDEX that applies to one or more keys in a hstore field.
def _unique_constraint_name(table: str, field, keys):
    """Gets the name for a UNIQUE INDEX that applies to one or more keys
    in a hstore field.

    Arguments:
        table: The name of the table the field is a part of.
        field: The hstore field to create a UNIQUE INDEX for.
        key: The name of the hstore key to create the name for.
            This can also be a tuple of multiple names.

    Returns:
        The name for the UNIQUE index.
    """
    postfix = '_'.join(keys)

    return '{table}_{field}_unique_{postfix}'.format(
        table=table,
        field=field.column,
        postfix=postfix
    )
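Because the index name is a plain string derived from the table, the column and the keys, its shape is easy to illustrate with made-up names and a minimal stand-in for the field object:

from types import SimpleNamespace

field = SimpleNamespace(column='title')  # stand-in: only `column` is needed here
keys = ('en', 'ro')
name = '{table}_{field}_unique_{postfix}'.format(
    table='myapp_mymodel', field=field.column, postfix='_'.join(keys))
print(name)  # myapp_mymodel_title_unique_en_ro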
Iterates over the keys marked as unique in the specified field.
def _iterate_uniqueness_keys(self, field):
    """Iterates over the keys marked as "unique" in the specified field.

    Arguments:
        field: The field of which key's to iterate over.
    """
    uniqueness = getattr(field, 'uniqueness', None)
    if not uniqueness:
        return

    for keys in uniqueness:
        composed_keys = self._compose_keys(keys)
        yield composed_keys
Adds an extra condition to this join.
def add_condition(self, field, value: Any) -> None:
    """Adds an extra condition to this join.

    Arguments:
        field: The field that the condition will apply to.
        value: The value to compare.
    """
    self.extra_conditions.append((field, value))
Compiles this JOIN into a SQL string.
def as_sql(self, compiler, connection) -> Tuple[str, List[Any]]: """Compiles this JOIN into a SQL string.""" sql, params = super().as_sql(compiler, connection) qn = compiler.quote_name_unless_alias # generate the extra conditions extra_conditions = ' AND '.join([ '{}.{} = %s'.format( qn(self.table_name), qn(field.column) ) for field, value in self.extra_conditions ]) # add to the existing params, so the connector will # actually nicely format the value for us for _, value in self.extra_conditions: params.append(value) # rewrite the sql to include the extra conditions rewritten_sql = sql.replace(')', ' AND {})'.format(extra_conditions)) return rewritten_sql, params
Creates a new :see:ConditionalJoin from the specified :see:Join object.
def from_join(cls, join: Join) -> 'ConditionalJoin':
    """Creates a new :see:ConditionalJoin from the specified :see:Join object.

    Arguments:
        join: The :see:Join object to create the :see:ConditionalJoin object from.

    Returns:
        A :see:ConditionalJoin object created from the :see:Join object.
    """
    return cls(
        join.table_name,
        join.parent_alias,
        join.table_alias,
        join.join_type,
        join.join_field,
        join.nullable
    )
Approximate the 95% confidence interval for Student's T distribution.
def tdist95conf_level(df):
    """Approximate the 95% confidence interval for Student's T distribution.

    Given the degrees of freedom, returns an approximation to the 95%
    confidence interval for the Student's T distribution.

    Args:
        df: An integer, the number of degrees of freedom.

    Returns:
        A float.
    """
    df = int(round(df))
    highest_table_df = len(_T_DIST_95_CONF_LEVELS)
    if df >= 200:
        return 1.960
    if df >= 100:
        return 1.984
    if df >= 80:
        return 1.990
    if df >= 60:
        return 2.000
    if df >= 50:
        return 2.009
    if df >= 40:
        return 2.021
    if df >= highest_table_df:
        return _T_DIST_95_CONF_LEVELS[highest_table_df - 1]
    return _T_DIST_95_CONF_LEVELS[df]
Find the pooled sample variance for two samples.
def pooled_sample_variance(sample1, sample2):
    """Find the pooled sample variance for two samples.

    Args:
        sample1: one sample.
        sample2: the other sample.

    Returns:
        Pooled sample variance, as a float.
    """
    deg_freedom = len(sample1) + len(sample2) - 2
    mean1 = statistics.mean(sample1)
    squares1 = ((x - mean1) ** 2 for x in sample1)
    mean2 = statistics.mean(sample2)
    squares2 = ((x - mean2) ** 2 for x in sample2)

    return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
Calculate a t-test score for the difference between two samples.
def tscore(sample1, sample2):
    """Calculate a t-test score for the difference between two samples.

    Args:
        sample1: one sample.
        sample2: the other sample.

    Returns:
        The t-test score, as a float.
    """
    if len(sample1) != len(sample2):
        raise ValueError("different number of values")
    error = pooled_sample_variance(sample1, sample2) / len(sample1)
    diff = statistics.mean(sample1) - statistics.mean(sample2)
    return diff / math.sqrt(error * 2)
Determine whether two samples differ significantly.
def is_significant(sample1, sample2):
    """Determine whether two samples differ significantly.

    This uses a Student's two-sample, two-tailed t-test with alpha=0.95.

    Args:
        sample1: one sample.
        sample2: the other sample.

    Returns:
        (significant, t_score) where significant is a bool indicating whether
        the two samples differ significantly; t_score is the score from the
        two-sample T test.
    """
    deg_freedom = len(sample1) + len(sample2) - 2
    critical_value = tdist95conf_level(deg_freedom)
    t_score = tscore(sample1, sample2)
    return (abs(t_score) >= critical_value, t_score)
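A small worked example, assuming the helpers above (pooled_sample_variance, tscore, tdist95conf_level and the _T_DIST_95_CONF_LEVELS table) live in the same module; the sample values are arbitrary:

sample1 = [19.8, 20.1, 19.9, 20.3, 20.0]
sample2 = [21.2, 20.9, 21.4, 21.1, 21.0]

significant, t = is_significant(sample1, sample2)
print(significant, round(t, 2))  # True -9.04: the samples are clearly separated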
Return a topological sorting of nodes in a graph.
def topoSort(roots, getParents):
    """Return a topological sorting of nodes in a graph.

    roots - list of root nodes to search from
    getParents - function which returns the parents of a given node
    """
    results = []
    visited = set()

    # Use iterative version to avoid stack limits for large datasets
    stack = [(node, 0) for node in roots]
    while stack:
        current, state = stack.pop()
        if state == 0:
            # before recursing
            if current not in visited:
                visited.add(current)
                stack.append((current, 1))
                stack.extend((parent, 0) for parent in getParents(current))
        else:
            # after recursing
            assert(current in visited)
            results.append(current)

    return results
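A quick usage example with a made-up three-node dependency graph; parents come out before the nodes that depend on them:

parents = {'a': [], 'b': ['a'], 'c': ['a', 'b']}  # 'c' depends on 'a' and 'b'
print(topoSort(['c'], lambda node: parents[node]))  # ['a', 'b', 'c']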
permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)
def permutations(iterable, r=None):
    """permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)"""
    pool = tuple(iterable)
    n = len(pool)
    if r is None:
        r = n
    indices = list(range(n))
    cycles = list(range(n - r + 1, n + 1))[::-1]
    yield tuple(pool[i] for i in indices[:r])
    while n:
        for i in reversed(range(r)):
            cycles[i] -= 1
            if cycles[i] == 0:
                indices[i:] = indices[i + 1:] + indices[i:i + 1]
                cycles[i] = n - i
            else:
                j = cycles[i]
                indices[i], indices[-j] = indices[-j], indices[i]
                yield tuple(pool[i] for i in indices[:r])
                break
        else:
            return
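The docstring's own example, run through the generator:

print(list(permutations(range(3), 2)))
# [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]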
N - Queens solver.
def n_queens(queen_count):
    """N-Queens solver.

    Args:
        queen_count: the number of queens to solve for. This is also the
            board size.

    Yields:
        Solutions to the problem. Each yielded value looks like
        (3, 8, 2, 1, 4, ..., 6) where each number is the column position
        for the queen, and the index into the tuple indicates the row.
    """
    cols = range(queen_count)
    for vec in permutations(cols):
        if (queen_count == len(set(vec[i] + i for i in cols))
                        == len(set(vec[i] - i for i in cols))):
            yield vec
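A quick sanity check against the well-known solution counts:

print(sum(1 for _ in n_queens(6)))  # 4
print(sum(1 for _ in n_queens(8)))  # 92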
uct tree search
def play(self, board): """ uct tree search """ color = board.color node = self path = [node] while True: pos = node.select(board) if pos == PASS: break board.move(pos) child = node.pos_child[pos] if not child: child = node.pos_child[pos] = UCTNode() child.unexplored = board.useful_moves() child.pos = pos child.parent = node path.append(child) break path.append(child) node = child self.random_playout(board) self.update_path(board, color, path)
select move; unexplored children first, then according to uct value
def select(self, board):
    """ select move; unexplored children first, then according to uct value """
    if self.unexplored:
        i = random.randrange(len(self.unexplored))
        pos = self.unexplored[i]
        self.unexplored[i] = self.unexplored[len(self.unexplored) - 1]
        self.unexplored.pop()
        return pos
    elif self.bestchild:
        return self.bestchild.pos
    else:
        return PASS
random play until both players pass
def random_playout(self, board):
    """ random play until both players pass """
    for x in range(MAXMOVES):  # XXX while not self.finished?
        if board.finished:
            break
        board.move(board.random_move())
update win/loss count along path
def update_path(self, board, color, path):
    """ update win/loss count along path """
    wins = board.score(BLACK) >= board.score(WHITE)
    for node in path:
        if color == BLACK:
            color = WHITE
        else:
            color = BLACK
        if wins == (color == BLACK):
            node.wins += 1
        else:
            node.losses += 1
        if node.parent:
            node.parent.bestchild = node.parent.best_child()
Filters out benchmarks not supported by both Pythons.
def filter_benchmarks(benchmarks, bench_funcs, base_ver):
    """Filters out benchmarks not supported by both Pythons.

    Args:
        benchmarks: a set() of benchmark names
        bench_funcs: dict mapping benchmark names to functions
        python: the interpreter commands (as lists)

    Returns:
        The filtered set of benchmark names
    """
    for bm in list(benchmarks):
        func = bench_funcs[bm]
        if getattr(func, '_python2_only', False) and (3, 0) <= base_ver:
            benchmarks.discard(bm)
            logging.info("Skipping Python2-only benchmark %s; "
                         "not compatible with Python %s" % (bm, base_ver))
            continue
    return benchmarks
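A usage sketch with a hypothetical benchmark registry; only the _python2_only attribute checked above matters for the filtering:

def bm_modern(*args): ...
def bm_legacy(*args): ...
bm_legacy._python2_only = True  # marked the same way the real benchmark functions are

bench_funcs = {'modern': bm_modern, 'legacy': bm_legacy}
print(filter_benchmarks({'modern', 'legacy'}, bench_funcs, (3, 8)))  # {'modern'}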
Recursively expand benchmark names.
def expand_benchmark_name(bm_name, bench_groups):
    """Recursively expand benchmark names.

    Args:
        bm_name: string naming a benchmark or benchmark group.

    Yields:
        Names of actual benchmarks, with all group names fully expanded.
    """
    expansion = bench_groups.get(bm_name)
    if expansion:
        for name in expansion:
            for name in expand_benchmark_name(name, bench_groups):
                yield name
    else:
        yield bm_name
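A usage sketch with a made-up group table; group names expand recursively and plain benchmark names pass through unchanged:

bench_groups = {
    'default': ['calls', 'nqueens'],
    'calls': ['call_simple', 'call_method'],
}
print(list(expand_benchmark_name('default', bench_groups)))
# ['call_simple', 'call_method', 'nqueens']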
Generates the list of strings that will be used in the benchmarks.
def gen_string_table(n): """Generates the list of strings that will be used in the benchmarks. All strings have repeated prefixes and suffices, and n specifies the number of repetitions. """ strings = [] def append(s): if USE_BYTES_IN_PY3K: strings.append(s.encode('latin1')) else: strings.append(s) append('-' * n + 'Perl' + '-' * n) append('P' * n + 'Perl' + 'P' * n) append('-' * n + 'Perl' + '-' * n) append('-' * n + 'Perl' + '-' * n) append('-' * n + 'Python' + '-' * n) append('P' * n + 'Python' + 'P' * n) append('-' * n + 'Python' + '-' * n) append('-' * n + 'Python' + '-' * n) append('-' * n + 'Python' + '-' * n) append('-' * n + 'Python' + '-' * n) append('-' * n + 'Perl' + '-' * n) append('P' * n + 'Perl' + 'P' * n) append('-' * n + 'Perl' + '-' * n) append('-' * n + 'Perl' + '-' * n) append('-' * n + 'PythonPython' + '-' * n) append('P' * n + 'PythonPython' + 'P' * n) append('-' * n + 'a5,b7,c9,' + '-' * n) append('-' * n + 'a5,b7,c9,' + '-' * n) append('-' * n + 'a5,b7,c9,' + '-' * n) append('-' * n + 'a5,b7,c9,' + '-' * n) append('-' * n + 'Python' + '-' * n) return strings
Initialize the strings we'll run the regexes against.
def init_benchmarks(n_values=None): """Initialize the strings we'll run the regexes against. The strings used in the benchmark are prefixed and suffixed by strings that are repeated n times. The sequence n_values contains the values for n. If n_values is None the values of n from the original benchmark are used. The generated list of strings is cached in the string_tables variable, which is indexed by n. Returns: A list of string prefix/suffix lengths. """ if n_values is None: n_values = (0, 5, 50, 250, 1000, 5000, 10000) string_tables = {n: gen_string_table(n) for n in n_values} regexs = gen_regex_table() data = [] for n in n_values: for id in xrange(len(regexs)): regex = regexs[id] string = string_tables[n][id] data.append((regex, string)) return data
Pure-Python implementation of itertools.combinations(l, 2).
def combinations(l):
    """Pure-Python implementation of itertools.combinations(l, 2)."""
    result = []
    for x in xrange(len(l) - 1):
        ls = l[x + 1:]
        for y in ls:
            result.append((l[x], y))
    return result
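A quick check of the helper; it targets Python 2 (xrange), so alias the name first if you run it on Python 3:

xrange = range  # shim for this Python 2-era helper
print(combinations([1, 2, 3]))  # [(1, 2), (1, 3), (2, 3)]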
Returns the domain of the B-Spline
def GetDomain(self):
    """Returns the domain of the B-Spline"""
    return (self.knots[self.degree - 1],
            self.knots[len(self.knots) - self.degree])
Fetch the messages.
def fetch_items(self, category, **kwargs): """Fetch the messages. :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Fetching messages of '%s' - '%s' channel from %s", self.url, self.channel, str(from_date)) fetching = True page = 0 nposts = 0 # Convert timestamp to integer for comparing since = int(from_date.timestamp() * 1000) while fetching: raw_posts = self.client.posts(self.channel, page=page) posts_before = nposts for post in self._parse_posts(raw_posts): if post['update_at'] < since: fetching = False break # Fetch user data user_id = post['user_id'] user = self._get_or_fetch_user(user_id) post['user_data'] = user yield post nposts += 1 if fetching: # If no new posts were fetched; stop the process if posts_before == nposts: fetching = False else: page += 1 logger.info("Fetch process completed: %s posts fetched", nposts)
Init client
def _init_client(self, from_archive=False):
    """Init client"""
    return MattermostClient(self.url, self.api_token,
                            max_items=self.max_items,
                            sleep_for_rate=self.sleep_for_rate,
                            min_rate_to_sleep=self.min_rate_to_sleep,
                            sleep_time=self.sleep_time,
                            archive=self.archive, from_archive=from_archive)
Parse posts and returns in order.
def _parse_posts(self, raw_posts):
    """Parse posts and returns in order."""
    parsed_posts = self.parse_json(raw_posts)

    # Posts are not sorted. The order is provided by
    # 'order' key.
    for post_id in parsed_posts['order']:
        yield parsed_posts['posts'][post_id]
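Since the Mattermost API returns posts as an unordered mapping plus an 'order' list, a tiny illustration with made-up post ids shows what the generator yields:

parsed_posts = {
    'order': ['p2', 'p1'],
    'posts': {
        'p1': {'id': 'p1', 'message': 'first'},
        'p2': {'id': 'p2', 'message': 'second'},
    },
}
for post_id in parsed_posts['order']:
    print(parsed_posts['posts'][post_id]['id'])  # p2, then p1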
Fetch the history of a channel.
def posts(self, channel, page=None):
    """Fetch the history of a channel."""
    entrypoint = self.RCHANNELS + '/' + channel + '/' + self.RPOSTS

    params = {
        self.PPER_PAGE: self.max_items
    }

    if page is not None:
        params[self.PPAGE] = page

    response = self._fetch(entrypoint, params)

    return response
Fetch user data.
def user(self, user):
    """Fetch user data."""
    entrypoint = self.RUSERS + '/' + user
    response = self._fetch(entrypoint, None)

    return response
Fetch a resource.
def _fetch(self, entry_point, params):
    """Fetch a resource.

    :param entrypoint: entrypoint to access
    :param params: dict with the HTTP parameters needed to access the
        given entry point
    """
    url = self.API_URL % {'base_url': self.base_url, 'entrypoint': entry_point}

    logger.debug("Mattermost client requests: %s params: %s",
                 entry_point, str(params))

    r = self.fetch(url, payload=params)

    return r.text
Initialize mailing lists directory path
def _pre_init(self):
    """Initialize mailing lists directory path"""
    if not self.parsed_args.mboxes_path:
        base_path = os.path.expanduser('~/.perceval/mailinglists/')
        dirpath = os.path.join(base_path, self.parsed_args.url)
    else:
        dirpath = self.parsed_args.mboxes_path

    setattr(self.parsed_args, 'dirpath', dirpath)
Fetch the mbox files from the remote archiver.
def fetch(self, from_date=DEFAULT_DATETIME): """Fetch the mbox files from the remote archiver. Stores the archives in the path given during the initialization of this object. Those archives which a not valid extension will be ignored. Pipermail archives usually have on their file names the date of the archives stored following the schema year-month. When `from_date` property is called, it will return the mboxes which their year and month are equal or after that date. :param from_date: fetch archives that store messages equal or after the given date; only year and month values are compared :returns: a list of tuples, storing the links and paths of the fetched archives """ logger.info("Downloading mboxes from '%s' to since %s", self.url, str(from_date)) logger.debug("Storing mboxes in '%s'", self.dirpath) from_date = datetime_to_utc(from_date) r = requests.get(self.url, verify=self.verify) r.raise_for_status() links = self._parse_archive_links(r.text) fetched = [] if not os.path.exists(self.dirpath): os.makedirs(self.dirpath) for l in links: filename = os.path.basename(l) mbox_dt = self._parse_date_from_filepath(filename) if ((from_date.year == mbox_dt.year and from_date.month == mbox_dt.month) or from_date < mbox_dt): filepath = os.path.join(self.dirpath, filename) success = self._download_archive(l, filepath) if success: fetched.append((l, filepath)) logger.info("%s/%s MBoxes downloaded", len(fetched), len(links)) return fetched
Get the mboxes managed by this mailing list.
def mboxes(self):
    """Get the mboxes managed by this mailing list.

    Returns the archives sorted by date in ascending order.

    :returns: a list of `.MBoxArchive` objects
    """
    archives = []

    for mbox in super().mboxes:
        dt = self._parse_date_from_filepath(mbox.filepath)
        archives.append((dt, mbox))

    archives.sort(key=lambda x: x[0])

    return [a[1] for a in archives]
Fetch the entries from the url.
def fetch(self, category=CATEGORY_ENTRY):
    """Fetch the entries from the url.

    The method retrieves all entries from a RSS url

    :param category: the category of items to fetch

    :returns: a generator of entries
    """
    kwargs = {}
    items = super().fetch(category, **kwargs)

    return items
Fetch the entries
def fetch_items(self, category, **kwargs):
    """Fetch the entries

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    logger.info("Looking for rss entries at feed '%s'", self.url)

    nentries = 0  # number of entries

    raw_entries = self.client.get_entries()
    entries = self.parse_feed(raw_entries)['entries']

    for item in entries:
        yield item
        nentries += 1

    logger.info("Total number of entries: %i", nentries)
Init client
def _init_client(self, from_archive=False):
    """Init client"""
    return RSSClient(self.url, self.archive, from_archive)
Returns the RSS argument parser.
def setup_cmd_parser(cls):
    """Returns the RSS argument parser."""
    parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,
                                          archive=True)

    # Required arguments
    parser.parser.add_argument('url',
                               help="URL of the RSS feed")

    return parser
Fetch the bugs from the repository.
def fetch(self, category=CATEGORY_BUG, from_date=DEFAULT_DATETIME):
    """Fetch the bugs from the repository.

    The method retrieves, from a Bugzilla repository, the bugs
    updated since the given date.

    :param category: the category of items to fetch
    :param from_date: obtain bugs updated since this date

    :returns: a generator of bugs
    """
    if not from_date:
        from_date = DEFAULT_DATETIME

    kwargs = {'from_date': from_date}
    items = super().fetch(category, **kwargs)

    return items
Fetch the bugs
def fetch_items(self, category, **kwargs):
    """Fetch the bugs

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    from_date = kwargs['from_date']

    logger.info("Looking for bugs: '%s' updated from '%s'",
                self.url, str(from_date))

    nbugs = 0
    for bug in self.__fetch_and_parse_bugs(from_date):
        nbugs += 1
        yield bug

    logger.info("Fetch process completed: %s bugs fetched", nbugs)
Init client
def _init_client(self, from_archive=False):
    """Init client"""
    return BugzillaRESTClient(self.url, user=self.user, password=self.password,
                              api_token=self.api_token, archive=self.archive,
                              from_archive=from_archive)
Authenticate a user in the server.
def login(self, user, password): """Authenticate a user in the server. :param user: Bugzilla user :param password: user password """ params = { self.PBUGZILLA_LOGIN: user, self.PBUGZILLA_PASSWORD: password } try: r = self.call(self.RLOGIN, params) except requests.exceptions.HTTPError as e: cause = ("Bugzilla REST client could not authenticate user %s. " "See exception: %s") % (user, str(e)) raise BackendError(cause=cause) data = json.loads(r) self.api_token = data['token']
Get the information of a list of bugs.
def bugs(self, from_date=DEFAULT_DATETIME, offset=None, max_bugs=MAX_BUGS): """Get the information of a list of bugs. :param from_date: retrieve bugs that where updated from that date; dates are converted to UTC :param offset: starting position for the search; i.e to return 11th element, set this value to 10. :param max_bugs: maximum number of bugs to reteurn per query """ date = datetime_to_utc(from_date) date = date.strftime("%Y-%m-%dT%H:%M:%SZ") params = { self.PLAST_CHANGE_TIME: date, self.PLIMIT: max_bugs, self.PORDER: self.VCHANGE_DATE_ORDER, self.PINCLUDE_FIELDS: self.VINCLUDE_ALL } if offset: params[self.POFFSET] = offset response = self.call(self.RBUG, params) return response
Get the comments of the given bugs.
def comments(self, *bug_ids):
    """Get the comments of the given bugs.

    :param bug_ids: list of bug identifiers
    """
    # Hack. The first value must be a valid bug id
    resource = urijoin(self.RBUG, bug_ids[0], self.RCOMMENT)

    params = {
        self.PIDS: bug_ids
    }

    response = self.call(resource, params)

    return response
Get the history of the given bugs.
def history(self, *bug_ids):
    """Get the history of the given bugs.

    :param bug_ids: list of bug identifiers
    """
    resource = urijoin(self.RBUG, bug_ids[0], self.RHISTORY)

    params = {
        self.PIDS: bug_ids
    }

    response = self.call(resource, params)

    return response
Get the attachments of the given bugs.
def attachments(self, *bug_ids):
    """Get the attachments of the given bugs.

    :param bug_id: list of bug identifiers
    """
    resource = urijoin(self.RBUG, bug_ids[0], self.RATTACHMENT)

    params = {
        self.PIDS: bug_ids,
        self.PEXCLUDE_FIELDS: self.VEXCLUDE_ATTCH_DATA
    }

    response = self.call(resource, params)

    return response
Retrive the given resource.
def call(self, resource, params): """Retrive the given resource. :param resource: resource to retrieve :param params: dict with the HTTP parameters needed to retrieve the given resource :raises BugzillaRESTError: raised when an error is returned by the server """ url = self.URL % {'base': self.base_url, 'resource': resource} if self.api_token: params[self.PBUGZILLA_TOKEN] = self.api_token logger.debug("Bugzilla REST client requests: %s params: %s", resource, str(params)) r = self.fetch(url, payload=params) # Check for possible Bugzilla API errors result = r.json() if result.get('error', False): raise BugzillaRESTError(error=result['message'], code=result['code']) return r.text
Sanitize payload of a HTTP request by removing the login, password and token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload):
    """Sanitize payload of a HTTP request by removing the login, password and
    token information before storing/retrieving archived items

    :param: url: HTTP url request
    :param: headers: HTTP headers request
    :param: payload: HTTP payload request

    :returns url, headers and the sanitized payload
    """
    if BugzillaRESTClient.PBUGZILLA_LOGIN in payload:
        payload.pop(BugzillaRESTClient.PBUGZILLA_LOGIN)

    if BugzillaRESTClient.PBUGZILLA_PASSWORD in payload:
        payload.pop(BugzillaRESTClient.PBUGZILLA_PASSWORD)

    if BugzillaRESTClient.PBUGZILLA_TOKEN in payload:
        payload.pop(BugzillaRESTClient.PBUGZILLA_TOKEN)

    return url, headers, payload
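A quick sketch of the effect, assuming the PBUGZILLA_* constants map to the usual Bugzilla parameter names 'login', 'password' and 'token':

payload = {'login': 'jdoe', 'password': 'secret', 'token': 'abcd', 'limit': 500}
for secret_param in ('login', 'password', 'token'):  # assumed constant values
    payload.pop(secret_param, None)
print(payload)  # {'limit': 500}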
Fetch the items (issues or merge_requests)
def fetch_items(self, category, **kwargs):
    """Fetch the items (issues or merge_requests)

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    from_date = kwargs['from_date']

    if category == CATEGORY_ISSUE:
        items = self.__fetch_issues(from_date)
    else:
        items = self.__fetch_merge_requests(from_date)

    return items
Fetch the issues
def __fetch_issues(self, from_date): """Fetch the issues""" issues_groups = self.client.issues(from_date=from_date) for raw_issues in issues_groups: issues = json.loads(raw_issues) for issue in issues: issue_id = issue['iid'] if self.blacklist_ids and issue_id in self.blacklist_ids: logger.warning("Skipping blacklisted issue %s", issue_id) continue self.__init_issue_extra_fields(issue) issue['notes_data'] = \ self.__get_issue_notes(issue_id) issue['award_emoji_data'] = \ self.__get_award_emoji(GitLabClient.ISSUES, issue_id) yield issue
Get issue notes
def __get_issue_notes(self, issue_id):
    """Get issue notes"""
    notes = []

    group_notes = self.client.notes(GitLabClient.ISSUES, issue_id)

    for raw_notes in group_notes:
        for note in json.loads(raw_notes):
            note_id = note['id']
            note['award_emoji_data'] = \
                self.__get_note_award_emoji(GitLabClient.ISSUES, issue_id, note_id)
            notes.append(note)

    return notes
Fetch the merge requests
def __fetch_merge_requests(self, from_date): """Fetch the merge requests""" merges_groups = self.client.merges(from_date=from_date) for raw_merges in merges_groups: merges = json.loads(raw_merges) for merge in merges: merge_id = merge['iid'] if self.blacklist_ids and merge_id in self.blacklist_ids: logger.warning("Skipping blacklisted merge request %s", merge_id) continue # The single merge_request API call returns a more # complete merge request, thus we inflate it with # other data (e.g., notes, emojis, versions) merge_full_raw = self.client.merge(merge_id) merge_full = json.loads(merge_full_raw) self.__init_merge_extra_fields(merge_full) merge_full['notes_data'] = self.__get_merge_notes(merge_id) merge_full['award_emoji_data'] = self.__get_award_emoji(GitLabClient.MERGES, merge_id) merge_full['versions_data'] = self.__get_merge_versions(merge_id) yield merge_full
Get merge notes
def __get_merge_notes(self, merge_id):
    """Get merge notes"""
    notes = []

    group_notes = self.client.notes(GitLabClient.MERGES, merge_id)

    for raw_notes in group_notes:
        for note in json.loads(raw_notes):
            note_id = note['id']
            note['award_emoji_data'] = \
                self.__get_note_award_emoji(GitLabClient.MERGES, merge_id, note_id)
            notes.append(note)

    return notes
Get merge versions
def __get_merge_versions(self, merge_id):
    """Get merge versions"""
    versions = []

    group_versions = self.client.merge_versions(merge_id)

    for raw_versions in group_versions:
        for version in json.loads(raw_versions):
            version_id = version['id']
            version_full_raw = self.client.merge_version(merge_id, version_id)
            version_full = json.loads(version_full_raw)
            version_full.pop('diffs', None)
            versions.append(version_full)

    return versions