Dataset schema:

Column            Type    Stats
partition         string  3 classes
func_name         string  length 1–134
docstring         string  length 1–46.9k
path              string  length 4–223
original_string   string  length 75–104k
code              string  length 75–104k
docstring_tokens  list    length 1–1.97k
repo              string  length 7–55
language          string  1 class
url               string  length 87–315
code_tokens       list    length 19–28.4k
sha               string  length 40–40
valid
delete_subnet
Delete a subnet.

:param context: neutron api request context
:param id: UUID representing the subnet to delete.
quark/plugin_modules/subnets.py
def delete_subnet(context, id):
    """Delete a subnet.

    :param context: neutron api request context
    :param id: UUID representing the subnet to delete.
    """
    LOG.info("delete_subnet %s for tenant %s" % (id, context.tenant_id))
    with context.session.begin():
        subnet = db_api.subnet_find(context, id=id, scope=db_api.ONE)
        if not subnet:
            raise n_exc.SubnetNotFound(subnet_id=id)

        if not context.is_admin:
            if STRATEGY.is_provider_network(subnet.network_id):
                if subnet.tenant_id == context.tenant_id:
                    # A tenant can't delete subnets on provider network
                    raise n_exc.NotAuthorized(subnet_id=id)
                else:
                    # Raise a NotFound here because the foreign tenant
                    # does not have to know about other tenant's subnet
                    # existence.
                    raise n_exc.SubnetNotFound(subnet_id=id)

        _delete_subnet(context, subnet)
[ "Delete", "a", "subnet", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/subnets.py#L454-L477
[ "def", "delete_subnet", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "\"delete_subnet %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "subnet", "=", "db_api", ".", "subnet_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "subnet", ":", "raise", "n_exc", ".", "SubnetNotFound", "(", "subnet_id", "=", "id", ")", "if", "not", "context", ".", "is_admin", ":", "if", "STRATEGY", ".", "is_provider_network", "(", "subnet", ".", "network_id", ")", ":", "if", "subnet", ".", "tenant_id", "==", "context", ".", "tenant_id", ":", "# A tenant can't delete subnets on provider network", "raise", "n_exc", ".", "NotAuthorized", "(", "subnet_id", "=", "id", ")", "else", ":", "# Raise a NotFound here because the foreign tenant", "# does not have to know about other tenant's subnet", "# existence.", "raise", "n_exc", ".", "SubnetNotFound", "(", "subnet_id", "=", "id", ")", "_delete_subnet", "(", "context", ",", "subnet", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Mac_address_ranges.get_resources
Returns Ext Resources.
quark/api/extensions/mac_address_ranges.py
def get_resources(cls):
    """Returns Ext Resources."""
    plugin = directory.get_plugin()
    controller = MacAddressRangesController(plugin)
    return [extensions.ResourceExtension(Mac_address_ranges.get_alias(),
                                         controller)]
[ "Returns", "Ext", "Resources", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/api/extensions/mac_address_ranges.py#L103-L108
[ "def", "get_resources", "(", "cls", ")", ":", "plugin", "=", "directory", ".", "get_plugin", "(", ")", "controller", "=", "MacAddressRangesController", "(", "plugin", ")", "return", "[", "extensions", ".", "ResourceExtension", "(", "Mac_address_ranges", ".", "get_alias", "(", ")", ",", "controller", ")", "]" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
_filter_update_security_group_rule
Only two fields are allowed for modification: external_service and external_service_id
quark/plugin_modules/security_groups.py
def _filter_update_security_group_rule(rule):
    '''Only two fields are allowed for modification:

    external_service and external_service_id
    '''
    allowed = ['external_service', 'external_service_id']
    filtered = {}
    for k, val in rule.iteritems():
        if k in allowed:
            if isinstance(val, basestring) and \
                    len(val) <= GROUP_NAME_MAX_LENGTH:
                filtered[k] = val
    return filtered
[ "Only", "two", "fields", "are", "allowed", "for", "modification", ":" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/security_groups.py#L80-L92
[ "def", "_filter_update_security_group_rule", "(", "rule", ")", ":", "allowed", "=", "[", "'external_service'", ",", "'external_service_id'", "]", "filtered", "=", "{", "}", "for", "k", ",", "val", "in", "rule", ".", "iteritems", "(", ")", ":", "if", "k", "in", "allowed", ":", "if", "isinstance", "(", "val", ",", "basestring", ")", "and", "len", "(", "val", ")", "<=", "GROUP_NAME_MAX_LENGTH", ":", "filtered", "[", "k", "]", "=", "val", "return", "filtered" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
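A quick sketch of the filter's behavior on a hypothetical update body (the function is Python 2 code, hence the iteritems and basestring calls above):

rule = {
    "external_service": "my-service",   # in the allowed list, short enough: kept
    "external_service_id": "svc-123",   # in the allowed list: kept
    "direction": "ingress",             # not an updatable field: dropped
}
# _filter_update_security_group_rule(rule) returns:
# {"external_service": "my-service", "external_service_id": "svc-123"}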
valid
_perform_async_update_rule
Updates a SG rule async and return the job information. Only happens if the security group has associated ports. If the async connection fails the update continues (legacy mode).
quark/plugin_modules/security_groups.py
def _perform_async_update_rule(context, id, db_sg_group, rule_id, action):
    """Updates a SG rule async and return the job information.

    Only happens if the security group has associated ports. If the
    async connection fails the update continues (legacy mode).
    """
    rpc_reply = None
    sg_rpc = sg_rpc_api.QuarkSGAsyncProcessClient()
    ports = db_api.sg_gather_associated_ports(context, db_sg_group)
    if len(ports) > 0:
        rpc_reply = sg_rpc.start_update(context, id, rule_id, action)
        if rpc_reply:
            job_id = rpc_reply['job_id']
            job_api.add_job_to_context(context, job_id)
        else:
            LOG.error("Async update failed. Is the worker running?")
[ "Updates", "a", "SG", "rule", "async", "and", "return", "the", "job", "information", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/security_groups.py#L176-L191
[ "def", "_perform_async_update_rule", "(", "context", ",", "id", ",", "db_sg_group", ",", "rule_id", ",", "action", ")", ":", "rpc_reply", "=", "None", "sg_rpc", "=", "sg_rpc_api", ".", "QuarkSGAsyncProcessClient", "(", ")", "ports", "=", "db_api", ".", "sg_gather_associated_ports", "(", "context", ",", "db_sg_group", ")", "if", "len", "(", "ports", ")", ">", "0", ":", "rpc_reply", "=", "sg_rpc", ".", "start_update", "(", "context", ",", "id", ",", "rule_id", ",", "action", ")", "if", "rpc_reply", ":", "job_id", "=", "rpc_reply", "[", "'job_id'", "]", "job_api", ".", "add_job_to_context", "(", "context", ",", "job_id", ")", "else", ":", "LOG", ".", "error", "(", "\"Async update failed. Is the worker running?\"", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
create_security_group_rule
Creates a rule and updates the ports (async) if enabled.
quark/plugin_modules/security_groups.py
def create_security_group_rule(context, security_group_rule):
    """Creates a rule and updates the ports (async) if enabled."""
    LOG.info("create_security_group for tenant %s" % (context.tenant_id))
    with context.session.begin():
        rule = _validate_security_group_rule(
            context, security_group_rule["security_group_rule"])
        rule["id"] = uuidutils.generate_uuid()

        group_id = rule["security_group_id"]
        group = db_api.security_group_find(context, id=group_id,
                                           scope=db_api.ONE)
        if not group:
            raise sg_ext.SecurityGroupNotFound(id=group_id)

        quota.QUOTAS.limit_check(
            context, context.tenant_id,
            security_rules_per_group=len(group.get("rules", [])) + 1)

        new_rule = db_api.security_group_rule_create(context, **rule)
    if group:
        _perform_async_update_rule(context, group_id, group, new_rule.id,
                                   RULE_CREATE)
    return v._make_security_group_rule_dict(new_rule)
[ "Creates", "a", "rule", "and", "updates", "the", "ports", "(", "async", ")", "if", "enabled", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/security_groups.py#L194-L217
[ "def", "create_security_group_rule", "(", "context", ",", "security_group_rule", ")", ":", "LOG", ".", "info", "(", "\"create_security_group for tenant %s\"", "%", "(", "context", ".", "tenant_id", ")", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "rule", "=", "_validate_security_group_rule", "(", "context", ",", "security_group_rule", "[", "\"security_group_rule\"", "]", ")", "rule", "[", "\"id\"", "]", "=", "uuidutils", ".", "generate_uuid", "(", ")", "group_id", "=", "rule", "[", "\"security_group_id\"", "]", "group", "=", "db_api", ".", "security_group_find", "(", "context", ",", "id", "=", "group_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "group", ":", "raise", "sg_ext", ".", "SecurityGroupNotFound", "(", "id", "=", "group_id", ")", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "security_rules_per_group", "=", "len", "(", "group", ".", "get", "(", "\"rules\"", ",", "[", "]", ")", ")", "+", "1", ")", "new_rule", "=", "db_api", ".", "security_group_rule_create", "(", "context", ",", "*", "*", "rule", ")", "if", "group", ":", "_perform_async_update_rule", "(", "context", ",", "group_id", ",", "group", ",", "new_rule", ".", "id", ",", "RULE_CREATE", ")", "return", "v", ".", "_make_security_group_rule_dict", "(", "new_rule", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
update_security_group_rule
Updates a rule and updates the ports
quark/plugin_modules/security_groups.py
def update_security_group_rule(context, id, security_group_rule):
    '''Updates a rule and updates the ports'''
    LOG.info("update_security_group_rule for tenant %s" %
             (context.tenant_id))
    new_rule = security_group_rule["security_group_rule"]
    # Only allow updatable fields
    new_rule = _filter_update_security_group_rule(new_rule)

    with context.session.begin():
        rule = db_api.security_group_rule_find(context, id=id,
                                               scope=db_api.ONE)
        if not rule:
            raise sg_ext.SecurityGroupRuleNotFound(id=id)

        db_rule = db_api.security_group_rule_update(context, rule,
                                                    **new_rule)
        group_id = db_rule.group_id
        group = db_api.security_group_find(context, id=group_id,
                                           scope=db_api.ONE)
        if not group:
            raise sg_ext.SecurityGroupNotFound(id=group_id)

    if group:
        _perform_async_update_rule(context, group_id, group, rule.id,
                                   RULE_UPDATE)

    return v._make_security_group_rule_dict(db_rule)
[ "Updates", "a", "rule", "and", "updates", "the", "ports" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/security_groups.py#L220-L246
[ "def", "update_security_group_rule", "(", "context", ",", "id", ",", "security_group_rule", ")", ":", "LOG", ".", "info", "(", "\"update_security_group_rule for tenant %s\"", "%", "(", "context", ".", "tenant_id", ")", ")", "new_rule", "=", "security_group_rule", "[", "\"security_group_rule\"", "]", "# Only allow updatable fields", "new_rule", "=", "_filter_update_security_group_rule", "(", "new_rule", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "rule", "=", "db_api", ".", "security_group_rule_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "rule", ":", "raise", "sg_ext", ".", "SecurityGroupRuleNotFound", "(", "id", "=", "id", ")", "db_rule", "=", "db_api", ".", "security_group_rule_update", "(", "context", ",", "rule", ",", "*", "*", "new_rule", ")", "group_id", "=", "db_rule", ".", "group_id", "group", "=", "db_api", ".", "security_group_find", "(", "context", ",", "id", "=", "group_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "group", ":", "raise", "sg_ext", ".", "SecurityGroupNotFound", "(", "id", "=", "group_id", ")", "if", "group", ":", "_perform_async_update_rule", "(", "context", ",", "group_id", ",", "group", ",", "rule", ".", "id", ",", "RULE_UPDATE", ")", "return", "v", ".", "_make_security_group_rule_dict", "(", "db_rule", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
delete_security_group_rule
Deletes a rule and updates the ports (async) if enabled.
quark/plugin_modules/security_groups.py
def delete_security_group_rule(context, id):
    """Deletes a rule and updates the ports (async) if enabled."""
    LOG.info("delete_security_group %s for tenant %s" %
             (id, context.tenant_id))
    with context.session.begin():
        rule = db_api.security_group_rule_find(context, id=id,
                                               scope=db_api.ONE)
        if not rule:
            raise sg_ext.SecurityGroupRuleNotFound(id=id)

        group = db_api.security_group_find(context, id=rule["group_id"],
                                           scope=db_api.ONE)
        if not group:
            raise sg_ext.SecurityGroupNotFound(id=id)

        rule["id"] = id
        db_api.security_group_rule_delete(context, rule)
    if group:
        _perform_async_update_rule(context, group.id, group, id,
                                   RULE_DELETE)
[ "Deletes", "a", "rule", "and", "updates", "the", "ports", "(", "async", ")", "if", "enabled", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/security_groups.py#L259-L277
[ "def", "delete_security_group_rule", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "\"delete_security_group %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "rule", "=", "db_api", ".", "security_group_rule_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "rule", ":", "raise", "sg_ext", ".", "SecurityGroupRuleNotFound", "(", "id", "=", "id", ")", "group", "=", "db_api", ".", "security_group_find", "(", "context", ",", "id", "=", "rule", "[", "\"group_id\"", "]", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "group", ":", "raise", "sg_ext", ".", "SecurityGroupNotFound", "(", "id", "=", "id", ")", "rule", "[", "\"id\"", "]", "=", "id", "db_api", ".", "security_group_rule_delete", "(", "context", ",", "rule", ")", "if", "group", ":", "_perform_async_update_rule", "(", "context", ",", "group", ".", "id", ",", "group", ",", "id", ",", "RULE_DELETE", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_data
Returns combined list of tuples: [(table, column)]. List is built, based on retrieved tables, where column with name ``tenant_id`` exists.
quark/db/migration/alembic/versions/79b768afed65_rename_tenant_id_indexes.py
def get_data():
    """Returns combined list of tuples: [(table, column)].

    List is built, based on retrieved tables, where column with name
    ``tenant_id`` exists.
    """
    output = []
    tables = get_tables()
    for table in tables:
        try:
            columns = get_columns(table)
        except sa.exc.NoSuchTableError:
            continue
        for column in columns:
            if column['name'] == 'tenant_id':
                output.append((table, column))
    return output
[ "Returns", "combined", "list", "of", "tuples", ":", "[", "(", "table", "column", ")", "]", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/db/migration/alembic/versions/79b768afed65_rename_tenant_id_indexes.py#L93-L112
[ "def", "get_data", "(", ")", ":", "output", "=", "[", "]", "tables", "=", "get_tables", "(", ")", "for", "table", "in", "tables", ":", "try", ":", "columns", "=", "get_columns", "(", "table", ")", "except", "sa", ".", "exc", ".", "NoSuchTableError", ":", "continue", "for", "column", "in", "columns", ":", "if", "column", "[", "'name'", "]", "==", "'tenant_id'", ":", "output", ".", "append", "(", "(", "table", ",", "column", ")", ")", "return", "output" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
JSONStrategy.get_public_net_id
Returns the public net id
quark/network_strategy.py
def get_public_net_id(self):
    """Returns the public net id"""
    for id, net_params in self.strategy.iteritems():
        if id == CONF.QUARK.public_net_id:
            return id
    return None
[ "Returns", "the", "public", "net", "id" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/network_strategy.py#L104-L109
[ "def", "get_public_net_id", "(", "self", ")", ":", "for", "id", ",", "net_params", "in", "self", ".", "strategy", ".", "iteritems", "(", ")", ":", "if", "id", "==", "CONF", ".", "QUARK", ".", "public_net_id", ":", "return", "id", "return", "None" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
opt_args_decorator
A decorator to be used on another decorator

This is done to allow separate handling on the basis of argument values
quark/utils.py
def opt_args_decorator(func):
    """A decorator to be used on another decorator

    This is done to allow separate handling on the basis of argument
    values
    """
    @wraps(func)
    def wrapped_dec(*args, **kwargs):
        if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
            # actual decorated function
            return func(args[0])
        else:
            # decorator arguments
            return lambda realf: func(realf, *args, **kwargs)
    return wrapped_dec
[ "A", "decorator", "to", "be", "used", "on", "another", "decorator" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/utils.py#L82-L96
[ "def", "opt_args_decorator", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapped_dec", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "==", "1", "and", "len", "(", "kwargs", ")", "==", "0", "and", "callable", "(", "args", "[", "0", "]", ")", ":", "# actual decorated function", "return", "func", "(", "args", "[", "0", "]", ")", "else", ":", "# decorator arguments", "return", "lambda", "realf", ":", "func", "(", "realf", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped_dec" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
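Because wrapped_dec distinguishes a lone callable argument from decorator arguments, a decorator built with opt_args_decorator works both bare and parameterized. A minimal usage sketch (the retry decorator here is hypothetical, written to fit the (func, *args, **kwargs) shape the helper expects):

from functools import wraps

@opt_args_decorator
def retry(func, times=1):
    @wraps(func)
    def wrapper(*args, **kwargs):
        result = None
        for _ in range(times):
            result = func(*args, **kwargs)
        return result
    return wrapper

@retry           # bare form: wrapped_dec sees a single callable and wraps it directly
def ping():
    return "pong"

@retry(times=3)  # argument form: wrapped_dec returns lambda realf: func(realf, times=3)
def ping3():
    return "pong"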
valid
Plugin._fix_missing_tenant_id
Will add the tenant_id to the context from body. It is assumed that the body must have a tenant_id because neutron core could never have gotten here otherwise.
quark/plugin.py
def _fix_missing_tenant_id(self, context, body, key):
    """Will add the tenant_id to the context from body.

    It is assumed that the body must have a tenant_id because neutron
    core could never have gotten here otherwise.
    """
    if not body:
        raise n_exc.BadRequest(resource=key, msg="Body malformed")
    resource = body.get(key)
    if not resource:
        raise n_exc.BadRequest(resource=key, msg="Body malformed")
    if context.tenant_id is None:
        context.tenant_id = resource.get("tenant_id")
    if context.tenant_id is None:
        msg = _("Running without keystone AuthN requires "
                "that tenant_id is specified")
        raise n_exc.BadRequest(resource=key, msg=msg)
[ "Will", "add", "the", "tenant_id", "to", "the", "context", "from", "body", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin.py#L143-L161
[ "def", "_fix_missing_tenant_id", "(", "self", ",", "context", ",", "body", ",", "key", ")", ":", "if", "not", "body", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "key", ",", "msg", "=", "\"Body malformed\"", ")", "resource", "=", "body", ".", "get", "(", "key", ")", "if", "not", "resource", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "key", ",", "msg", "=", "\"Body malformed\"", ")", "if", "context", ".", "tenant_id", "is", "None", ":", "context", ".", "tenant_id", "=", "resource", ".", "get", "(", "\"tenant_id\"", ")", "if", "context", ".", "tenant_id", "is", "None", ":", "msg", "=", "_", "(", "\"Running without keystone AuthN requires \"", "\"that tenant_id is specified\"", ")", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "key", ",", "msg", "=", "msg", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
AllocationPools._validate_allocation_pools
Validate IP allocation pools. Verify start and end address for each allocation pool are valid, ie: constituted by valid and appropriately ordered IP addresses. Also, verify pools do not overlap among themselves. Finally, verify that each range fall within the subnet's CIDR.
quark/allocation_pool.py
def _validate_allocation_pools(self):
    """Validate IP allocation pools.

    Verify start and end address for each allocation pool are valid,
    ie: constituted by valid and appropriately ordered IP addresses.
    Also, verify pools do not overlap among themselves.
    Finally, verify that each range fall within the subnet's CIDR.
    """
    ip_pools = self._alloc_pools
    subnet_cidr = self._subnet_cidr

    LOG.debug(_("Performing IP validity checks on allocation pools"))
    ip_sets = []
    for ip_pool in ip_pools:
        try:
            start_ip = netaddr.IPAddress(ip_pool['start'])
            end_ip = netaddr.IPAddress(ip_pool['end'])
        except netaddr.AddrFormatError:
            LOG.info(_("Found invalid IP address in pool: "
                       "%(start)s - %(end)s:"),
                     {'start': ip_pool['start'],
                      'end': ip_pool['end']})
            raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)
        if (start_ip.version != self._subnet_cidr.version or
                end_ip.version != self._subnet_cidr.version):
            LOG.info(_("Specified IP addresses do not match "
                       "the subnet IP version"))
            raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)
        if end_ip < start_ip:
            LOG.info(_("Start IP (%(start)s) is greater than end IP "
                       "(%(end)s)"),
                     {'start': ip_pool['start'],
                      'end': ip_pool['end']})
            raise n_exc_ext.InvalidAllocationPool(pool=ip_pool)
        if (start_ip < self._subnet_first_ip or
                end_ip > self._subnet_last_ip):
            LOG.info(_("Found pool larger than subnet "
                       "CIDR:%(start)s - %(end)s"),
                     {'start': ip_pool['start'],
                      'end': ip_pool['end']})
            raise n_exc_ext.OutOfBoundsAllocationPool(
                pool=ip_pool,
                subnet_cidr=subnet_cidr)
        # Valid allocation pool
        # Create an IPSet for it for easily verifying overlaps
        ip_sets.append(netaddr.IPSet(netaddr.IPRange(
            ip_pool['start'], ip_pool['end']).cidrs()))

    LOG.debug(_("Checking for overlaps among allocation pools "
                "and gateway ip"))
    ip_ranges = ip_pools[:]
    # Use integer cursors as an efficient way for implementing
    # comparison and avoiding comparing the same pair twice
    for l_cursor in xrange(len(ip_sets)):
        for r_cursor in xrange(l_cursor + 1, len(ip_sets)):
            if ip_sets[l_cursor] & ip_sets[r_cursor]:
                l_range = ip_ranges[l_cursor]
                r_range = ip_ranges[r_cursor]
                LOG.info(_("Found overlapping ranges: %(l_range)s and "
                           "%(r_range)s"),
                         {'l_range': l_range, 'r_range': r_range})
                raise n_exc_ext.OverlappingAllocationPools(
                    pool_1=l_range,
                    pool_2=r_range,
                    subnet_cidr=subnet_cidr)
[ "Validate", "IP", "allocation", "pools", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/allocation_pool.py#L47-L112
[ "def", "_validate_allocation_pools", "(", "self", ")", ":", "ip_pools", "=", "self", ".", "_alloc_pools", "subnet_cidr", "=", "self", ".", "_subnet_cidr", "LOG", ".", "debug", "(", "_", "(", "\"Performing IP validity checks on allocation pools\"", ")", ")", "ip_sets", "=", "[", "]", "for", "ip_pool", "in", "ip_pools", ":", "try", ":", "start_ip", "=", "netaddr", ".", "IPAddress", "(", "ip_pool", "[", "'start'", "]", ")", "end_ip", "=", "netaddr", ".", "IPAddress", "(", "ip_pool", "[", "'end'", "]", ")", "except", "netaddr", ".", "AddrFormatError", ":", "LOG", ".", "info", "(", "_", "(", "\"Found invalid IP address in pool: \"", "\"%(start)s - %(end)s:\"", ")", ",", "{", "'start'", ":", "ip_pool", "[", "'start'", "]", ",", "'end'", ":", "ip_pool", "[", "'end'", "]", "}", ")", "raise", "n_exc_ext", ".", "InvalidAllocationPool", "(", "pool", "=", "ip_pool", ")", "if", "(", "start_ip", ".", "version", "!=", "self", ".", "_subnet_cidr", ".", "version", "or", "end_ip", ".", "version", "!=", "self", ".", "_subnet_cidr", ".", "version", ")", ":", "LOG", ".", "info", "(", "_", "(", "\"Specified IP addresses do not match \"", "\"the subnet IP version\"", ")", ")", "raise", "n_exc_ext", ".", "InvalidAllocationPool", "(", "pool", "=", "ip_pool", ")", "if", "end_ip", "<", "start_ip", ":", "LOG", ".", "info", "(", "_", "(", "\"Start IP (%(start)s) is greater than end IP \"", "\"(%(end)s)\"", ")", ",", "{", "'start'", ":", "ip_pool", "[", "'start'", "]", ",", "'end'", ":", "ip_pool", "[", "'end'", "]", "}", ")", "raise", "n_exc_ext", ".", "InvalidAllocationPool", "(", "pool", "=", "ip_pool", ")", "if", "(", "start_ip", "<", "self", ".", "_subnet_first_ip", "or", "end_ip", ">", "self", ".", "_subnet_last_ip", ")", ":", "LOG", ".", "info", "(", "_", "(", "\"Found pool larger than subnet \"", "\"CIDR:%(start)s - %(end)s\"", ")", ",", "{", "'start'", ":", "ip_pool", "[", "'start'", "]", ",", "'end'", ":", "ip_pool", "[", "'end'", "]", "}", ")", "raise", "n_exc_ext", ".", "OutOfBoundsAllocationPool", "(", "pool", "=", "ip_pool", ",", "subnet_cidr", "=", "subnet_cidr", ")", "# Valid allocation pool", "# Create an IPSet for it for easily verifying overlaps", "ip_sets", ".", "append", "(", "netaddr", ".", "IPSet", "(", "netaddr", ".", "IPRange", "(", "ip_pool", "[", "'start'", "]", ",", "ip_pool", "[", "'end'", "]", ")", ".", "cidrs", "(", ")", ")", ")", "LOG", ".", "debug", "(", "_", "(", "\"Checking for overlaps among allocation pools \"", "\"and gateway ip\"", ")", ")", "ip_ranges", "=", "ip_pools", "[", ":", "]", "# Use integer cursors as an efficient way for implementing", "# comparison and avoiding comparing the same pair twice", "for", "l_cursor", "in", "xrange", "(", "len", "(", "ip_sets", ")", ")", ":", "for", "r_cursor", "in", "xrange", "(", "l_cursor", "+", "1", ",", "len", "(", "ip_sets", ")", ")", ":", "if", "ip_sets", "[", "l_cursor", "]", "&", "ip_sets", "[", "r_cursor", "]", ":", "l_range", "=", "ip_ranges", "[", "l_cursor", "]", "r_range", "=", "ip_ranges", "[", "r_cursor", "]", "LOG", ".", "info", "(", "_", "(", "\"Found overlapping ranges: %(l_range)s and \"", "\"%(r_range)s\"", ")", ",", "{", "'l_range'", ":", "l_range", ",", "'r_range'", ":", "r_range", "}", ")", "raise", "n_exc_ext", ".", "OverlappingAllocationPools", "(", "pool_1", "=", "l_range", ",", "pool_2", "=", "r_range", ",", "subnet_cidr", "=", "subnet_cidr", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
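The overlap check above boils down to netaddr set intersection. A standalone sketch of the same idea, with illustrative pool values:

import netaddr

pool_a = netaddr.IPSet(
    netaddr.IPRange("192.168.0.10", "192.168.0.50").cidrs())
pool_b = netaddr.IPSet(
    netaddr.IPRange("192.168.0.40", "192.168.0.80").cidrs())

# A truthy intersection is exactly what triggers
# OverlappingAllocationPools in the validator.
print(bool(pool_a & pool_b))  # True: .40 through .50 is shared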
valid
add_job_to_context
Adds job to neutron context for use later.
quark/plugin_modules/jobs.py
def add_job_to_context(context, job_id):
    """Adds job to neutron context for use later."""
    db_job = db_api.async_transaction_find(
        context, id=job_id, scope=db_api.ONE)
    if not db_job:
        return
    context.async_job = {"job": v._make_job_dict(db_job)}
[ "Adds", "job", "to", "neutron", "context", "for", "use", "later", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/jobs.py#L28-L34
[ "def", "add_job_to_context", "(", "context", ",", "job_id", ")", ":", "db_job", "=", "db_api", ".", "async_transaction_find", "(", "context", ",", "id", "=", "job_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "db_job", ":", "return", "context", ".", "async_job", "=", "{", "\"job\"", ":", "v", ".", "_make_job_dict", "(", "db_job", ")", "}" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
create_job
Creates a job with support for subjobs.

If parent_id is not in the body:
* the job is considered a parent job
* it will have a NULL transaction id
* its transaction id == its id
* all subjobs will use its transaction id as theirs

Else:
* the job is a sub job
* the parent id is the id passed in
* the transaction id is the root of the job tree
quark/plugin_modules/jobs.py
def create_job(context, body):
    """Creates a job with support for subjobs.

    If parent_id is not in the body:
    * the job is considered a parent job
    * it will have a NULL transaction id
    * its transaction id == its id
    * all subjobs will use its transaction id as theirs

    Else:
    * the job is a sub job
    * the parent id is the id passed in
    * the transaction id is the root of the job tree
    """
    LOG.info("create_job for tenant %s" % context.tenant_id)
    if not context.is_admin:
        raise n_exc.NotAuthorized()
    job = body.get('job')
    if 'parent_id' in job:
        parent_id = job['parent_id']
        if not parent_id:
            raise q_exc.JobNotFound(job_id=parent_id)
        parent_job = db_api.async_transaction_find(
            context, id=parent_id, scope=db_api.ONE)
        if not parent_job:
            raise q_exc.JobNotFound(job_id=parent_id)
        tid = parent_id
        if parent_job.get('transaction_id'):
            tid = parent_job.get('transaction_id')
        job['transaction_id'] = tid
    if not job:
        raise n_exc.BadRequest(resource="job", msg="Invalid request body.")
    with context.session.begin(subtransactions=True):
        new_job = db_api.async_transaction_create(context, **job)
    return v._make_job_dict(new_job)
[ "Creates", "a", "job", "with", "support", "for", "subjobs", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/jobs.py#L55-L91
[ "def", "create_job", "(", "context", ",", "body", ")", ":", "LOG", ".", "info", "(", "\"create_job for tenant %s\"", "%", "context", ".", "tenant_id", ")", "if", "not", "context", ".", "is_admin", ":", "raise", "n_exc", ".", "NotAuthorized", "(", ")", "job", "=", "body", ".", "get", "(", "'job'", ")", "if", "'parent_id'", "in", "job", ":", "parent_id", "=", "job", "[", "'parent_id'", "]", "if", "not", "parent_id", ":", "raise", "q_exc", ".", "JobNotFound", "(", "job_id", "=", "parent_id", ")", "parent_job", "=", "db_api", ".", "async_transaction_find", "(", "context", ",", "id", "=", "parent_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "parent_job", ":", "raise", "q_exc", ".", "JobNotFound", "(", "job_id", "=", "parent_id", ")", "tid", "=", "parent_id", "if", "parent_job", ".", "get", "(", "'transaction_id'", ")", ":", "tid", "=", "parent_job", ".", "get", "(", "'transaction_id'", ")", "job", "[", "'transaction_id'", "]", "=", "tid", "if", "not", "job", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"job\"", ",", "msg", "=", "\"Invalid request body.\"", ")", "with", "context", ".", "session", ".", "begin", "(", "subtransactions", "=", "True", ")", ":", "new_job", "=", "db_api", ".", "async_transaction_create", "(", "context", ",", "*", "*", "job", ")", "return", "v", ".", "_make_job_dict", "(", "new_job", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
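A sketch of the two request shapes the docstring describes (field names other than parent_id and transaction_id are illustrative, not part of the documented contract):

# Parent job: no parent_id key, so no transaction_id is set here and
# the job's own id becomes the transaction root for future subjobs.
parent_body = {"job": {"tenant_id": "some-tenant"}}

# Subjob: parent_id is looked up; the subjob inherits the parent's
# transaction_id if it has one, otherwise the parent's id itself.
sub_body = {"job": {"tenant_id": "some-tenant",
                    "parent_id": "parent-job-uuid"}}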
valid
delete_job
Delete an ip address.

:param context: neutron api request context
:param id: UUID representing the ip address to delete.
quark/plugin_modules/jobs.py
def delete_job(context, id, **filters):
    """Delete an ip address.

    :param context: neutron api request context
    :param id: UUID representing the ip address to delete.
    """
    LOG.info("delete_ip_address %s for tenant %s" %
             (id, context.tenant_id))
    if not context.is_admin:
        raise n_exc.NotAuthorized()
    with context.session.begin():
        job = db_api.async_transaction_find(context, id=id,
                                            scope=db_api.ONE, **filters)
        if not job:
            raise q_exc.JobNotFound(job_id=id)
        db_api.async_transaction_delete(context, job)
[ "Delete", "an", "ip", "address", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/jobs.py#L109-L124
[ "def", "delete_job", "(", "context", ",", "id", ",", "*", "*", "filters", ")", ":", "LOG", ".", "info", "(", "\"delete_ip_address %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "if", "not", "context", ".", "is_admin", ":", "raise", "n_exc", ".", "NotAuthorized", "(", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "job", "=", "db_api", ".", "async_transaction_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ",", "*", "*", "filters", ")", "if", "not", "job", ":", "raise", "q_exc", ".", "JobNotFound", "(", "job_id", "=", "id", ")", "db_api", ".", "async_transaction_delete", "(", "context", ",", "job", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Ip_policies.get_resources
Returns Ext Resources.
quark/api/extensions/ip_policies.py
def get_resources(cls):
    """Returns Ext Resources."""
    plugin = directory.get_plugin()
    controller = IPPoliciesController(plugin)
    return [extensions.ResourceExtension(Ip_policies.get_alias(),
                                         controller)]
[ "Returns", "Ext", "Resources", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/api/extensions/ip_policies.py#L102-L107
[ "def", "get_resources", "(", "cls", ")", ":", "plugin", "=", "directory", ".", "get_plugin", "(", ")", "controller", "=", "IPPoliciesController", "(", "plugin", ")", "return", "[", "extensions", ".", "ResourceExtension", "(", "Ip_policies", ".", "get_alias", "(", ")", ",", "controller", ")", "]" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
NVPDriver._lswitch_select_open
Selects an open lswitch for a network. Note that it does not select the most full switch, but merely one with ports available.
quark/drivers/nvp_driver.py
def _lswitch_select_open(self, context, switches=None, **kwargs):
    """Selects an open lswitch for a network.

    Note that it does not select the most full switch, but merely one
    with ports available.
    """
    if switches is not None:
        for res in switches["results"]:
            count = res["_relations"]["LogicalSwitchStatus"]["lport_count"]
            if (self.limits['max_ports_per_switch'] == 0 or
                    count < self.limits['max_ports_per_switch']):
                return res["uuid"]
    return None
[ "Selects", "an", "open", "lswitch", "for", "a", "network", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/nvp_driver.py#L587-L600
[ "def", "_lswitch_select_open", "(", "self", ",", "context", ",", "switches", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "switches", "is", "not", "None", ":", "for", "res", "in", "switches", "[", "\"results\"", "]", ":", "count", "=", "res", "[", "\"_relations\"", "]", "[", "\"LogicalSwitchStatus\"", "]", "[", "\"lport_count\"", "]", "if", "(", "self", ".", "limits", "[", "'max_ports_per_switch'", "]", "==", "0", "or", "count", "<", "self", ".", "limits", "[", "'max_ports_per_switch'", "]", ")", ":", "return", "res", "[", "\"uuid\"", "]", "return", "None" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
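Given the payload shape the method reads, the selection behaves like this (values illustrative):

switches = {"results": [
    {"uuid": "switch-1",
     "_relations": {"LogicalSwitchStatus": {"lport_count": 64}}},
    {"uuid": "switch-2",
     "_relations": {"LogicalSwitchStatus": {"lport_count": 10}}},
]}
# With limits['max_ports_per_switch'] == 64, switch-1 is full, so the
# method returns "switch-2"; a limit of 0 disables the cap and the
# first switch wins.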
valid
NVPDriver._add_default_tz_bindings
Configure any additional default transport zone bindings.
quark/drivers/nvp_driver.py
def _add_default_tz_bindings(self, context, switch, network_id):
    """Configure any additional default transport zone bindings."""
    default_tz = CONF.NVP.default_tz

    # If there is no default tz specified it's pointless to try
    # and add any additional default tz bindings.
    if not default_tz:
        LOG.warn("additional_default_tz_types specified, "
                 "but no default_tz. Skipping "
                 "_add_default_tz_bindings().")
        return

    # This should never be called without a neutron network uuid,
    # we require it to bind some segment allocations.
    if not network_id:
        LOG.warn("neutron network_id not specified, skipping "
                 "_add_default_tz_bindings()")
        return

    for net_type in CONF.NVP.additional_default_tz_types:
        if net_type in TZ_BINDINGS:
            binding = TZ_BINDINGS[net_type]
            binding.add(context, switch, default_tz, network_id)
        else:
            LOG.warn("Unknown default tz type %s" % (net_type))
[ "Configure", "any", "additional", "default", "transport", "zone", "bindings", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/nvp_driver.py#L636-L660
[ "def", "_add_default_tz_bindings", "(", "self", ",", "context", ",", "switch", ",", "network_id", ")", ":", "default_tz", "=", "CONF", ".", "NVP", ".", "default_tz", "# If there is no default tz specified it's pointless to try", "# and add any additional default tz bindings.", "if", "not", "default_tz", ":", "LOG", ".", "warn", "(", "\"additional_default_tz_types specified, \"", "\"but no default_tz. Skipping \"", "\"_add_default_tz_bindings().\"", ")", "return", "# This should never be called without a neutron network uuid,", "# we require it to bind some segment allocations.", "if", "not", "network_id", ":", "LOG", ".", "warn", "(", "\"neutron network_id not specified, skipping \"", "\"_add_default_tz_bindings()\"", ")", "return", "for", "net_type", "in", "CONF", ".", "NVP", ".", "additional_default_tz_types", ":", "if", "net_type", "in", "TZ_BINDINGS", ":", "binding", "=", "TZ_BINDINGS", "[", "net_type", "]", "binding", ".", "add", "(", "context", ",", "switch", ",", "default_tz", ",", "network_id", ")", "else", ":", "LOG", ".", "warn", "(", "\"Unknown default tz type %s\"", "%", "(", "net_type", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
NVPDriver._remove_default_tz_bindings
Deconfigure any additional default transport zone bindings.
quark/drivers/nvp_driver.py
def _remove_default_tz_bindings(self, context, network_id):
    """Deconfigure any additional default transport zone bindings."""
    default_tz = CONF.NVP.default_tz

    if not default_tz:
        LOG.warn("additional_default_tz_types specified, "
                 "but no default_tz. Skipping "
                 "_remove_default_tz_bindings().")
        return

    if not network_id:
        LOG.warn("neutron network_id not specified, skipping "
                 "_remove_default_tz_bindings()")
        return

    for net_type in CONF.NVP.additional_default_tz_types:
        if net_type in TZ_BINDINGS:
            binding = TZ_BINDINGS[net_type]
            binding.remove(context, default_tz, network_id)
        else:
            LOG.warn("Unknown default tz type %s" % (net_type))
[ "Deconfigure", "any", "additional", "default", "transport", "zone", "bindings", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/nvp_driver.py#L662-L682
[ "def", "_remove_default_tz_bindings", "(", "self", ",", "context", ",", "network_id", ")", ":", "default_tz", "=", "CONF", ".", "NVP", ".", "default_tz", "if", "not", "default_tz", ":", "LOG", ".", "warn", "(", "\"additional_default_tz_types specified, \"", "\"but no default_tz. Skipping \"", "\"_remove_default_tz_bindings().\"", ")", "return", "if", "not", "network_id", ":", "LOG", ".", "warn", "(", "\"neutron network_id not specified, skipping \"", "\"_remove_default_tz_bindings()\"", ")", "return", "for", "net_type", "in", "CONF", ".", "NVP", ".", "additional_default_tz_types", ":", "if", "net_type", "in", "TZ_BINDINGS", ":", "binding", "=", "TZ_BINDINGS", "[", "net_type", "]", "binding", ".", "remove", "(", "context", ",", "default_tz", ",", "network_id", ")", "else", ":", "LOG", ".", "warn", "(", "\"Unknown default tz type %s\"", "%", "(", "net_type", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
NVPDriver.get_lswitch_ids_for_network
Public interface for fetching lswitch ids for a given network. NOTE(morgabra) This is here because calling private methods from outside the class feels wrong, and we need to be able to fetch lswitch ids for use in other drivers.
quark/drivers/nvp_driver.py
def get_lswitch_ids_for_network(self, context, network_id):
    """Public interface for fetching lswitch ids for a given network.

    NOTE(morgabra) This is here because calling private methods
    from outside the class feels wrong, and we need to be able to
    fetch lswitch ids for use in other drivers.
    """
    lswitches = self._lswitches_for_network(context, network_id).results()
    return [s['uuid'] for s in lswitches["results"]]
[ "Public", "interface", "for", "fetching", "lswitch", "ids", "for", "a", "given", "network", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/nvp_driver.py#L740-L748
[ "def", "get_lswitch_ids_for_network", "(", "self", ",", "context", ",", "network_id", ")", ":", "lswitches", "=", "self", ".", "_lswitches_for_network", "(", "context", ",", "network_id", ")", ".", "results", "(", ")", "return", "[", "s", "[", "'uuid'", "]", "for", "s", "in", "lswitches", "[", "\"results\"", "]", "]" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
UnicornDriver.register_floating_ip
Register a floating ip with Unicorn

:param floating_ip: The quark.db.models.IPAddress to register
:param port_fixed_ips: A dictionary containing the port and fixed ips
    to associate the floating IP with. Has the structure of:
    {"<id of port>": {"port": <quark.db.models.Port>,
                      "fixed_ip": "<fixed ip address>"}}
:return: None
quark/drivers/unicorn_driver.py
def register_floating_ip(self, floating_ip, port_fixed_ips):
    """Register a floating ip with Unicorn

    :param floating_ip: The quark.db.models.IPAddress to register
    :param port_fixed_ips: A dictionary containing the port and fixed ips
        to associate the floating IP with. Has the structure of:
        {"<id of port>": {"port": <quark.db.models.Port>,
                          "fixed_ip": "<fixed ip address>"}}
    :return: None
    """
    url = CONF.QUARK.floating_ip_base_url
    timeout = CONF.QUARK.unicorn_api_timeout_seconds
    req = self._build_request_body(floating_ip, port_fixed_ips)

    try:
        LOG.info("Calling unicorn to register floating ip: %s %s"
                 % (url, req))
        r = requests.post(url, data=json.dumps(req), timeout=timeout)
    except Exception as e:
        LOG.error("Unhandled Exception caught when trying to register "
                  "floating ip %s with the unicorn API. Error: %s"
                  % (floating_ip.id, e.message))
        raise ex.RegisterFloatingIpFailure(id=floating_ip.id)

    if r.status_code != 200 and r.status_code != 201:
        msg = "Unexpected status from unicorn API: Status Code %s, " \
              "Message: %s" % (r.status_code, r.json())
        LOG.error("register_floating_ip: %s" % msg)
        raise ex.RegisterFloatingIpFailure(id=floating_ip.id)
[ "Register", "a", "floating", "ip", "with", "Unicorn" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/unicorn_driver.py#L54-L82
[ "def", "register_floating_ip", "(", "self", ",", "floating_ip", ",", "port_fixed_ips", ")", ":", "url", "=", "CONF", ".", "QUARK", ".", "floating_ip_base_url", "timeout", "=", "CONF", ".", "QUARK", ".", "unicorn_api_timeout_seconds", "req", "=", "self", ".", "_build_request_body", "(", "floating_ip", ",", "port_fixed_ips", ")", "try", ":", "LOG", ".", "info", "(", "\"Calling unicorn to register floating ip: %s %s\"", "%", "(", "url", ",", "req", ")", ")", "r", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "req", ")", ",", "timeout", "=", "timeout", ")", "except", "Exception", "as", "e", ":", "LOG", ".", "error", "(", "\"Unhandled Exception caught when trying to register \"", "\"floating ip %s with the unicorn API. Error: %s\"", "%", "(", "floating_ip", ".", "id", ",", "e", ".", "message", ")", ")", "raise", "ex", ".", "RegisterFloatingIpFailure", "(", "id", "=", "floating_ip", ".", "id", ")", "if", "r", ".", "status_code", "!=", "200", "and", "r", ".", "status_code", "!=", "201", ":", "msg", "=", "\"Unexpected status from unicorn API: Status Code %s, \"", "\"Message: %s\"", "%", "(", "r", ".", "status_code", ",", "r", ".", "json", "(", ")", ")", "LOG", ".", "error", "(", "\"register_floating_ip: %s\"", "%", "msg", ")", "raise", "ex", ".", "RegisterFloatingIpFailure", "(", "id", "=", "floating_ip", ".", "id", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
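The port_fixed_ips shape documented in the docstring, as a concrete hypothetical value:

port_fixed_ips = {
    "port-uuid-1": {
        "port": port_model,      # a quark.db.models.Port instance
        "fixed_ip": "10.0.0.5",  # fixed address the floating IP maps to
    }
}
# register_floating_ip(flip, port_fixed_ips) POSTs the body built by
# _build_request_body() and raises RegisterFloatingIpFailure unless
# the API answers 200 or 201.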
valid
UnicornDriver.remove_floating_ip
Register a floating ip with Unicorn

:param floating_ip: The quark.db.models.IPAddress to remove
:return: None
quark/drivers/unicorn_driver.py
def remove_floating_ip(self, floating_ip):
    """Register a floating ip with Unicorn

    :param floating_ip: The quark.db.models.IPAddress to remove
    :return: None
    """
    url = "%s/%s" % (CONF.QUARK.floating_ip_base_url,
                     floating_ip.address_readable)
    timeout = CONF.QUARK.unicorn_api_timeout_seconds

    try:
        LOG.info("Calling unicorn to remove floating ip: %s" % url)
        r = requests.delete(url, timeout=timeout)
    except Exception as e:
        LOG.error("Unhandled Exception caught when trying to un-register "
                  "floating ip %s with the unicorn API. Error: %s"
                  % (floating_ip.id, e.message))
        raise ex.RemoveFloatingIpFailure(id=floating_ip.id)

    if r.status_code == 404:
        LOG.warn("The floating IP %s does not exist in the unicorn "
                 "system." % floating_ip.address_readable)
    elif r.status_code != 204:
        msg = "Unexpected status from unicorn API: Status Code %s, " \
              "Message: %s" % (r.status_code, r.json())
        LOG.error("remove_floating_ip: %s" % msg)
        raise ex.RemoveFloatingIpFailure(id=floating_ip.id)
[ "Register", "a", "floating", "ip", "with", "Unicorn" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/unicorn_driver.py#L115-L141
[ "def", "remove_floating_ip", "(", "self", ",", "floating_ip", ")", ":", "url", "=", "\"%s/%s\"", "%", "(", "CONF", ".", "QUARK", ".", "floating_ip_base_url", ",", "floating_ip", ".", "address_readable", ")", "timeout", "=", "CONF", ".", "QUARK", ".", "unicorn_api_timeout_seconds", "try", ":", "LOG", ".", "info", "(", "\"Calling unicorn to remove floating ip: %s\"", "%", "url", ")", "r", "=", "requests", ".", "delete", "(", "url", ",", "timeout", "=", "timeout", ")", "except", "Exception", "as", "e", ":", "LOG", ".", "error", "(", "\"Unhandled Exception caught when trying to un-register \"", "\"floating ip %s with the unicorn API. Error: %s\"", "%", "(", "floating_ip", ".", "id", ",", "e", ".", "message", ")", ")", "raise", "ex", ".", "RemoveFloatingIpFailure", "(", "id", "=", "floating_ip", ".", "id", ")", "if", "r", ".", "status_code", "==", "404", ":", "LOG", ".", "warn", "(", "\"The floating IP %s does not exist in the unicorn system.\"", "%", "floating_ip", ".", "address_readable", ")", "elif", "r", ".", "status_code", "!=", "204", ":", "msg", "=", "\"Unexpected status from unicorn API: Status Code %s, \"", "\"Message: %s\"", "%", "(", "r", ".", "status_code", ",", "r", ".", "json", "(", ")", ")", "LOG", ".", "error", "(", "\"remove_floating_ip: %s\"", "%", "msg", ")", "raise", "ex", ".", "RemoveFloatingIpFailure", "(", "id", "=", "floating_ip", ".", "id", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
QuarkAsyncServer._load_worker_plugin_with_module
Instantiates worker plugins that have requsite properties. The required properties are: * must have PLUGIN_EP entrypoint registered (or it wouldn't be in the list) * must have class attribute versions (list) of supported RPC versions * must subclass QuarkAsyncPluginBase
quark/tools/async_worker.py
def _load_worker_plugin_with_module(self, module, version): """Instantiates worker plugins that have requsite properties. The required properties are: * must have PLUGIN_EP entrypoint registered (or it wouldn't be in the list) * must have class attribute versions (list) of supported RPC versions * must subclass QuarkAsyncPluginBase """ classes = inspect.getmembers(module, inspect.isclass) loaded = 0 for cls_name, cls in classes: if hasattr(cls, 'versions'): if version not in cls.versions: continue else: continue if issubclass(cls, base_worker.QuarkAsyncPluginBase): LOG.debug("Loading plugin %s" % cls_name) plugin = cls() self.plugins.append(plugin) loaded += 1 LOG.debug("Found %d possible plugins and loaded %d" % (len(classes), loaded))
def _load_worker_plugin_with_module(self, module, version): """Instantiates worker plugins that have requsite properties. The required properties are: * must have PLUGIN_EP entrypoint registered (or it wouldn't be in the list) * must have class attribute versions (list) of supported RPC versions * must subclass QuarkAsyncPluginBase """ classes = inspect.getmembers(module, inspect.isclass) loaded = 0 for cls_name, cls in classes: if hasattr(cls, 'versions'): if version not in cls.versions: continue else: continue if issubclass(cls, base_worker.QuarkAsyncPluginBase): LOG.debug("Loading plugin %s" % cls_name) plugin = cls() self.plugins.append(plugin) loaded += 1 LOG.debug("Found %d possible plugins and loaded %d" % (len(classes), loaded))
[ "Instantiates", "worker", "plugins", "that", "have", "requsite", "properties", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/async_worker.py#L64-L87
[ "def", "_load_worker_plugin_with_module", "(", "self", ",", "module", ",", "version", ")", ":", "classes", "=", "inspect", ".", "getmembers", "(", "module", ",", "inspect", ".", "isclass", ")", "loaded", "=", "0", "for", "cls_name", ",", "cls", "in", "classes", ":", "if", "hasattr", "(", "cls", ",", "'versions'", ")", ":", "if", "version", "not", "in", "cls", ".", "versions", ":", "continue", "else", ":", "continue", "if", "issubclass", "(", "cls", ",", "base_worker", ".", "QuarkAsyncPluginBase", ")", ":", "LOG", ".", "debug", "(", "\"Loading plugin %s\"", "%", "cls_name", ")", "plugin", "=", "cls", "(", ")", "self", ".", "plugins", ".", "append", "(", "plugin", ")", "loaded", "+=", "1", "LOG", ".", "debug", "(", "\"Found %d possible plugins and loaded %d\"", "%", "(", "len", "(", "classes", ")", ",", "loaded", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
QuarkAsyncServer._discover_via_entrypoints
Looks for modules with amtching entry points.
quark/tools/async_worker.py
def _discover_via_entrypoints(self): """Looks for modules with amtching entry points.""" emgr = extension.ExtensionManager(PLUGIN_EP, invoke_on_load=False) return ((ext.name, ext.plugin) for ext in emgr)
def _discover_via_entrypoints(self): """Looks for modules with amtching entry points.""" emgr = extension.ExtensionManager(PLUGIN_EP, invoke_on_load=False) return ((ext.name, ext.plugin) for ext in emgr)
[ "Looks", "for", "modules", "with", "amtching", "entry", "points", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/async_worker.py#L89-L92
[ "def", "_discover_via_entrypoints", "(", "self", ")", ":", "emgr", "=", "extension", ".", "ExtensionManager", "(", "PLUGIN_EP", ",", "invoke_on_load", "=", "False", ")", "return", "(", "(", "ext", ".", "name", ",", "ext", ".", "plugin", ")", "for", "ext", "in", "emgr", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
QuarkAsyncServer.serve_rpc
Launches configured # of workers per loaded plugin.
quark/tools/async_worker.py
def serve_rpc(self): """Launches configured # of workers per loaded plugin.""" if cfg.CONF.QUARK_ASYNC.rpc_workers < 1: cfg.CONF.set_override('rpc_workers', 1, "QUARK_ASYNC") try: rpc = service.RpcWorker(self.plugins) launcher = common_service.ProcessLauncher(CONF, wait_interval=1.0) launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers) return launcher except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Unrecoverable error: please check log for ' 'details.'))
def serve_rpc(self): """Launches configured # of workers per loaded plugin.""" if cfg.CONF.QUARK_ASYNC.rpc_workers < 1: cfg.CONF.set_override('rpc_workers', 1, "QUARK_ASYNC") try: rpc = service.RpcWorker(self.plugins) launcher = common_service.ProcessLauncher(CONF, wait_interval=1.0) launcher.launch_service(rpc, workers=CONF.QUARK_ASYNC.rpc_workers) return launcher except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Unrecoverable error: please check log for ' 'details.'))
[ "Launches", "configured", "#", "of", "workers", "per", "loaded", "plugin", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/async_worker.py#L99-L113
[ "def", "serve_rpc", "(", "self", ")", ":", "if", "cfg", ".", "CONF", ".", "QUARK_ASYNC", ".", "rpc_workers", "<", "1", ":", "cfg", ".", "CONF", ".", "set_override", "(", "'rpc_workers'", ",", "1", ",", "\"QUARK_ASYNC\"", ")", "try", ":", "rpc", "=", "service", ".", "RpcWorker", "(", "self", ".", "plugins", ")", "launcher", "=", "common_service", ".", "ProcessLauncher", "(", "CONF", ",", "wait_interval", "=", "1.0", ")", "launcher", ".", "launch_service", "(", "rpc", ",", "workers", "=", "CONF", ".", "QUARK_ASYNC", ".", "rpc_workers", ")", "return", "launcher", "except", "Exception", ":", "with", "excutils", ".", "save_and_reraise_exception", "(", ")", ":", "LOG", ".", "exception", "(", "_LE", "(", "'Unrecoverable error: please check log for '", "'details.'", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
QuarkAsyncServer.start_api_and_rpc_workers
Initializes eventlet and starts wait for workers to exit. Spawns the workers returned from serve_rpc
quark/tools/async_worker.py
def start_api_and_rpc_workers(self): """Initializes eventlet and starts wait for workers to exit. Spawns the workers returned from serve_rpc """ pool = eventlet.GreenPool() quark_rpc = self.serve_rpc() pool.spawn(quark_rpc.wait) pool.waitall()
def start_api_and_rpc_workers(self): """Initializes eventlet and starts wait for workers to exit. Spawns the workers returned from serve_rpc """ pool = eventlet.GreenPool() quark_rpc = self.serve_rpc() pool.spawn(quark_rpc.wait) pool.waitall()
[ "Initializes", "eventlet", "and", "starts", "wait", "for", "workers", "to", "exit", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/async_worker.py#L115-L125
[ "def", "start_api_and_rpc_workers", "(", "self", ")", ":", "pool", "=", "eventlet", ".", "GreenPool", "(", ")", "quark_rpc", "=", "self", ".", "serve_rpc", "(", ")", "pool", ".", "spawn", "(", "quark_rpc", ".", "wait", ")", "pool", ".", "waitall", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Scalingip.get_resources
Returns Ext Resources.
quark/api/extensions/scalingip.py
def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) # attr.PLURALS.update(plural_mappings) return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, register_quota=True)
def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) # attr.PLURALS.update(plural_mappings) return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, register_quota=True)
[ "Returns", "Ext", "Resources", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/api/extensions/scalingip.py#L127-L135
[ "def", "get_resources", "(", "cls", ")", ":", "plural_mappings", "=", "resource_helper", ".", "build_plural_mappings", "(", "{", "}", ",", "RESOURCE_ATTRIBUTE_MAP", ")", "# attr.PLURALS.update(plural_mappings)", "return", "resource_helper", ".", "build_resource_info", "(", "plural_mappings", ",", "RESOURCE_ATTRIBUTE_MAP", ",", "None", ",", "register_quota", "=", "True", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
BaseSegmentAllocation._chunks
Chunks data into chunk with size<=chunk_size.
quark/segment_allocations.py
def _chunks(self, iterable, chunk_size): """Chunks data into chunk with size<=chunk_size.""" iterator = iter(iterable) chunk = list(itertools.islice(iterator, 0, chunk_size)) while chunk: yield chunk chunk = list(itertools.islice(iterator, 0, chunk_size))
def _chunks(self, iterable, chunk_size): """Chunks data into chunk with size<=chunk_size.""" iterator = iter(iterable) chunk = list(itertools.islice(iterator, 0, chunk_size)) while chunk: yield chunk chunk = list(itertools.islice(iterator, 0, chunk_size))
[ "Chunks", "data", "into", "chunk", "with", "size<", "=", "chunk_size", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/segment_allocations.py#L38-L44
[ "def", "_chunks", "(", "self", ",", "iterable", ",", "chunk_size", ")", ":", "iterator", "=", "iter", "(", "iterable", ")", "chunk", "=", "list", "(", "itertools", ".", "islice", "(", "iterator", ",", "0", ",", "chunk_size", ")", ")", "while", "chunk", ":", "yield", "chunk", "chunk", "=", "list", "(", "itertools", ".", "islice", "(", "iterator", ",", "0", ",", "chunk_size", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
BaseSegmentAllocation._check_collisions
Check for overlapping ranges.
quark/segment_allocations.py
def _check_collisions(self, new_range, existing_ranges): """Check for overlapping ranges.""" def _contains(num, r1): return (num >= r1[0] and num <= r1[1]) def _is_overlap(r1, r2): return (_contains(r1[0], r2) or _contains(r1[1], r2) or _contains(r2[0], r1) or _contains(r2[1], r1)) for existing_range in existing_ranges: if _is_overlap(new_range, existing_range): return True return False
def _check_collisions(self, new_range, existing_ranges): """Check for overlapping ranges.""" def _contains(num, r1): return (num >= r1[0] and num <= r1[1]) def _is_overlap(r1, r2): return (_contains(r1[0], r2) or _contains(r1[1], r2) or _contains(r2[0], r1) or _contains(r2[1], r1)) for existing_range in existing_ranges: if _is_overlap(new_range, existing_range): return True return False
[ "Check", "for", "overlapping", "ranges", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/segment_allocations.py#L46-L61
[ "def", "_check_collisions", "(", "self", ",", "new_range", ",", "existing_ranges", ")", ":", "def", "_contains", "(", "num", ",", "r1", ")", ":", "return", "(", "num", ">=", "r1", "[", "0", "]", "and", "num", "<=", "r1", "[", "1", "]", ")", "def", "_is_overlap", "(", "r1", ",", "r2", ")", ":", "return", "(", "_contains", "(", "r1", "[", "0", "]", ",", "r2", ")", "or", "_contains", "(", "r1", "[", "1", "]", ",", "r2", ")", "or", "_contains", "(", "r2", "[", "0", "]", ",", "r1", ")", "or", "_contains", "(", "r2", "[", "1", "]", ",", "r1", ")", ")", "for", "existing_range", "in", "existing_ranges", ":", "if", "_is_overlap", "(", "new_range", ",", "existing_range", ")", ":", "return", "True", "return", "False" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
BaseSegmentAllocation._try_allocate
Find a deallocated network segment id and reallocate it. NOTE(morgabra) This locks the segment table, but only the rows in use by the segment, which is pretty handy if we ever have more than 1 segment or segment type.
quark/segment_allocations.py
def _try_allocate(self, context, segment_id, network_id): """Find a deallocated network segment id and reallocate it. NOTE(morgabra) This locks the segment table, but only the rows in use by the segment, which is pretty handy if we ever have more than 1 segment or segment type. """ LOG.info("Attempting to allocate segment for network %s " "segment_id %s segment_type %s" % (network_id, segment_id, self.segment_type)) filter_dict = { "segment_id": segment_id, "segment_type": self.segment_type, "do_not_use": False } available_ranges = db_api.segment_allocation_range_find( context, scope=db_api.ALL, **filter_dict) available_range_ids = [r["id"] for r in available_ranges] try: with context.session.begin(subtransactions=True): # Search for any deallocated segment ids for the # given segment. filter_dict = { "deallocated": True, "segment_id": segment_id, "segment_type": self.segment_type, "segment_allocation_range_ids": available_range_ids } # NOTE(morgabra) We select 100 deallocated segment ids from # the table here, and then choose 1 randomly. This is to help # alleviate the case where an uncaught exception might leave # an allocation active on a remote service but we do not have # a record of it locally. If we *do* end up choosing a # conflicted id, the caller should simply allocate another one # and mark them all as reserved. If a single object has # multiple reservations on the same segment, they will not be # deallocated, and the operator must resolve the conficts # manually. allocations = db_api.segment_allocation_find( context, lock_mode=True, **filter_dict).limit(100).all() if allocations: allocation = random.choice(allocations) # Allocate the chosen segment. update_dict = { "deallocated": False, "deallocated_at": None, "network_id": network_id } allocation = db_api.segment_allocation_update( context, allocation, **update_dict) LOG.info("Allocated segment %s for network %s " "segment_id %s segment_type %s" % (allocation["id"], network_id, segment_id, self.segment_type)) return allocation except Exception: LOG.exception("Error in segment reallocation.") LOG.info("Cannot find reallocatable segment for network %s " "segment_id %s segment_type %s" % (network_id, segment_id, self.segment_type))
def _try_allocate(self, context, segment_id, network_id): """Find a deallocated network segment id and reallocate it. NOTE(morgabra) This locks the segment table, but only the rows in use by the segment, which is pretty handy if we ever have more than 1 segment or segment type. """ LOG.info("Attempting to allocate segment for network %s " "segment_id %s segment_type %s" % (network_id, segment_id, self.segment_type)) filter_dict = { "segment_id": segment_id, "segment_type": self.segment_type, "do_not_use": False } available_ranges = db_api.segment_allocation_range_find( context, scope=db_api.ALL, **filter_dict) available_range_ids = [r["id"] for r in available_ranges] try: with context.session.begin(subtransactions=True): # Search for any deallocated segment ids for the # given segment. filter_dict = { "deallocated": True, "segment_id": segment_id, "segment_type": self.segment_type, "segment_allocation_range_ids": available_range_ids } # NOTE(morgabra) We select 100 deallocated segment ids from # the table here, and then choose 1 randomly. This is to help # alleviate the case where an uncaught exception might leave # an allocation active on a remote service but we do not have # a record of it locally. If we *do* end up choosing a # conflicted id, the caller should simply allocate another one # and mark them all as reserved. If a single object has # multiple reservations on the same segment, they will not be # deallocated, and the operator must resolve the conficts # manually. allocations = db_api.segment_allocation_find( context, lock_mode=True, **filter_dict).limit(100).all() if allocations: allocation = random.choice(allocations) # Allocate the chosen segment. update_dict = { "deallocated": False, "deallocated_at": None, "network_id": network_id } allocation = db_api.segment_allocation_update( context, allocation, **update_dict) LOG.info("Allocated segment %s for network %s " "segment_id %s segment_type %s" % (allocation["id"], network_id, segment_id, self.segment_type)) return allocation except Exception: LOG.exception("Error in segment reallocation.") LOG.info("Cannot find reallocatable segment for network %s " "segment_id %s segment_type %s" % (network_id, segment_id, self.segment_type))
[ "Find", "a", "deallocated", "network", "segment", "id", "and", "reallocate", "it", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/segment_allocations.py#L131-L196
[ "def", "_try_allocate", "(", "self", ",", "context", ",", "segment_id", ",", "network_id", ")", ":", "LOG", ".", "info", "(", "\"Attempting to allocate segment for network %s \"", "\"segment_id %s segment_type %s\"", "%", "(", "network_id", ",", "segment_id", ",", "self", ".", "segment_type", ")", ")", "filter_dict", "=", "{", "\"segment_id\"", ":", "segment_id", ",", "\"segment_type\"", ":", "self", ".", "segment_type", ",", "\"do_not_use\"", ":", "False", "}", "available_ranges", "=", "db_api", ".", "segment_allocation_range_find", "(", "context", ",", "scope", "=", "db_api", ".", "ALL", ",", "*", "*", "filter_dict", ")", "available_range_ids", "=", "[", "r", "[", "\"id\"", "]", "for", "r", "in", "available_ranges", "]", "try", ":", "with", "context", ".", "session", ".", "begin", "(", "subtransactions", "=", "True", ")", ":", "# Search for any deallocated segment ids for the", "# given segment.", "filter_dict", "=", "{", "\"deallocated\"", ":", "True", ",", "\"segment_id\"", ":", "segment_id", ",", "\"segment_type\"", ":", "self", ".", "segment_type", ",", "\"segment_allocation_range_ids\"", ":", "available_range_ids", "}", "# NOTE(morgabra) We select 100 deallocated segment ids from", "# the table here, and then choose 1 randomly. This is to help", "# alleviate the case where an uncaught exception might leave", "# an allocation active on a remote service but we do not have", "# a record of it locally. If we *do* end up choosing a", "# conflicted id, the caller should simply allocate another one", "# and mark them all as reserved. If a single object has", "# multiple reservations on the same segment, they will not be", "# deallocated, and the operator must resolve the conficts", "# manually.", "allocations", "=", "db_api", ".", "segment_allocation_find", "(", "context", ",", "lock_mode", "=", "True", ",", "*", "*", "filter_dict", ")", ".", "limit", "(", "100", ")", ".", "all", "(", ")", "if", "allocations", ":", "allocation", "=", "random", ".", "choice", "(", "allocations", ")", "# Allocate the chosen segment.", "update_dict", "=", "{", "\"deallocated\"", ":", "False", ",", "\"deallocated_at\"", ":", "None", ",", "\"network_id\"", ":", "network_id", "}", "allocation", "=", "db_api", ".", "segment_allocation_update", "(", "context", ",", "allocation", ",", "*", "*", "update_dict", ")", "LOG", ".", "info", "(", "\"Allocated segment %s for network %s \"", "\"segment_id %s segment_type %s\"", "%", "(", "allocation", "[", "\"id\"", "]", ",", "network_id", ",", "segment_id", ",", "self", ".", "segment_type", ")", ")", "return", "allocation", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Error in segment reallocation.\"", ")", "LOG", ".", "info", "(", "\"Cannot find reallocatable segment for network %s \"", "\"segment_id %s segment_type %s\"", "%", "(", "network_id", ",", "segment_id", ",", "self", ".", "segment_type", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
delete_locks
Deletes locks for each IP address that is no longer null-routed.
quark/tools/null_routes.py
def delete_locks(context, network_ids, addresses): """Deletes locks for each IP address that is no longer null-routed.""" addresses_no_longer_null_routed = _find_addresses_to_be_unlocked( context, network_ids, addresses) LOG.info("Deleting %s lock holders on IPAddress with ids: %s", len(addresses_no_longer_null_routed), [addr.id for addr in addresses_no_longer_null_routed]) for address in addresses_no_longer_null_routed: lock_holder = None try: lock_holder = db_api.lock_holder_find( context, lock_id=address.lock_id, name=LOCK_NAME, scope=db_api.ONE) if lock_holder: db_api.lock_holder_delete(context, address, lock_holder) except Exception: LOG.exception("Failed to delete lock holder %s", lock_holder) continue context.session.flush()
def delete_locks(context, network_ids, addresses): """Deletes locks for each IP address that is no longer null-routed.""" addresses_no_longer_null_routed = _find_addresses_to_be_unlocked( context, network_ids, addresses) LOG.info("Deleting %s lock holders on IPAddress with ids: %s", len(addresses_no_longer_null_routed), [addr.id for addr in addresses_no_longer_null_routed]) for address in addresses_no_longer_null_routed: lock_holder = None try: lock_holder = db_api.lock_holder_find( context, lock_id=address.lock_id, name=LOCK_NAME, scope=db_api.ONE) if lock_holder: db_api.lock_holder_delete(context, address, lock_holder) except Exception: LOG.exception("Failed to delete lock holder %s", lock_holder) continue context.session.flush()
[ "Deletes", "locks", "for", "each", "IP", "address", "that", "is", "no", "longer", "null", "-", "routed", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/null_routes.py#L117-L136
[ "def", "delete_locks", "(", "context", ",", "network_ids", ",", "addresses", ")", ":", "addresses_no_longer_null_routed", "=", "_find_addresses_to_be_unlocked", "(", "context", ",", "network_ids", ",", "addresses", ")", "LOG", ".", "info", "(", "\"Deleting %s lock holders on IPAddress with ids: %s\"", ",", "len", "(", "addresses_no_longer_null_routed", ")", ",", "[", "addr", ".", "id", "for", "addr", "in", "addresses_no_longer_null_routed", "]", ")", "for", "address", "in", "addresses_no_longer_null_routed", ":", "lock_holder", "=", "None", "try", ":", "lock_holder", "=", "db_api", ".", "lock_holder_find", "(", "context", ",", "lock_id", "=", "address", ".", "lock_id", ",", "name", "=", "LOCK_NAME", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "lock_holder", ":", "db_api", ".", "lock_holder_delete", "(", "context", ",", "address", ",", "lock_holder", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Failed to delete lock holder %s\"", ",", "lock_holder", ")", "continue", "context", ".", "session", ".", "flush", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
create_locks
Creates locks for each IP address that is null-routed. The function creates the IP address if it is not present in the database.
quark/tools/null_routes.py
def create_locks(context, network_ids, addresses): """Creates locks for each IP address that is null-routed. The function creates the IP address if it is not present in the database. """ for address in addresses: address_model = None try: address_model = _find_or_create_address( context, network_ids, address) lock_holder = None if address_model.lock_id: lock_holder = db_api.lock_holder_find( context, lock_id=address_model.lock_id, name=LOCK_NAME, scope=db_api.ONE) if not lock_holder: LOG.info("Creating lock holder on IPAddress %s with id %s", address_model.address_readable, address_model.id) db_api.lock_holder_create( context, address_model, name=LOCK_NAME, type="ip_address") except Exception: LOG.exception("Failed to create lock holder on IPAddress %s", address_model) continue context.session.flush()
def create_locks(context, network_ids, addresses): """Creates locks for each IP address that is null-routed. The function creates the IP address if it is not present in the database. """ for address in addresses: address_model = None try: address_model = _find_or_create_address( context, network_ids, address) lock_holder = None if address_model.lock_id: lock_holder = db_api.lock_holder_find( context, lock_id=address_model.lock_id, name=LOCK_NAME, scope=db_api.ONE) if not lock_holder: LOG.info("Creating lock holder on IPAddress %s with id %s", address_model.address_readable, address_model.id) db_api.lock_holder_create( context, address_model, name=LOCK_NAME, type="ip_address") except Exception: LOG.exception("Failed to create lock holder on IPAddress %s", address_model) continue context.session.flush()
[ "Creates", "locks", "for", "each", "IP", "address", "that", "is", "null", "-", "routed", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/null_routes.py#L162-L191
[ "def", "create_locks", "(", "context", ",", "network_ids", ",", "addresses", ")", ":", "for", "address", "in", "addresses", ":", "address_model", "=", "None", "try", ":", "address_model", "=", "_find_or_create_address", "(", "context", ",", "network_ids", ",", "address", ")", "lock_holder", "=", "None", "if", "address_model", ".", "lock_id", ":", "lock_holder", "=", "db_api", ".", "lock_holder_find", "(", "context", ",", "lock_id", "=", "address_model", ".", "lock_id", ",", "name", "=", "LOCK_NAME", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "lock_holder", ":", "LOG", ".", "info", "(", "\"Creating lock holder on IPAddress %s with id %s\"", ",", "address_model", ".", "address_readable", ",", "address_model", ".", "id", ")", "db_api", ".", "lock_holder_create", "(", "context", ",", "address_model", ",", "name", "=", "LOCK_NAME", ",", "type", "=", "\"ip_address\"", ")", "except", "Exception", ":", "LOG", ".", "exception", "(", "\"Failed to create lock holder on IPAddress %s\"", ",", "address_model", ")", "continue", "context", ".", "session", ".", "flush", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
IronicDriver.select_ipam_strategy
Return relevant IPAM strategy name. :param network_id: neutron network id. :param network_strategy: default strategy for the network. NOTE(morgabra) This feels like a hack but I can't think of a better idea. The root problem is we can now attach ports to networks with a different backend driver/ipam strategy than the network speficies. We handle the the backend driver part with allowing network_plugin to be specified for port objects. This works pretty well because nova or whatever knows when we are hooking up an Ironic node so it can pass along that key during port_create(). IPAM is a little trickier, especially in Ironic's case, because we *must* use a specific IPAM for provider networks. There isn't really much of an option other than involve the backend driver when selecting the IPAM strategy.
quark/drivers/ironic_driver.py
def select_ipam_strategy(self, network_id, network_strategy, **kwargs): """Return relevant IPAM strategy name. :param network_id: neutron network id. :param network_strategy: default strategy for the network. NOTE(morgabra) This feels like a hack but I can't think of a better idea. The root problem is we can now attach ports to networks with a different backend driver/ipam strategy than the network speficies. We handle the the backend driver part with allowing network_plugin to be specified for port objects. This works pretty well because nova or whatever knows when we are hooking up an Ironic node so it can pass along that key during port_create(). IPAM is a little trickier, especially in Ironic's case, because we *must* use a specific IPAM for provider networks. There isn't really much of an option other than involve the backend driver when selecting the IPAM strategy. """ LOG.info("Selecting IPAM strategy for network_id:%s " "network_strategy:%s" % (network_id, network_strategy)) net_type = "tenant" if STRATEGY.is_provider_network(network_id): net_type = "provider" strategy = self._ipam_strategies.get(net_type, {}) default = strategy.get("default") overrides = strategy.get("overrides", {}) # If we override a particular strategy explicitly, we use it. if network_strategy in overrides: LOG.info("Selected overridden IPAM strategy: %s" % (overrides[network_strategy])) return overrides[network_strategy] # Otherwise, we are free to use an explicit default. if default: LOG.info("Selected default IPAM strategy for tenant " "network: %s" % (default)) return default # Fallback to the network-specified IPAM strategy LOG.info("Selected network strategy for tenant " "network: %s" % (network_strategy)) return network_strategy
def select_ipam_strategy(self, network_id, network_strategy, **kwargs): """Return relevant IPAM strategy name. :param network_id: neutron network id. :param network_strategy: default strategy for the network. NOTE(morgabra) This feels like a hack but I can't think of a better idea. The root problem is we can now attach ports to networks with a different backend driver/ipam strategy than the network speficies. We handle the the backend driver part with allowing network_plugin to be specified for port objects. This works pretty well because nova or whatever knows when we are hooking up an Ironic node so it can pass along that key during port_create(). IPAM is a little trickier, especially in Ironic's case, because we *must* use a specific IPAM for provider networks. There isn't really much of an option other than involve the backend driver when selecting the IPAM strategy. """ LOG.info("Selecting IPAM strategy for network_id:%s " "network_strategy:%s" % (network_id, network_strategy)) net_type = "tenant" if STRATEGY.is_provider_network(network_id): net_type = "provider" strategy = self._ipam_strategies.get(net_type, {}) default = strategy.get("default") overrides = strategy.get("overrides", {}) # If we override a particular strategy explicitly, we use it. if network_strategy in overrides: LOG.info("Selected overridden IPAM strategy: %s" % (overrides[network_strategy])) return overrides[network_strategy] # Otherwise, we are free to use an explicit default. if default: LOG.info("Selected default IPAM strategy for tenant " "network: %s" % (default)) return default # Fallback to the network-specified IPAM strategy LOG.info("Selected network strategy for tenant " "network: %s" % (network_strategy)) return network_strategy
[ "Return", "relevant", "IPAM", "strategy", "name", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L165-L211
[ "def", "select_ipam_strategy", "(", "self", ",", "network_id", ",", "network_strategy", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"Selecting IPAM strategy for network_id:%s \"", "\"network_strategy:%s\"", "%", "(", "network_id", ",", "network_strategy", ")", ")", "net_type", "=", "\"tenant\"", "if", "STRATEGY", ".", "is_provider_network", "(", "network_id", ")", ":", "net_type", "=", "\"provider\"", "strategy", "=", "self", ".", "_ipam_strategies", ".", "get", "(", "net_type", ",", "{", "}", ")", "default", "=", "strategy", ".", "get", "(", "\"default\"", ")", "overrides", "=", "strategy", ".", "get", "(", "\"overrides\"", ",", "{", "}", ")", "# If we override a particular strategy explicitly, we use it.", "if", "network_strategy", "in", "overrides", ":", "LOG", ".", "info", "(", "\"Selected overridden IPAM strategy: %s\"", "%", "(", "overrides", "[", "network_strategy", "]", ")", ")", "return", "overrides", "[", "network_strategy", "]", "# Otherwise, we are free to use an explicit default.", "if", "default", ":", "LOG", ".", "info", "(", "\"Selected default IPAM strategy for tenant \"", "\"network: %s\"", "%", "(", "default", ")", ")", "return", "default", "# Fallback to the network-specified IPAM strategy", "LOG", ".", "info", "(", "\"Selected network strategy for tenant \"", "\"network: %s\"", "%", "(", "network_strategy", ")", ")", "return", "network_strategy" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
IronicDriver._get_base_network_info
Return a dict of extra network information. :param context: neutron request context. :param network_id: neturon network id. :param net_driver: network driver associated with network_id. :raises IronicException: Any unexpected data fetching failures will be logged and IronicException raised. This driver can attach to networks managed by other drivers. We may need some information from these drivers, or otherwise inform downstream about the type of network we are attaching to. We can make these decisions here.
quark/drivers/ironic_driver.py
def _get_base_network_info(self, context, network_id, base_net_driver): """Return a dict of extra network information. :param context: neutron request context. :param network_id: neturon network id. :param net_driver: network driver associated with network_id. :raises IronicException: Any unexpected data fetching failures will be logged and IronicException raised. This driver can attach to networks managed by other drivers. We may need some information from these drivers, or otherwise inform downstream about the type of network we are attaching to. We can make these decisions here. """ driver_name = base_net_driver.get_name() net_info = {"network_type": driver_name} LOG.debug('_get_base_network_info: %s %s' % (driver_name, network_id)) # If the driver is NVP, we need to look up the lswitch id we should # be attaching to. if driver_name == 'NVP': LOG.debug('looking up lswitch ids for network %s' % (network_id)) lswitch_ids = base_net_driver.get_lswitch_ids_for_network( context, network_id) if not lswitch_ids or len(lswitch_ids) > 1: msg = ('lswitch id lookup failed, %s ids found.' % (len(lswitch_ids))) LOG.error(msg) raise IronicException(msg) lswitch_id = lswitch_ids.pop() LOG.info('found lswitch for network %s: %s' % (network_id, lswitch_id)) net_info['lswitch_id'] = lswitch_id LOG.debug('_get_base_network_info finished: %s %s %s' % (driver_name, network_id, net_info)) return net_info
def _get_base_network_info(self, context, network_id, base_net_driver): """Return a dict of extra network information. :param context: neutron request context. :param network_id: neturon network id. :param net_driver: network driver associated with network_id. :raises IronicException: Any unexpected data fetching failures will be logged and IronicException raised. This driver can attach to networks managed by other drivers. We may need some information from these drivers, or otherwise inform downstream about the type of network we are attaching to. We can make these decisions here. """ driver_name = base_net_driver.get_name() net_info = {"network_type": driver_name} LOG.debug('_get_base_network_info: %s %s' % (driver_name, network_id)) # If the driver is NVP, we need to look up the lswitch id we should # be attaching to. if driver_name == 'NVP': LOG.debug('looking up lswitch ids for network %s' % (network_id)) lswitch_ids = base_net_driver.get_lswitch_ids_for_network( context, network_id) if not lswitch_ids or len(lswitch_ids) > 1: msg = ('lswitch id lookup failed, %s ids found.' % (len(lswitch_ids))) LOG.error(msg) raise IronicException(msg) lswitch_id = lswitch_ids.pop() LOG.info('found lswitch for network %s: %s' % (network_id, lswitch_id)) net_info['lswitch_id'] = lswitch_id LOG.debug('_get_base_network_info finished: %s %s %s' % (driver_name, network_id, net_info)) return net_info
[ "Return", "a", "dict", "of", "extra", "network", "information", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L248-L288
[ "def", "_get_base_network_info", "(", "self", ",", "context", ",", "network_id", ",", "base_net_driver", ")", ":", "driver_name", "=", "base_net_driver", ".", "get_name", "(", ")", "net_info", "=", "{", "\"network_type\"", ":", "driver_name", "}", "LOG", ".", "debug", "(", "'_get_base_network_info: %s %s'", "%", "(", "driver_name", ",", "network_id", ")", ")", "# If the driver is NVP, we need to look up the lswitch id we should", "# be attaching to.", "if", "driver_name", "==", "'NVP'", ":", "LOG", ".", "debug", "(", "'looking up lswitch ids for network %s'", "%", "(", "network_id", ")", ")", "lswitch_ids", "=", "base_net_driver", ".", "get_lswitch_ids_for_network", "(", "context", ",", "network_id", ")", "if", "not", "lswitch_ids", "or", "len", "(", "lswitch_ids", ")", ">", "1", ":", "msg", "=", "(", "'lswitch id lookup failed, %s ids found.'", "%", "(", "len", "(", "lswitch_ids", ")", ")", ")", "LOG", ".", "error", "(", "msg", ")", "raise", "IronicException", "(", "msg", ")", "lswitch_id", "=", "lswitch_ids", ".", "pop", "(", ")", "LOG", ".", "info", "(", "'found lswitch for network %s: %s'", "%", "(", "network_id", ",", "lswitch_id", ")", ")", "net_info", "[", "'lswitch_id'", "]", "=", "lswitch_id", "LOG", ".", "debug", "(", "'_get_base_network_info finished: %s %s %s'", "%", "(", "driver_name", ",", "network_id", ",", "net_info", ")", ")", "return", "net_info" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
IronicDriver.create_port
Create a port. :param context: neutron api request context. :param network_id: neutron network id. :param port_id: neutron port id. :param kwargs: required keys - device_id: neutron port device_id (instance_id) instance_node_id: nova hypervisor host id mac_address: neutron port mac address base_net_driver: the base network driver optional keys - addresses: list of allocated IPAddress models security_groups: list of associated security groups :raises IronicException: If the client is unable to create the downstream port for any reason, the exception will be logged and IronicException raised.
quark/drivers/ironic_driver.py
def create_port(self, context, network_id, port_id, **kwargs): """Create a port. :param context: neutron api request context. :param network_id: neutron network id. :param port_id: neutron port id. :param kwargs: required keys - device_id: neutron port device_id (instance_id) instance_node_id: nova hypervisor host id mac_address: neutron port mac address base_net_driver: the base network driver optional keys - addresses: list of allocated IPAddress models security_groups: list of associated security groups :raises IronicException: If the client is unable to create the downstream port for any reason, the exception will be logged and IronicException raised. """ LOG.info("create_port %s %s %s" % (context.tenant_id, network_id, port_id)) # sanity check if not kwargs.get('base_net_driver'): raise IronicException(msg='base_net_driver required.') base_net_driver = kwargs['base_net_driver'] if not kwargs.get('device_id'): raise IronicException(msg='device_id required.') device_id = kwargs['device_id'] if not kwargs.get('instance_node_id'): raise IronicException(msg='instance_node_id required.') instance_node_id = kwargs['instance_node_id'] if not kwargs.get('mac_address'): raise IronicException(msg='mac_address is required.') mac_address = str(netaddr.EUI(kwargs["mac_address"]["address"])) mac_address = mac_address.replace('-', ':') # TODO(morgabra): Change this when we enable security groups. if kwargs.get('security_groups'): msg = 'ironic driver does not support security group operations.' raise IronicException(msg=msg) # unroll the given address models into a fixed_ips list we can # pass downstream fixed_ips = [] addresses = kwargs.get('addresses') if not isinstance(addresses, list): addresses = [addresses] for address in addresses: fixed_ips.append(self._make_fixed_ip_dict(context, address)) body = { "id": port_id, "network_id": network_id, "device_id": device_id, "device_owner": kwargs.get('device_owner', ''), "tenant_id": context.tenant_id or "quark", "roles": context.roles, "mac_address": mac_address, "fixed_ips": fixed_ips, "switch:hardware_id": instance_node_id, "dynamic_network": not STRATEGY.is_provider_network(network_id) } net_info = self._get_base_network_info( context, network_id, base_net_driver) body.update(net_info) try: LOG.info("creating downstream port: %s" % (body)) port = self._create_port(context, body) LOG.info("created downstream port: %s" % (port)) return {"uuid": port['port']['id'], "vlan_id": port['port']['vlan_id']} except Exception as e: msg = "failed to create downstream port. Exception: %s" % (e) raise IronicException(msg=msg)
def create_port(self, context, network_id, port_id, **kwargs): """Create a port. :param context: neutron api request context. :param network_id: neutron network id. :param port_id: neutron port id. :param kwargs: required keys - device_id: neutron port device_id (instance_id) instance_node_id: nova hypervisor host id mac_address: neutron port mac address base_net_driver: the base network driver optional keys - addresses: list of allocated IPAddress models security_groups: list of associated security groups :raises IronicException: If the client is unable to create the downstream port for any reason, the exception will be logged and IronicException raised. """ LOG.info("create_port %s %s %s" % (context.tenant_id, network_id, port_id)) # sanity check if not kwargs.get('base_net_driver'): raise IronicException(msg='base_net_driver required.') base_net_driver = kwargs['base_net_driver'] if not kwargs.get('device_id'): raise IronicException(msg='device_id required.') device_id = kwargs['device_id'] if not kwargs.get('instance_node_id'): raise IronicException(msg='instance_node_id required.') instance_node_id = kwargs['instance_node_id'] if not kwargs.get('mac_address'): raise IronicException(msg='mac_address is required.') mac_address = str(netaddr.EUI(kwargs["mac_address"]["address"])) mac_address = mac_address.replace('-', ':') # TODO(morgabra): Change this when we enable security groups. if kwargs.get('security_groups'): msg = 'ironic driver does not support security group operations.' raise IronicException(msg=msg) # unroll the given address models into a fixed_ips list we can # pass downstream fixed_ips = [] addresses = kwargs.get('addresses') if not isinstance(addresses, list): addresses = [addresses] for address in addresses: fixed_ips.append(self._make_fixed_ip_dict(context, address)) body = { "id": port_id, "network_id": network_id, "device_id": device_id, "device_owner": kwargs.get('device_owner', ''), "tenant_id": context.tenant_id or "quark", "roles": context.roles, "mac_address": mac_address, "fixed_ips": fixed_ips, "switch:hardware_id": instance_node_id, "dynamic_network": not STRATEGY.is_provider_network(network_id) } net_info = self._get_base_network_info( context, network_id, base_net_driver) body.update(net_info) try: LOG.info("creating downstream port: %s" % (body)) port = self._create_port(context, body) LOG.info("created downstream port: %s" % (port)) return {"uuid": port['port']['id'], "vlan_id": port['port']['vlan_id']} except Exception as e: msg = "failed to create downstream port. Exception: %s" % (e) raise IronicException(msg=msg)
[ "Create", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L290-L367
[ "def", "create_port", "(", "self", ",", "context", ",", "network_id", ",", "port_id", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"create_port %s %s %s\"", "%", "(", "context", ".", "tenant_id", ",", "network_id", ",", "port_id", ")", ")", "# sanity check", "if", "not", "kwargs", ".", "get", "(", "'base_net_driver'", ")", ":", "raise", "IronicException", "(", "msg", "=", "'base_net_driver required.'", ")", "base_net_driver", "=", "kwargs", "[", "'base_net_driver'", "]", "if", "not", "kwargs", ".", "get", "(", "'device_id'", ")", ":", "raise", "IronicException", "(", "msg", "=", "'device_id required.'", ")", "device_id", "=", "kwargs", "[", "'device_id'", "]", "if", "not", "kwargs", ".", "get", "(", "'instance_node_id'", ")", ":", "raise", "IronicException", "(", "msg", "=", "'instance_node_id required.'", ")", "instance_node_id", "=", "kwargs", "[", "'instance_node_id'", "]", "if", "not", "kwargs", ".", "get", "(", "'mac_address'", ")", ":", "raise", "IronicException", "(", "msg", "=", "'mac_address is required.'", ")", "mac_address", "=", "str", "(", "netaddr", ".", "EUI", "(", "kwargs", "[", "\"mac_address\"", "]", "[", "\"address\"", "]", ")", ")", "mac_address", "=", "mac_address", ".", "replace", "(", "'-'", ",", "':'", ")", "# TODO(morgabra): Change this when we enable security groups.", "if", "kwargs", ".", "get", "(", "'security_groups'", ")", ":", "msg", "=", "'ironic driver does not support security group operations.'", "raise", "IronicException", "(", "msg", "=", "msg", ")", "# unroll the given address models into a fixed_ips list we can", "# pass downstream", "fixed_ips", "=", "[", "]", "addresses", "=", "kwargs", ".", "get", "(", "'addresses'", ")", "if", "not", "isinstance", "(", "addresses", ",", "list", ")", ":", "addresses", "=", "[", "addresses", "]", "for", "address", "in", "addresses", ":", "fixed_ips", ".", "append", "(", "self", ".", "_make_fixed_ip_dict", "(", "context", ",", "address", ")", ")", "body", "=", "{", "\"id\"", ":", "port_id", ",", "\"network_id\"", ":", "network_id", ",", "\"device_id\"", ":", "device_id", ",", "\"device_owner\"", ":", "kwargs", ".", "get", "(", "'device_owner'", ",", "''", ")", ",", "\"tenant_id\"", ":", "context", ".", "tenant_id", "or", "\"quark\"", ",", "\"roles\"", ":", "context", ".", "roles", ",", "\"mac_address\"", ":", "mac_address", ",", "\"fixed_ips\"", ":", "fixed_ips", ",", "\"switch:hardware_id\"", ":", "instance_node_id", ",", "\"dynamic_network\"", ":", "not", "STRATEGY", ".", "is_provider_network", "(", "network_id", ")", "}", "net_info", "=", "self", ".", "_get_base_network_info", "(", "context", ",", "network_id", ",", "base_net_driver", ")", "body", ".", "update", "(", "net_info", ")", "try", ":", "LOG", ".", "info", "(", "\"creating downstream port: %s\"", "%", "(", "body", ")", ")", "port", "=", "self", ".", "_create_port", "(", "context", ",", "body", ")", "LOG", ".", "info", "(", "\"created downstream port: %s\"", "%", "(", "port", ")", ")", "return", "{", "\"uuid\"", ":", "port", "[", "'port'", "]", "[", "'id'", "]", ",", "\"vlan_id\"", ":", "port", "[", "'port'", "]", "[", "'vlan_id'", "]", "}", "except", "Exception", "as", "e", ":", "msg", "=", "\"failed to create downstream port. Exception: %s\"", "%", "(", "e", ")", "raise", "IronicException", "(", "msg", "=", "msg", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
IronicDriver.update_port
Update a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to update the downstream port for any reason, the exception will be logged and IronicException raised. TODO(morgabra) It does not really make sense in the context of Ironic to allow updating ports. fixed_ips and mac_address are burned in the configdrive on the host, and we otherwise cannot migrate a port between instances. Eventually we will need to support security groups, but for now it's a no-op on port data changes, and we need to rely on the API/Nova to not allow updating data on active ports.
quark/drivers/ironic_driver.py
def update_port(self, context, port_id, **kwargs): """Update a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to update the downstream port for any reason, the exception will be logged and IronicException raised. TODO(morgabra) It does not really make sense in the context of Ironic to allow updating ports. fixed_ips and mac_address are burned in the configdrive on the host, and we otherwise cannot migrate a port between instances. Eventually we will need to support security groups, but for now it's a no-op on port data changes, and we need to rely on the API/Nova to not allow updating data on active ports. """ LOG.info("update_port %s %s" % (context.tenant_id, port_id)) # TODO(morgabra): Change this when we enable security groups. if kwargs.get("security_groups"): msg = 'ironic driver does not support security group operations.' raise IronicException(msg=msg) return {"uuid": port_id}
def update_port(self, context, port_id, **kwargs): """Update a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to update the downstream port for any reason, the exception will be logged and IronicException raised. TODO(morgabra) It does not really make sense in the context of Ironic to allow updating ports. fixed_ips and mac_address are burned in the configdrive on the host, and we otherwise cannot migrate a port between instances. Eventually we will need to support security groups, but for now it's a no-op on port data changes, and we need to rely on the API/Nova to not allow updating data on active ports. """ LOG.info("update_port %s %s" % (context.tenant_id, port_id)) # TODO(morgabra): Change this when we enable security groups. if kwargs.get("security_groups"): msg = 'ironic driver does not support security group operations.' raise IronicException(msg=msg) return {"uuid": port_id}
[ "Update", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L369-L393
[ "def", "update_port", "(", "self", ",", "context", ",", "port_id", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"update_port %s %s\"", "%", "(", "context", ".", "tenant_id", ",", "port_id", ")", ")", "# TODO(morgabra): Change this when we enable security groups.", "if", "kwargs", ".", "get", "(", "\"security_groups\"", ")", ":", "msg", "=", "'ironic driver does not support security group operations.'", "raise", "IronicException", "(", "msg", "=", "msg", ")", "return", "{", "\"uuid\"", ":", "port_id", "}" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
IronicDriver.delete_port
Delete a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to delete the downstream port for any reason, the exception will be logged and IronicException raised.
quark/drivers/ironic_driver.py
def delete_port(self, context, port_id, **kwargs): """Delete a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to delete the downstream port for any reason, the exception will be logged and IronicException raised. """ LOG.info("delete_port %s %s" % (context.tenant_id, port_id)) try: self._delete_port(context, port_id) LOG.info("deleted downstream port: %s" % (port_id)) except Exception: LOG.error("failed deleting downstream port, it is now " "orphaned! port_id: %s" % (port_id))
def delete_port(self, context, port_id, **kwargs): """Delete a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to delete the downstream port for any reason, the exception will be logged and IronicException raised. """ LOG.info("delete_port %s %s" % (context.tenant_id, port_id)) try: self._delete_port(context, port_id) LOG.info("deleted downstream port: %s" % (port_id)) except Exception: LOG.error("failed deleting downstream port, it is now " "orphaned! port_id: %s" % (port_id))
[ "Delete", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L413-L429
[ "def", "delete_port", "(", "self", ",", "context", ",", "port_id", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"delete_port %s %s\"", "%", "(", "context", ".", "tenant_id", ",", "port_id", ")", ")", "try", ":", "self", ".", "_delete_port", "(", "context", ",", "port_id", ")", "LOG", ".", "info", "(", "\"deleted downstream port: %s\"", "%", "(", "port_id", ")", ")", "except", "Exception", ":", "LOG", ".", "error", "(", "\"failed deleting downstream port, it is now \"", "\"orphaned! port_id: %s\"", "%", "(", "port_id", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
IronicDriver.diag_port
Diagnose a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to fetch the downstream port for any reason, the exception will be logged and IronicException raised.
quark/drivers/ironic_driver.py
def diag_port(self, context, port_id, **kwargs): """Diagnose a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to fetch the downstream port for any reason, the exception will be logged and IronicException raised. """ LOG.info("diag_port %s" % port_id) try: port = self._client.show_port(port_id) except Exception as e: msg = "failed fetching downstream port: %s" % (str(e)) LOG.exception(msg) raise IronicException(msg=msg) return {"downstream_port": port}
def diag_port(self, context, port_id, **kwargs): """Diagnose a port. :param context: neutron api request context. :param port_id: neutron port id. :param kwargs: optional kwargs. :raises IronicException: If the client is unable to fetch the downstream port for any reason, the exception will be logged and IronicException raised. """ LOG.info("diag_port %s" % port_id) try: port = self._client.show_port(port_id) except Exception as e: msg = "failed fetching downstream port: %s" % (str(e)) LOG.exception(msg) raise IronicException(msg=msg) return {"downstream_port": port}
[ "Diagnose", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/drivers/ironic_driver.py#L431-L448
[ "def", "diag_port", "(", "self", ",", "context", ",", "port_id", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "info", "(", "\"diag_port %s\"", "%", "port_id", ")", "try", ":", "port", "=", "self", ".", "_client", ".", "show_port", "(", "port_id", ")", "except", "Exception", "as", "e", ":", "msg", "=", "\"failed fetching downstream port: %s\"", "%", "(", "str", "(", "e", ")", ")", "LOG", ".", "exception", "(", "msg", ")", "raise", "IronicException", "(", "msg", "=", "msg", ")", "return", "{", "\"downstream_port\"", ":", "port", "}" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Tag.set
Set tag on model object.
quark/tags.py
def set(self, model, value): """Set tag on model object.""" self.validate(value) self._pop(model) value = self.serialize(value) model.tags.append(value)
def set(self, model, value): """Set tag on model object.""" self.validate(value) self._pop(model) value = self.serialize(value) model.tags.append(value)
[ "Set", "tag", "on", "model", "object", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L53-L58
[ "def", "set", "(", "self", ",", "model", ",", "value", ")", ":", "self", ".", "validate", "(", "value", ")", "self", ".", "_pop", "(", "model", ")", "value", "=", "self", ".", "serialize", "(", "value", ")", "model", ".", "tags", ".", "append", "(", "value", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Tag.get
Get a matching valid tag off the model.
quark/tags.py
def get(self, model): """Get a matching valid tag off the model.""" for tag in model.tags: if self.is_tag(tag): value = self.deserialize(tag) try: self.validate(value) return value except TagValidationError: continue return None
def get(self, model): """Get a matching valid tag off the model.""" for tag in model.tags: if self.is_tag(tag): value = self.deserialize(tag) try: self.validate(value) return value except TagValidationError: continue return None
[ "Get", "a", "matching", "valid", "tag", "off", "the", "model", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L60-L70
[ "def", "get", "(", "self", ",", "model", ")", ":", "for", "tag", "in", "model", ".", "tags", ":", "if", "self", ".", "is_tag", "(", "tag", ")", ":", "value", "=", "self", ".", "deserialize", "(", "tag", ")", "try", ":", "self", ".", "validate", "(", "value", ")", "return", "value", "except", "TagValidationError", ":", "continue", "return", "None" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Tag._pop
Pop all matching tags off the model and return them.
quark/tags.py
def _pop(self, model): """Pop all matching tags off the model and return them.""" tags = [] # collect any existing tags with matching prefix for tag in model.tags: if self.is_tag(tag): tags.append(tag) # remove collected tags from model if tags: for tag in tags: model.tags.remove(tag) return tags
def _pop(self, model): """Pop all matching tags off the model and return them.""" tags = [] # collect any existing tags with matching prefix for tag in model.tags: if self.is_tag(tag): tags.append(tag) # remove collected tags from model if tags: for tag in tags: model.tags.remove(tag) return tags
[ "Pop", "all", "matching", "tags", "off", "the", "model", "and", "return", "them", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L72-L86
[ "def", "_pop", "(", "self", ",", "model", ")", ":", "tags", "=", "[", "]", "# collect any exsiting tags with matching prefix", "for", "tag", "in", "model", ".", "tags", ":", "if", "self", ".", "is_tag", "(", "tag", ")", ":", "tags", ".", "append", "(", "tag", ")", "# remove collected tags from model", "if", "tags", ":", "for", "tag", "in", "tags", ":", "model", ".", "tags", ".", "remove", "(", "tag", ")", "return", "tags" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Tag.pop
Pop all matching tags off the port, return a valid one.
quark/tags.py
def pop(self, model): """Pop all matching tags off the port, return a valid one.""" tags = self._pop(model) if tags: for tag in tags: value = self.deserialize(tag) try: self.validate(value) return value except TagValidationError: continue
def pop(self, model): """Pop all matching tags off the port, return a valid one.""" tags = self._pop(model) if tags: for tag in tags: value = self.deserialize(tag) try: self.validate(value) return value except TagValidationError: continue
[ "Pop", "all", "matching", "tags", "off", "the", "port", "return", "a", "valid", "one", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L88-L98
[ "def", "pop", "(", "self", ",", "model", ")", ":", "tags", "=", "self", ".", "_pop", "(", "model", ")", "if", "tags", ":", "for", "tag", "in", "tags", ":", "value", "=", "self", ".", "deserialize", "(", "tag", ")", "try", ":", "self", ".", "validate", "(", "value", ")", "return", "value", "except", "TagValidationError", ":", "continue" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Tag.has_tag
Does the given port have this tag?
quark/tags.py
def has_tag(self, model): """Does the given port have this tag?""" for tag in model.tags: if self.is_tag(tag): return True return False
def has_tag(self, model): """Does the given port have this tag?""" for tag in model.tags: if self.is_tag(tag): return True return False
[ "Does", "the", "given", "port", "have", "this", "tag?" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L104-L109
[ "def", "has_tag", "(", "self", ",", "model", ")", ":", "for", "tag", "in", "model", ".", "tags", ":", "if", "self", ".", "is_tag", "(", "tag", ")", ":", "return", "True", "return", "False" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
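The set/get/_pop/pop/has_tag records above all lean on a handful of Tag hooks (is_tag, serialize, deserialize, validate) defined elsewhere in quark/tags.py. As a rough, self-contained sketch of the round-trip those hooks imply (the "prefix:value" wire format and the FakeModel stand-in are assumptions for illustration, not the actual quark.tags.Tag base class):

    # Minimal sketch of the prefix-tag pattern the methods above rely on.
    class FakeModel(object):
        def __init__(self):
            self.tags = []

    class SketchTag(object):
        PREFIX = "vlan_id"  # assumed prefix, for illustration only

        def is_tag(self, tag):
            return tag.startswith(self.PREFIX + ":")

        def serialize(self, value):
            return "%s:%s" % (self.PREFIX, value)

        def deserialize(self, tag):
            return tag.split(":", 1)[1]

        def validate(self, value):
            pass  # the real class raises TagValidationError on bad input

        def set(self, model, value):
            # mirrors Tag.set above: validate, drop old copies, append
            self.validate(value)
            model.tags = [t for t in model.tags if not self.is_tag(t)]
            model.tags.append(self.serialize(value))

        def get(self, model):
            # mirrors Tag.get above: first tag that matches and validates
            for tag in model.tags:
                if self.is_tag(tag):
                    return self.deserialize(tag)
            return None

    model = FakeModel()
    tag = SketchTag()
    tag.set(model, 50)
    print(model.tags)      # ['vlan_id:50']
    print(tag.get(model))  # '50'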
valid
VlanTag.validate
Validates a VLAN ID. :param value: The VLAN ID to validate against. :raises TagValidationError: Raised if the VLAN ID is invalid.
quark/tags.py
def validate(self, value): """Validates a VLAN ID. :param value: The VLAN ID to validate against. :raises TagValidationError: Raised if the VLAN ID is invalid. """ try: vlan_id_int = int(value) assert vlan_id_int >= self.MIN_VLAN_ID assert vlan_id_int <= self.MAX_VLAN_ID except Exception: msg = ("Invalid vlan_id. Got '%(vlan_id)s'. " "vlan_id should be an integer between %(min)d and %(max)d " "inclusive." % {'vlan_id': value, 'min': self.MIN_VLAN_ID, 'max': self.MAX_VLAN_ID}) raise TagValidationError(value, msg) return True
def validate(self, value): """Validates a VLAN ID. :param value: The VLAN ID to validate against. :raises TagValidationError: Raised if the VLAN ID is invalid. """ try: vlan_id_int = int(value) assert vlan_id_int >= self.MIN_VLAN_ID assert vlan_id_int <= self.MAX_VLAN_ID except Exception: msg = ("Invalid vlan_id. Got '%(vlan_id)s'. " "vlan_id should be an integer between %(min)d and %(max)d " "inclusive." % {'vlan_id': value, 'min': self.MIN_VLAN_ID, 'max': self.MAX_VLAN_ID}) raise TagValidationError(value, msg) return True
[ "Validates", "a", "VLAN", "ID", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L118-L135
[ "def", "validate", "(", "self", ",", "value", ")", ":", "try", ":", "vlan_id_int", "=", "int", "(", "value", ")", "assert", "vlan_id_int", ">=", "self", ".", "MIN_VLAN_ID", "assert", "vlan_id_int", "<=", "self", ".", "MAX_VLAN_ID", "except", "Exception", ":", "msg", "=", "(", "\"Invalid vlan_id. Got '%(vlan_id)s'. \"", "\"vlan_id should be an integer between %(min)d and %(max)d \"", "\"inclusive.\"", "%", "{", "'vlan_id'", ":", "value", ",", "'min'", ":", "self", ".", "MIN_VLAN_ID", ",", "'max'", ":", "self", ".", "MAX_VLAN_ID", "}", ")", "raise", "TagValidationError", "(", "value", ",", "msg", ")", "return", "True" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
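A quick worked illustration of the bounds check above. The concrete MIN_VLAN_ID/MAX_VLAN_ID values are class constants not shown in this record; 1 and 4094 (the usable 802.1Q range) are assumed here:

    # Stand-alone rerun of VlanTag.validate's try/assert logic with
    # assumed bounds; the real constants live on VlanTag.
    MIN_VLAN_ID, MAX_VLAN_ID = 1, 4094

    def is_valid_vlan(value):
        try:
            vlan_id_int = int(value)
            assert vlan_id_int >= MIN_VLAN_ID
            assert vlan_id_int <= MAX_VLAN_ID
        except Exception:
            return False  # validate() raises TagValidationError instead
        return True

    print(is_valid_vlan("50"))     # True  - int() coerces numeric strings
    print(is_valid_vlan(4095))     # False - above the assumed maximum
    print(is_valid_vlan("bogus"))  # False - int() raises ValueError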
valid
TagRegistry.get_all
Get all known tags from a model. Returns a dict of {<tag_name>:<tag_value>}.
quark/tags.py
def get_all(self, model): """Get all known tags from a model. Returns a dict of {<tag_name>:<tag_value>}. """ tags = {} for name, tag in self.tags.items(): for mtag in model.tags: if tag.is_tag(mtag): tags[name] = tag.get(model) return tags
def get_all(self, model): """Get all known tags from a model. Returns a dict of {<tag_name>:<tag_value>}. """ tags = {} for name, tag in self.tags.items(): for mtag in model.tags: if tag.is_tag(mtag): tags[name] = tag.get(model) return tags
[ "Get", "all", "known", "tags", "from", "a", "model", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L142-L152
[ "def", "get_all", "(", "self", ",", "model", ")", ":", "tags", "=", "{", "}", "for", "name", ",", "tag", "in", "self", ".", "tags", ".", "items", "(", ")", ":", "for", "mtag", "in", "model", ".", "tags", ":", "if", "tag", ".", "is_tag", "(", "mtag", ")", ":", "tags", "[", "name", "]", "=", "tag", ".", "get", "(", "model", ")", "return", "tags" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
TagRegistry.set_all
Validate and set all known tags on a port.
quark/tags.py
def set_all(self, model, **tags): """Validate and set all known tags on a port.""" for name, tag in self.tags.items(): if name in tags: value = tags.pop(name) if value: try: tag.set(model, value) except TagValidationError as e: raise n_exc.BadRequest( resource="tags", msg="%s" % (e.message))
def set_all(self, model, **tags): """Validate and set all known tags on a port.""" for name, tag in self.tags.items(): if name in tags: value = tags.pop(name) if value: try: tag.set(model, value) except TagValidationError as e: raise n_exc.BadRequest( resource="tags", msg="%s" % (e.message))
[ "Validate", "and", "set", "all", "known", "tags", "on", "a", "port", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tags.py#L154-L165
[ "def", "set_all", "(", "self", ",", "model", ",", "*", "*", "tags", ")", ":", "for", "name", ",", "tag", "in", "self", ".", "tags", ".", "items", "(", ")", ":", "if", "name", "in", "tags", ":", "value", "=", "tags", ".", "pop", "(", "name", ")", "if", "value", ":", "try", ":", "tag", ".", "set", "(", "model", ",", "value", ")", "except", "TagValidationError", "as", "e", ":", "raise", "n_exc", ".", "BadRequest", "(", "resource", "=", "\"tags\"", ",", "msg", "=", "\"%s\"", "%", "(", "e", ".", "message", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
SecurityGroupsClient.serialize_rules
Creates a payload for the redis server.
quark/cache/security_groups_client.py
def serialize_rules(self, rules): """Creates a payload for the redis server.""" # TODO(mdietz): If/when we support other rule types, this comment # will have to be revised. # Action and direction are static, for now. The implementation may # support 'deny' and 'egress' respectively in the future. We allow # the direction to be set to something else, technically, but current # plugin level call actually raises. It's supported here for unit # test purposes at this time serialized = [] for rule in rules: direction = rule["direction"] source = '' destination = '' if rule.get("remote_ip_prefix"): prefix = rule["remote_ip_prefix"] if direction == "ingress": source = self._convert_remote_network(prefix) else: if (Capabilities.EGRESS not in CONF.QUARK.environment_capabilities): raise q_exc.EgressSecurityGroupRulesNotEnabled() else: destination = self._convert_remote_network(prefix) optional_fields = {} # NOTE(mdietz): this will expand as we add more protocols protocol_map = protocols.PROTOCOL_MAP[rule["ethertype"]] if rule["protocol"] == protocol_map["icmp"]: optional_fields["icmp type"] = rule["port_range_min"] optional_fields["icmp code"] = rule["port_range_max"] else: optional_fields["port start"] = rule["port_range_min"] optional_fields["port end"] = rule["port_range_max"] payload = {"ethertype": rule["ethertype"], "protocol": rule["protocol"], "source network": source, "destination network": destination, "action": "allow", "direction": direction} payload.update(optional_fields) serialized.append(payload) return serialized
def serialize_rules(self, rules): """Creates a payload for the redis server.""" # TODO(mdietz): If/when we support other rule types, this comment # will have to be revised. # Action and direction are static, for now. The implementation may # support 'deny' and 'egress' respectively in the future. We allow # the direction to be set to something else, technically, but current # plugin level call actually raises. It's supported here for unit # test purposes at this time serialized = [] for rule in rules: direction = rule["direction"] source = '' destination = '' if rule.get("remote_ip_prefix"): prefix = rule["remote_ip_prefix"] if direction == "ingress": source = self._convert_remote_network(prefix) else: if (Capabilities.EGRESS not in CONF.QUARK.environment_capabilities): raise q_exc.EgressSecurityGroupRulesNotEnabled() else: destination = self._convert_remote_network(prefix) optional_fields = {} # NOTE(mdietz): this will expand as we add more protocols protocol_map = protocols.PROTOCOL_MAP[rule["ethertype"]] if rule["protocol"] == protocol_map["icmp"]: optional_fields["icmp type"] = rule["port_range_min"] optional_fields["icmp code"] = rule["port_range_max"] else: optional_fields["port start"] = rule["port_range_min"] optional_fields["port end"] = rule["port_range_max"] payload = {"ethertype": rule["ethertype"], "protocol": rule["protocol"], "source network": source, "destination network": destination, "action": "allow", "direction": direction} payload.update(optional_fields) serialized.append(payload) return serialized
[ "Creates", "a", "payload", "for", "the", "redis", "server", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/cache/security_groups_client.py#L49-L93
[ "def", "serialize_rules", "(", "self", ",", "rules", ")", ":", "# TODO(mdietz): If/when we support other rule types, this comment", "# will have to be revised.", "# Action and direction are static, for now. The implementation may", "# support 'deny' and 'egress' respectively in the future. We allow", "# the direction to be set to something else, technically, but current", "# plugin level call actually raises. It's supported here for unit", "# test purposes at this time", "serialized", "=", "[", "]", "for", "rule", "in", "rules", ":", "direction", "=", "rule", "[", "\"direction\"", "]", "source", "=", "''", "destination", "=", "''", "if", "rule", ".", "get", "(", "\"remote_ip_prefix\"", ")", ":", "prefix", "=", "rule", "[", "\"remote_ip_prefix\"", "]", "if", "direction", "==", "\"ingress\"", ":", "source", "=", "self", ".", "_convert_remote_network", "(", "prefix", ")", "else", ":", "if", "(", "Capabilities", ".", "EGRESS", "not", "in", "CONF", ".", "QUARK", ".", "environment_capabilities", ")", ":", "raise", "q_exc", ".", "EgressSecurityGroupRulesNotEnabled", "(", ")", "else", ":", "destination", "=", "self", ".", "_convert_remote_network", "(", "prefix", ")", "optional_fields", "=", "{", "}", "# NOTE(mdietz): this will expand as we add more protocols", "protocol_map", "=", "protocols", ".", "PROTOCOL_MAP", "[", "rule", "[", "\"ethertype\"", "]", "]", "if", "rule", "[", "\"protocol\"", "]", "==", "protocol_map", "[", "\"icmp\"", "]", ":", "optional_fields", "[", "\"icmp type\"", "]", "=", "rule", "[", "\"port_range_min\"", "]", "optional_fields", "[", "\"icmp code\"", "]", "=", "rule", "[", "\"port_range_max\"", "]", "else", ":", "optional_fields", "[", "\"port start\"", "]", "=", "rule", "[", "\"port_range_min\"", "]", "optional_fields", "[", "\"port end\"", "]", "=", "rule", "[", "\"port_range_max\"", "]", "payload", "=", "{", "\"ethertype\"", ":", "rule", "[", "\"ethertype\"", "]", ",", "\"protocol\"", ":", "rule", "[", "\"protocol\"", "]", ",", "\"source network\"", ":", "source", ",", "\"destination network\"", ":", "destination", ",", "\"action\"", ":", "\"allow\"", ",", "\"direction\"", ":", "direction", "}", "payload", ".", "update", "(", "optional_fields", ")", "serialized", ".", "append", "(", "payload", ")", "return", "serialized" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
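For one common case, the serialization above reduces to a flat dict per rule. A hedged illustration with made-up values (the exact ethertype keys in protocols.PROTOCOL_MAP and the output of _convert_remote_network are not shown in this record, so both are guesses here):

    # Hypothetical ingress TCP rule and the payload shape serialize_rules
    # would build for it; every concrete value below is illustrative.
    rule = {"ethertype": "IPv4", "protocol": 6, "direction": "ingress",
            "remote_ip_prefix": "10.10.10.0/24",
            "port_range_min": 1000, "port_range_max": 1999}

    # Protocol 6 (TCP) is not the ICMP protocol number, so the non-ICMP
    # branch is taken and the port fields are used:
    payload = {"ethertype": "IPv4", "protocol": 6,
               "source network": "10.10.10.0/24",  # via _convert_remote_network
               "destination network": "",
               "action": "allow", "direction": "ingress",
               "port start": 1000, "port end": 1999}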
valid
SecurityGroupsClient.serialize_groups
Creates a payload for the redis server The rule schema is the following: REDIS KEY - port_device_id.port_mac_address/sg REDIS VALUE - A JSON dump of the following: port_mac_address must be lower-cased and stripped of non-alphanumeric characters {"id": "<arbitrary uuid>", "rules": [ {"ethertype": <hexadecimal integer>, "protocol": <integer>, "port start": <integer>, # optional "port end": <integer>, # optional "icmp type": <integer>, # optional "icmp code": <integer>, # optional "source network": <string>, "destination network": <string>, "action": <string>, "direction": <string>}, ], "security groups ack": <boolean> } Example: {"id": "004c6369-9f3d-4d33-b8f5-9416bf3567dd", "rules": [ {"ethertype": 0x800, "protocol": "tcp", "port start": 1000, "port end": 1999, "source network": "10.10.10.0/24", "destination network": "", "action": "allow", "direction": "ingress"}, ], "security groups ack": "true" } port start/end and icmp type/code are mutually exclusive pairs.
quark/cache/security_groups_client.py
def serialize_groups(self, groups): """Creates a payload for the redis server The rule schema is the following: REDIS KEY - port_device_id.port_mac_address/sg REDIS VALUE - A JSON dump of the following: port_mac_address must be lower-cased and stripped of non-alphanumeric characters {"id": "<arbitrary uuid>", "rules": [ {"ethertype": <hexadecimal integer>, "protocol": <integer>, "port start": <integer>, # optional "port end": <integer>, # optional "icmp type": <integer>, # optional "icmp code": <integer>, # optional "source network": <string>, "destination network": <string>, "action": <string>, "direction": <string>}, ], "security groups ack": <boolean> } Example: {"id": "004c6369-9f3d-4d33-b8f5-9416bf3567dd", "rules": [ {"ethertype": 0x800, "protocol": "tcp", "port start": 1000, "port end": 1999, "source network": "10.10.10.0/24", "destination network": "", "action": "allow", "direction": "ingress"}, ], "security groups ack": "true" } port start/end and icmp type/code are mutually exclusive pairs. """ rules = [] for group in groups: rules.extend(self.serialize_rules(group.rules)) return rules
def serialize_groups(self, groups): """Creates a payload for the redis server The rule schema is the following: REDIS KEY - port_device_id.port_mac_address/sg REDIS VALUE - A JSON dump of the following: port_mac_address must be lower-cased and stripped of non-alphanumeric characters {"id": "<arbitrary uuid>", "rules": [ {"ethertype": <hexadecimal integer>, "protocol": <integer>, "port start": <integer>, # optional "port end": <integer>, # optional "icmp type": <integer>, # optional "icmp code": <integer>, # optional "source network": <string>, "destination network": <string>, "action": <string>, "direction": <string>}, ], "security groups ack": <boolean> } Example: {"id": "004c6369-9f3d-4d33-b8f5-9416bf3567dd", "rules": [ {"ethertype": 0x800, "protocol": "tcp", "port start": 1000, "port end": 1999, "source network": "10.10.10.0/24", "destination network": "", "action": "allow", "direction": "ingress"}, ], "security groups ack": "true" } port start/end and icmp type/code are mutually exclusive pairs. """ rules = [] for group in groups: rules.extend(self.serialize_rules(group.rules)) return rules
[ "Creates", "a", "payload", "for", "the", "redis", "server" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/cache/security_groups_client.py#L95-L142
[ "def", "serialize_groups", "(", "self", ",", "groups", ")", ":", "rules", "=", "[", "]", "for", "group", "in", "groups", ":", "rules", ".", "extend", "(", "self", ".", "serialize_rules", "(", "group", ".", "rules", ")", ")", "return", "rules" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
SecurityGroupsClient.apply_rules
Writes a series of security group rules to a redis server.
quark/cache/security_groups_client.py
def apply_rules(self, device_id, mac_address, rules): """Writes a series of security group rules to a redis server.""" LOG.info("Applying security group rules for device %s with MAC %s" % (device_id, mac_address)) rule_dict = {SECURITY_GROUP_RULE_KEY: rules} redis_key = self.vif_key(device_id, mac_address) # TODO(mdietz): Pipeline these. Requires some rewriting self.set_field(redis_key, SECURITY_GROUP_HASH_ATTR, rule_dict) self.set_field_raw(redis_key, SECURITY_GROUP_ACK, False)
def apply_rules(self, device_id, mac_address, rules): """Writes a series of security group rules to a redis server.""" LOG.info("Applying security group rules for device %s with MAC %s" % (device_id, mac_address)) rule_dict = {SECURITY_GROUP_RULE_KEY: rules} redis_key = self.vif_key(device_id, mac_address) # TODO(mdietz): Pipeline these. Requires some rewriting self.set_field(redis_key, SECURITY_GROUP_HASH_ATTR, rule_dict) self.set_field_raw(redis_key, SECURITY_GROUP_ACK, False)
[ "Writes", "a", "series", "of", "security", "group", "rules", "to", "a", "redis", "server", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/cache/security_groups_client.py#L150-L159
[ "def", "apply_rules", "(", "self", ",", "device_id", ",", "mac_address", ",", "rules", ")", ":", "LOG", ".", "info", "(", "\"Applying security group rules for device %s with MAC %s\"", "%", "(", "device_id", ",", "mac_address", ")", ")", "rule_dict", "=", "{", "SECURITY_GROUP_RULE_KEY", ":", "rules", "}", "redis_key", "=", "self", ".", "vif_key", "(", "device_id", ",", "mac_address", ")", "# TODO(mdietz): Pipeline these. Requires some rewriting", "self", ".", "set_field", "(", "redis_key", ",", "SECURITY_GROUP_HASH_ATTR", ",", "rule_dict", ")", "self", ".", "set_field_raw", "(", "redis_key", ",", "SECURITY_GROUP_ACK", ",", "False", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
SecurityGroupsClient.get_security_group_states
Gets security groups for interfaces from Redis Returns a dictionary of xapi.VIFs with values of the current acknowledged status in Redis. States not explicitly handled: * ack key, no rules - This is the same as just tagging the VIF, the instance will be inaccessible * rules key, no ack - Nothing will happen, the VIF will not be tagged.
quark/cache/security_groups_client.py
def get_security_group_states(self, interfaces): """Gets security groups for interfaces from Redis Returns a dictionary of xapi.VIFs with values of the current acknowledged status in Redis. States not explicitly handled: * ack key, no rules - This is the same as just tagging the VIF, the instance will be inaccessible * rules key, no ack - Nothing will happen, the VIF will not be tagged. """ LOG.debug("Getting security groups from Redis for {0}".format( interfaces)) interfaces = tuple(interfaces) vif_keys = [self.vif_key(vif.device_id, vif.mac_address) for vif in interfaces] # Retrieve all fields associated with this key, which should be # 'security groups ack' and 'security group rules'. sec_grp_all = self.get_fields_all(vif_keys) ret = {} # Associate the vif with the fields in a dictionary for vif, group in zip(interfaces, sec_grp_all): if group: ret[vif] = {SECURITY_GROUP_ACK: None, SECURITY_GROUP_HASH_ATTR: []} temp_ack = group[SECURITY_GROUP_ACK].lower() temp_rules = group[SECURITY_GROUP_HASH_ATTR] if temp_rules: temp_rules = json.loads(temp_rules) ret[vif][SECURITY_GROUP_HASH_ATTR] = temp_rules["rules"] if "true" in temp_ack: ret[vif][SECURITY_GROUP_ACK] = True elif "false" in temp_ack: ret[vif][SECURITY_GROUP_ACK] = False else: ret.pop(vif, None) LOG.debug("Skipping bad ack value %s" % temp_ack) return ret
def get_security_group_states(self, interfaces): """Gets security groups for interfaces from Redis Returns a dictionary of xapi.VIFs with values of the current acknowledged status in Redis. States not explicitly handled: * ack key, no rules - This is the same as just tagging the VIF, the instance will be inaccessible * rules key, no ack - Nothing will happen, the VIF will not be tagged. """ LOG.debug("Getting security groups from Redis for {0}".format( interfaces)) interfaces = tuple(interfaces) vif_keys = [self.vif_key(vif.device_id, vif.mac_address) for vif in interfaces] # Retrieve all fields associated with this key, which should be # 'security groups ack' and 'security group rules'. sec_grp_all = self.get_fields_all(vif_keys) ret = {} # Associate the vif with the fields in a dictionary for vif, group in zip(interfaces, sec_grp_all): if group: ret[vif] = {SECURITY_GROUP_ACK: None, SECURITY_GROUP_HASH_ATTR: []} temp_ack = group[SECURITY_GROUP_ACK].lower() temp_rules = group[SECURITY_GROUP_HASH_ATTR] if temp_rules: temp_rules = json.loads(temp_rules) ret[vif][SECURITY_GROUP_HASH_ATTR] = temp_rules["rules"] if "true" in temp_ack: ret[vif][SECURITY_GROUP_ACK] = True elif "false" in temp_ack: ret[vif][SECURITY_GROUP_ACK] = False else: ret.pop(vif, None) LOG.debug("Skipping bad ack value %s" % temp_ack) return ret
[ "Gets", "security", "groups", "for", "interfaces", "from", "Redis" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/cache/security_groups_client.py#L173-L214
[ "def", "get_security_group_states", "(", "self", ",", "interfaces", ")", ":", "LOG", ".", "debug", "(", "\"Getting security groups from Redis for {0}\"", ".", "format", "(", "interfaces", ")", ")", "interfaces", "=", "tuple", "(", "interfaces", ")", "vif_keys", "=", "[", "self", ".", "vif_key", "(", "vif", ".", "device_id", ",", "vif", ".", "mac_address", ")", "for", "vif", "in", "interfaces", "]", "# Retrieve all fields associated with this key, which should be", "# 'security groups ack' and 'security group rules'.", "sec_grp_all", "=", "self", ".", "get_fields_all", "(", "vif_keys", ")", "ret", "=", "{", "}", "# Associate the vif with the fields in a dictionary", "for", "vif", ",", "group", "in", "zip", "(", "interfaces", ",", "sec_grp_all", ")", ":", "if", "group", ":", "ret", "[", "vif", "]", "=", "{", "SECURITY_GROUP_ACK", ":", "None", ",", "SECURITY_GROUP_HASH_ATTR", ":", "[", "]", "}", "temp_ack", "=", "group", "[", "SECURITY_GROUP_ACK", "]", ".", "lower", "(", ")", "temp_rules", "=", "group", "[", "SECURITY_GROUP_HASH_ATTR", "]", "if", "temp_rules", ":", "temp_rules", "=", "json", ".", "loads", "(", "temp_rules", ")", "ret", "[", "vif", "]", "[", "SECURITY_GROUP_HASH_ATTR", "]", "=", "temp_rules", "[", "\"rules\"", "]", "if", "\"true\"", "in", "temp_ack", ":", "ret", "[", "vif", "]", "[", "SECURITY_GROUP_ACK", "]", "=", "True", "elif", "\"false\"", "in", "temp_ack", ":", "ret", "[", "vif", "]", "[", "SECURITY_GROUP_ACK", "]", "=", "False", "else", ":", "ret", ".", "pop", "(", "vif", ",", "None", ")", "LOG", ".", "debug", "(", "\"Skipping bad ack value %s\"", "%", "temp_ack", ")", "return", "ret" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
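Per VIF, the branch above boils down to JSON-decoding the stored rules and string-matching the ack field. A toy reduction with assumed Redis field names (the real names are the SECURITY_GROUP_ACK and SECURITY_GROUP_HASH_ATTR module constants, whose string values are not shown in these records):

    import json

    # One hash as it might come back from Redis; field names assumed.
    group = {"security groups ack": "False",
             "security group rules": json.dumps({"rules": []})}

    entry = {"ack": None, "rules": []}
    temp_rules = group["security group rules"]
    if temp_rules:
        entry["rules"] = json.loads(temp_rules)["rules"]
    temp_ack = group["security groups ack"].lower()
    if "true" in temp_ack:
        entry["ack"] = True
    elif "false" in temp_ack:
        entry["ack"] = False  # any other value drops the VIF from the result
    print(entry)  # {'ack': False, 'rules': []}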
valid
SecurityGroupsClient.update_group_states_for_vifs
Updates security groups by setting the ack field
quark/cache/security_groups_client.py
def update_group_states_for_vifs(self, vifs, ack): """Updates security groups by setting the ack field""" vif_keys = [self.vif_key(vif.device_id, vif.mac_address) for vif in vifs] self.set_fields(vif_keys, SECURITY_GROUP_ACK, ack)
def update_group_states_for_vifs(self, vifs, ack): """Updates security groups by setting the ack field""" vif_keys = [self.vif_key(vif.device_id, vif.mac_address) for vif in vifs] self.set_fields(vif_keys, SECURITY_GROUP_ACK, ack)
[ "Updates", "security", "groups", "by", "setting", "the", "ack", "field" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/cache/security_groups_client.py#L217-L221
[ "def", "update_group_states_for_vifs", "(", "self", ",", "vifs", ",", "ack", ")", ":", "vif_keys", "=", "[", "self", ".", "vif_key", "(", "vif", ".", "device_id", ",", "vif", ".", "mac_address", ")", "for", "vif", "in", "vifs", "]", "self", ".", "set_fields", "(", "vif_keys", ",", "SECURITY_GROUP_ACK", ",", "ack", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
run_migrations_offline
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
quark/db/migration/alembic/env.py
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ context.configure(url=neutron_config.database.connection) with context.begin_transaction(): context.run_migrations()
def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ context.configure(url=neutron_config.database.connection) with context.begin_transaction(): context.run_migrations()
[ "Run", "migrations", "in", "offline", "mode", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/db/migration/alembic/env.py#L24-L39
[ "def", "run_migrations_offline", "(", ")", ":", "context", ".", "configure", "(", "url", "=", "neutron_config", ".", "database", ".", "connection", ")", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
run_migrations_online
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
quark/db/migration/alembic/env.py
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = create_engine( neutron_config.database.connection, poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata) try: with context.begin_transaction(): context.run_migrations() finally: connection.close()
def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = create_engine( neutron_config.database.connection, poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata) try: with context.begin_transaction(): context.run_migrations() finally: connection.close()
[ "Run", "migrations", "in", "online", "mode", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/db/migration/alembic/env.py#L42-L62
[ "def", "run_migrations_online", "(", ")", ":", "engine", "=", "create_engine", "(", "neutron_config", ".", "database", ".", "connection", ",", "poolclass", "=", "pool", ".", "NullPool", ")", "connection", "=", "engine", ".", "connect", "(", ")", "context", ".", "configure", "(", "connection", "=", "connection", ",", "target_metadata", "=", "target_metadata", ")", "try", ":", "with", "context", ".", "begin_transaction", "(", ")", ":", "context", ".", "run_migrations", "(", ")", "finally", ":", "connection", ".", "close", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
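Both functions follow the stock Alembic env.py split between offline and online modes. The usual tail of such a file, which selects between them, is not part of these records; the conventional form is:

    # Standard Alembic env.py dispatch (assumed tail of this file, shown
    # for orientation; not extracted from the source records).
    if context.is_offline_mode():
        run_migrations_offline()
    else:
        run_migrations_online()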
valid
do_notify
Generic Notifier. Parameters: - `context`: session context - `event_type`: the event type to report, i.e. ip.usage - `payload`: dict containing the payload to send
quark/billing.py
def do_notify(context, event_type, payload): """Generic Notifier. Parameters: - `context`: session context - `event_type`: the event type to report, i.e. ip.usage - `payload`: dict containing the payload to send """ LOG.debug('IP_BILL: notifying {}'.format(payload)) notifier = n_rpc.get_notifier('network') notifier.info(context, event_type, payload)
def do_notify(context, event_type, payload): """Generic Notifier. Parameters: - `context`: session context - `event_type`: the event type to report, i.e. ip.usage - `payload`: dict containing the payload to send """ LOG.debug('IP_BILL: notifying {}'.format(payload)) notifier = n_rpc.get_notifier('network') notifier.info(context, event_type, payload)
[ "Generic", "Notifier", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/billing.py#L82-L93
[ "def", "do_notify", "(", "context", ",", "event_type", ",", "payload", ")", ":", "LOG", ".", "debug", "(", "'IP_BILL: notifying {}'", ".", "format", "(", "payload", ")", ")", "notifier", "=", "n_rpc", ".", "get_notifier", "(", "'network'", ")", "notifier", ".", "info", "(", "context", ",", "event_type", ",", "payload", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
notify
Method to send notifications. We must send USAGE when a public IPv4 address is deallocated or a FLIP is associated. Parameters: - `context`: the context for notifier - `event_type`: the event type for IP allocate, deallocate, associate, disassociate - `ipaddress`: the ipaddress object to notify about Returns: nothing Notes: this may live in the billing module
quark/billing.py
def notify(context, event_type, ipaddress, send_usage=False, *args, **kwargs): """Method to send notifications. We must send USAGE when a public IPv4 address is deallocated or a FLIP is associated. Parameters: - `context`: the context for notifier - `event_type`: the event type for IP allocate, deallocate, associate, disassociate - `ipaddress`: the ipaddress object to notify about Returns: nothing Notes: this may live in the billing module """ if (event_type == IP_ADD and not CONF.QUARK.notify_ip_add) or \ (event_type == IP_DEL and not CONF.QUARK.notify_ip_delete) or \ (event_type == IP_ASSOC and not CONF.QUARK.notify_flip_associate) or \ (event_type == IP_DISASSOC and not CONF.QUARK.notify_flip_disassociate)\ or (event_type == IP_EXISTS and not CONF.QUARK.notify_ip_exists): LOG.debug('IP_BILL: notification {} is disabled by config'. format(event_type)) return # Do not send notifications when we are undoing due to an error if 'rollback' in kwargs and kwargs['rollback']: LOG.debug('IP_BILL: not sending notification because we are in undo') return # ip.add needs the allocated_at time. # All other events need the current time. ts = ipaddress.allocated_at if event_type == IP_ADD else _now() payload = build_payload(ipaddress, event_type, event_time=ts) # Send the notification with the payload do_notify(context, event_type, payload) # When we deallocate an IP or associate a FLIP we must send # a usage message to billing. # In other words when we supply end_time we must send USAGE to billing # immediately. # Our billing period is 24 hrs. If the address was allocated after midnight # send the start_time as is. If the address was allocated yesterday, then # send midnight as the start_time. # Note: if allocated_at is empty we assume today's midnight. if send_usage: if ipaddress.allocated_at is not None and \ ipaddress.allocated_at >= _midnight_today(): start_time = ipaddress.allocated_at else: start_time = _midnight_today() payload = build_payload(ipaddress, IP_EXISTS, start_time=start_time, end_time=ts) do_notify(context, IP_EXISTS, payload)
def notify(context, event_type, ipaddress, send_usage=False, *args, **kwargs): """Method to send notifications. We must send USAGE when a public IPv4 address is deallocated or a FLIP is associated. Parameters: - `context`: the context for notifier - `event_type`: the event type for IP allocate, deallocate, associate, disassociate - `ipaddress`: the ipaddress object to notify about Returns: nothing Notes: this may live in the billing module """ if (event_type == IP_ADD and not CONF.QUARK.notify_ip_add) or \ (event_type == IP_DEL and not CONF.QUARK.notify_ip_delete) or \ (event_type == IP_ASSOC and not CONF.QUARK.notify_flip_associate) or \ (event_type == IP_DISASSOC and not CONF.QUARK.notify_flip_disassociate)\ or (event_type == IP_EXISTS and not CONF.QUARK.notify_ip_exists): LOG.debug('IP_BILL: notification {} is disabled by config'. format(event_type)) return # Do not send notifications when we are undoing due to an error if 'rollback' in kwargs and kwargs['rollback']: LOG.debug('IP_BILL: not sending notification because we are in undo') return # ip.add needs the allocated_at time. # All other events need the current time. ts = ipaddress.allocated_at if event_type == IP_ADD else _now() payload = build_payload(ipaddress, event_type, event_time=ts) # Send the notification with the payload do_notify(context, event_type, payload) # When we deallocate an IP or associate a FLIP we must send # a usage message to billing. # In other words when we supply end_time we must send USAGE to billing # immediately. # Our billing period is 24 hrs. If the address was allocated after midnight # send the start_time as is. If the address was allocated yesterday, then # send midnight as the start_time. # Note: if allocated_at is empty we assume today's midnight. if send_usage: if ipaddress.allocated_at is not None and \ ipaddress.allocated_at >= _midnight_today(): start_time = ipaddress.allocated_at else: start_time = _midnight_today() payload = build_payload(ipaddress, IP_EXISTS, start_time=start_time, end_time=ts) do_notify(context, IP_EXISTS, payload)
[ "Method", "to", "send", "notifications", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/billing.py#L97-L151
[ "def", "notify", "(", "context", ",", "event_type", ",", "ipaddress", ",", "send_usage", "=", "False", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "(", "event_type", "==", "IP_ADD", "and", "not", "CONF", ".", "QUARK", ".", "notify_ip_add", ")", "or", "(", "event_type", "==", "IP_DEL", "and", "not", "CONF", ".", "QUARK", ".", "notify_ip_delete", ")", "or", "(", "event_type", "==", "IP_ASSOC", "and", "not", "CONF", ".", "QUARK", ".", "notify_flip_associate", ")", "or", "(", "event_type", "==", "IP_DISASSOC", "and", "not", "CONF", ".", "QUARK", ".", "notify_flip_disassociate", ")", "or", "(", "event_type", "==", "IP_EXISTS", "and", "not", "CONF", ".", "QUARK", ".", "notify_ip_exists", ")", ":", "LOG", ".", "debug", "(", "'IP_BILL: notification {} is disabled by config'", ".", "format", "(", "event_type", ")", ")", "return", "# Do not send notifications when we are undoing due to an error", "if", "'rollback'", "in", "kwargs", "and", "kwargs", "[", "'rollback'", "]", ":", "LOG", ".", "debug", "(", "'IP_BILL: not sending notification because we are in undo'", ")", "return", "# ip.add needs the allocated_at time.", "# All other events need the current time.", "ts", "=", "ipaddress", ".", "allocated_at", "if", "event_type", "==", "IP_ADD", "else", "_now", "(", ")", "payload", "=", "build_payload", "(", "ipaddress", ",", "event_type", ",", "event_time", "=", "ts", ")", "# Send the notification with the payload", "do_notify", "(", "context", ",", "event_type", ",", "payload", ")", "# When we deallocate an IP or associate a FLIP we must send", "# a usage message to billing.", "# In other words when we supply end_time we must send USAGE to billing", "# immediately.", "# Our billing period is 24 hrs. If the address was allocated after midnight", "# send the start_time as as. If the address was allocated yesterday, then", "# send midnight as the start_time.", "# Note: if allocated_at is empty we assume today's midnight.", "if", "send_usage", ":", "if", "ipaddress", ".", "allocated_at", "is", "not", "None", "and", "ipaddress", ".", "allocated_at", ">=", "_midnight_today", "(", ")", ":", "start_time", "=", "ipaddress", ".", "allocated_at", "else", ":", "start_time", "=", "_midnight_today", "(", ")", "payload", "=", "build_payload", "(", "ipaddress", ",", "IP_EXISTS", ",", "start_time", "=", "start_time", ",", "end_time", "=", "ts", ")", "do_notify", "(", "context", ",", "IP_EXISTS", ",", "payload", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
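The send_usage branch above picks the billing window start with a single comparison against midnight. The same decision in isolation (pick_start_time and the literal midnight value are stand-ins for the model attribute and _midnight_today):

    import datetime

    # Mirrors the start_time selection in notify().
    def pick_start_time(allocated_at, midnight):
        if allocated_at is not None and allocated_at >= midnight:
            return allocated_at  # allocated today: bill from allocation time
        return midnight          # allocated earlier or unset: bill from midnight

    midnight = datetime.datetime(2016, 5, 20, 0, 0)
    print(pick_start_time(datetime.datetime(2016, 5, 20, 7, 30), midnight))
    # 2016-05-20 07:30:00
    print(pick_start_time(datetime.datetime(2016, 5, 19, 7, 30), midnight))
    # 2016-05-20 00:00:00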
valid
build_payload
Method builds a payload out of the passed arguments. Parameters: `ipaddress`: the models.IPAddress object `event_type`: USAGE,CREATE,DELETE,SUSPEND,or UNSUSPEND `start_time`: startTime for cloudfeeds `end_time`: endTime for cloudfeeds Returns a dictionary suitable to notify billing. Message types mapping to cloud feeds for references: ip.exists - USAGE ip.add - CREATE ip.delete - DELETE ip.associate - UP ip.disassociate - DOWN Refer to: http://rax.io/cf-api for more details.
quark/billing.py
def build_payload(ipaddress, event_type, event_time=None, start_time=None, end_time=None): """Method builds a payload out of the passed arguments. Parameters: `ipaddress`: the models.IPAddress object `event_type`: USAGE,CREATE,DELETE,SUSPEND,or UNSUSPEND `start_time`: startTime for cloudfeeds `end_time`: endTime for cloudfeeds Returns a dictionary suitable to notify billing. Message types mapping to cloud feeds for references: ip.exists - USAGE ip.add - CREATE ip.delete - DELETE ip.associate - UP ip.disassociate - DOWN Refer to: http://rax.io/cf-api for more details. """ # This is the common part of all message types payload = { 'event_type': unicode(event_type), 'tenant_id': unicode(ipaddress.used_by_tenant_id), 'ip_address': unicode(ipaddress.address_readable), 'ip_version': int(ipaddress.version), 'ip_type': unicode(ipaddress.address_type), 'id': unicode(ipaddress.id) } # Depending on the message type add the appropriate fields if event_type == IP_EXISTS: if start_time is None or end_time is None: raise ValueError('IP_BILL: {} start_time/end_time cannot be empty' .format(event_type)) payload.update({ 'startTime': unicode(convert_timestamp(start_time)), 'endTime': unicode(convert_timestamp(end_time)) }) elif event_type in [IP_ADD, IP_DEL, IP_ASSOC, IP_DISASSOC]: if event_time is None: raise ValueError('IP_BILL: {}: event_time cannot be NULL' .format(event_type)) payload.update({ 'eventTime': unicode(convert_timestamp(event_time)), 'subnet_id': unicode(ipaddress.subnet_id), 'network_id': unicode(ipaddress.network_id), 'public': True if ipaddress.network_id == PUBLIC_NETWORK_ID else False, }) else: raise ValueError('IP_BILL: bad event_type: {}'.format(event_type)) return payload
def build_payload(ipaddress, event_type, event_time=None, start_time=None, end_time=None): """Method builds a payload out of the passed arguments. Parameters: `ipaddress`: the models.IPAddress object `event_type`: USAGE,CREATE,DELETE,SUSPEND,or UNSUSPEND `start_time`: startTime for cloudfeeds `end_time`: endTime for cloudfeeds Returns a dictionary suitable to notify billing. Message types mapping to cloud feeds for references: ip.exists - USAGE ip.add - CREATE ip.delete - DELETE ip.associate - UP ip.disassociate - DOWN Refer to: http://rax.io/cf-api for more details. """ # This is the common part of all message types payload = { 'event_type': unicode(event_type), 'tenant_id': unicode(ipaddress.used_by_tenant_id), 'ip_address': unicode(ipaddress.address_readable), 'ip_version': int(ipaddress.version), 'ip_type': unicode(ipaddress.address_type), 'id': unicode(ipaddress.id) } # Depending on the message type add the appropriate fields if event_type == IP_EXISTS: if start_time is None or end_time is None: raise ValueError('IP_BILL: {} start_time/end_time cannot be empty' .format(event_type)) payload.update({ 'startTime': unicode(convert_timestamp(start_time)), 'endTime': unicode(convert_timestamp(end_time)) }) elif event_type in [IP_ADD, IP_DEL, IP_ASSOC, IP_DISASSOC]: if event_time is None: raise ValueError('IP_BILL: {}: event_time cannot be NULL' .format(event_type)) payload.update({ 'eventTime': unicode(convert_timestamp(event_time)), 'subnet_id': unicode(ipaddress.subnet_id), 'network_id': unicode(ipaddress.network_id), 'public': True if ipaddress.network_id == PUBLIC_NETWORK_ID else False, }) else: raise ValueError('IP_BILL: bad event_type: {}'.format(event_type)) return payload
[ "Method", "builds", "a", "payload", "out", "of", "the", "passed", "arguments", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/billing.py#L154-L208
[ "def", "build_payload", "(", "ipaddress", ",", "event_type", ",", "event_time", "=", "None", ",", "start_time", "=", "None", ",", "end_time", "=", "None", ")", ":", "# This is the common part of all message types", "payload", "=", "{", "'event_type'", ":", "unicode", "(", "event_type", ")", ",", "'tenant_id'", ":", "unicode", "(", "ipaddress", ".", "used_by_tenant_id", ")", ",", "'ip_address'", ":", "unicode", "(", "ipaddress", ".", "address_readable", ")", ",", "'ip_version'", ":", "int", "(", "ipaddress", ".", "version", ")", ",", "'ip_type'", ":", "unicode", "(", "ipaddress", ".", "address_type", ")", ",", "'id'", ":", "unicode", "(", "ipaddress", ".", "id", ")", "}", "# Depending on the message type add the appropriate fields", "if", "event_type", "==", "IP_EXISTS", ":", "if", "start_time", "is", "None", "or", "end_time", "is", "None", ":", "raise", "ValueError", "(", "'IP_BILL: {} start_time/end_time cannot be empty'", ".", "format", "(", "event_type", ")", ")", "payload", ".", "update", "(", "{", "'startTime'", ":", "unicode", "(", "convert_timestamp", "(", "start_time", ")", ")", ",", "'endTime'", ":", "unicode", "(", "convert_timestamp", "(", "end_time", ")", ")", "}", ")", "elif", "event_type", "in", "[", "IP_ADD", ",", "IP_DEL", ",", "IP_ASSOC", ",", "IP_DISASSOC", "]", ":", "if", "event_time", "is", "None", ":", "raise", "ValueError", "(", "'IP_BILL: {}: event_time cannot be NULL'", ".", "format", "(", "event_type", ")", ")", "payload", ".", "update", "(", "{", "'eventTime'", ":", "unicode", "(", "convert_timestamp", "(", "event_time", ")", ")", ",", "'subnet_id'", ":", "unicode", "(", "ipaddress", ".", "subnet_id", ")", ",", "'network_id'", ":", "unicode", "(", "ipaddress", ".", "network_id", ")", ",", "'public'", ":", "True", "if", "ipaddress", ".", "network_id", "==", "PUBLIC_NETWORK_ID", "else", "False", ",", "}", ")", "else", ":", "raise", "ValueError", "(", "'IP_BILL: bad event_type: {}'", ".", "format", "(", "event_type", ")", ")", "return", "payload" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
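Putting the two branches above together, an ip.add-style call yields a flat dict like the following. Every concrete value is invented for illustration, and the literal event string is assumed (IP_ADD is a module constant not shown in this record):

    # Illustrative return value of build_payload for an allocation event.
    payload = {
        "event_type": u"ip.add",          # assumed value of IP_ADD
        "tenant_id": u"<tenant uuid>",
        "ip_address": u"10.0.0.5",
        "ip_version": 4,
        "ip_type": u"fixed",
        "id": u"<ip uuid>",
        "eventTime": u"<converted timestamp>",
        "subnet_id": u"<subnet uuid>",
        "network_id": u"<network uuid>",
        "public": False,                  # True only for PUBLIC_NETWORK_ID
    }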
valid
build_full_day_ips
Method to build an IP list for case 1 where the IP was allocated before the period start and is still allocated after the period end. This method only looks at public IPv4 addresses.
quark/billing.py
def build_full_day_ips(query, period_start, period_end): """Method to build an IP list for case 1 where the IP was allocated before the period start and is still allocated after the period end. This method only looks at public IPv4 addresses. """ # Filter out only IPv4 that have not been deallocated ip_list = query.\ filter(models.IPAddress.version == 4L).\ filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).\ filter(models.IPAddress.used_by_tenant_id is not None).\ filter(models.IPAddress.allocated_at != null()).\ filter(models.IPAddress.allocated_at < period_start).\ filter(or_(models.IPAddress._deallocated is False, models.IPAddress.deallocated_at == null(), models.IPAddress.deallocated_at >= period_end)).all() return ip_list
def build_full_day_ips(query, period_start, period_end): """Method to build an IP list for case 1 where the IP was allocated before the period start and is still allocated after the period end. This method only looks at public IPv4 addresses. """ # Filter out only IPv4 that have not been deallocated ip_list = query.\ filter(models.IPAddress.version == 4L).\ filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).\ filter(models.IPAddress.used_by_tenant_id is not None).\ filter(models.IPAddress.allocated_at != null()).\ filter(models.IPAddress.allocated_at < period_start).\ filter(or_(models.IPAddress._deallocated is False, models.IPAddress.deallocated_at == null(), models.IPAddress.deallocated_at >= period_end)).all() return ip_list
[ "Method", "to", "build", "an", "IP", "list", "for", "case", "1" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/billing.py#L211-L229
[ "def", "build_full_day_ips", "(", "query", ",", "period_start", ",", "period_end", ")", ":", "# Filter out only IPv4 that have not been deallocated", "ip_list", "=", "query", ".", "filter", "(", "models", ".", "IPAddress", ".", "version", "==", "4L", ")", ".", "filter", "(", "models", ".", "IPAddress", ".", "network_id", "==", "PUBLIC_NETWORK_ID", ")", ".", "filter", "(", "models", ".", "IPAddress", ".", "used_by_tenant_id", "is", "not", "None", ")", ".", "filter", "(", "models", ".", "IPAddress", ".", "allocated_at", "!=", "null", "(", ")", ")", ".", "filter", "(", "models", ".", "IPAddress", ".", "allocated_at", "<", "period_start", ")", ".", "filter", "(", "or_", "(", "models", ".", "IPAddress", ".", "_deallocated", "is", "False", ",", "models", ".", "IPAddress", ".", "deallocated_at", "==", "null", "(", ")", ",", "models", ".", "IPAddress", ".", "deallocated_at", ">=", "period_end", ")", ")", ".", "all", "(", ")", "return", "ip_list" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
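Note that two of the filters above use Python identity operators on column attributes: `models.IPAddress.used_by_tenant_id is not None` and `models.IPAddress._deallocated is False` evaluate to constant Python booleans rather than SQL expressions, so they do not restrict the query the way the neighboring `!= null()` comparisons do. A hedged sketch of the likely intent, using SQLAlchemy's operator methods (same models and query assumed; this is a corrective sketch, not the source code):

    # isnot() and is_() build SQL "IS NOT" / "IS" expressions instead of
    # being evaluated by the Python interpreter.
    ip_list = query.\
        filter(models.IPAddress.version == 4L).\
        filter(models.IPAddress.network_id == PUBLIC_NETWORK_ID).\
        filter(models.IPAddress.used_by_tenant_id.isnot(None)).\
        filter(models.IPAddress.allocated_at != null()).\
        filter(models.IPAddress.allocated_at < period_start).\
        filter(or_(models.IPAddress._deallocated.is_(False),
                   models.IPAddress.deallocated_at == null(),
                   models.IPAddress.deallocated_at >= period_end)).all()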
valid
calc_periods
Returns a tuple of start_period and end_period. Assumes that the period is 24-hrs. Parameters: - `hour`: the hour from 0 to 23 when the period ends - `minute`: the minute from 0 to 59 when the period ends This method will calculate the end of the period as the closest hour/minute going backwards. It will also calculate the start of the period as the passed hour/minute but 24 hrs ago. Example, if we pass 0, 0 - we will get the events from 0:00 midnight of yesterday until just before today's midnight. If we pass 2, 0 - we will get the start time as 2am of the previous morning till 2am of today's morning. By default it's midnight.
quark/billing.py
def calc_periods(hour=0, minute=0): """Returns a tuple of start_period and end_period. Assumes that the period is 24-hrs. Parameters: - `hour`: the hour from 0 to 23 when the period ends - `minute`: the minute from 0 to 59 when the period ends This method will calculate the end of the period as the closest hour/minute going backwards. It will also calculate the start of the period as the passed hour/minute but 24 hrs ago. Example, if we pass 0, 0 - we will get the events from 0:00 midnight of yesterday until just before today's midnight. If we pass 2, 0 - we will get the start time as 2am of the previous morning till 2am of today's morning. By default it's midnight. """ # Calculate the time intervals in a usable form period_end = datetime.datetime.utcnow().replace(hour=hour, minute=minute, second=0, microsecond=0) period_start = period_end - datetime.timedelta(days=1) # period end should be slightly before the midnight. # hence, we subtract a second # this will force period_end to store something like: # datetime.datetime(2016, 5, 19, 23, 59, 59) # instead of: # datetime.datetime(2016, 5, 20, 0, 0, 0, 0) period_end -= datetime.timedelta(seconds=1) return (period_start, period_end)
def calc_periods(hour=0, minute=0): """Returns a tuple of start_period and end_period. Assumes that the period is 24-hrs. Parameters: - `hour`: the hour from 0 to 23 when the period ends - `minute`: the minute from 0 to 59 when the period ends This method will calculate the end of the period as the closest hour/minute going backwards. It will also calculate the start of the period as the passed hour/minute but 24 hrs ago. Example, if we pass 0, 0 - we will get the events from 0:00 midnight of yesterday until just before today's midnight. If we pass 2, 0 - we will get the start time as 2am of the previous morning till 2am of today's morning. By default it's midnight. """ # Calculate the time intervals in a usable form period_end = datetime.datetime.utcnow().replace(hour=hour, minute=minute, second=0, microsecond=0) period_start = period_end - datetime.timedelta(days=1) # period end should be slightly before the midnight. # hence, we subtract a second # this will force period_end to store something like: # datetime.datetime(2016, 5, 19, 23, 59, 59) # instead of: # datetime.datetime(2016, 5, 20, 0, 0, 0, 0) period_end -= datetime.timedelta(seconds=1) return (period_start, period_end)
[ "Returns", "a", "tuple", "of", "start_period", "and", "end_period", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/billing.py#L256-L288
[ "def", "calc_periods", "(", "hour", "=", "0", ",", "minute", "=", "0", ")", ":", "# Calculate the time intervals in a usable form", "period_end", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "hour", "=", "hour", ",", "minute", "=", "minute", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "period_start", "=", "period_end", "-", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "# period end should be slightly before the midnight.", "# hence, we subtract a second", "# this will force period_end to store something like:", "# datetime.datetime(2016, 5, 19, 23, 59, 59, 999999)", "# instead of:", "# datetime.datetime(2016, 5, 20, 0, 0, 0, 0)", "period_end", "-=", "datetime", ".", "timedelta", "(", "seconds", "=", "1", ")", "return", "(", "period_start", ",", "period_end", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
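The arithmetic above, traced once. With hour=0, minute=0 and a clock reading of 2016-05-20 14:45 UTC, replace() pins period_end to that day's midnight, the start is one day earlier, and a second is shaved off the end, so the window covers exactly yesterday:

    import datetime

    # Re-run of calc_periods' body with a fixed "now" instead of utcnow().
    now = datetime.datetime(2016, 5, 20, 14, 45, 10)
    period_end = now.replace(hour=0, minute=0, second=0, microsecond=0)
    period_start = period_end - datetime.timedelta(days=1)
    period_end -= datetime.timedelta(seconds=1)
    print(period_start)  # 2016-05-19 00:00:00
    print(period_end)    # 2016-05-19 23:59:59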
valid
_make_job_dict
Creates the view for a job while calculating progress. Since a root job does not have a transaction id (TID) it will return its id as the TID.
quark/plugin_views.py
def _make_job_dict(job): """Creates the view for a job while calculating progress. Since a root job does not have a transaction id (TID) it will return its id as the TID. """ body = {"id": job.get('id'), "action": job.get('action'), "completed": job.get('completed'), "tenant_id": job.get('tenant_id'), "created_at": job.get('created_at'), "transaction_id": job.get('transaction_id'), "parent_id": job.get('parent_id', None)} if not body['transaction_id']: body['transaction_id'] = job.get('id') completed = 0 for sub in job.subtransactions: if sub.get('completed'): completed += 1 pct = 100 if job.get('completed') else 0 if len(job.subtransactions) > 0: pct = float(completed) / len(job.subtransactions) * 100.0 body['transaction_percent'] = int(pct) body['completed_subtransactions'] = completed body['subtransactions'] = len(job.subtransactions) return body
def _make_job_dict(job): """Creates the view for a job while calculating progress. Since a root job does not have a transaction id (TID) it will return its id as the TID. """ body = {"id": job.get('id'), "action": job.get('action'), "completed": job.get('completed'), "tenant_id": job.get('tenant_id'), "created_at": job.get('created_at'), "transaction_id": job.get('transaction_id'), "parent_id": job.get('parent_id', None)} if not body['transaction_id']: body['transaction_id'] = job.get('id') completed = 0 for sub in job.subtransactions: if sub.get('completed'): completed += 1 pct = 100 if job.get('completed') else 0 if len(job.subtransactions) > 0: pct = float(completed) / len(job.subtransactions) * 100.0 body['transaction_percent'] = int(pct) body['completed_subtransactions'] = completed body['subtransactions'] = len(job.subtransactions) return body
[ "Creates", "the", "view", "for", "a", "job", "while", "calculating", "progress", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_views.py#L369-L394
[ "def", "_make_job_dict", "(", "job", ")", ":", "body", "=", "{", "\"id\"", ":", "job", ".", "get", "(", "'id'", ")", ",", "\"action\"", ":", "job", ".", "get", "(", "'action'", ")", ",", "\"completed\"", ":", "job", ".", "get", "(", "'completed'", ")", ",", "\"tenant_id\"", ":", "job", ".", "get", "(", "'tenant_id'", ")", ",", "\"created_at\"", ":", "job", ".", "get", "(", "'created_at'", ")", ",", "\"transaction_id\"", ":", "job", ".", "get", "(", "'transaction_id'", ")", ",", "\"parent_id\"", ":", "job", ".", "get", "(", "'parent_id'", ",", "None", ")", "}", "if", "not", "body", "[", "'transaction_id'", "]", ":", "body", "[", "'transaction_id'", "]", "=", "job", ".", "get", "(", "'id'", ")", "completed", "=", "0", "for", "sub", "in", "job", ".", "subtransactions", ":", "if", "sub", ".", "get", "(", "'completed'", ")", ":", "completed", "+=", "1", "pct", "=", "100", "if", "job", ".", "get", "(", "'completed'", ")", "else", "0", "if", "len", "(", "job", ".", "subtransactions", ")", ">", "0", ":", "pct", "=", "float", "(", "completed", ")", "/", "len", "(", "job", ".", "subtransactions", ")", "*", "100.0", "body", "[", "'transaction_percent'", "]", "=", "int", "(", "pct", ")", "body", "[", "'completed_subtransactions'", "]", "=", "completed", "body", "[", "'subtransactions'", "]", "=", "len", "(", "job", ".", "subtransactions", ")", "return", "body" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
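The progress math above in isolation: a still-running job with three subtransactions, two of them finished, reports 66 percent:

    # Stand-alone rerun of _make_job_dict's percentage computation.
    job_completed = False
    subtransactions = [{"completed": True}, {"completed": True},
                       {"completed": False}]

    completed = sum(1 for sub in subtransactions if sub.get("completed"))
    pct = 100 if job_completed else 0
    if len(subtransactions) > 0:
        pct = float(completed) / len(subtransactions) * 100.0
    print(int(pct))  # 66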
valid
get_mac_address_range
Retrieve a mac_address_range. : param context: neutron api request context : param id: UUID representing the mac_address_range to fetch. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
quark/plugin_modules/mac_address_ranges.py
def get_mac_address_range(context, id, fields=None): """Retrieve a mac_address_range. : param context: neutron api request context : param id: UUID representing the mac_address_range to fetch. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_mac_address_range %s for tenant %s fields %s" % (id, context.tenant_id, fields)) if not context.is_admin: raise n_exc.NotAuthorized() mac_address_range = db_api.mac_address_range_find( context, id=id, scope=db_api.ONE) if not mac_address_range: raise q_exc.MacAddressRangeNotFound( mac_address_range_id=id) return v._make_mac_range_dict(mac_address_range)
def get_mac_address_range(context, id, fields=None): """Retrieve a mac_address_range. : param context: neutron api request context : param id: UUID representing the mac_address_range to fetch. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_mac_address_range %s for tenant %s fields %s" % (id, context.tenant_id, fields)) if not context.is_admin: raise n_exc.NotAuthorized() mac_address_range = db_api.mac_address_range_find( context, id=id, scope=db_api.ONE) if not mac_address_range: raise q_exc.MacAddressRangeNotFound( mac_address_range_id=id) return v._make_mac_range_dict(mac_address_range)
[ "Retrieve", "a", "mac_address_range", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/mac_address_ranges.py#L54-L76
[ "def", "get_mac_address_range", "(", "context", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_mac_address_range %s for tenant %s fields %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "fields", ")", ")", "if", "not", "context", ".", "is_admin", ":", "raise", "n_exc", ".", "NotAuthorized", "(", ")", "mac_address_range", "=", "db_api", ".", "mac_address_range_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "mac_address_range", ":", "raise", "q_exc", ".", "MacAddressRangeNotFound", "(", "mac_address_range_id", "=", "id", ")", "return", "v", ".", "_make_mac_range_dict", "(", "mac_address_range", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
delete_mac_address_range
Delete a mac_address_range. : param context: neutron api request context : param id: UUID representing the mac_address_range to delete.
quark/plugin_modules/mac_address_ranges.py
def delete_mac_address_range(context, id): """Delete a mac_address_range. : param context: neutron api request context : param id: UUID representing the mac_address_range to delete. """ LOG.info("delete_mac_address_range %s for tenant %s" % (id, context.tenant_id)) if not context.is_admin: raise n_exc.NotAuthorized() with context.session.begin(): mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE) if not mar: raise q_exc.MacAddressRangeNotFound( mac_address_range_id=id) _delete_mac_address_range(context, mar)
def delete_mac_address_range(context, id): """Delete a mac_address_range. : param context: neutron api request context : param id: UUID representing the mac_address_range to delete. """ LOG.info("delete_mac_address_range %s for tenant %s" % (id, context.tenant_id)) if not context.is_admin: raise n_exc.NotAuthorized() with context.session.begin(): mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE) if not mar: raise q_exc.MacAddressRangeNotFound( mac_address_range_id=id) _delete_mac_address_range(context, mar)
[ "Delete", "a", "mac_address_range", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/mac_address_ranges.py#L120-L136
[ "def", "delete_mac_address_range", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "\"delete_mac_address_range %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "if", "not", "context", ".", "is_admin", ":", "raise", "n_exc", ".", "NotAuthorized", "(", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "mar", "=", "db_api", ".", "mac_address_range_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "mar", ":", "raise", "q_exc", ".", "MacAddressRangeNotFound", "(", "mac_address_range_id", "=", "id", ")", "_delete_mac_address_range", "(", "context", ",", "mar", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Jobs.get_resources
Returns Ext Resources.
quark/api/extensions/jobs.py
def get_resources(cls): """Returns Ext Resources.""" job_controller = JobsController( directory.get_plugin()) resources = [] resources.append(extensions.ResourceExtension( Jobs.get_alias(), job_controller)) return resources
def get_resources(cls): """Returns Ext Resources.""" job_controller = JobsController( directory.get_plugin()) resources = [] resources.append(extensions.ResourceExtension( Jobs.get_alias(), job_controller)) return resources
[ "Returns", "Ext", "Resources", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/api/extensions/jobs.py#L103-L111
[ "def", "get_resources", "(", "cls", ")", ":", "job_controller", "=", "JobsController", "(", "directory", ".", "get_plugin", "(", ")", ")", "resources", "=", "[", "]", "resources", ".", "append", "(", "extensions", ".", "ResourceExtension", "(", "Jobs", ".", "get_alias", "(", ")", ",", "job_controller", ")", ")", "return", "resources" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
downgrade
alexm: i believe this method is never called
quark/db/migration/alembic/versions/5927940a466e_create_shared_ips_columns.py
def downgrade(): """alexm: i believe this method is never called""" with op.batch_alter_table(t2_name) as batch_op: batch_op.drop_column('do_not_use') with op.batch_alter_table(t1_name) as batch_op: batch_op.drop_column('enabled')
def downgrade(): """alexm: i believe this method is never called""" with op.batch_alter_table(t2_name) as batch_op: batch_op.drop_column('do_not_use') with op.batch_alter_table(t1_name) as batch_op: batch_op.drop_column('enabled')
[ "alexm", ":", "i", "believe", "this", "method", "is", "never", "called" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/db/migration/alembic/versions/5927940a466e_create_shared_ips_columns.py#L45-L51
[ "def", "downgrade", "(", ")", ":", "with", "op", ".", "batch_alter_table", "(", "t2_name", ")", "as", "batch_op", ":", "batch_op", ".", "drop_column", "(", "'do_not_use'", ")", "with", "op", ".", "batch_alter_table", "(", "t1_name", ")", "as", "batch_op", ":", "batch_op", ".", "drop_column", "(", "'enabled'", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
delete_segment_allocation_range
Delete a segment_allocation_range. : param context: neutron api request context : param id: UUID representing the segment_allocation_range to delete.
quark/plugin_modules/segment_allocation_ranges.py
def delete_segment_allocation_range(context, sa_id): """Delete a segment_allocation_range. : param context: neutron api request context : param id: UUID representing the segment_allocation_range to delete. """ LOG.info("delete_segment_allocation_range %s for tenant %s" % (sa_id, context.tenant_id)) if not context.is_admin: raise n_exc.NotAuthorized() with context.session.begin(): sa_range = db_api.segment_allocation_range_find( context, id=sa_id, scope=db_api.ONE) if not sa_range: raise q_exc.SegmentAllocationRangeNotFound( segment_allocation_range_id=sa_id) _delete_segment_allocation_range(context, sa_range)
def delete_segment_allocation_range(context, sa_id): """Delete a segment_allocation_range. : param context: neutron api request context : param id: UUID representing the segment_allocation_range to delete. """ LOG.info("delete_segment_allocation_range %s for tenant %s" % (sa_id, context.tenant_id)) if not context.is_admin: raise n_exc.NotAuthorized() with context.session.begin(): sa_range = db_api.segment_allocation_range_find( context, id=sa_id, scope=db_api.ONE) if not sa_range: raise q_exc.SegmentAllocationRangeNotFound( segment_allocation_range_id=sa_id) _delete_segment_allocation_range(context, sa_range)
[ "Delete", "a", "segment_allocation_range", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/segment_allocation_ranges.py#L127-L144
[ "def", "delete_segment_allocation_range", "(", "context", ",", "sa_id", ")", ":", "LOG", ".", "info", "(", "\"delete_segment_allocation_range %s for tenant %s\"", "%", "(", "sa_id", ",", "context", ".", "tenant_id", ")", ")", "if", "not", "context", ".", "is_admin", ":", "raise", "n_exc", ".", "NotAuthorized", "(", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "sa_range", "=", "db_api", ".", "segment_allocation_range_find", "(", "context", ",", "id", "=", "sa_id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "sa_range", ":", "raise", "q_exc", ".", "SegmentAllocationRangeNotFound", "(", "segment_allocation_range_id", "=", "sa_id", ")", "_delete_segment_allocation_range", "(", "context", ",", "sa_range", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
filter_factory
Returns a WSGI filter app for use with paste.deploy.
quark/tools/middleware/resp_async_id.py
def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def wrapper(app): return ResponseAsyncIdAdder(app, conf) return wrapper
def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) def wrapper(app): return ResponseAsyncIdAdder(app, conf) return wrapper
[ "Returns", "a", "WSGI", "filter", "app", "for", "use", "with", "paste", ".", "deploy", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/middleware/resp_async_id.py#L56-L64
[ "def", "filter_factory", "(", "global_conf", ",", "*", "*", "local_conf", ")", ":", "conf", "=", "global_conf", ".", "copy", "(", ")", "conf", ".", "update", "(", "local_conf", ")", "def", "wrapper", "(", "app", ")", ":", "return", "ResponseAsyncIdAdder", "(", "app", ",", "conf", ")", "return", "wrapper" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_used_ips
Returns dictionary with keys segment_id and value used IPs count. Used IP address count is determined by: - allocated IPs - deallocated IPs whose `deallocated_at` is within the `reuse_after` window compared to the present time, excluding IPs that are accounted for in the current IP policy (because IP policy is mutable and deallocated IPs are not checked nor deleted on IP policy creation, thus deallocated IPs that don't fit the current IP policy can exist in the neutron database).
quark/ip_availability.py
def get_used_ips(session, **kwargs): """Returns dictionary with keys segment_id and value used IPs count. Used IP address count is determined by: - allocated IPs - deallocated IPs whose `deallocated_at` is within the `reuse_after` window compared to the present time, excluding IPs that are accounted for in the current IP policy (because IP policy is mutable and deallocated IPs are not checked nor deleted on IP policy creation, thus deallocated IPs that don't fit the current IP policy can exist in the neutron database). """ LOG.debug("Getting used IPs...") with session.begin(): query = session.query( models.Subnet.segment_id, func.count(models.IPAddress.address)) query = query.group_by(models.Subnet.segment_id) query = _filter(query, **kwargs) reuse_window = timeutils.utcnow() - datetime.timedelta( seconds=cfg.CONF.QUARK.ipam_reuse_after) # NOTE(asadoughi): This is an outer join instead of a regular join # to include subnets with zero IP addresses in the database. query = query.outerjoin( models.IPAddress, and_(models.Subnet.id == models.IPAddress.subnet_id, or_(not_(models.IPAddress.lock_id.is_(None)), models.IPAddress._deallocated.is_(None), models.IPAddress._deallocated == 0, models.IPAddress.deallocated_at > reuse_window))) query = query.outerjoin( models.IPPolicyCIDR, and_( models.Subnet.ip_policy_id == models.IPPolicyCIDR.ip_policy_id, models.IPAddress.address >= models.IPPolicyCIDR.first_ip, models.IPAddress.address <= models.IPPolicyCIDR.last_ip)) # NOTE(asadoughi): (address is allocated) OR # (address is deallocated and not inside subnet's IP policy) query = query.filter(or_( models.IPAddress._deallocated.is_(None), models.IPAddress._deallocated == 0, models.IPPolicyCIDR.id.is_(None))) ret = ((segment_id, address_count) for segment_id, address_count in query.all()) return dict(ret)
def get_used_ips(session, **kwargs): """Returns dictionary with keys segment_id and value used IPs count. Used IP address count is determined by: - allocated IPs - deallocated IPs whose `deallocated_at` is within the `reuse_after` window compared to the present time, excluding IPs that are accounted for in the current IP policy (because IP policy is mutable and deallocated IPs are not checked nor deleted on IP policy creation, thus deallocated IPs that don't fit the current IP policy can exist in the neutron database). """ LOG.debug("Getting used IPs...") with session.begin(): query = session.query( models.Subnet.segment_id, func.count(models.IPAddress.address)) query = query.group_by(models.Subnet.segment_id) query = _filter(query, **kwargs) reuse_window = timeutils.utcnow() - datetime.timedelta( seconds=cfg.CONF.QUARK.ipam_reuse_after) # NOTE(asadoughi): This is an outer join instead of a regular join # to include subnets with zero IP addresses in the database. query = query.outerjoin( models.IPAddress, and_(models.Subnet.id == models.IPAddress.subnet_id, or_(not_(models.IPAddress.lock_id.is_(None)), models.IPAddress._deallocated.is_(None), models.IPAddress._deallocated == 0, models.IPAddress.deallocated_at > reuse_window))) query = query.outerjoin( models.IPPolicyCIDR, and_( models.Subnet.ip_policy_id == models.IPPolicyCIDR.ip_policy_id, models.IPAddress.address >= models.IPPolicyCIDR.first_ip, models.IPAddress.address <= models.IPPolicyCIDR.last_ip)) # NOTE(asadoughi): (address is allocated) OR # (address is deallocated and not inside subnet's IP policy) query = query.filter(or_( models.IPAddress._deallocated.is_(None), models.IPAddress._deallocated == 0, models.IPPolicyCIDR.id.is_(None))) ret = ((segment_id, address_count) for segment_id, address_count in query.all()) return dict(ret)
[ "Returns", "dictionary", "with", "keys", "segment_id", "and", "value", "used", "IPs", "count", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/ip_availability.py#L85-L131
[ "def", "get_used_ips", "(", "session", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "debug", "(", "\"Getting used IPs...\"", ")", "with", "session", ".", "begin", "(", ")", ":", "query", "=", "session", ".", "query", "(", "models", ".", "Subnet", ".", "segment_id", ",", "func", ".", "count", "(", "models", ".", "IPAddress", ".", "address", ")", ")", "query", "=", "query", ".", "group_by", "(", "models", ".", "Subnet", ".", "segment_id", ")", "query", "=", "_filter", "(", "query", ",", "*", "*", "kwargs", ")", "reuse_window", "=", "timeutils", ".", "utcnow", "(", ")", "-", "datetime", ".", "timedelta", "(", "seconds", "=", "cfg", ".", "CONF", ".", "QUARK", ".", "ipam_reuse_after", ")", "# NOTE(asadoughi): This is an outer join instead of a regular join", "# to include subnets with zero IP addresses in the database.", "query", "=", "query", ".", "outerjoin", "(", "models", ".", "IPAddress", ",", "and_", "(", "models", ".", "Subnet", ".", "id", "==", "models", ".", "IPAddress", ".", "subnet_id", ",", "or_", "(", "not_", "(", "models", ".", "IPAddress", ".", "lock_id", ".", "is_", "(", "None", ")", ")", ",", "models", ".", "IPAddress", ".", "_deallocated", ".", "is_", "(", "None", ")", ",", "models", ".", "IPAddress", ".", "_deallocated", "==", "0", ",", "models", ".", "IPAddress", ".", "deallocated_at", ">", "reuse_window", ")", ")", ")", "query", "=", "query", ".", "outerjoin", "(", "models", ".", "IPPolicyCIDR", ",", "and_", "(", "models", ".", "Subnet", ".", "ip_policy_id", "==", "models", ".", "IPPolicyCIDR", ".", "ip_policy_id", ",", "models", ".", "IPAddress", ".", "address", ">=", "models", ".", "IPPolicyCIDR", ".", "first_ip", ",", "models", ".", "IPAddress", ".", "address", "<=", "models", ".", "IPPolicyCIDR", ".", "last_ip", ")", ")", "# NOTE(asadoughi): (address is allocated) OR", "# (address is deallocated and not inside subnet's IP policy)", "query", "=", "query", ".", "filter", "(", "or_", "(", "models", ".", "IPAddress", ".", "_deallocated", ".", "is_", "(", "None", ")", ",", "models", ".", "IPAddress", ".", "_deallocated", "==", "0", ",", "models", ".", "IPPolicyCIDR", ".", "id", ".", "is_", "(", "None", ")", ")", ")", "ret", "=", "(", "(", "segment_id", ",", "address_count", ")", "for", "segment_id", ",", "address_count", "in", "query", ".", "all", "(", ")", ")", "return", "dict", "(", "ret", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_unused_ips
Returns dictionary with key segment_id, and value unused IPs count. Unused IP address count is determined by: - adding subnet's cidr's size - subtracting IP policy exclusions on subnet - subtracting used ips per segment
quark/ip_availability.py
def get_unused_ips(session, used_ips_counts, **kwargs): """Returns dictionary with key segment_id, and value unused IPs count. Unused IP address count is determined by: - adding subnet's cidr's size - subtracting IP policy exclusions on subnet - subtracting used ips per segment """ LOG.debug("Getting unused IPs...") with session.begin(): query = session.query( models.Subnet.segment_id, models.Subnet) query = _filter(query, **kwargs) query = query.group_by(models.Subnet.segment_id, models.Subnet.id) ret = defaultdict(int) for segment_id, subnet in query.all(): net_size = netaddr.IPNetwork(subnet._cidr).size ip_policy = subnet["ip_policy"] or {"size": 0} ret[segment_id] += net_size - ip_policy["size"] for segment_id in used_ips_counts: ret[segment_id] -= used_ips_counts[segment_id] return ret
def get_unused_ips(session, used_ips_counts, **kwargs): """Returns dictionary with key segment_id, and value unused IPs count. Unused IP address count is determined by: - adding subnet's cidr's size - subtracting IP policy exclusions on subnet - subtracting used ips per segment """ LOG.debug("Getting unused IPs...") with session.begin(): query = session.query( models.Subnet.segment_id, models.Subnet) query = _filter(query, **kwargs) query = query.group_by(models.Subnet.segment_id, models.Subnet.id) ret = defaultdict(int) for segment_id, subnet in query.all(): net_size = netaddr.IPNetwork(subnet._cidr).size ip_policy = subnet["ip_policy"] or {"size": 0} ret[segment_id] += net_size - ip_policy["size"] for segment_id in used_ips_counts: ret[segment_id] -= used_ips_counts[segment_id] return ret
[ "Returns", "dictionary", "with", "key", "segment_id", "and", "value", "unused", "IPs", "count", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/ip_availability.py#L134-L159
[ "def", "get_unused_ips", "(", "session", ",", "used_ips_counts", ",", "*", "*", "kwargs", ")", ":", "LOG", ".", "debug", "(", "\"Getting unused IPs...\"", ")", "with", "session", ".", "begin", "(", ")", ":", "query", "=", "session", ".", "query", "(", "models", ".", "Subnet", ".", "segment_id", ",", "models", ".", "Subnet", ")", "query", "=", "_filter", "(", "query", ",", "*", "*", "kwargs", ")", "query", "=", "query", ".", "group_by", "(", "models", ".", "Subnet", ".", "segment_id", ",", "models", ".", "Subnet", ".", "id", ")", "ret", "=", "defaultdict", "(", "int", ")", "for", "segment_id", ",", "subnet", "in", "query", ".", "all", "(", ")", ":", "net_size", "=", "netaddr", ".", "IPNetwork", "(", "subnet", ".", "_cidr", ")", ".", "size", "ip_policy", "=", "subnet", "[", "\"ip_policy\"", "]", "or", "{", "\"size\"", ":", "0", "}", "ret", "[", "segment_id", "]", "+=", "net_size", "-", "ip_policy", "[", "\"size\"", "]", "for", "segment_id", "in", "used_ips_counts", ":", "ret", "[", "segment_id", "]", "-=", "used_ips_counts", "[", "segment_id", "]", "return", "ret" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
XapiClient.get_instances
Returns a dict of `VM OpaqueRef` (str) -> `xapi.VM`.
quark/agent/xapi.py
def get_instances(self, session): """Returns a dict of `VM OpaqueRef` (str) -> `xapi.VM`.""" LOG.debug("Getting instances from Xapi") recs = session.xenapi.VM.get_all_records() # NOTE(asadoughi): Copied from xen-networking-scripts/utils.py is_inst = lambda r: (r['power_state'].lower() == 'running' and not r['is_a_template'] and not r['is_control_domain'] and ('nova_uuid' in r['other_config'] or r['name_label'].startswith('instance-'))) instances = dict() for vm_ref, rec in recs.iteritems(): if not is_inst(rec): continue instances[vm_ref] = VM(ref=vm_ref, uuid=rec["other_config"]["nova_uuid"], vifs=rec["VIFs"], dom_id=rec["domid"]) return instances
def get_instances(self, session): """Returns a dict of `VM OpaqueRef` (str) -> `xapi.VM`.""" LOG.debug("Getting instances from Xapi") recs = session.xenapi.VM.get_all_records() # NOTE(asadoughi): Copied from xen-networking-scripts/utils.py is_inst = lambda r: (r['power_state'].lower() == 'running' and not r['is_a_template'] and not r['is_control_domain'] and ('nova_uuid' in r['other_config'] or r['name_label'].startswith('instance-'))) instances = dict() for vm_ref, rec in recs.iteritems(): if not is_inst(rec): continue instances[vm_ref] = VM(ref=vm_ref, uuid=rec["other_config"]["nova_uuid"], vifs=rec["VIFs"], dom_id=rec["domid"]) return instances
[ "Returns", "a", "dict", "of", "VM", "OpaqueRef", "(", "str", ")", "-", ">", "xapi", ".", "VM", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/agent/xapi.py#L122-L142
[ "def", "get_instances", "(", "self", ",", "session", ")", ":", "LOG", ".", "debug", "(", "\"Getting instances from Xapi\"", ")", "recs", "=", "session", ".", "xenapi", ".", "VM", ".", "get_all_records", "(", ")", "# NOTE(asadoughi): Copied from xen-networking-scripts/utils.py", "is_inst", "=", "lambda", "r", ":", "(", "r", "[", "'power_state'", "]", ".", "lower", "(", ")", "==", "'running'", "and", "not", "r", "[", "'is_a_template'", "]", "and", "not", "r", "[", "'is_control_domain'", "]", "and", "(", "'nova_uuid'", "in", "r", "[", "'other_config'", "]", "or", "r", "[", "'name_label'", "]", ".", "startswith", "(", "'instance-'", ")", ")", ")", "instances", "=", "dict", "(", ")", "for", "vm_ref", ",", "rec", "in", "recs", ".", "iteritems", "(", ")", ":", "if", "not", "is_inst", "(", "rec", ")", ":", "continue", "instances", "[", "vm_ref", "]", "=", "VM", "(", "ref", "=", "vm_ref", ",", "uuid", "=", "rec", "[", "\"other_config\"", "]", "[", "\"nova_uuid\"", "]", ",", "vifs", "=", "rec", "[", "\"VIFs\"", "]", ",", "dom_id", "=", "rec", "[", "\"domid\"", "]", ")", "return", "instances" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
XapiClient.get_interfaces
Returns a set of VIFs from `get_instances` return value.
quark/agent/xapi.py
def get_interfaces(self): """Returns a set of VIFs from `get_instances` return value.""" LOG.debug("Getting interfaces from Xapi") with self.sessioned() as session: instances = self.get_instances(session) recs = session.xenapi.VIF.get_all_records() interfaces = set() for vif_ref, rec in recs.iteritems(): vm = instances.get(rec["VM"]) if not vm: continue device_id = vm.uuid interfaces.add(VIF(device_id, rec, vif_ref)) return interfaces
def get_interfaces(self): """Returns a set of VIFs from `get_instances` return value.""" LOG.debug("Getting interfaces from Xapi") with self.sessioned() as session: instances = self.get_instances(session) recs = session.xenapi.VIF.get_all_records() interfaces = set() for vif_ref, rec in recs.iteritems(): vm = instances.get(rec["VM"]) if not vm: continue device_id = vm.uuid interfaces.add(VIF(device_id, rec, vif_ref)) return interfaces
[ "Returns", "a", "set", "of", "VIFs", "from", "get_instances", "return", "value", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/agent/xapi.py#L144-L159
[ "def", "get_interfaces", "(", "self", ")", ":", "LOG", ".", "debug", "(", "\"Getting interfaces from Xapi\"", ")", "with", "self", ".", "sessioned", "(", ")", "as", "session", ":", "instances", "=", "self", ".", "get_instances", "(", "session", ")", "recs", "=", "session", ".", "xenapi", ".", "VIF", ".", "get_all_records", "(", ")", "interfaces", "=", "set", "(", ")", "for", "vif_ref", ",", "rec", "in", "recs", ".", "iteritems", "(", ")", ":", "vm", "=", "instances", ".", "get", "(", "rec", "[", "\"VM\"", "]", ")", "if", "not", "vm", ":", "continue", "device_id", "=", "vm", ".", "uuid", "interfaces", ".", "add", "(", "VIF", "(", "device_id", ",", "rec", ",", "vif_ref", ")", ")", "return", "interfaces" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
XapiClient.update_interfaces
Handles changes to interfaces' security groups Calls refresh_interfaces on argument VIFs. Set security groups on added_sg's VIFs. Unsets security groups on removed_sg's VIFs.
quark/agent/xapi.py
def update_interfaces(self, added_sg, updated_sg, removed_sg): """Handles changes to interfaces' security groups Calls refresh_interfaces on argument VIFs. Set security groups on added_sg's VIFs. Unsets security groups on removed_sg's VIFs. """ if not (added_sg or updated_sg or removed_sg): return with self.sessioned() as session: self._set_security_groups(session, added_sg) self._unset_security_groups(session, removed_sg) combined = added_sg + updated_sg + removed_sg self._refresh_interfaces(session, combined)
def update_interfaces(self, added_sg, updated_sg, removed_sg): """Handles changes to interfaces' security groups Calls refresh_interfaces on argument VIFs. Set security groups on added_sg's VIFs. Unsets security groups on removed_sg's VIFs. """ if not (added_sg or updated_sg or removed_sg): return with self.sessioned() as session: self._set_security_groups(session, added_sg) self._unset_security_groups(session, removed_sg) combined = added_sg + updated_sg + removed_sg self._refresh_interfaces(session, combined)
[ "Handles", "changes", "to", "interfaces", "security", "groups" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/agent/xapi.py#L215-L228
[ "def", "update_interfaces", "(", "self", ",", "added_sg", ",", "updated_sg", ",", "removed_sg", ")", ":", "if", "not", "(", "added_sg", "or", "updated_sg", "or", "removed_sg", ")", ":", "return", "with", "self", ".", "sessioned", "(", ")", "as", "session", ":", "self", ".", "_set_security_groups", "(", "session", ",", "added_sg", ")", "self", ".", "_unset_security_groups", "(", "session", ",", "removed_sg", ")", "combined", "=", "added_sg", "+", "updated_sg", "+", "removed_sg", "self", ".", "_refresh_interfaces", "(", "session", ",", "combined", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
create_network
Create a network. Create a network which represents an L2 network segment which can have a set of subnets and ports associated with it. : param context: neutron api request context : param network: dictionary describing the network, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated.
quark/plugin_modules/networks.py
def create_network(context, network): """Create a network. Create a network which represents an L2 network segment which can have a set of subnets and ports associated with it. : param context: neutron api request context : param network: dictionary describing the network, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. """ LOG.info("create_network for tenant %s" % context.tenant_id) with context.session.begin(): net_attrs = network["network"] subs = net_attrs.pop("subnets", []) # Enforce subnet quotas if not context.is_admin: if len(subs) > 0: v4_count, v6_count = 0, 0 for s in subs: version = netaddr.IPNetwork(s['subnet']['cidr']).version if version == 6: v6_count += 1 else: v4_count += 1 if v4_count > 0: tenant_q_v4 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v4_subnets_per_network').first() if tenant_q_v4 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v4_subnets_per_network=v4_count) if v6_count > 0: tenant_q_v6 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v6_subnets_per_network').first() if tenant_q_v6 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v6_subnets_per_network=v6_count) # Generate a uuid that we're going to hand to the backend and db net_uuid = utils.pop_param(net_attrs, "id", None) net_type = None if net_uuid and context.is_admin: net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, id=net_uuid, scope=db_api.ONE) net_type = utils.pop_param(net_attrs, "network_plugin", None) if net: raise q_exc.NetworkAlreadyExists(id=net_uuid) else: net_uuid = uuidutils.generate_uuid() # TODO(mdietz) this will be the first component registry hook, but # lets make it work first pnet_type, phys_net, seg_id = _adapt_provider_nets(context, network) ipam_strategy = utils.pop_param(net_attrs, "ipam_strategy", None) if not ipam_strategy or not context.is_admin: ipam_strategy = CONF.QUARK.default_ipam_strategy if not ipam.IPAM_REGISTRY.is_valid_strategy(ipam_strategy): raise q_exc.InvalidIpamStrategy(strat=ipam_strategy) net_attrs["ipam_strategy"] = ipam_strategy # NOTE(mdietz) I think ideally we would create the providernet # elsewhere as a separate driver step that could be # kept in a plugin and completely removed if desired. We could # have a pre-callback/observer on the netdriver create_network # that gathers any additional parameters from the network dict default_net_type = net_type or CONF.QUARK.default_network_type net_driver = registry.DRIVER_REGISTRY.get_driver(default_net_type) net_driver.create_network(context, net_attrs["name"], network_id=net_uuid, phys_type=pnet_type, phys_net=phys_net, segment_id=seg_id) net_attrs["id"] = net_uuid net_attrs["tenant_id"] = context.tenant_id net_attrs["network_plugin"] = default_net_type new_net = db_api.network_create(context, **net_attrs) new_subnets = [] for sub in subs: sub["subnet"]["network_id"] = new_net["id"] sub["subnet"]["tenant_id"] = context.tenant_id s = db_api.subnet_create(context, **sub["subnet"]) new_subnets.append(s) new_net["subnets"] = new_subnets # if not security_groups.get_security_groups( # context, # filters={"id": security_groups.DEFAULT_SG_UUID}): # security_groups._create_default_security_group(context) return v._make_network_dict(new_net)
def create_network(context, network): """Create a network. Create a network which represents an L2 network segment which can have a set of subnets and ports associated with it. : param context: neutron api request context : param network: dictionary describing the network, with keys as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. All keys will be populated. """ LOG.info("create_network for tenant %s" % context.tenant_id) with context.session.begin(): net_attrs = network["network"] subs = net_attrs.pop("subnets", []) # Enforce subnet quotas if not context.is_admin: if len(subs) > 0: v4_count, v6_count = 0, 0 for s in subs: version = netaddr.IPNetwork(s['subnet']['cidr']).version if version == 6: v6_count += 1 else: v4_count += 1 if v4_count > 0: tenant_q_v4 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v4_subnets_per_network').first() if tenant_q_v4 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v4_subnets_per_network=v4_count) if v6_count > 0: tenant_q_v6 = context.session.query(qdv.Quota).filter_by( tenant_id=context.tenant_id, resource='v6_subnets_per_network').first() if tenant_q_v6 != -1: quota.QUOTAS.limit_check( context, context.tenant_id, v6_subnets_per_network=v6_count) # Generate a uuid that we're going to hand to the backend and db net_uuid = utils.pop_param(net_attrs, "id", None) net_type = None if net_uuid and context.is_admin: net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, id=net_uuid, scope=db_api.ONE) net_type = utils.pop_param(net_attrs, "network_plugin", None) if net: raise q_exc.NetworkAlreadyExists(id=net_uuid) else: net_uuid = uuidutils.generate_uuid() # TODO(mdietz) this will be the first component registry hook, but # lets make it work first pnet_type, phys_net, seg_id = _adapt_provider_nets(context, network) ipam_strategy = utils.pop_param(net_attrs, "ipam_strategy", None) if not ipam_strategy or not context.is_admin: ipam_strategy = CONF.QUARK.default_ipam_strategy if not ipam.IPAM_REGISTRY.is_valid_strategy(ipam_strategy): raise q_exc.InvalidIpamStrategy(strat=ipam_strategy) net_attrs["ipam_strategy"] = ipam_strategy # NOTE(mdietz) I think ideally we would create the providernet # elsewhere as a separate driver step that could be # kept in a plugin and completely removed if desired. We could # have a pre-callback/observer on the netdriver create_network # that gathers any additional parameters from the network dict default_net_type = net_type or CONF.QUARK.default_network_type net_driver = registry.DRIVER_REGISTRY.get_driver(default_net_type) net_driver.create_network(context, net_attrs["name"], network_id=net_uuid, phys_type=pnet_type, phys_net=phys_net, segment_id=seg_id) net_attrs["id"] = net_uuid net_attrs["tenant_id"] = context.tenant_id net_attrs["network_plugin"] = default_net_type new_net = db_api.network_create(context, **net_attrs) new_subnets = [] for sub in subs: sub["subnet"]["network_id"] = new_net["id"] sub["subnet"]["tenant_id"] = context.tenant_id s = db_api.subnet_create(context, **sub["subnet"]) new_subnets.append(s) new_net["subnets"] = new_subnets # if not security_groups.get_security_groups( # context, # filters={"id": security_groups.DEFAULT_SG_UUID}): # security_groups._create_default_security_group(context) return v._make_network_dict(new_net)
[ "Create", "a", "network", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/networks.py#L53-L151
[ "def", "create_network", "(", "context", ",", "network", ")", ":", "LOG", ".", "info", "(", "\"create_network for tenant %s\"", "%", "context", ".", "tenant_id", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "net_attrs", "=", "network", "[", "\"network\"", "]", "subs", "=", "net_attrs", ".", "pop", "(", "\"subnets\"", ",", "[", "]", ")", "# Enforce subnet quotas", "if", "not", "context", ".", "is_admin", ":", "if", "len", "(", "subs", ")", ">", "0", ":", "v4_count", ",", "v6_count", "=", "0", ",", "0", "for", "s", "in", "subs", ":", "version", "=", "netaddr", ".", "IPNetwork", "(", "s", "[", "'subnet'", "]", "[", "'cidr'", "]", ")", ".", "version", "if", "version", "==", "6", ":", "v6_count", "+=", "1", "else", ":", "v4_count", "+=", "1", "if", "v4_count", ">", "0", ":", "tenant_q_v4", "=", "context", ".", "session", ".", "query", "(", "qdv", ".", "Quota", ")", ".", "filter_by", "(", "tenant_id", "=", "context", ".", "tenant_id", ",", "resource", "=", "'v4_subnets_per_network'", ")", ".", "first", "(", ")", "if", "tenant_q_v4", "!=", "-", "1", ":", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "v4_subnets_per_network", "=", "v4_count", ")", "if", "v6_count", ">", "0", ":", "tenant_q_v6", "=", "context", ".", "session", ".", "query", "(", "qdv", ".", "Quota", ")", ".", "filter_by", "(", "tenant_id", "=", "context", ".", "tenant_id", ",", "resource", "=", "'v6_subnets_per_network'", ")", ".", "first", "(", ")", "if", "tenant_q_v6", "!=", "-", "1", ":", "quota", ".", "QUOTAS", ".", "limit_check", "(", "context", ",", "context", ".", "tenant_id", ",", "v6_subnets_per_network", "=", "v6_count", ")", "# Generate a uuid that we're going to hand to the backend and db", "net_uuid", "=", "utils", ".", "pop_param", "(", "net_attrs", ",", "\"id\"", ",", "None", ")", "net_type", "=", "None", "if", "net_uuid", "and", "context", ".", "is_admin", ":", "net", "=", "db_api", ".", "network_find", "(", "context", "=", "context", ",", "limit", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ",", "id", "=", "net_uuid", ",", "scope", "=", "db_api", ".", "ONE", ")", "net_type", "=", "utils", ".", "pop_param", "(", "net_attrs", ",", "\"network_plugin\"", ",", "None", ")", "if", "net", ":", "raise", "q_exc", ".", "NetworkAlreadyExists", "(", "id", "=", "net_uuid", ")", "else", ":", "net_uuid", "=", "uuidutils", ".", "generate_uuid", "(", ")", "# TODO(mdietz) this will be the first component registry hook, but", "# lets make it work first", "pnet_type", ",", "phys_net", ",", "seg_id", "=", "_adapt_provider_nets", "(", "context", ",", "network", ")", "ipam_strategy", "=", "utils", ".", "pop_param", "(", "net_attrs", ",", "\"ipam_strategy\"", ",", "None", ")", "if", "not", "ipam_strategy", "or", "not", "context", ".", "is_admin", ":", "ipam_strategy", "=", "CONF", ".", "QUARK", ".", "default_ipam_strategy", "if", "not", "ipam", ".", "IPAM_REGISTRY", ".", "is_valid_strategy", "(", "ipam_strategy", ")", ":", "raise", "q_exc", ".", "InvalidIpamStrategy", "(", "strat", "=", "ipam_strategy", ")", "net_attrs", "[", "\"ipam_strategy\"", "]", "=", "ipam_strategy", "# NOTE(mdietz) I think ideally we would create the providernet", "# elsewhere as a separate driver step that could be", "# kept in a plugin and completely removed if desired. 
We could", "# have a pre-callback/observer on the netdriver create_network", "# that gathers any additional parameters from the network dict", "default_net_type", "=", "net_type", "or", "CONF", ".", "QUARK", ".", "default_network_type", "net_driver", "=", "registry", ".", "DRIVER_REGISTRY", ".", "get_driver", "(", "default_net_type", ")", "net_driver", ".", "create_network", "(", "context", ",", "net_attrs", "[", "\"name\"", "]", ",", "network_id", "=", "net_uuid", ",", "phys_type", "=", "pnet_type", ",", "phys_net", "=", "phys_net", ",", "segment_id", "=", "seg_id", ")", "net_attrs", "[", "\"id\"", "]", "=", "net_uuid", "net_attrs", "[", "\"tenant_id\"", "]", "=", "context", ".", "tenant_id", "net_attrs", "[", "\"network_plugin\"", "]", "=", "default_net_type", "new_net", "=", "db_api", ".", "network_create", "(", "context", ",", "*", "*", "net_attrs", ")", "new_subnets", "=", "[", "]", "for", "sub", "in", "subs", ":", "sub", "[", "\"subnet\"", "]", "[", "\"network_id\"", "]", "=", "new_net", "[", "\"id\"", "]", "sub", "[", "\"subnet\"", "]", "[", "\"tenant_id\"", "]", "=", "context", ".", "tenant_id", "s", "=", "db_api", ".", "subnet_create", "(", "context", ",", "*", "*", "sub", "[", "\"subnet\"", "]", ")", "new_subnets", ".", "append", "(", "s", ")", "new_net", "[", "\"subnets\"", "]", "=", "new_subnets", "# if not security_groups.get_security_groups(", "# context,", "# filters={\"id\": security_groups.DEFAULT_SG_UUID}):", "# security_groups._create_default_security_group(context)", "return", "v", ".", "_make_network_dict", "(", "new_net", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
update_network
Update values of a network. : param context: neutron api request context : param id: UUID representing the network to update. : param network: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py.
quark/plugin_modules/networks.py
def update_network(context, id, network): """Update values of a network. : param context: neutron api request context : param id: UUID representing the network to update. : param network: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_network %s for tenant %s" % (id, context.tenant_id)) with context.session.begin(): net = db_api.network_find(context, id=id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=id) net_dict = network["network"] utils.pop_param(net_dict, "network_plugin") if not context.is_admin and "ipam_strategy" in net_dict: utils.pop_param(net_dict, "ipam_strategy") net = db_api.network_update(context, net, **net_dict) return v._make_network_dict(net)
def update_network(context, id, network): """Update values of a network. : param context: neutron api request context : param id: UUID representing the network to update. : param network: dictionary with keys indicating fields to update. valid keys are those that have a value of True for 'allow_put' as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. """ LOG.info("update_network %s for tenant %s" % (id, context.tenant_id)) with context.session.begin(): net = db_api.network_find(context, id=id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=id) net_dict = network["network"] utils.pop_param(net_dict, "network_plugin") if not context.is_admin and "ipam_strategy" in net_dict: utils.pop_param(net_dict, "ipam_strategy") net = db_api.network_update(context, net, **net_dict) return v._make_network_dict(net)
[ "Update", "values", "of", "a", "network", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/networks.py#L154-L176
[ "def", "update_network", "(", "context", ",", "id", ",", "network", ")", ":", "LOG", ".", "info", "(", "\"update_network %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "net", "=", "db_api", ".", "network_find", "(", "context", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "net", ":", "raise", "n_exc", ".", "NetworkNotFound", "(", "net_id", "=", "id", ")", "net_dict", "=", "network", "[", "\"network\"", "]", "utils", ".", "pop_param", "(", "net_dict", ",", "\"network_plugin\"", ")", "if", "not", "context", ".", "is_admin", "and", "\"ipam_strategy\"", "in", "net_dict", ":", "utils", ".", "pop_param", "(", "net_dict", ",", "\"ipam_strategy\"", ")", "net", "=", "db_api", ".", "network_update", "(", "context", ",", "net", ",", "*", "*", "net_dict", ")", "return", "v", ".", "_make_network_dict", "(", "net", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_network
Retrieve a network. : param context: neutron api request context : param id: UUID representing the network to fetch. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
quark/plugin_modules/networks.py
def get_network(context, id, fields=None): """Retrieve a network. : param context: neutron api request context : param id: UUID representing the network to fetch. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_network %s for tenant %s fields %s" % (id, context.tenant_id, fields)) network = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, id=id, join_subnets=True, scope=db_api.ONE) if not network: raise n_exc.NetworkNotFound(net_id=id) return v._make_network_dict(network, fields=fields)
def get_network(context, id, fields=None): """Retrieve a network. : param context: neutron api request context : param id: UUID representing the network to fetch. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_network %s for tenant %s fields %s" % (id, context.tenant_id, fields)) network = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, id=id, join_subnets=True, scope=db_api.ONE) if not network: raise n_exc.NetworkNotFound(net_id=id) return v._make_network_dict(network, fields=fields)
[ "Retrieve", "a", "network", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/networks.py#L179-L197
[ "def", "get_network", "(", "context", ",", "id", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_network %s for tenant %s fields %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ",", "fields", ")", ")", "network", "=", "db_api", ".", "network_find", "(", "context", "=", "context", ",", "limit", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ",", "id", "=", "id", ",", "join_subnets", "=", "True", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "network", ":", "raise", "n_exc", ".", "NetworkNotFound", "(", "net_id", "=", "id", ")", "return", "v", ".", "_make_network_dict", "(", "network", ",", "fields", "=", "fields", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_networks
Retrieve a list of networks. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a network as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictiontary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned.
quark/plugin_modules/networks.py
def get_networks(context, limit=None, sorts=['id'], marker=None, page_reverse=False, filters=None, fields=None): """Retrieve a list of networks. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a network as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictiontary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_networks for tenant %s with filters %s, fields %s" % (context.tenant_id, filters, fields)) filters = filters or {} nets = db_api.network_find(context, limit, sorts, marker, page_reverse, join_subnets=True, **filters) or [] nets = [v._make_network_dict(net, fields=fields) for net in nets] return nets
def get_networks(context, limit=None, sorts=['id'], marker=None, page_reverse=False, filters=None, fields=None): """Retrieve a list of networks. The contents of the list depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a network as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictiontary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. : param fields: a list of strings that are valid keys in a network dictionary as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Only these fields will be returned. """ LOG.info("get_networks for tenant %s with filters %s, fields %s" % (context.tenant_id, filters, fields)) filters = filters or {} nets = db_api.network_find(context, limit, sorts, marker, page_reverse, join_subnets=True, **filters) or [] nets = [v._make_network_dict(net, fields=fields) for net in nets] return nets
[ "Retrieve", "a", "list", "of", "networks", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/networks.py#L200-L226
[ "def", "get_networks", "(", "context", ",", "limit", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ",", "filters", "=", "None", ",", "fields", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_networks for tenant %s with filters %s, fields %s\"", "%", "(", "context", ".", "tenant_id", ",", "filters", ",", "fields", ")", ")", "filters", "=", "filters", "or", "{", "}", "nets", "=", "db_api", ".", "network_find", "(", "context", ",", "limit", ",", "sorts", ",", "marker", ",", "page_reverse", ",", "join_subnets", "=", "True", ",", "*", "*", "filters", ")", "or", "[", "]", "nets", "=", "[", "v", ".", "_make_network_dict", "(", "net", ",", "fields", "=", "fields", ")", "for", "net", "in", "nets", "]", "return", "nets" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
get_networks_count
Return the number of networks. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a network as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictiontary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API.
quark/plugin_modules/networks.py
def get_networks_count(context, filters=None): """Return the number of networks. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a network as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictiontary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API. """ LOG.info("get_networks_count for tenant %s filters %s" % (context.tenant_id, filters)) return db_api.network_count_all(context)
def get_networks_count(context, filters=None): """Return the number of networks. The result depends on the identity of the user making the request (as indicated by the context) as well as any filters. : param context: neutron api request context : param filters: a dictionary with keys that are valid keys for a network as listed in the RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py. Values in this dictiontary are an iterable containing values that will be used for an exact match comparison for that value. Each result returned by this function will have matched one of the values for each key in filters. NOTE: this method is optional, as it was not part of the originally defined plugin API. """ LOG.info("get_networks_count for tenant %s filters %s" % (context.tenant_id, filters)) return db_api.network_count_all(context)
[ "Return", "the", "number", "of", "networks", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/networks.py#L229-L248
[ "def", "get_networks_count", "(", "context", ",", "filters", "=", "None", ")", ":", "LOG", ".", "info", "(", "\"get_networks_count for tenant %s filters %s\"", "%", "(", "context", ".", "tenant_id", ",", "filters", ")", ")", "return", "db_api", ".", "network_count_all", "(", "context", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
delete_network
Delete a network. : param context: neutron api request context : param id: UUID representing the network to delete.
quark/plugin_modules/networks.py
def delete_network(context, id): """Delete a network. : param context: neutron api request context : param id: UUID representing the network to delete. """ LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id)) with context.session.begin(): net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, id=id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=id) if not context.is_admin: if STRATEGY.is_provider_network(net.id): raise n_exc.NotAuthorized(net_id=id) if net.ports: raise n_exc.NetworkInUse(net_id=id) net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"]) net_driver.delete_network(context, id) for subnet in net["subnets"]: subnets._delete_subnet(context, subnet) db_api.network_delete(context, net)
def delete_network(context, id): """Delete a network. : param context: neutron api request context : param id: UUID representing the network to delete. """ LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id)) with context.session.begin(): net = db_api.network_find(context=context, limit=None, sorts=['id'], marker=None, page_reverse=False, id=id, scope=db_api.ONE) if not net: raise n_exc.NetworkNotFound(net_id=id) if not context.is_admin: if STRATEGY.is_provider_network(net.id): raise n_exc.NotAuthorized(net_id=id) if net.ports: raise n_exc.NetworkInUse(net_id=id) net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"]) net_driver.delete_network(context, id) for subnet in net["subnets"]: subnets._delete_subnet(context, subnet) db_api.network_delete(context, net)
[ "Delete", "a", "network", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/networks.py#L251-L273
[ "def", "delete_network", "(", "context", ",", "id", ")", ":", "LOG", ".", "info", "(", "\"delete_network %s for tenant %s\"", "%", "(", "id", ",", "context", ".", "tenant_id", ")", ")", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "net", "=", "db_api", ".", "network_find", "(", "context", "=", "context", ",", "limit", "=", "None", ",", "sorts", "=", "[", "'id'", "]", ",", "marker", "=", "None", ",", "page_reverse", "=", "False", ",", "id", "=", "id", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "net", ":", "raise", "n_exc", ".", "NetworkNotFound", "(", "net_id", "=", "id", ")", "if", "not", "context", ".", "is_admin", ":", "if", "STRATEGY", ".", "is_provider_network", "(", "net", ".", "id", ")", ":", "raise", "n_exc", ".", "NotAuthorized", "(", "net_id", "=", "id", ")", "if", "net", ".", "ports", ":", "raise", "n_exc", ".", "NetworkInUse", "(", "net_id", "=", "id", ")", "net_driver", "=", "registry", ".", "DRIVER_REGISTRY", ".", "get_driver", "(", "net", "[", "\"network_plugin\"", "]", ")", "net_driver", ".", "delete_network", "(", "context", ",", "id", ")", "for", "subnet", "in", "net", "[", "\"subnets\"", "]", ":", "subnets", ".", "_delete_subnet", "(", "context", ",", "subnet", ")", "db_api", ".", "network_delete", "(", "context", ",", "net", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
make_case2
This is a helper method for testing. When run with the current context, it will create a case 2 entry in the database. See top of file for what case 2 is.
quark/tools/billing.py
def make_case2(context):
    """This is a helper method for testing. When run with the current
    context, it will create a case 2 entry in the database. See top of
    file for what case 2 is.
    """
    query = context.session.query(models.IPAddress)
    period_start, period_end = billing.calc_periods()
    ip_list = billing.build_full_day_ips(query, period_start, period_end)
    import random
    ind = random.randint(0, len(ip_list) - 1)
    address = ip_list[ind]
    address.allocated_at = datetime.datetime.utcnow() -\
        datetime.timedelta(days=1)
    context.session.add(address)
    context.session.flush()
def make_case2(context):
    """This is a helper method for testing. When run with the current
    context, it will create a case 2 entry in the database. See top of
    file for what case 2 is.
    """
    query = context.session.query(models.IPAddress)
    period_start, period_end = billing.calc_periods()
    ip_list = billing.build_full_day_ips(query, period_start, period_end)
    import random
    ind = random.randint(0, len(ip_list) - 1)
    address = ip_list[ind]
    address.allocated_at = datetime.datetime.utcnow() -\
        datetime.timedelta(days=1)
    context.session.add(address)
    context.session.flush()
[ "This", "is", "a", "helper", "method", "for", "testing", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/billing.py#L32-L47
[ "def", "make_case2", "(", "context", ")", ":", "query", "=", "context", ".", "session", ".", "query", "(", "models", ".", "IPAddress", ")", "period_start", ",", "period_end", "=", "billing", ".", "calc_periods", "(", ")", "ip_list", "=", "billing", ".", "build_full_day_ips", "(", "query", ",", "period_start", ",", "period_end", ")", "import", "random", "ind", "=", "random", ".", "randint", "(", "0", ",", "len", "(", "ip_list", ")", "-", "1", ")", "address", "=", "ip_list", "[", "ind", "]", "address", ".", "allocated_at", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "-", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "context", ".", "session", ".", "add", "(", "address", ")", "context", ".", "session", ".", "flush", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
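The only non-obvious move above is back-dating allocated_at so the chosen address spans a full billing day. A self-contained sketch of that arithmetic, and of the inclusive randint bounds used to pick the address:

import datetime
import random

# allocated_at is pushed back one day so the IP covers the whole period
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)

# random.randint(a, b) is inclusive on both ends, hence len(...) - 1
ips = ["ip-a", "ip-b", "ip-c"]
chosen = ips[random.randint(0, len(ips) - 1)]  # random.choice(ips) is equivalent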
valid
main
Runs billing report. Optionally sends notifications to billing
quark/tools/billing.py
def main(notify, hour, minute): """Runs billing report. Optionally sends notifications to billing""" # Read the config file and get the admin context config_opts = ['--config-file', '/etc/neutron/neutron.conf'] config.init(config_opts) # Have to load the billing module _after_ config is parsed so # that we get the right network strategy network_strategy.STRATEGY.load() billing.PUBLIC_NETWORK_ID = network_strategy.STRATEGY.get_public_net_id() config.setup_logging() context = neutron_context.get_admin_context() # A query to get all IPAddress objects from the db query = context.session.query(models.IPAddress) (period_start, period_end) = billing.calc_periods(hour, minute) full_day_ips = billing.build_full_day_ips(query, period_start, period_end) partial_day_ips = billing.build_partial_day_ips(query, period_start, period_end) if notify: # '==================== Full Day =============================' for ipaddress in full_day_ips: click.echo('start: {}, end: {}'.format(period_start, period_end)) payload = billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=period_start, end_time=period_end) billing.do_notify(context, billing.IP_EXISTS, payload) # '==================== Part Day =============================' for ipaddress in partial_day_ips: click.echo('start: {}, end: {}'.format(period_start, period_end)) payload = billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=ipaddress.allocated_at, end_time=period_end) billing.do_notify(context, billing.IP_EXISTS, payload) else: click.echo('Case 1 ({}):\n'.format(len(full_day_ips))) for ipaddress in full_day_ips: pp(billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=period_start, end_time=period_end)) click.echo('\n===============================================\n') click.echo('Case 2 ({}):\n'.format(len(partial_day_ips))) for ipaddress in partial_day_ips: pp(billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=ipaddress.allocated_at, end_time=period_end))
def main(notify, hour, minute): """Runs billing report. Optionally sends notifications to billing""" # Read the config file and get the admin context config_opts = ['--config-file', '/etc/neutron/neutron.conf'] config.init(config_opts) # Have to load the billing module _after_ config is parsed so # that we get the right network strategy network_strategy.STRATEGY.load() billing.PUBLIC_NETWORK_ID = network_strategy.STRATEGY.get_public_net_id() config.setup_logging() context = neutron_context.get_admin_context() # A query to get all IPAddress objects from the db query = context.session.query(models.IPAddress) (period_start, period_end) = billing.calc_periods(hour, minute) full_day_ips = billing.build_full_day_ips(query, period_start, period_end) partial_day_ips = billing.build_partial_day_ips(query, period_start, period_end) if notify: # '==================== Full Day =============================' for ipaddress in full_day_ips: click.echo('start: {}, end: {}'.format(period_start, period_end)) payload = billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=period_start, end_time=period_end) billing.do_notify(context, billing.IP_EXISTS, payload) # '==================== Part Day =============================' for ipaddress in partial_day_ips: click.echo('start: {}, end: {}'.format(period_start, period_end)) payload = billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=ipaddress.allocated_at, end_time=period_end) billing.do_notify(context, billing.IP_EXISTS, payload) else: click.echo('Case 1 ({}):\n'.format(len(full_day_ips))) for ipaddress in full_day_ips: pp(billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=period_start, end_time=period_end)) click.echo('\n===============================================\n') click.echo('Case 2 ({}):\n'.format(len(partial_day_ips))) for ipaddress in partial_day_ips: pp(billing.build_payload(ipaddress, billing.IP_EXISTS, start_time=ipaddress.allocated_at, end_time=period_end))
[ "Runs", "billing", "report", ".", "Optionally", "sends", "notifications", "to", "billing" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/tools/billing.py#L57-L118
[ "def", "main", "(", "notify", ",", "hour", ",", "minute", ")", ":", "# Read the config file and get the admin context", "config_opts", "=", "[", "'--config-file'", ",", "'/etc/neutron/neutron.conf'", "]", "config", ".", "init", "(", "config_opts", ")", "# Have to load the billing module _after_ config is parsed so", "# that we get the right network strategy", "network_strategy", ".", "STRATEGY", ".", "load", "(", ")", "billing", ".", "PUBLIC_NETWORK_ID", "=", "network_strategy", ".", "STRATEGY", ".", "get_public_net_id", "(", ")", "config", ".", "setup_logging", "(", ")", "context", "=", "neutron_context", ".", "get_admin_context", "(", ")", "# A query to get all IPAddress objects from the db", "query", "=", "context", ".", "session", ".", "query", "(", "models", ".", "IPAddress", ")", "(", "period_start", ",", "period_end", ")", "=", "billing", ".", "calc_periods", "(", "hour", ",", "minute", ")", "full_day_ips", "=", "billing", ".", "build_full_day_ips", "(", "query", ",", "period_start", ",", "period_end", ")", "partial_day_ips", "=", "billing", ".", "build_partial_day_ips", "(", "query", ",", "period_start", ",", "period_end", ")", "if", "notify", ":", "# '==================== Full Day ============================='", "for", "ipaddress", "in", "full_day_ips", ":", "click", ".", "echo", "(", "'start: {}, end: {}'", ".", "format", "(", "period_start", ",", "period_end", ")", ")", "payload", "=", "billing", ".", "build_payload", "(", "ipaddress", ",", "billing", ".", "IP_EXISTS", ",", "start_time", "=", "period_start", ",", "end_time", "=", "period_end", ")", "billing", ".", "do_notify", "(", "context", ",", "billing", ".", "IP_EXISTS", ",", "payload", ")", "# '==================== Part Day ============================='", "for", "ipaddress", "in", "partial_day_ips", ":", "click", ".", "echo", "(", "'start: {}, end: {}'", ".", "format", "(", "period_start", ",", "period_end", ")", ")", "payload", "=", "billing", ".", "build_payload", "(", "ipaddress", ",", "billing", ".", "IP_EXISTS", ",", "start_time", "=", "ipaddress", ".", "allocated_at", ",", "end_time", "=", "period_end", ")", "billing", ".", "do_notify", "(", "context", ",", "billing", ".", "IP_EXISTS", ",", "payload", ")", "else", ":", "click", ".", "echo", "(", "'Case 1 ({}):\\n'", ".", "format", "(", "len", "(", "full_day_ips", ")", ")", ")", "for", "ipaddress", "in", "full_day_ips", ":", "pp", "(", "billing", ".", "build_payload", "(", "ipaddress", ",", "billing", ".", "IP_EXISTS", ",", "start_time", "=", "period_start", ",", "end_time", "=", "period_end", ")", ")", "click", ".", "echo", "(", "'\\n===============================================\\n'", ")", "click", ".", "echo", "(", "'Case 2 ({}):\\n'", ".", "format", "(", "len", "(", "partial_day_ips", ")", ")", ")", "for", "ipaddress", "in", "partial_day_ips", ":", "pp", "(", "billing", ".", "build_payload", "(", "ipaddress", ",", "billing", ".", "IP_EXISTS", ",", "start_time", "=", "ipaddress", ".", "allocated_at", ",", "end_time", "=", "period_end", ")", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
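The record stores only the function body, so the CLI wiring is not shown. Given the (notify, hour, minute) signature and the click calls inside, a plausible decorator stack would look like the following — an assumption for illustration, not the repository's actual code:

import click

@click.command()
@click.option('--notify', is_flag=True,
              help='Send billing notifications instead of printing payloads.')
@click.option('--hour', default=0, type=int, help='Billing period boundary hour.')
@click.option('--minute', default=0, type=int, help='Billing period boundary minute.')
def main(notify, hour, minute):
    ...  # body as in the record above

if __name__ == '__main__':
    main()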
valid
QuarkAsyncPluginBase.start_rpc_listeners
Configure all listeners here
quark/worker_plugins/base_worker.py
def start_rpc_listeners(self): """Configure all listeners here""" self._setup_rpc() if not self.endpoints: return [] self.conn = n_rpc.create_connection() self.conn.create_consumer(self.topic, self.endpoints, fanout=False) return self.conn.consume_in_threads()
def start_rpc_listeners(self): """Configure all listeners here""" self._setup_rpc() if not self.endpoints: return [] self.conn = n_rpc.create_connection() self.conn.create_consumer(self.topic, self.endpoints, fanout=False) return self.conn.consume_in_threads()
[ "Configure", "all", "listeners", "here" ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/worker_plugins/base_worker.py#L42-L50
[ "def", "start_rpc_listeners", "(", "self", ")", ":", "self", ".", "_setup_rpc", "(", ")", "if", "not", "self", ".", "endpoints", ":", "return", "[", "]", "self", ".", "conn", "=", "n_rpc", ".", "create_connection", "(", ")", "self", ".", "conn", ".", "create_consumer", "(", "self", ".", "topic", ",", "self", ".", "endpoints", ",", "fanout", "=", "False", ")", "return", "self", ".", "conn", ".", "consume_in_threads", "(", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
QuarkAsyncPluginBase.context
Provides an admin context for workers.
quark/worker_plugins/base_worker.py
def context(self): """Provides an admin context for workers.""" if not self._context: self._context = context.get_admin_context() return self._context
def context(self): """Provides an admin context for workers.""" if not self._context: self._context = context.get_admin_context() return self._context
[ "Provides", "an", "admin", "context", "for", "workers", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/worker_plugins/base_worker.py#L53-L57
[ "def", "context", "(", "self", ")", ":", "if", "not", "self", ".", "_context", ":", "self", ".", "_context", "=", "context", ".", "get_admin_context", "(", ")", "return", "self", ".", "_context" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
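This is the standard lazily-cached property pattern (the @property decorator was likely stripped by the extractor). One nuance: `if not self._context` would rebuild the value whenever it is falsy, whereas `is None` only builds once. A self-contained illustration:

class Worker(object):
    def __init__(self):
        self._context = None

    @property
    def context(self):
        # 'is None' builds exactly once; 'not self._context' would also
        # rebuild for falsy-but-valid values.
        if self._context is None:
            self._context = {"is_admin": True}  # stand-in for get_admin_context()
        return self._context

w = Worker()
assert w.context is w.context  # cached after first access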
valid
QuarkSGAsyncProcessCallback.update_sg
Begins the async update process.
quark/worker_plugins/sg_update_worker.py
def update_sg(self, context, sg, rule_id, action): """Begins the async update process.""" db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE) if not db_sg: return None with context.session.begin(): job_body = dict(action="%s sg rule %s" % (action, rule_id), resource_id=rule_id, tenant_id=db_sg['tenant_id']) job_body = dict(job=job_body) job = job_api.create_job(context.elevated(), job_body) rpc_client = QuarkSGAsyncProducerClient() try: rpc_client.populate_subtasks(context, sg, job['id']) except om_exc.MessagingTimeout: LOG.error("Failed to create subtasks. Rabbit running?") return None return {"job_id": job['id']}
def update_sg(self, context, sg, rule_id, action): """Begins the async update process.""" db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE) if not db_sg: return None with context.session.begin(): job_body = dict(action="%s sg rule %s" % (action, rule_id), resource_id=rule_id, tenant_id=db_sg['tenant_id']) job_body = dict(job=job_body) job = job_api.create_job(context.elevated(), job_body) rpc_client = QuarkSGAsyncProducerClient() try: rpc_client.populate_subtasks(context, sg, job['id']) except om_exc.MessagingTimeout: LOG.error("Failed to create subtasks. Rabbit running?") return None return {"job_id": job['id']}
[ "Begins", "the", "async", "update", "process", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/worker_plugins/sg_update_worker.py#L71-L88
[ "def", "update_sg", "(", "self", ",", "context", ",", "sg", ",", "rule_id", ",", "action", ")", ":", "db_sg", "=", "db_api", ".", "security_group_find", "(", "context", ",", "id", "=", "sg", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "db_sg", ":", "return", "None", "with", "context", ".", "session", ".", "begin", "(", ")", ":", "job_body", "=", "dict", "(", "action", "=", "\"%s sg rule %s\"", "%", "(", "action", ",", "rule_id", ")", ",", "resource_id", "=", "rule_id", ",", "tenant_id", "=", "db_sg", "[", "'tenant_id'", "]", ")", "job_body", "=", "dict", "(", "job", "=", "job_body", ")", "job", "=", "job_api", ".", "create_job", "(", "context", ".", "elevated", "(", ")", ",", "job_body", ")", "rpc_client", "=", "QuarkSGAsyncProducerClient", "(", ")", "try", ":", "rpc_client", ".", "populate_subtasks", "(", "context", ",", "sg", ",", "job", "[", "'id'", "]", ")", "except", "om_exc", ".", "MessagingTimeout", ":", "LOG", ".", "error", "(", "\"Failed to create subtasks. Rabbit running?\"", ")", "return", "None", "return", "{", "\"job_id\"", ":", "job", "[", "'id'", "]", "}" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
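The double dict(job=...) wrapping above is easy to misread; the payload handed to job_api.create_job has this shape (illustrative placeholder values only):

# Illustrative payload shape; IDs are placeholders.
job_body = {
    "job": {
        "action": "create sg rule <rule-uuid>",
        "resource_id": "<rule-uuid>",
        "tenant_id": "<tenant-id>",
    }
}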
valid
QuarkSGProducerCallback.populate_subtasks
Produces a list of ports to be updated async.
quark/worker_plugins/sg_update_worker.py
def populate_subtasks(self, context, sg, parent_job_id): """Produces a list of ports to be updated async.""" db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE) if not db_sg: return None ports = db_api.sg_gather_associated_ports(context, db_sg) if len(ports) == 0: return {"ports": 0} for port in ports: job_body = dict(action="update port %s" % port['id'], tenant_id=db_sg['tenant_id'], resource_id=port['id'], parent_id=parent_job_id) job_body = dict(job=job_body) job = job_api.create_job(context.elevated(), job_body) rpc_consumer = QuarkSGAsyncConsumerClient() try: rpc_consumer.update_port(context, port['id'], job['id']) except om_exc.MessagingTimeout: # TODO(roaet): Not too sure what can be done here other than # updating the job as a failure? LOG.error("Failed to update port. Rabbit running?") return None
def populate_subtasks(self, context, sg, parent_job_id): """Produces a list of ports to be updated async.""" db_sg = db_api.security_group_find(context, id=sg, scope=db_api.ONE) if not db_sg: return None ports = db_api.sg_gather_associated_ports(context, db_sg) if len(ports) == 0: return {"ports": 0} for port in ports: job_body = dict(action="update port %s" % port['id'], tenant_id=db_sg['tenant_id'], resource_id=port['id'], parent_id=parent_job_id) job_body = dict(job=job_body) job = job_api.create_job(context.elevated(), job_body) rpc_consumer = QuarkSGAsyncConsumerClient() try: rpc_consumer.update_port(context, port['id'], job['id']) except om_exc.MessagingTimeout: # TODO(roaet): Not too sure what can be done here other than # updating the job as a failure? LOG.error("Failed to update port. Rabbit running?") return None
[ "Produces", "a", "list", "of", "ports", "to", "be", "updated", "async", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/worker_plugins/sg_update_worker.py#L124-L146
[ "def", "populate_subtasks", "(", "self", ",", "context", ",", "sg", ",", "parent_job_id", ")", ":", "db_sg", "=", "db_api", ".", "security_group_find", "(", "context", ",", "id", "=", "sg", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "db_sg", ":", "return", "None", "ports", "=", "db_api", ".", "sg_gather_associated_ports", "(", "context", ",", "db_sg", ")", "if", "len", "(", "ports", ")", "==", "0", ":", "return", "{", "\"ports\"", ":", "0", "}", "for", "port", "in", "ports", ":", "job_body", "=", "dict", "(", "action", "=", "\"update port %s\"", "%", "port", "[", "'id'", "]", ",", "tenant_id", "=", "db_sg", "[", "'tenant_id'", "]", ",", "resource_id", "=", "port", "[", "'id'", "]", ",", "parent_id", "=", "parent_job_id", ")", "job_body", "=", "dict", "(", "job", "=", "job_body", ")", "job", "=", "job_api", ".", "create_job", "(", "context", ".", "elevated", "(", ")", ",", "job_body", ")", "rpc_consumer", "=", "QuarkSGAsyncConsumerClient", "(", ")", "try", ":", "rpc_consumer", ".", "update_port", "(", "context", ",", "port", "[", "'id'", "]", ",", "job", "[", "'id'", "]", ")", "except", "om_exc", ".", "MessagingTimeout", ":", "# TODO(roaet): Not too sure what can be done here other than", "# updating the job as a failure?", "LOG", ".", "error", "(", "\"Failed to update port. Rabbit running?\"", ")", "return", "None" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
QuarkSGConsumerCallback.update_ports_for_sg
Updates the ports through redis.
quark/worker_plugins/sg_update_worker.py
def update_ports_for_sg(self, context, portid, jobid): """Updates the ports through redis.""" port = db_api.port_find(context, id=portid, scope=db_api.ONE) if not port: LOG.warning("Port not found") return net_driver = port_api._get_net_driver(port.network, port=port) base_net_driver = port_api._get_net_driver(port.network) sg_list = [sg for sg in port.security_groups] success = False error = None retries = 3 retry_delay = 2 for retry in xrange(retries): try: net_driver.update_port(context, port_id=port["backend_key"], mac_address=port["mac_address"], device_id=port["device_id"], base_net_driver=base_net_driver, security_groups=sg_list) success = True error = None break except Exception as error: LOG.warning("Could not connect to redis, but retrying soon") time.sleep(retry_delay) status_str = "" if not success: status_str = "Port %s update failed after %d tries. Error: %s" % ( portid, retries, error) update_body = dict(completed=True, status=status_str) update_body = dict(job=update_body) job_api.update_job(context.elevated(), jobid, update_body)
def update_ports_for_sg(self, context, portid, jobid): """Updates the ports through redis.""" port = db_api.port_find(context, id=portid, scope=db_api.ONE) if not port: LOG.warning("Port not found") return net_driver = port_api._get_net_driver(port.network, port=port) base_net_driver = port_api._get_net_driver(port.network) sg_list = [sg for sg in port.security_groups] success = False error = None retries = 3 retry_delay = 2 for retry in xrange(retries): try: net_driver.update_port(context, port_id=port["backend_key"], mac_address=port["mac_address"], device_id=port["device_id"], base_net_driver=base_net_driver, security_groups=sg_list) success = True error = None break except Exception as error: LOG.warning("Could not connect to redis, but retrying soon") time.sleep(retry_delay) status_str = "" if not success: status_str = "Port %s update failed after %d tries. Error: %s" % ( portid, retries, error) update_body = dict(completed=True, status=status_str) update_body = dict(job=update_body) job_api.update_job(context.elevated(), jobid, update_body)
[ "Updates", "the", "ports", "through", "redis", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/worker_plugins/sg_update_worker.py#L179-L212
[ "def", "update_ports_for_sg", "(", "self", ",", "context", ",", "portid", ",", "jobid", ")", ":", "port", "=", "db_api", ".", "port_find", "(", "context", ",", "id", "=", "portid", ",", "scope", "=", "db_api", ".", "ONE", ")", "if", "not", "port", ":", "LOG", ".", "warning", "(", "\"Port not found\"", ")", "return", "net_driver", "=", "port_api", ".", "_get_net_driver", "(", "port", ".", "network", ",", "port", "=", "port", ")", "base_net_driver", "=", "port_api", ".", "_get_net_driver", "(", "port", ".", "network", ")", "sg_list", "=", "[", "sg", "for", "sg", "in", "port", ".", "security_groups", "]", "success", "=", "False", "error", "=", "None", "retries", "=", "3", "retry_delay", "=", "2", "for", "retry", "in", "xrange", "(", "retries", ")", ":", "try", ":", "net_driver", ".", "update_port", "(", "context", ",", "port_id", "=", "port", "[", "\"backend_key\"", "]", ",", "mac_address", "=", "port", "[", "\"mac_address\"", "]", ",", "device_id", "=", "port", "[", "\"device_id\"", "]", ",", "base_net_driver", "=", "base_net_driver", ",", "security_groups", "=", "sg_list", ")", "success", "=", "True", "error", "=", "None", "break", "except", "Exception", "as", "error", ":", "LOG", ".", "warning", "(", "\"Could not connect to redis, but retrying soon\"", ")", "time", ".", "sleep", "(", "retry_delay", ")", "status_str", "=", "\"\"", "if", "not", "success", ":", "status_str", "=", "\"Port %s update failed after %d tries. Error: %s\"", "%", "(", "portid", ",", "retries", ",", "error", ")", "update_body", "=", "dict", "(", "completed", "=", "True", ",", "status", "=", "status_str", ")", "update_body", "=", "dict", "(", "job", "=", "update_body", ")", "job_api", ".", "update_job", "(", "context", ".", "elevated", "(", ")", ",", "jobid", ",", "update_body", ")" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
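Two portability notes on the retry loop above: xrange is Python 2 only, and in Python 3 a name bound by `except Exception as error` is deleted when the handler exits, so the last error must be stashed explicitly. A self-contained Python 3 version of the same retry-with-delay shape:

import time

def retry_call(operation, retries=3, retry_delay=2):
    """Run operation() up to `retries` times, sleeping between attempts."""
    error = None
    for attempt in range(retries):
        try:
            return True, operation()
        except Exception as exc:  # broad by design, as in the original
            error = exc           # keep a reference; Py3 unbinds 'exc' after the handler
            time.sleep(retry_delay)
    return False, error

ok, result = retry_call(lambda: 42)
assert ok and result == 42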
valid
sg_gather_associated_ports
Gather all ports associated with a security group. Returns: * list, or None
quark/db/api.py
def sg_gather_associated_ports(context, group):
    """Gather all ports associated with a security group.

    Returns:
    * list, or None
    """
    if not group:
        return None
    if not hasattr(group, "ports") or len(group.ports) <= 0:
        return []
    return group.ports
def sg_gather_associated_ports(context, group):
    """Gather all ports associated with a security group.

    Returns:
    * list, or None
    """
    if not group:
        return None
    if not hasattr(group, "ports") or len(group.ports) <= 0:
        return []
    return group.ports
[ "Gather", "all", "ports", "associated", "with", "a", "security", "group", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/db/api.py#L892-L902
[ "def", "sg_gather_associated_ports", "(", "context", ",", "group", ")", ":", "if", "not", "group", ":", "return", "None", "if", "not", "hasattr", "(", "group", ",", "\"ports\"", ")", "or", "len", "(", "group", ".", "ports", ")", "<=", "0", ":", "return", "[", "]", "return", "group", ".", "ports" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
security_group_rule_update
Updates a security group rule. NOTE(alexm) this is non-standard functionality.
quark/db/api.py
def security_group_rule_update(context, rule, **kwargs): '''Updates a security group rule. NOTE(alexm) this is non-standard functionality. ''' rule.update(kwargs) context.session.add(rule) return rule
def security_group_rule_update(context, rule, **kwargs): '''Updates a security group rule. NOTE(alexm) this is non-standard functionality. ''' rule.update(kwargs) context.session.add(rule) return rule
[ "Updates", "a", "security", "group", "rule", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/db/api.py#L969-L976
[ "def", "security_group_rule_update", "(", "context", ",", "rule", ",", "*", "*", "kwargs", ")", ":", "rule", ".", "update", "(", "kwargs", ")", "context", ".", "session", ".", "add", "(", "rule", ")", "return", "rule" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
segment_allocation_find
Query for segment allocations.
quark/db/api.py
def segment_allocation_find(context, lock_mode=False, **filters):
    """Query for segment allocations."""
    range_ids = filters.pop("segment_allocation_range_ids", None)

    query = context.session.query(models.SegmentAllocation)
    if lock_mode:
        query = query.with_lockmode("update")

    query = query.filter_by(**filters)

    # Optionally filter by given list of range ids
    if range_ids:
        # NOTE: Query.filter() returns a new query; reassign or the filter is dropped
        query = query.filter(
            models.SegmentAllocation.segment_allocation_range_id.in_(
                range_ids))
    return query
def segment_allocation_find(context, lock_mode=False, **filters):
    """Query for segment allocations."""
    range_ids = filters.pop("segment_allocation_range_ids", None)

    query = context.session.query(models.SegmentAllocation)
    if lock_mode:
        query = query.with_lockmode("update")

    query = query.filter_by(**filters)

    # Optionally filter by given list of range ids
    if range_ids:
        # NOTE: Query.filter() returns a new query; reassign or the filter is dropped
        query = query.filter(
            models.SegmentAllocation.segment_allocation_range_id.in_(
                range_ids))
    return query
[ "Query", "for", "segment", "allocations", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/db/api.py#L1148-L1163
[ "def", "segment_allocation_find", "(", "context", ",", "lock_mode", "=", "False", ",", "*", "*", "filters", ")", ":", "range_ids", "=", "filters", ".", "pop", "(", "\"segment_allocation_range_ids\"", ",", "None", ")", "query", "=", "context", ".", "session", ".", "query", "(", "models", ".", "SegmentAllocation", ")", "if", "lock_mode", ":", "query", "=", "query", ".", "with_lockmode", "(", "\"update\"", ")", "query", "=", "query", ".", "filter_by", "(", "*", "*", "filters", ")", "# Optionally filter by given list of range ids", "if", "range_ids", ":", "query", ".", "filter", "(", "models", ".", "SegmentAllocation", ".", "segment_allocation_range_id", ".", "in_", "(", "range_ids", ")", ")", "return", "query" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
Routes.get_resources
Returns Ext Resources.
quark/api/extensions/routes.py
def get_resources(cls): """Returns Ext Resources.""" controller = RoutesController(directory.get_plugin()) return [extensions.ResourceExtension( Routes.get_alias(), controller)]
def get_resources(cls): """Returns Ext Resources.""" controller = RoutesController(directory.get_plugin()) return [extensions.ResourceExtension( Routes.get_alias(), controller)]
[ "Returns", "Ext", "Resources", "." ]
openstack/quark
python
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/api/extensions/routes.py#L100-L105
[ "def", "get_resources", "(", "cls", ")", ":", "controller", "=", "RoutesController", "(", "directory", ".", "get_plugin", "(", ")", ")", "return", "[", "extensions", ".", "ResourceExtension", "(", "Routes", ".", "get_alias", "(", ")", ",", "controller", ")", "]" ]
1112e6a66917d3e98e44cb7b33b107fd5a74bb2e
valid
NikoHomeControlConnection.send
Sends the given command to Niko Home Control and returns the output of the system. Aliases: write, put, sendall, send_all
nikohomecontrol/nhcconnection.py
def send(self, s): """ Sends the given command to Niko Home Control and returns the output of the system. Aliases: write, put, sendall, send_all """ self._socket.send(s.encode()) return self.read()
def send(self, s): """ Sends the given command to Niko Home Control and returns the output of the system. Aliases: write, put, sendall, send_all """ self._socket.send(s.encode()) return self.read()
[ "Sends", "the", "given", "command", "to", "Niko", "Home", "Control", "and", "returns", "the", "output", "of", "the", "system", "." ]
NoUseFreak/niko-home-control
python
https://github.com/NoUseFreak/niko-home-control/blob/4b9ff57c0f3fdadea7ac450d548292ca7b3033ad/nikohomecontrol/nhcconnection.py#L55-L63
[ "def", "send", "(", "self", ",", "s", ")", ":", "self", ".", "_socket", ".", "send", "(", "s", ".", "encode", "(", ")", ")", "return", "self", ".", "read", "(", ")" ]
4b9ff57c0f3fdadea7ac450d548292ca7b3033ad
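A hypothetical usage sketch — the constructor arguments and the command format below are assumptions, not taken from these records; Niko Home Control I installations conventionally listen on TCP port 8000 and exchange JSON commands:

# Hypothetical usage; host, port, and command syntax are assumptions.
conn = NikoHomeControlConnection('192.168.1.50', 8000)
reply = conn.send('{"cmd": "listactions"}')
print(reply)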
valid
if_
Implements the 'if' operator with support for multiple elseif-s.
json_logic/__init__.py
def if_(*args): """Implements the 'if' operator with support for multiple elseif-s.""" for i in range(0, len(args) - 1, 2): if args[i]: return args[i + 1] if len(args) % 2: return args[-1] else: return None
def if_(*args): """Implements the 'if' operator with support for multiple elseif-s.""" for i in range(0, len(args) - 1, 2): if args[i]: return args[i + 1] if len(args) % 2: return args[-1] else: return None
[ "Implements", "the", "if", "operator", "with", "support", "for", "multiple", "elseif", "-", "s", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L20-L28
[ "def", "if_", "(", "*", "args", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "args", ")", "-", "1", ",", "2", ")", ":", "if", "args", "[", "i", "]", ":", "return", "args", "[", "i", "+", "1", "]", "if", "len", "(", "args", ")", "%", "2", ":", "return", "args", "[", "-", "1", "]", "else", ":", "return", "None" ]
5fda9125eab4178f8f81c7779291940e31e87bab
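Given the definition above, the pairwise walk over (condition, value) arguments yields classic if/elif/else behavior; a trailing odd argument acts as the else branch:

assert if_(False, "a", True, "b", "c") == "b"   # second condition wins
assert if_(False, "a", False, "b", "c") == "c"  # falls through to the else value
assert if_(False, "a") is None                  # no else branch -> None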
valid
soft_equals
Implements the '==' operator, which does JS-style type coercion.
json_logic/__init__.py
def soft_equals(a, b):
    """Implements the '==' operator, which does JS-style type coercion."""
    if isinstance(a, str) or isinstance(b, str):
        return str(a) == str(b)
    if isinstance(a, bool) or isinstance(b, bool):
        return bool(a) is bool(b)
    return a == b
def soft_equals(a, b):
    """Implements the '==' operator, which does JS-style type coercion."""
    if isinstance(a, str) or isinstance(b, str):
        return str(a) == str(b)
    if isinstance(a, bool) or isinstance(b, bool):
        return bool(a) is bool(b)
    return a == b
[ "Implements", "the", "==", "operator", "which", "does", "JS", "-", "style", "type", "coercion", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L31-L37
[ "def", "soft_equals", "(", "a", ",", "b", ")", ":", "if", "isinstance", "(", "a", ",", "str", ")", "or", "isinstance", "(", "b", ",", "str", ")", ":", "return", "str", "(", "a", ")", "==", "str", "(", "b", ")", "if", "isinstance", "(", "a", ",", "bool", ")", "or", "isinstance", "(", "b", ",", "bool", ")", ":", "return", "bool", "(", "a", ")", "is", "bool", "(", "b", ")", "return", "a", "==", "b" ]
5fda9125eab4178f8f81c7779291940e31e87bab
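Some consequences of the check order above (string coercion is tried before the bool branch):

assert soft_equals(1, "1") is True         # both sides stringified
assert soft_equals(0, False) is True       # bool branch: bool(0) is bool(False)
assert soft_equals("true", True) is False  # str branch first: "true" != "True"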
valid
hard_equals
Implements the '===' operator.
json_logic/__init__.py
def hard_equals(a, b): """Implements the '===' operator.""" if type(a) != type(b): return False return a == b
def hard_equals(a, b): """Implements the '===' operator.""" if type(a) != type(b): return False return a == b
[ "Implements", "the", "===", "operator", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L40-L44
[ "def", "hard_equals", "(", "a", ",", "b", ")", ":", "if", "type", "(", "a", ")", "!=", "type", "(", "b", ")", ":", "return", "False", "return", "a", "==", "b" ]
5fda9125eab4178f8f81c7779291940e31e87bab
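Strict equality mirrors JavaScript's ===: values of differing Python types never compare equal here:

assert hard_equals(1, 1) is True
assert hard_equals(1, 1.0) is False   # int vs float
assert hard_equals(1, True) is False  # int vs bool, as with 1 === true in JS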
valid
less
Implements the '<' operator with JS-style type coercion.
json_logic/__init__.py
def less(a, b, *args):
    """Implements the '<' operator with JS-style type coercion."""
    types = set([type(a), type(b)])
    if float in types or int in types:
        try:
            a, b = float(a), float(b)
        except TypeError:
            # NaN
            return False
    return a < b and (not args or less(b, *args))
def less(a, b, *args):
    """Implements the '<' operator with JS-style type coercion."""
    types = set([type(a), type(b)])
    if float in types or int in types:
        try:
            a, b = float(a), float(b)
        except TypeError:
            # NaN
            return False
    return a < b and (not args or less(b, *args))
[ "Implements", "the", "<", "operator", "with", "JS", "-", "style", "type", "coercion", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L47-L56
[ "def", "less", "(", "a", ",", "b", ",", "*", "args", ")", ":", "types", "=", "set", "(", "[", "type", "(", "a", ")", ",", "type", "(", "b", ")", "]", ")", "if", "float", "in", "types", "or", "int", "in", "types", ":", "try", ":", "a", ",", "b", "=", "float", "(", "a", ")", ",", "float", "(", "b", ")", "except", "TypeError", ":", "# NaN", "return", "False", "return", "a", "<", "b", "and", "(", "not", "args", "or", "less", "(", "b", ",", "*", "args", ")", ")" ]
5fda9125eab4178f8f81c7779291940e31e87bab
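The variadic tail gives chained comparisons, and numbers are coerced when either side is numeric. One caveat worth knowing: float('a') raises ValueError, which the `except TypeError` clause does not catch, so comparing a number against a non-numeric string raises rather than returning False.

assert less(1, "2") is True   # "2" coerced to 2.0
assert less(1, 2, 3) is True  # chained: 1 < 2 < 3
assert less(3, 2) is False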
valid
less_or_equal
Implements the '<=' operator with JS-style type coercion.
json_logic/__init__.py
def less_or_equal(a, b, *args):
    """Implements the '<=' operator with JS-style type coercion."""
    return (
        less(a, b) or soft_equals(a, b)
    ) and (not args or less_or_equal(b, *args))
def less_or_equal(a, b, *args):
    """Implements the '<=' operator with JS-style type coercion."""
    return (
        less(a, b) or soft_equals(a, b)
    ) and (not args or less_or_equal(b, *args))
[ "Implements", "the", "<", "=", "operator", "with", "JS", "-", "style", "type", "coercion", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L59-L63
[ "def", "less_or_equal", "(", "a", ",", "b", ",", "*", "args", ")", ":", "return", "(", "less", "(", "a", ",", "b", ")", "or", "soft_equals", "(", "a", ",", "b", ")", ")", "and", "(", "not", "args", "or", "less_or_equal", "(", "b", ",", "*", "args", ")", ")" ]
5fda9125eab4178f8f81c7779291940e31e87bab
valid
to_numeric
Converts a string either to int or to float. This is important, because e.g. {"!==": [{"+": "0"}, 0.0]}
json_logic/__init__.py
def to_numeric(arg): """ Converts a string either to int or to float. This is important, because e.g. {"!==": [{"+": "0"}, 0.0]} """ if isinstance(arg, str): if '.' in arg: return float(arg) else: return int(arg) return arg
def to_numeric(arg): """ Converts a string either to int or to float. This is important, because e.g. {"!==": [{"+": "0"}, 0.0]} """ if isinstance(arg, str): if '.' in arg: return float(arg) else: return int(arg) return arg
[ "Converts", "a", "string", "either", "to", "int", "or", "to", "float", ".", "This", "is", "important", "because", "e", ".", "g", ".", "{", "!", "==", ":", "[", "{", "+", ":", "0", "}", "0", ".", "0", "]", "}" ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L66-L76
[ "def", "to_numeric", "(", "arg", ")", ":", "if", "isinstance", "(", "arg", ",", "str", ")", ":", "if", "'.'", "in", "arg", ":", "return", "float", "(", "arg", ")", "else", ":", "return", "int", "(", "arg", ")", "return", "arg" ]
5fda9125eab4178f8f81c7779291940e31e87bab
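The dot test decides between int and float; non-strings pass through untouched:

assert to_numeric("42") == 42 and isinstance(to_numeric("42"), int)
assert to_numeric("3.14") == 3.14
assert to_numeric(7) == 7  # non-strings are returned as-is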
valid
minus
Also, converts either to ints or to floats.
json_logic/__init__.py
def minus(*args): """Also, converts either to ints or to floats.""" if len(args) == 1: return -to_numeric(args[0]) return to_numeric(args[0]) - to_numeric(args[1])
def minus(*args): """Also, converts either to ints or to floats.""" if len(args) == 1: return -to_numeric(args[0]) return to_numeric(args[0]) - to_numeric(args[1])
[ "Also", "converts", "either", "to", "ints", "or", "to", "floats", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L83-L87
[ "def", "minus", "(", "*", "args", ")", ":", "if", "len", "(", "args", ")", "==", "1", ":", "return", "-", "to_numeric", "(", "args", "[", "0", "]", ")", "return", "to_numeric", "(", "args", "[", "0", "]", ")", "-", "to_numeric", "(", "args", "[", "1", "]", ")" ]
5fda9125eab4178f8f81c7779291940e31e87bab
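One argument negates, two subtract, both after numeric coercion:

assert minus(5) == -5
assert minus("7", "2") == 5  # strings coerced before subtracting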
valid
merge
Implements the 'merge' operator for merging lists.
json_logic/__init__.py
def merge(*args): """Implements the 'merge' operator for merging lists.""" ret = [] for arg in args: if isinstance(arg, list) or isinstance(arg, tuple): ret += list(arg) else: ret.append(arg) return ret
def merge(*args): """Implements the 'merge' operator for merging lists.""" ret = [] for arg in args: if isinstance(arg, list) or isinstance(arg, tuple): ret += list(arg) else: ret.append(arg) return ret
[ "Implements", "the", "merge", "operator", "for", "merging", "lists", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L90-L98
[ "def", "merge", "(", "*", "args", ")", ":", "ret", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "list", ")", "or", "isinstance", "(", "arg", ",", "tuple", ")", ":", "ret", "+=", "list", "(", "arg", ")", "else", ":", "ret", ".", "append", "(", "arg", ")", "return", "ret" ]
5fda9125eab4178f8f81c7779291940e31e87bab
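Scalars are appended while lists and tuples are spliced in flat — but only one level deep:

assert merge(1, [2, 3], (4,)) == [1, 2, 3, 4]
assert merge([1, [2]], 3) == [1, [2], 3]  # nested lists are not flattened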
valid
get_var
Gets variable value from data dictionary.
json_logic/__init__.py
def get_var(data, var_name, not_found=None): """Gets variable value from data dictionary.""" try: for key in str(var_name).split('.'): try: data = data[key] except TypeError: data = data[int(key)] except (KeyError, TypeError, ValueError): return not_found else: return data
def get_var(data, var_name, not_found=None): """Gets variable value from data dictionary.""" try: for key in str(var_name).split('.'): try: data = data[key] except TypeError: data = data[int(key)] except (KeyError, TypeError, ValueError): return not_found else: return data
[ "Gets", "variable", "value", "from", "data", "dictionary", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L101-L112
[ "def", "get_var", "(", "data", ",", "var_name", ",", "not_found", "=", "None", ")", ":", "try", ":", "for", "key", "in", "str", "(", "var_name", ")", ".", "split", "(", "'.'", ")", ":", "try", ":", "data", "=", "data", "[", "key", "]", "except", "TypeError", ":", "data", "=", "data", "[", "int", "(", "key", ")", "]", "except", "(", "KeyError", ",", "TypeError", ",", "ValueError", ")", ":", "return", "not_found", "else", ":", "return", "data" ]
5fda9125eab4178f8f81c7779291940e31e87bab
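Dotted paths traverse nested dicts and lists; the int() fallback handles list indices, and any miss returns the default:

data = {"user": {"tags": ["admin", "ops"]}}
assert get_var(data, "user.tags.1") == "ops"      # '1' retried as an int index
assert get_var(data, "user.email", "n/a") == "n/a"  # miss -> default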
valid
missing
Implements the missing operator for finding missing variables.
json_logic/__init__.py
def missing(data, *args): """Implements the missing operator for finding missing variables.""" not_found = object() if args and isinstance(args[0], list): args = args[0] ret = [] for arg in args: if get_var(data, arg, not_found) is not_found: ret.append(arg) return ret
def missing(data, *args): """Implements the missing operator for finding missing variables.""" not_found = object() if args and isinstance(args[0], list): args = args[0] ret = [] for arg in args: if get_var(data, arg, not_found) is not_found: ret.append(arg) return ret
[ "Implements", "the", "missing", "operator", "for", "finding", "missing", "variables", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L115-L124
[ "def", "missing", "(", "data", ",", "*", "args", ")", ":", "not_found", "=", "object", "(", ")", "if", "args", "and", "isinstance", "(", "args", "[", "0", "]", ",", "list", ")", ":", "args", "=", "args", "[", "0", "]", "ret", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "get_var", "(", "data", ",", "arg", ",", "not_found", ")", "is", "not_found", ":", "ret", ".", "append", "(", "arg", ")", "return", "ret" ]
5fda9125eab4178f8f81c7779291940e31e87bab
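Variable names come either as separate arguments or as a single list; names found in the data are filtered out:

assert missing({"a": 1}, "a", "b") == ["b"]
assert missing({"a": 1}, ["a", "b"]) == ["b"]  # list form is equivalent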
valid
missing_some
Implements the missing_some operator for finding missing variables.
json_logic/__init__.py
def missing_some(data, min_required, args): """Implements the missing_some operator for finding missing variables.""" if min_required < 1: return [] found = 0 not_found = object() ret = [] for arg in args: if get_var(data, arg, not_found) is not_found: ret.append(arg) else: found += 1 if found >= min_required: return [] return ret
def missing_some(data, min_required, args): """Implements the missing_some operator for finding missing variables.""" if min_required < 1: return [] found = 0 not_found = object() ret = [] for arg in args: if get_var(data, arg, not_found) is not_found: ret.append(arg) else: found += 1 if found >= min_required: return [] return ret
[ "Implements", "the", "missing_some", "operator", "for", "finding", "missing", "variables", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L127-L141
[ "def", "missing_some", "(", "data", ",", "min_required", ",", "args", ")", ":", "if", "min_required", "<", "1", ":", "return", "[", "]", "found", "=", "0", "not_found", "=", "object", "(", ")", "ret", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "get_var", "(", "data", ",", "arg", ",", "not_found", ")", "is", "not_found", ":", "ret", ".", "append", "(", "arg", ")", "else", ":", "found", "+=", "1", "if", "found", ">=", "min_required", ":", "return", "[", "]", "return", "ret" ]
5fda9125eab4178f8f81c7779291940e31e87bab
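The operator only reports the missing names when fewer than min_required of them are present:

assert missing_some({"a": 1}, 1, ["a", "b", "c"]) == []  # quota met by 'a'
assert missing_some({}, 1, ["a", "b"]) == ["a", "b"]     # nothing found
assert missing_some({"a": 1}, 2, ["a", "b"]) == ["b"]    # one short of quota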
valid
jsonLogic
Executes the json-logic with given data.
json_logic/__init__.py
def jsonLogic(tests, data=None): """Executes the json-logic with given data.""" # You've recursed to a primitive, stop! if tests is None or not isinstance(tests, dict): return tests data = data or {} operator = list(tests.keys())[0] values = tests[operator] # Easy syntax for unary operators, like {"var": "x"} instead of strict # {"var": ["x"]} if not isinstance(values, list) and not isinstance(values, tuple): values = [values] # Recursion! values = [jsonLogic(val, data) for val in values] if operator == 'var': return get_var(data, *values) if operator == 'missing': return missing(data, *values) if operator == 'missing_some': return missing_some(data, *values) if operator not in operations: raise ValueError("Unrecognized operation %s" % operator) return operations[operator](*values)
def jsonLogic(tests, data=None): """Executes the json-logic with given data.""" # You've recursed to a primitive, stop! if tests is None or not isinstance(tests, dict): return tests data = data or {} operator = list(tests.keys())[0] values = tests[operator] # Easy syntax for unary operators, like {"var": "x"} instead of strict # {"var": ["x"]} if not isinstance(values, list) and not isinstance(values, tuple): values = [values] # Recursion! values = [jsonLogic(val, data) for val in values] if operator == 'var': return get_var(data, *values) if operator == 'missing': return missing(data, *values) if operator == 'missing_some': return missing_some(data, *values) if operator not in operations: raise ValueError("Unrecognized operation %s" % operator) return operations[operator](*values)
[ "Executes", "the", "json", "-", "logic", "with", "given", "data", "." ]
nadirizr/json-logic-py
python
https://github.com/nadirizr/json-logic-py/blob/5fda9125eab4178f8f81c7779291940e31e87bab/json_logic/__init__.py#L174-L203
[ "def", "jsonLogic", "(", "tests", ",", "data", "=", "None", ")", ":", "# You've recursed to a primitive, stop!", "if", "tests", "is", "None", "or", "not", "isinstance", "(", "tests", ",", "dict", ")", ":", "return", "tests", "data", "=", "data", "or", "{", "}", "operator", "=", "list", "(", "tests", ".", "keys", "(", ")", ")", "[", "0", "]", "values", "=", "tests", "[", "operator", "]", "# Easy syntax for unary operators, like {\"var\": \"x\"} instead of strict", "# {\"var\": [\"x\"]}", "if", "not", "isinstance", "(", "values", ",", "list", ")", "and", "not", "isinstance", "(", "values", ",", "tuple", ")", ":", "values", "=", "[", "values", "]", "# Recursion!", "values", "=", "[", "jsonLogic", "(", "val", ",", "data", ")", "for", "val", "in", "values", "]", "if", "operator", "==", "'var'", ":", "return", "get_var", "(", "data", ",", "*", "values", ")", "if", "operator", "==", "'missing'", ":", "return", "missing", "(", "data", ",", "*", "values", ")", "if", "operator", "==", "'missing_some'", ":", "return", "missing_some", "(", "data", ",", "*", "values", ")", "if", "operator", "not", "in", "operations", ":", "raise", "ValueError", "(", "\"Unrecognized operation %s\"", "%", "operator", ")", "return", "operations", "[", "operator", "]", "(", "*", "values", ")" ]
5fda9125eab4178f8f81c7779291940e31e87bab