| partition (string, 3 classes) | func_name (string, 1-134 chars) | docstring (string, 1-46.9k chars) | path (string, 4-223 chars) | original_string (string, 75-104k chars) | code (string, 75-104k chars) | docstring_tokens (list, 1-1.97k items) | repo (string, 7-55 chars) | language (string, 1 class) | url (string, 87-315 chars) | code_tokens (list, 19-28.4k items) | sha (string, 40 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
SolidExecutionResult.transformed_values
|
Return dictionary of transformed results, with keys being output names.
Returns None if execution result isn't a success.
Reconstructs the pipeline context to materialize values.
|
python_modules/dagster/dagster/core/execution.py
|
def transformed_values(self):
    '''Return dictionary of transformed results, with keys being output names.
    Returns None if execution result isn't a success.
    Reconstructs the pipeline context to materialize values.
    '''
    if self.success and self.transforms:
        with self.reconstruct_context() as context:
            values = {
                result.step_output_data.output_name: self._get_value(
                    context, result.step_output_data
                )
                for result in self.transforms
                if result.is_successful_output
            }
        return values
    else:
        return None
|
def transformed_values(self):
    '''Return dictionary of transformed results, with keys being output names.
    Returns None if execution result isn't a success.
    Reconstructs the pipeline context to materialize values.
    '''
    if self.success and self.transforms:
        with self.reconstruct_context() as context:
            values = {
                result.step_output_data.output_name: self._get_value(
                    context, result.step_output_data
                )
                for result in self.transforms
                if result.is_successful_output
            }
        return values
    else:
        return None
|
[
"Return",
"dictionary",
"of",
"transformed",
"results",
"with",
"keys",
"being",
"output",
"names",
".",
"Returns",
"None",
"if",
"execution",
"result",
"isn",
"t",
"a",
"success",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L222-L239
|
[
"def",
"transformed_values",
"(",
"self",
")",
":",
"if",
"self",
".",
"success",
"and",
"self",
".",
"transforms",
":",
"with",
"self",
".",
"reconstruct_context",
"(",
")",
"as",
"context",
":",
"values",
"=",
"{",
"result",
".",
"step_output_data",
".",
"output_name",
":",
"self",
".",
"_get_value",
"(",
"context",
",",
"result",
".",
"step_output_data",
")",
"for",
"result",
"in",
"self",
".",
"transforms",
"if",
"result",
".",
"is_successful_output",
"}",
"return",
"values",
"else",
":",
"return",
"None"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
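A minimal usage sketch for `transformed_values`, assuming the dagster 0.x execution API (`execute_pipeline`, `result_for_solid`) that accompanies this `execution.py` module; `my_pipeline` and the solid name are hypothetical:

```python
from dagster import execute_pipeline  # assumed dagster 0.x API

run_result = execute_pipeline(my_pipeline)             # my_pipeline: hypothetical
solid_result = run_result.result_for_solid('add_one')  # SolidExecutionResult
# Maps each output name to its materialized value; None if the run failed.
print(solid_result.transformed_values())
```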
test
|
SolidExecutionResult.transformed_value
|
Returns transformed value either for DEFAULT_OUTPUT or for the output
given as output_name. Returns None if execution result isn't a success.
Reconstructs the pipeline context to materialize value.
|
python_modules/dagster/dagster/core/execution.py
|
def transformed_value(self, output_name=DEFAULT_OUTPUT):
    '''Returns transformed value either for DEFAULT_OUTPUT or for the output
    given as output_name. Returns None if execution result isn't a success.
    Reconstructs the pipeline context to materialize value.
    '''
    check.str_param(output_name, 'output_name')
    if not self.solid.definition.has_output(output_name):
        raise DagsterInvariantViolationError(
            '{output_name} not defined in solid {solid}'.format(
                output_name=output_name, solid=self.solid.name
            )
        )
    if self.success:
        for result in self.transforms:
            if (
                result.is_successful_output
                and result.step_output_data.output_name == output_name
            ):
                with self.reconstruct_context() as context:
                    value = self._get_value(context, result.step_output_data)
                return value
        raise DagsterInvariantViolationError(
            (
                'Did not find result {output_name} in solid {self.solid.name} '
                'execution result'
            ).format(output_name=output_name, self=self)
        )
    else:
        return None
|
def transformed_value(self, output_name=DEFAULT_OUTPUT):
    '''Returns transformed value either for DEFAULT_OUTPUT or for the output
    given as output_name. Returns None if execution result isn't a success.
    Reconstructs the pipeline context to materialize value.
    '''
    check.str_param(output_name, 'output_name')
    if not self.solid.definition.has_output(output_name):
        raise DagsterInvariantViolationError(
            '{output_name} not defined in solid {solid}'.format(
                output_name=output_name, solid=self.solid.name
            )
        )
    if self.success:
        for result in self.transforms:
            if (
                result.is_successful_output
                and result.step_output_data.output_name == output_name
            ):
                with self.reconstruct_context() as context:
                    value = self._get_value(context, result.step_output_data)
                return value
        raise DagsterInvariantViolationError(
            (
                'Did not find result {output_name} in solid {self.solid.name} '
                'execution result'
            ).format(output_name=output_name, self=self)
        )
    else:
        return None
|
[
"Returns",
"transformed",
"value",
"either",
"for",
"DEFAULT_OUTPUT",
"or",
"for",
"the",
"output",
"given",
"as",
"output_name",
".",
"Returns",
"None",
"if",
"execution",
"result",
"isn",
"t",
"a",
"success",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L241-L273
|
[
"def",
"transformed_value",
"(",
"self",
",",
"output_name",
"=",
"DEFAULT_OUTPUT",
")",
":",
"check",
".",
"str_param",
"(",
"output_name",
",",
"'output_name'",
")",
"if",
"not",
"self",
".",
"solid",
".",
"definition",
".",
"has_output",
"(",
"output_name",
")",
":",
"raise",
"DagsterInvariantViolationError",
"(",
"'{output_name} not defined in solid {solid}'",
".",
"format",
"(",
"output_name",
"=",
"output_name",
",",
"solid",
"=",
"self",
".",
"solid",
".",
"name",
")",
")",
"if",
"self",
".",
"success",
":",
"for",
"result",
"in",
"self",
".",
"transforms",
":",
"if",
"(",
"result",
".",
"is_successful_output",
"and",
"result",
".",
"step_output_data",
".",
"output_name",
"==",
"output_name",
")",
":",
"with",
"self",
".",
"reconstruct_context",
"(",
")",
"as",
"context",
":",
"value",
"=",
"self",
".",
"_get_value",
"(",
"context",
",",
"result",
".",
"step_output_data",
")",
"return",
"value",
"raise",
"DagsterInvariantViolationError",
"(",
"(",
"'Did not find result {output_name} in solid {self.solid.name} '",
"'execution result'",
")",
".",
"format",
"(",
"output_name",
"=",
"output_name",
",",
"self",
"=",
"self",
")",
")",
"else",
":",
"return",
"None"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
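Continuing the sketch above, `transformed_value` fetches a single output by name (`'my_output'` is hypothetical):

```python
# DEFAULT_OUTPUT is the implicit single-output name in dagster 0.x.
value = solid_result.transformed_value()             # default output
named = solid_result.transformed_value('my_output')  # a declared output name
# An undeclared name raises DagsterInvariantViolationError; if the
# execution was not a success, None is returned instead.
```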
test
|
SolidExecutionResult.failure_data
|
Returns the failing step's data that happened during this solid's execution, if any
|
python_modules/dagster/dagster/core/execution.py
|
def failure_data(self):
    '''Returns the failing step's data that happened during this solid's execution, if any'''
    for result in itertools.chain(
        self.input_expectations, self.output_expectations, self.transforms
    ):
        if result.event_type == DagsterEventType.STEP_FAILURE:
            return result.step_failure_data
|
def failure_data(self):
    '''Returns the failing step's data that happened during this solid's execution, if any'''
    for result in itertools.chain(
        self.input_expectations, self.output_expectations, self.transforms
    ):
        if result.event_type == DagsterEventType.STEP_FAILURE:
            return result.step_failure_data
|
[
"Returns",
"the",
"failing",
"step",
"s",
"data",
"that",
"happened",
"during",
"this",
"solid",
"s",
"execution",
"if",
"any"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L283-L289
|
[
"def",
"failure_data",
"(",
"self",
")",
":",
"for",
"result",
"in",
"itertools",
".",
"chain",
"(",
"self",
".",
"input_expectations",
",",
"self",
".",
"output_expectations",
",",
"self",
".",
"transforms",
")",
":",
"if",
"result",
".",
"event_type",
"==",
"DagsterEventType",
".",
"STEP_FAILURE",
":",
"return",
"result",
".",
"step_failure_data"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
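A sketch of inspecting a failed solid with `failure_data`, following the plain-method form shown in the excerpt; the `.error` attribute on the returned step failure data is an assumption:

```python
if not solid_result.success:
    failure = solid_result.failure_data()  # step failure data, or None
    if failure is not None:
        print(failure.error)  # assumed attribute carrying error info
```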
test
|
NamedDict
|
A :py:class:`Dict` with a name allowing it to be referenced by that name.
|
python_modules/dagster/dagster/core/types/field_utils.py
|
def NamedDict(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES):
    '''
    A :py:class:`Dict` with a name allowing it to be referenced by that name.
    '''
    check_user_facing_fields_dict(fields, 'NamedDict named "{}"'.format(name))
    class _NamedDict(_ConfigComposite):
        def __init__(self):
            super(_NamedDict, self).__init__(
                key=name,
                name=name,
                fields=fields,
                description=description,
                type_attributes=type_attributes,
            )
    return _NamedDict
|
def NamedDict(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES):
    '''
    A :py:class:`Dict` with a name allowing it to be referenced by that name.
    '''
    check_user_facing_fields_dict(fields, 'NamedDict named "{}"'.format(name))
    class _NamedDict(_ConfigComposite):
        def __init__(self):
            super(_NamedDict, self).__init__(
                key=name,
                name=name,
                fields=fields,
                description=description,
                type_attributes=type_attributes,
            )
    return _NamedDict
|
[
"A",
":",
"py",
":",
"class",
":",
"Dict",
"with",
"a",
"name",
"allowing",
"it",
"to",
"be",
"referenced",
"by",
"that",
"name",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/field_utils.py#L242-L258
|
[
"def",
"NamedDict",
"(",
"name",
",",
"fields",
",",
"description",
"=",
"None",
",",
"type_attributes",
"=",
"DEFAULT_TYPE_ATTRIBUTES",
")",
":",
"check_user_facing_fields_dict",
"(",
"fields",
",",
"'NamedDict named \"{}\"'",
".",
"format",
"(",
"name",
")",
")",
"class",
"_NamedDict",
"(",
"_ConfigComposite",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"super",
"(",
"_NamedDict",
",",
"self",
")",
".",
"__init__",
"(",
"key",
"=",
"name",
",",
"name",
"=",
"name",
",",
"fields",
"=",
"fields",
",",
"description",
"=",
"description",
",",
"type_attributes",
"=",
"type_attributes",
",",
")",
"return",
"_NamedDict"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
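A minimal sketch of `NamedDict`, assuming `Field` and the `String`/`Int` config types are importable from the top-level `dagster` package:

```python
from dagster import Field, Int, String  # import paths are assumptions

# A reusable config schema that can be referenced by its name.
ConnectionConfig = NamedDict(
    'ConnectionConfig',
    {'host': Field(String), 'port': Field(Int)},
)
```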
test
|
Dict
|
Schema for configuration data with string keys and typed values via :py:class:`Field` .
Args:
fields (Dict[str, Field])
|
python_modules/dagster/dagster/core/types/field_utils.py
|
def Dict(fields):
    '''
    Schema for configuration data with string keys and typed values via :py:class:`Field` .
    Args:
        fields (Dict[str, Field])
    '''
    check_user_facing_fields_dict(fields, 'Dict')
    class _Dict(_ConfigComposite):
        def __init__(self):
            key = 'Dict.' + str(DictCounter.get_next_count())
            super(_Dict, self).__init__(
                name=None,
                key=key,
                fields=fields,
                description='A configuration dictionary with typed fields',
                type_attributes=ConfigTypeAttributes(is_builtin=True),
            )
    return _Dict
|
def Dict(fields):
    '''
    Schema for configuration data with string keys and typed values via :py:class:`Field` .
    Args:
        fields (Dict[str, Field])
    '''
    check_user_facing_fields_dict(fields, 'Dict')
    class _Dict(_ConfigComposite):
        def __init__(self):
            key = 'Dict.' + str(DictCounter.get_next_count())
            super(_Dict, self).__init__(
                name=None,
                key=key,
                fields=fields,
                description='A configuration dictionary with typed fields',
                type_attributes=ConfigTypeAttributes(is_builtin=True),
            )
    return _Dict
|
[
"Schema",
"for",
"configuration",
"data",
"with",
"string",
"keys",
"and",
"typed",
"values",
"via",
":",
"py",
":",
"class",
":",
"Field",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/field_utils.py#L261-L281
|
[
"def",
"Dict",
"(",
"fields",
")",
":",
"check_user_facing_fields_dict",
"(",
"fields",
",",
"'Dict'",
")",
"class",
"_Dict",
"(",
"_ConfigComposite",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"key",
"=",
"'Dict.'",
"+",
"str",
"(",
"DictCounter",
".",
"get_next_count",
"(",
")",
")",
"super",
"(",
"_Dict",
",",
"self",
")",
".",
"__init__",
"(",
"name",
"=",
"None",
",",
"key",
"=",
"key",
",",
"fields",
"=",
"fields",
",",
"description",
"=",
"'A configuration dictionary with typed fields'",
",",
"type_attributes",
"=",
"ConfigTypeAttributes",
"(",
"is_builtin",
"=",
"True",
")",
",",
")",
"return",
"_Dict"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
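`Dict` builds the same kind of schema anonymously; a sketch under the same import assumptions:

```python
# An anonymous config schema: string keys, Field-typed values.
csv_config = Dict({'path': Field(String), 'sep': Field(String)})
```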
test
|
PermissiveDict
|
A permissive dict will permit the user to partially specify the permitted fields. Any fields
that are specified and passed in will be type checked. Other fields will be allowed, but
will be ignored by the type checker.
|
python_modules/dagster/dagster/core/types/field_utils.py
|
def PermissiveDict(fields=None):
    '''A permissive dict will permit the user to partially specify the permitted fields. Any fields
    that are specified and passed in will be type checked. Other fields will be allowed, but
    will be ignored by the type checker.
    '''
    if fields:
        check_user_facing_fields_dict(fields, 'PermissiveDict')
    class _PermissiveDict(_ConfigComposite):
        def __init__(self):
            key = 'PermissiveDict.' + str(DictCounter.get_next_count())
            super(_PermissiveDict, self).__init__(
                name=None,
                key=key,
                fields=fields or dict(),
                description='A configuration dictionary with typed fields',
                type_attributes=ConfigTypeAttributes(is_builtin=True),
            )
        @property
        def is_permissive_composite(self):
            return True
    return _PermissiveDict
|
def PermissiveDict(fields=None):
    '''A permissive dict will permit the user to partially specify the permitted fields. Any fields
    that are specified and passed in will be type checked. Other fields will be allowed, but
    will be ignored by the type checker.
    '''
    if fields:
        check_user_facing_fields_dict(fields, 'PermissiveDict')
    class _PermissiveDict(_ConfigComposite):
        def __init__(self):
            key = 'PermissiveDict.' + str(DictCounter.get_next_count())
            super(_PermissiveDict, self).__init__(
                name=None,
                key=key,
                fields=fields or dict(),
                description='A configuration dictionary with typed fields',
                type_attributes=ConfigTypeAttributes(is_builtin=True),
            )
        @property
        def is_permissive_composite(self):
            return True
    return _PermissiveDict
|
[
"A",
"permissive",
"dict",
"will",
"permit",
"the",
"user",
"to",
"partially",
"specify",
"the",
"permitted",
"fields",
".",
"Any",
"fields",
"that",
"are",
"specified",
"and",
"passed",
"in",
"will",
"be",
"type",
"checked",
".",
"Other",
"fields",
"will",
"be",
"allowed",
"but",
"will",
"be",
"ignored",
"by",
"the",
"type",
"checker",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/field_utils.py#L284-L308
|
[
"def",
"PermissiveDict",
"(",
"fields",
"=",
"None",
")",
":",
"if",
"fields",
":",
"check_user_facing_fields_dict",
"(",
"fields",
",",
"'PermissiveDict'",
")",
"class",
"_PermissiveDict",
"(",
"_ConfigComposite",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"key",
"=",
"'PermissiveDict.'",
"+",
"str",
"(",
"DictCounter",
".",
"get_next_count",
"(",
")",
")",
"super",
"(",
"_PermissiveDict",
",",
"self",
")",
".",
"__init__",
"(",
"name",
"=",
"None",
",",
"key",
"=",
"key",
",",
"fields",
"=",
"fields",
"or",
"dict",
"(",
")",
",",
"description",
"=",
"'A configuration dictionary with typed fields'",
",",
"type_attributes",
"=",
"ConfigTypeAttributes",
"(",
"is_builtin",
"=",
"True",
")",
",",
")",
"@",
"property",
"def",
"is_permissive_composite",
"(",
"self",
")",
":",
"return",
"True",
"return",
"_PermissiveDict"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
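A sketch of the permissive behavior described above (same import assumptions, plus `Bool`):

```python
# 'verbose' is type checked; extra keys are accepted but ignored
# by the type checker.
loose_config = PermissiveDict({'verbose': Field(Bool)})
# e.g. config {'verbose': True, 'experiment_id': 42} would validate.
```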
test
|
Selector
|
Selectors are used when you want to be able to present several different options to the user but
force them to select one. For example, it would not make much sense to allow them
to say that a single input should be sourced from a csv and a parquet file: They must choose.
Note that in other type systems this might be called an "input union."
Args:
fields (Dict[str, Field]):
|
python_modules/dagster/dagster/core/types/field_utils.py
|
def Selector(fields):
    '''Selectors are used when you want to be able to present several different options to the user but
    force them to select one. For example, it would not make much sense to allow them
    to say that a single input should be sourced from a csv and a parquet file: They must choose.
    Note that in other type systems this might be called an "input union."
    Args:
        fields (Dict[str, Field]):
    '''
    check_user_facing_fields_dict(fields, 'Selector')
    class _Selector(_ConfigSelector):
        def __init__(self):
            key = 'Selector.' + str(DictCounter.get_next_count())
            super(_Selector, self).__init__(
                key=key,
                name=None,
                fields=fields,
                # description='A configuration dictionary with typed fields',
                type_attributes=ConfigTypeAttributes(is_builtin=True),
            )
    return _Selector
|
def Selector(fields):
    '''Selectors are used when you want to be able to present several different options to the user but
    force them to select one. For example, it would not make much sense to allow them
    to say that a single input should be sourced from a csv and a parquet file: They must choose.
    Note that in other type systems this might be called an "input union."
    Args:
        fields (Dict[str, Field]):
    '''
    check_user_facing_fields_dict(fields, 'Selector')
    class _Selector(_ConfigSelector):
        def __init__(self):
            key = 'Selector.' + str(DictCounter.get_next_count())
            super(_Selector, self).__init__(
                key=key,
                name=None,
                fields=fields,
                # description='A configuration dictionary with typed fields',
                type_attributes=ConfigTypeAttributes(is_builtin=True),
            )
    return _Selector
|
[
"Selectors",
"are",
"used",
"when",
"you",
"want",
"to",
"be",
"able",
"present",
"several",
"different",
"options",
"to",
"the",
"user",
"but",
"force",
"them",
"to",
"select",
"one",
".",
"For",
"example",
"it",
"would",
"not",
"make",
"much",
"sense",
"to",
"allow",
"them",
"to",
"say",
"that",
"a",
"single",
"input",
"should",
"be",
"sourced",
"from",
"a",
"csv",
"and",
"a",
"parquet",
"file",
":",
"They",
"must",
"choose",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/field_utils.py#L311-L335
|
[
"def",
"Selector",
"(",
"fields",
")",
":",
"check_user_facing_fields_dict",
"(",
"fields",
",",
"'Selector'",
")",
"class",
"_Selector",
"(",
"_ConfigSelector",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"key",
"=",
"'Selector.'",
"+",
"str",
"(",
"DictCounter",
".",
"get_next_count",
"(",
")",
")",
"super",
"(",
"_Selector",
",",
"self",
")",
".",
"__init__",
"(",
"key",
"=",
"key",
",",
"name",
"=",
"None",
",",
"fields",
"=",
"fields",
",",
"# description='A configuration dictionary with typed fields',",
"type_attributes",
"=",
"ConfigTypeAttributes",
"(",
"is_builtin",
"=",
"True",
")",
",",
")",
"return",
"_Selector"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
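The docstring's csv-versus-parquet example, sketched with the same import assumptions:

```python
# The user must configure exactly one of these sources.
source_selector = Selector(
    {'csv': Field(String), 'parquet': Field(String)}
)
# {'csv': 'data.csv'} is valid config; supplying both keys is not.
```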
test
|
NamedSelector
|
A :py:class:`Selector` with a name, allowing it to be referenced by that name.
Args:
name (str):
fields (Dict[str, Field])
|
python_modules/dagster/dagster/core/types/field_utils.py
|
def NamedSelector(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES):
    '''
    A :py:class:`Selector` with a name, allowing it to be referenced by that name.
    Args:
        name (str):
        fields (Dict[str, Field])
    '''
    check.str_param(name, 'name')
    check_user_facing_fields_dict(fields, 'NamedSelector named "{}"'.format(name))
    class _NamedSelector(_ConfigSelector):
        def __init__(self):
            super(_NamedSelector, self).__init__(
                key=name,
                name=name,
                fields=fields,
                description=description,
                type_attributes=type_attributes,
            )
    return _NamedSelector
|
def NamedSelector(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES):
    '''
    A :py:class:`Selector` with a name, allowing it to be referenced by that name.
    Args:
        name (str):
        fields (Dict[str, Field])
    '''
    check.str_param(name, 'name')
    check_user_facing_fields_dict(fields, 'NamedSelector named "{}"'.format(name))
    class _NamedSelector(_ConfigSelector):
        def __init__(self):
            super(_NamedSelector, self).__init__(
                key=name,
                name=name,
                fields=fields,
                description=description,
                type_attributes=type_attributes,
            )
    return _NamedSelector
|
[
"A",
":",
"py",
":",
"class",
"Selector",
"with",
"a",
"name",
"allowing",
"it",
"to",
"be",
"referenced",
"by",
"that",
"name",
".",
"Args",
":",
"name",
"(",
"str",
")",
":",
"fields",
"(",
"Dict",
"[",
"str",
"Field",
"]",
")"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/field_utils.py#L338-L359
|
[
"def",
"NamedSelector",
"(",
"name",
",",
"fields",
",",
"description",
"=",
"None",
",",
"type_attributes",
"=",
"DEFAULT_TYPE_ATTRIBUTES",
")",
":",
"check",
".",
"str_param",
"(",
"name",
",",
"'name'",
")",
"check_user_facing_fields_dict",
"(",
"fields",
",",
"'NamedSelector named \"{}\"'",
".",
"format",
"(",
"name",
")",
")",
"class",
"_NamedSelector",
"(",
"_ConfigSelector",
")",
":",
"def",
"__init__",
"(",
"self",
")",
":",
"super",
"(",
"_NamedSelector",
",",
"self",
")",
".",
"__init__",
"(",
"key",
"=",
"name",
",",
"name",
"=",
"name",
",",
"fields",
"=",
"fields",
",",
"description",
"=",
"description",
",",
"type_attributes",
"=",
"type_attributes",
",",
")",
"return",
"_NamedSelector"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
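`NamedSelector` is the same construct registered under a reusable name; a one-line sketch:

```python
SourceSelector = NamedSelector(
    'SourceSelector', {'csv': Field(String), 'parquet': Field(String)}
)
```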
test
|
_is_valid_dataset
|
Datasets must be of form "project.dataset" or "dataset"
|
python_modules/libraries/dagster-gcp/dagster_gcp/types.py
|
def _is_valid_dataset(config_value):
    '''Datasets must be of form "project.dataset" or "dataset"
    '''
    return re.match(
        # regex matches: project.table -- OR -- table
        r'^' + RE_PROJECT + r'\.' + RE_DS_TABLE + r'$|^' + RE_DS_TABLE + r'$',
        config_value,
    )
|
def _is_valid_dataset(config_value):
    '''Datasets must be of form "project.dataset" or "dataset"
    '''
    return re.match(
        # regex matches: project.table -- OR -- table
        r'^' + RE_PROJECT + r'\.' + RE_DS_TABLE + r'$|^' + RE_DS_TABLE + r'$',
        config_value,
    )
|
[
"Datasets",
"must",
"be",
"of",
"form",
"project",
".",
"dataset",
"or",
"dataset"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-gcp/dagster_gcp/types.py#L86-L93
|
[
"def",
"_is_valid_dataset",
"(",
"config_value",
")",
":",
"return",
"re",
".",
"match",
"(",
"# regex matches: project.table -- OR -- table",
"r'^'",
"+",
"RE_PROJECT",
"+",
"r'\\.'",
"+",
"RE_DS_TABLE",
"+",
"r'$|^'",
"+",
"RE_DS_TABLE",
"+",
"r'$'",
",",
"config_value",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
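A self-contained illustration of `_is_valid_dataset`; the real `RE_PROJECT` and `RE_DS_TABLE` patterns are defined elsewhere in `dagster_gcp/types.py`, so the stand-ins below are assumptions:

```python
import re

RE_PROJECT = r'[a-z][a-z0-9\-]*'  # stand-in pattern (assumption)
RE_DS_TABLE = r'\w+'              # stand-in pattern (assumption)

def _is_valid_dataset(config_value):
    # matches: project.dataset -- OR -- dataset
    return re.match(
        r'^' + RE_PROJECT + r'\.' + RE_DS_TABLE + r'$|^' + RE_DS_TABLE + r'$',
        config_value,
    )

assert _is_valid_dataset('bigquery-public-data.samples')
assert _is_valid_dataset('samples')
assert not _is_valid_dataset('a.b.c')  # too many components
```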
test
|
_is_valid_table
|
Tables must be of form "project.dataset.table" or "dataset.table"
|
python_modules/libraries/dagster-gcp/dagster_gcp/types.py
|
def _is_valid_table(config_value):
    '''Tables must be of form "project.dataset.table" or "dataset.table"
    '''
    return re.match(
        r'^'
        + RE_PROJECT  # project
        + r'\.'  # .
        + RE_DS_TABLE  # dataset
        + r'\.'  # .
        + RE_DS_TABLE  # table
        + r'$|^'  # -- OR --
        + RE_DS_TABLE  # dataset
        + r'\.'  # .
        + RE_DS_TABLE  # table
        + r'$',
        config_value,
    )
|
def _is_valid_table(config_value):
    '''Tables must be of form "project.dataset.table" or "dataset.table"
    '''
    return re.match(
        r'^'
        + RE_PROJECT  # project
        + r'\.'  # .
        + RE_DS_TABLE  # dataset
        + r'\.'  # .
        + RE_DS_TABLE  # table
        + r'$|^'  # -- OR --
        + RE_DS_TABLE  # dataset
        + r'\.'  # .
        + RE_DS_TABLE  # table
        + r'$',
        config_value,
    )
|
[
"Tables",
"must",
"be",
"of",
"form",
"project",
".",
"dataset",
".",
"table",
"or",
"dataset",
".",
"table"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-gcp/dagster_gcp/types.py#L96-L112
|
[
"def",
"_is_valid_table",
"(",
"config_value",
")",
":",
"return",
"re",
".",
"match",
"(",
"r'^'",
"+",
"RE_PROJECT",
"# project",
"+",
"r'\\.'",
"# .",
"+",
"RE_DS_TABLE",
"# dataset",
"+",
"r'\\.'",
"# .",
"+",
"RE_DS_TABLE",
"# table",
"+",
"r'$|^'",
"# -- OR --",
"+",
"RE_DS_TABLE",
"# dataset",
"+",
"r'\\.'",
"# .",
"+",
"RE_DS_TABLE",
"# table",
"+",
"r'$'",
",",
"config_value",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
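The table check follows the same shape; a compact self-contained version with the same stand-in patterns (assumptions):

```python
import re

RE_PROJECT = r'[a-z][a-z0-9\-]*'  # stand-in (assumption)
RE_DS_TABLE = r'\w+'              # stand-in (assumption)
TABLE_RE = (
    r'^' + RE_PROJECT + r'\.' + RE_DS_TABLE + r'\.' + RE_DS_TABLE  # project.dataset.table
    + r'$|^' + RE_DS_TABLE + r'\.' + RE_DS_TABLE + r'$'            # dataset.table
)

assert re.match(TABLE_RE, 'my-project.my_dataset.my_table')
assert re.match(TABLE_RE, 'my_dataset.my_table')
assert not re.match(TABLE_RE, 'my_table')
```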
test
|
_execute_core_transform
|
Execute the user-specified transform for the solid. Wrap in an error boundary and do
all relevant logging and metrics tracking
|
python_modules/dagster/dagster/core/execution_plan/transform.py
|
def _execute_core_transform(transform_context, inputs):
    '''
    Execute the user-specified transform for the solid. Wrap in an error boundary and do
    all relevant logging and metrics tracking
    '''
    check.inst_param(transform_context, 'transform_context', SystemTransformExecutionContext)
    check.dict_param(inputs, 'inputs', key_type=str)
    step = transform_context.step
    solid = step.solid
    transform_context.log.debug(
        'Executing core transform for solid {solid}.'.format(solid=solid.name)
    )
    all_results = []
    for step_output in _yield_transform_results(transform_context, inputs):
        yield step_output
        if isinstance(step_output, StepOutputValue):
            all_results.append(step_output)
    if len(all_results) != len(solid.definition.output_defs):
        emitted_result_names = {r.output_name for r in all_results}
        solid_output_names = {output_def.name for output_def in solid.definition.output_defs}
        omitted_outputs = solid_output_names.difference(emitted_result_names)
        transform_context.log.info(
            'Solid {solid} did not fire outputs {outputs}'.format(
                solid=solid.name, outputs=repr(omitted_outputs)
            )
        )
|
def _execute_core_transform(transform_context, inputs):
    '''
    Execute the user-specified transform for the solid. Wrap in an error boundary and do
    all relevant logging and metrics tracking
    '''
    check.inst_param(transform_context, 'transform_context', SystemTransformExecutionContext)
    check.dict_param(inputs, 'inputs', key_type=str)
    step = transform_context.step
    solid = step.solid
    transform_context.log.debug(
        'Executing core transform for solid {solid}.'.format(solid=solid.name)
    )
    all_results = []
    for step_output in _yield_transform_results(transform_context, inputs):
        yield step_output
        if isinstance(step_output, StepOutputValue):
            all_results.append(step_output)
    if len(all_results) != len(solid.definition.output_defs):
        emitted_result_names = {r.output_name for r in all_results}
        solid_output_names = {output_def.name for output_def in solid.definition.output_defs}
        omitted_outputs = solid_output_names.difference(emitted_result_names)
        transform_context.log.info(
            'Solid {solid} did not fire outputs {outputs}'.format(
                solid=solid.name, outputs=repr(omitted_outputs)
            )
        )
|
[
"Execute",
"the",
"user",
"-",
"specified",
"transform",
"for",
"the",
"solid",
".",
"Wrap",
"in",
"an",
"error",
"boundary",
"and",
"do",
"all",
"relevant",
"logging",
"and",
"metrics",
"tracking"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution_plan/transform.py#L73-L102
|
[
"def",
"_execute_core_transform",
"(",
"transform_context",
",",
"inputs",
")",
":",
"check",
".",
"inst_param",
"(",
"transform_context",
",",
"'transform_context'",
",",
"SystemTransformExecutionContext",
")",
"check",
".",
"dict_param",
"(",
"inputs",
",",
"'inputs'",
",",
"key_type",
"=",
"str",
")",
"step",
"=",
"transform_context",
".",
"step",
"solid",
"=",
"step",
".",
"solid",
"transform_context",
".",
"log",
".",
"debug",
"(",
"'Executing core transform for solid {solid}.'",
".",
"format",
"(",
"solid",
"=",
"solid",
".",
"name",
")",
")",
"all_results",
"=",
"[",
"]",
"for",
"step_output",
"in",
"_yield_transform_results",
"(",
"transform_context",
",",
"inputs",
")",
":",
"yield",
"step_output",
"if",
"isinstance",
"(",
"step_output",
",",
"StepOutputValue",
")",
":",
"all_results",
".",
"append",
"(",
"step_output",
")",
"if",
"len",
"(",
"all_results",
")",
"!=",
"len",
"(",
"solid",
".",
"definition",
".",
"output_defs",
")",
":",
"emitted_result_names",
"=",
"{",
"r",
".",
"output_name",
"for",
"r",
"in",
"all_results",
"}",
"solid_output_names",
"=",
"{",
"output_def",
".",
"name",
"for",
"output_def",
"in",
"solid",
".",
"definition",
".",
"output_defs",
"}",
"omitted_outputs",
"=",
"solid_output_names",
".",
"difference",
"(",
"emitted_result_names",
")",
"transform_context",
".",
"log",
".",
"info",
"(",
"'Solid {solid} did not fire outputs {outputs}'",
".",
"format",
"(",
"solid",
"=",
"solid",
".",
"name",
",",
"outputs",
"=",
"repr",
"(",
"omitted_outputs",
")",
")",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
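The output-accounting step at the end of `_execute_core_transform` can be illustrated standalone (names are illustrative):

```python
# If a solid declared outputs it never yielded, report the difference.
emitted_result_names = {'result'}           # outputs actually yielded
solid_output_names = {'result', 'metrics'}  # outputs declared
omitted_outputs = solid_output_names.difference(emitted_result_names)
if omitted_outputs:
    print('Solid my_solid did not fire outputs {}'.format(repr(omitted_outputs)))
```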
test
|
dagster_type
|
Decorator version of as_dagster_type. See documentation for :py:func:`as_dagster_type` .
|
python_modules/dagster/dagster/core/types/decorator.py
|
def dagster_type(
    name=None,
    description=None,
    input_schema=None,
    output_schema=None,
    serialization_strategy=None,
    storage_plugins=None,
):
    '''
    Decorator version of as_dagster_type. See documentation for :py:func:`as_dagster_type` .
    '''
    def _with_args(bare_cls):
        check.type_param(bare_cls, 'bare_cls')
        new_name = name if name else bare_cls.__name__
        return _decorate_as_dagster_type(
            bare_cls=bare_cls,
            key=new_name,
            name=new_name,
            description=description,
            input_schema=input_schema,
            output_schema=output_schema,
            serialization_strategy=serialization_strategy,
            storage_plugins=storage_plugins,
        )
    # check for no args, no parens case
    if callable(name):
        klass = name
        new_name = klass.__name__
        return _decorate_as_dagster_type(
            bare_cls=klass, key=new_name, name=new_name, description=None
        )
    return _with_args
|
def dagster_type(
    name=None,
    description=None,
    input_schema=None,
    output_schema=None,
    serialization_strategy=None,
    storage_plugins=None,
):
    '''
    Decorator version of as_dagster_type. See documentation for :py:func:`as_dagster_type` .
    '''
    def _with_args(bare_cls):
        check.type_param(bare_cls, 'bare_cls')
        new_name = name if name else bare_cls.__name__
        return _decorate_as_dagster_type(
            bare_cls=bare_cls,
            key=new_name,
            name=new_name,
            description=description,
            input_schema=input_schema,
            output_schema=output_schema,
            serialization_strategy=serialization_strategy,
            storage_plugins=storage_plugins,
        )
    # check for no args, no parens case
    if callable(name):
        klass = name
        new_name = klass.__name__
        return _decorate_as_dagster_type(
            bare_cls=klass, key=new_name, name=new_name, description=None
        )
    return _with_args
|
[
"Decorator",
"version",
"of",
"as_dagster_type",
".",
"See",
"documentation",
"for",
":",
"py",
":",
"func",
":",
"as_dagster_type",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/decorator.py#L42-L76
|
[
"def",
"dagster_type",
"(",
"name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"input_schema",
"=",
"None",
",",
"output_schema",
"=",
"None",
",",
"serialization_strategy",
"=",
"None",
",",
"storage_plugins",
"=",
"None",
",",
")",
":",
"def",
"_with_args",
"(",
"bare_cls",
")",
":",
"check",
".",
"type_param",
"(",
"bare_cls",
",",
"'bare_cls'",
")",
"new_name",
"=",
"name",
"if",
"name",
"else",
"bare_cls",
".",
"__name__",
"return",
"_decorate_as_dagster_type",
"(",
"bare_cls",
"=",
"bare_cls",
",",
"key",
"=",
"new_name",
",",
"name",
"=",
"new_name",
",",
"description",
"=",
"description",
",",
"input_schema",
"=",
"input_schema",
",",
"output_schema",
"=",
"output_schema",
",",
"serialization_strategy",
"=",
"serialization_strategy",
",",
"storage_plugins",
"=",
"storage_plugins",
",",
")",
"# check for no args, no parens case",
"if",
"callable",
"(",
"name",
")",
":",
"klass",
"=",
"name",
"new_name",
"=",
"klass",
".",
"__name__",
"return",
"_decorate_as_dagster_type",
"(",
"bare_cls",
"=",
"klass",
",",
"key",
"=",
"new_name",
",",
"name",
"=",
"new_name",
",",
"description",
"=",
"None",
")",
"return",
"_with_args"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
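Both call forms handled by the `callable(name)` branch above, sketched here (assuming `dagster_type` is importable from the top-level `dagster` package):

```python
from dagster import dagster_type  # import path is an assumption

@dagster_type  # bare form: name defaults to the class name
class EventList(object):
    pass

@dagster_type(name='Frame', description='an illustrative named type')
class DataFrameWrapper(object):
    pass
```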
test
|
as_dagster_type
|
Takes a python cls and creates a type for it in the Dagster domain.
Args:
existing_type (cls)
The python type you want to project into the Dagster type system.
name (Optional[str]):
description (Optional[str]):
input_schema (Optional[InputSchema]):
An instance of a class that inherits from :py:class:`InputSchema` that
can map config data to a value of this type.
output_schema (Optional[OutputSchema]):
An instance of a class that inherits from :py:class:`OutputSchema` that
can map config data to persisting values of this type.
serialization_strategy (Optional[SerializationStrategy]):
The default behavior for how to serialize this value for
persisting between execution steps.
storage_plugins (Optional[Dict[RunStorageMode, TypeStoragePlugin]]):
Storage type specific overrides for the serialization strategy.
This allows for storage specific optimizations such as efficient
distributed storage on S3.
|
python_modules/dagster/dagster/core/types/decorator.py
|
def as_dagster_type(
    existing_type,
    name=None,
    description=None,
    input_schema=None,
    output_schema=None,
    serialization_strategy=None,
    storage_plugins=None,
):
    '''
    Takes a python cls and creates a type for it in the Dagster domain.
    Args:
        existing_type (cls)
            The python type you want to project into the Dagster type system.
        name (Optional[str]):
        description (Optional[str]):
        input_schema (Optional[InputSchema]):
            An instance of a class that inherits from :py:class:`InputSchema` that
            can map config data to a value of this type.
        output_schema (Optional[OutputSchema]):
            An instance of a class that inherits from :py:class:`OutputSchema` that
            can map config data to persisting values of this type.
        serialization_strategy (Optional[SerializationStrategy]):
            The default behavior for how to serialize this value for
            persisting between execution steps.
        storage_plugins (Optional[Dict[RunStorageMode, TypeStoragePlugin]]):
            Storage type specific overrides for the serialization strategy.
            This allows for storage specific optimizations such as efficient
            distributed storage on S3.
    '''
    check.type_param(existing_type, 'existing_type')
    check.opt_str_param(name, 'name')
    check.opt_str_param(description, 'description')
    check.opt_inst_param(input_schema, 'input_schema', InputSchema)
    check.opt_inst_param(output_schema, 'output_schema', OutputSchema)
    check.opt_inst_param(serialization_strategy, 'serialization_strategy', SerializationStrategy)
    storage_plugins = check.opt_dict_param(storage_plugins, 'storage_plugins')
    if serialization_strategy is None:
        serialization_strategy = PickleSerializationStrategy()
    name = existing_type.__name__ if name is None else name
    return _decorate_as_dagster_type(
        existing_type,
        key=name,
        name=name,
        description=description,
        input_schema=input_schema,
        output_schema=output_schema,
        serialization_strategy=serialization_strategy,
        storage_plugins=storage_plugins,
    )
|
def as_dagster_type(
    existing_type,
    name=None,
    description=None,
    input_schema=None,
    output_schema=None,
    serialization_strategy=None,
    storage_plugins=None,
):
    '''
    Takes a python cls and creates a type for it in the Dagster domain.
    Args:
        existing_type (cls)
            The python type you want to project into the Dagster type system.
        name (Optional[str]):
        description (Optional[str]):
        input_schema (Optional[InputSchema]):
            An instance of a class that inherits from :py:class:`InputSchema` that
            can map config data to a value of this type.
        output_schema (Optional[OutputSchema]):
            An instance of a class that inherits from :py:class:`OutputSchema` that
            can map config data to persisting values of this type.
        serialization_strategy (Optional[SerializationStrategy]):
            The default behavior for how to serialize this value for
            persisting between execution steps.
        storage_plugins (Optional[Dict[RunStorageMode, TypeStoragePlugin]]):
            Storage type specific overrides for the serialization strategy.
            This allows for storage specific optimizations such as efficient
            distributed storage on S3.
    '''
    check.type_param(existing_type, 'existing_type')
    check.opt_str_param(name, 'name')
    check.opt_str_param(description, 'description')
    check.opt_inst_param(input_schema, 'input_schema', InputSchema)
    check.opt_inst_param(output_schema, 'output_schema', OutputSchema)
    check.opt_inst_param(serialization_strategy, 'serialization_strategy', SerializationStrategy)
    storage_plugins = check.opt_dict_param(storage_plugins, 'storage_plugins')
    if serialization_strategy is None:
        serialization_strategy = PickleSerializationStrategy()
    name = existing_type.__name__ if name is None else name
    return _decorate_as_dagster_type(
        existing_type,
        key=name,
        name=name,
        description=description,
        input_schema=input_schema,
        output_schema=output_schema,
        serialization_strategy=serialization_strategy,
        storage_plugins=storage_plugins,
    )
|
[
"Takes",
"a",
"python",
"cls",
"and",
"creates",
"a",
"type",
"for",
"it",
"in",
"the",
"Dagster",
"domain",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/decorator.py#L98-L154
|
[
"def",
"as_dagster_type",
"(",
"existing_type",
",",
"name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"input_schema",
"=",
"None",
",",
"output_schema",
"=",
"None",
",",
"serialization_strategy",
"=",
"None",
",",
"storage_plugins",
"=",
"None",
",",
")",
":",
"check",
".",
"type_param",
"(",
"existing_type",
",",
"'existing_type'",
")",
"check",
".",
"opt_str_param",
"(",
"name",
",",
"'name'",
")",
"check",
".",
"opt_str_param",
"(",
"description",
",",
"'description'",
")",
"check",
".",
"opt_inst_param",
"(",
"input_schema",
",",
"'input_schema'",
",",
"InputSchema",
")",
"check",
".",
"opt_inst_param",
"(",
"output_schema",
",",
"'output_schema'",
",",
"OutputSchema",
")",
"check",
".",
"opt_inst_param",
"(",
"serialization_strategy",
",",
"'serialization_strategy'",
",",
"SerializationStrategy",
")",
"storage_plugins",
"=",
"check",
".",
"opt_dict_param",
"(",
"storage_plugins",
",",
"'storage_plugins'",
")",
"if",
"serialization_strategy",
"is",
"None",
":",
"serialization_strategy",
"=",
"PickleSerializationStrategy",
"(",
")",
"name",
"=",
"existing_type",
".",
"__name__",
"if",
"name",
"is",
"None",
"else",
"name",
"return",
"_decorate_as_dagster_type",
"(",
"existing_type",
",",
"key",
"=",
"name",
",",
"name",
"=",
"name",
",",
"description",
"=",
"description",
",",
"input_schema",
"=",
"input_schema",
",",
"output_schema",
"=",
"output_schema",
",",
"serialization_strategy",
"=",
"serialization_strategy",
",",
"storage_plugins",
"=",
"storage_plugins",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
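A sketch of projecting an existing class into the type system; `collections.OrderedDict` stands in for any class you do not control, and serialization falls back to pickle per the code above:

```python
import collections
from dagster import as_dagster_type  # import path is an assumption

OrderedDictType = as_dagster_type(collections.OrderedDict, name='OrderedDict')
```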
test
|
resource
|
A decorator for creating a resource. The decorated function will be used as the
resource_fn in a ResourceDefinition.
|
python_modules/dagster/dagster/core/definitions/resource.py
|
def resource(config_field=None, description=None):
    '''A decorator for creating a resource. The decorated function will be used as the
    resource_fn in a ResourceDefinition.
    '''
    # This case is for when decorator is used bare, without arguments.
    # E.g. @resource versus @resource()
    if callable(config_field):
        return ResourceDefinition(resource_fn=config_field)
    def _wrap(resource_fn):
        return ResourceDefinition(resource_fn, config_field, description)
    return _wrap
|
def resource(config_field=None, description=None):
    '''A decorator for creating a resource. The decorated function will be used as the
    resource_fn in a ResourceDefinition.
    '''
    # This case is for when decorator is used bare, without arguments.
    # E.g. @resource versus @resource()
    if callable(config_field):
        return ResourceDefinition(resource_fn=config_field)
    def _wrap(resource_fn):
        return ResourceDefinition(resource_fn, config_field, description)
    return _wrap
|
[
"A",
"decorator",
"for",
"creating",
"a",
"resource",
".",
"The",
"decorated",
"function",
"will",
"be",
"used",
"as",
"the",
"resource_fn",
"in",
"a",
"ResourceDefinition",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/resource.py#L41-L54
|
[
"def",
"resource",
"(",
"config_field",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# This case is for when decorator is used bare, without arguments.",
"# E.g. @resource versus @resource()",
"if",
"callable",
"(",
"config_field",
")",
":",
"return",
"ResourceDefinition",
"(",
"resource_fn",
"=",
"config_field",
")",
"def",
"_wrap",
"(",
"resource_fn",
")",
":",
"return",
"ResourceDefinition",
"(",
"resource_fn",
",",
"config_field",
",",
"description",
")",
"return",
"_wrap"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
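Both decorator forms handled by the `callable(config_field)` branch, sketched below; the init-context argument to the resource function is an assumption from the broader dagster 0.x API:

```python
from dagster import resource  # import path is an assumption

@resource  # bare form, no config
def constant_resource(_init_context):
    return 42

@resource(description='a described resource')  # parenthesized form
def described_resource(_init_context):
    return 42
```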
test
|
run_spark_subprocess
|
See https://bit.ly/2OpksJC for source of the subprocess stdout/stderr capture pattern in this
function.
|
python_modules/libraries/dagster-spark/dagster_spark/utils.py
|
def run_spark_subprocess(cmd, logger):
    """See https://bit.ly/2OpksJC for source of the subprocess stdout/stderr capture pattern in this
    function.
    """
    # Spark sometimes logs in log4j format. In those cases, we detect and parse.
    # Example log line from Spark that this is intended to match:
    # 2019-03-27 16:00:19 INFO ContextHandler:781 - Started o.s.j.s.ServletContextHandler...
    log4j_regex = r'^(\d{4}\-\d{2}\-\d{2} \d{2}:\d{2}:\d{2}) ([A-Z]{3,5})(.*?)$'
    def reader(pipe, pipe_name, p, msg_queue):
        try:
            with pipe:
                while p.poll() is None:
                    for line in pipe.readlines():
                        match = re.match(log4j_regex, line)
                        if match:
                            line = match.groups()[2]
                        msg_queue.put((pipe_name, line))
        finally:
            # Use None as sentinel for done state, detected by iter() below
            msg_queue.put(None)
    p = subprocess.Popen(
        ' '.join(cmd),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=0,
        universal_newlines=True,
        shell=True,
    )
    q = queue.Queue()
    Thread(target=reader, args=[p.stdout, 'stdout', p, q]).start()
    Thread(target=reader, args=[p.stderr, 'stderr', p, q]).start()
    for _ in range(2):  # There will be two None sentinels, one for each stream
        for pipe_name, line in iter(q.get, None):
            if pipe_name == 'stdout':
                logger.info(line)
            elif pipe_name == 'stderr':
                logger.error(line)
    p.wait()
    return p.returncode
|
def run_spark_subprocess(cmd, logger):
    """See https://bit.ly/2OpksJC for source of the subprocess stdout/stderr capture pattern in this
    function.
    """
    # Spark sometimes logs in log4j format. In those cases, we detect and parse.
    # Example log line from Spark that this is intended to match:
    # 2019-03-27 16:00:19 INFO ContextHandler:781 - Started o.s.j.s.ServletContextHandler...
    log4j_regex = r'^(\d{4}\-\d{2}\-\d{2} \d{2}:\d{2}:\d{2}) ([A-Z]{3,5})(.*?)$'
    def reader(pipe, pipe_name, p, msg_queue):
        try:
            with pipe:
                while p.poll() is None:
                    for line in pipe.readlines():
                        match = re.match(log4j_regex, line)
                        if match:
                            line = match.groups()[2]
                        msg_queue.put((pipe_name, line))
        finally:
            # Use None as sentinel for done state, detected by iter() below
            msg_queue.put(None)
    p = subprocess.Popen(
        ' '.join(cmd),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=0,
        universal_newlines=True,
        shell=True,
    )
    q = queue.Queue()
    Thread(target=reader, args=[p.stdout, 'stdout', p, q]).start()
    Thread(target=reader, args=[p.stderr, 'stderr', p, q]).start()
    for _ in range(2):  # There will be two None sentinels, one for each stream
        for pipe_name, line in iter(q.get, None):
            if pipe_name == 'stdout':
                logger.info(line)
            elif pipe_name == 'stderr':
                logger.error(line)
    p.wait()
    return p.returncode
|
[
"See",
"https",
":",
"//",
"bit",
".",
"ly",
"/",
"2OpksJC",
"for",
"source",
"of",
"the",
"subprocess",
"stdout",
"/",
"stderr",
"capture",
"pattern",
"in",
"this",
"function",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-spark/dagster_spark/utils.py#L9-L51
|
[
"def",
"run_spark_subprocess",
"(",
"cmd",
",",
"logger",
")",
":",
"# Spark sometimes logs in log4j format. In those cases, we detect and parse.",
"# Example log line from Spark that this is intended to match:",
"# 2019-03-27 16:00:19 INFO ContextHandler:781 - Started o.s.j.s.ServletContextHandler...",
"log4j_regex",
"=",
"r'^(\\d{4}\\-\\d{2}\\-\\d{2} \\d{2}:\\d{2}:\\d{2}) ([A-Z]{3,5})(.*?)$'",
"def",
"reader",
"(",
"pipe",
",",
"pipe_name",
",",
"p",
",",
"msg_queue",
")",
":",
"try",
":",
"with",
"pipe",
":",
"while",
"p",
".",
"poll",
"(",
")",
"is",
"None",
":",
"for",
"line",
"in",
"pipe",
".",
"readlines",
"(",
")",
":",
"match",
"=",
"re",
".",
"match",
"(",
"log4j_regex",
",",
"line",
")",
"if",
"match",
":",
"line",
"=",
"match",
".",
"groups",
"(",
")",
"[",
"2",
"]",
"msg_queue",
".",
"put",
"(",
"(",
"pipe_name",
",",
"line",
")",
")",
"finally",
":",
"# Use None as sentinel for done state, detected by iter() below",
"msg_queue",
".",
"put",
"(",
"None",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"' '",
".",
"join",
"(",
"cmd",
")",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"bufsize",
"=",
"0",
",",
"universal_newlines",
"=",
"True",
",",
"shell",
"=",
"True",
",",
")",
"q",
"=",
"queue",
".",
"Queue",
"(",
")",
"Thread",
"(",
"target",
"=",
"reader",
",",
"args",
"=",
"[",
"p",
".",
"stdout",
",",
"'stdout'",
",",
"p",
",",
"q",
"]",
")",
".",
"start",
"(",
")",
"Thread",
"(",
"target",
"=",
"reader",
",",
"args",
"=",
"[",
"p",
".",
"stderr",
",",
"'stderr'",
",",
"p",
",",
"q",
"]",
")",
".",
"start",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"2",
")",
":",
"# There will be two None sentinels, one for each stream",
"for",
"pipe_name",
",",
"line",
"in",
"iter",
"(",
"q",
".",
"get",
",",
"None",
")",
":",
"if",
"pipe_name",
"==",
"'stdout'",
":",
"logger",
".",
"info",
"(",
"line",
")",
"elif",
"pipe_name",
"==",
"'stderr'",
":",
"logger",
".",
"error",
"(",
"line",
")",
"p",
".",
"wait",
"(",
")",
"return",
"p",
".",
"returncode"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
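A distilled, self-contained version of the reader-thread/queue pattern above, simplified to iterate the stream directly instead of polling with `readlines`:

```python
import queue
import subprocess
from threading import Thread

def run_and_log(cmd):
    p = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    q = queue.Queue()

    def reader(pipe, name):
        try:
            with pipe:
                for line in pipe:
                    q.put((name, line.rstrip()))
        finally:
            q.put(None)  # sentinel: this stream is finished

    Thread(target=reader, args=[p.stdout, 'stdout']).start()
    Thread(target=reader, args=[p.stderr, 'stderr']).start()
    for _ in range(2):  # one sentinel per stream
        for name, line in iter(q.get, None):
            print(name, line)
    p.wait()
    return p.returncode

print(run_and_log(['echo', 'hello']))  # assumes a POSIX 'echo' binary
```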
test
|
parse_spark_config
|
For each key-value pair in spark conf, we need to pass to CLI in format:
--conf "key=value"
|
python_modules/libraries/dagster-spark/dagster_spark/utils.py
|
def parse_spark_config(spark_conf):
    '''For each key-value pair in spark conf, we need to pass to CLI in format:
        --conf "key=value"
    '''
    spark_conf_list = flatten_dict(spark_conf)
    return list(
        itertools.chain.from_iterable([('--conf', '{}={}'.format(*c)) for c in spark_conf_list])
    )
|
def parse_spark_config(spark_conf):
    '''For each key-value pair in spark conf, we need to pass to CLI in format:
        --conf "key=value"
    '''
    spark_conf_list = flatten_dict(spark_conf)
    return list(
        itertools.chain.from_iterable([('--conf', '{}={}'.format(*c)) for c in spark_conf_list])
    )
|
[
"For",
"each",
"key",
"-",
"value",
"pair",
"in",
"spark",
"conf",
"we",
"need",
"to",
"pass",
"to",
"CLI",
"in",
"format",
":"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-spark/dagster_spark/utils.py#L74-L83
|
[
"def",
"parse_spark_config",
"(",
"spark_conf",
")",
":",
"spark_conf_list",
"=",
"flatten_dict",
"(",
"spark_conf",
")",
"return",
"list",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"[",
"(",
"'--conf'",
",",
"'{}={}'",
".",
"format",
"(",
"*",
"c",
")",
")",
"for",
"c",
"in",
"spark_conf_list",
"]",
")",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
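A runnable illustration of the CLI shape `parse_spark_config` produces; `flatten_dict` lives elsewhere in `dagster_spark/utils.py`, so the recursive stand-in here is an assumption:

```python
import itertools

def flatten_dict(d, prefix=None):
    # Stand-in (assumption): yields (dotted.key, value) pairs from a nested dict.
    pairs = []
    for key, value in d.items():
        full_key = key if prefix is None else '{}.{}'.format(prefix, key)
        if isinstance(value, dict):
            pairs.extend(flatten_dict(value, full_key))
        else:
            pairs.append((full_key, value))
    return pairs

def parse_spark_config(spark_conf):
    spark_conf_list = flatten_dict(spark_conf)
    return list(
        itertools.chain.from_iterable(
            [('--conf', '{}={}'.format(*c)) for c in spark_conf_list]
        )
    )

print(parse_spark_config({'spark': {'executor': {'memory': '4g'}}}))
# ['--conf', 'spark.executor.memory=4g']
```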
test
|
SystemNamedDict
|
A SystemNamedDict object is simply a NamedDict intended for internal (dagster) use.
|
python_modules/dagster/dagster/core/definitions/environment_configs.py
|
def SystemNamedDict(name, fields, description=None):
    '''A SystemNamedDict object is simply a NamedDict intended for internal (dagster) use.
    '''
    return NamedDict(name, fields, description, ConfigTypeAttributes(is_system_config=True))
|
def SystemNamedDict(name, fields, description=None):
    '''A SystemNamedDict object is simply a NamedDict intended for internal (dagster) use.
    '''
    return NamedDict(name, fields, description, ConfigTypeAttributes(is_system_config=True))
|
[
"A",
"SystemNamedDict",
"object",
"is",
"simply",
"a",
"NamedDict",
"intended",
"for",
"internal",
"(",
"dagster",
")",
"use",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/environment_configs.py#L26-L29
|
[
"def",
"SystemNamedDict",
"(",
"name",
",",
"fields",
",",
"description",
"=",
"None",
")",
":",
"return",
"NamedDict",
"(",
"name",
",",
"fields",
",",
"description",
",",
"ConfigTypeAttributes",
"(",
"is_system_config",
"=",
"True",
")",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
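A one-line sketch under the same `Field`/`String` import assumptions as the earlier config examples:

```python
# A NamedDict flagged as internal (system) config.
StorageConfig = SystemNamedDict('StorageConfig', {'base_dir': Field(String)})
```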
test
|
PagerDutyService.EventV2_create
|
Events API v2 enables you to add PagerDuty's advanced event and incident management
functionality to any system that can make an outbound HTTP connection.
Arguments:
summary {string} -- A high-level, text summary message of the event. Will be used to
construct an alert's description.
Example: "PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host
'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN"
source {string} -- Specific human-readable unique identifier, such as a hostname, for
the system having the problem.
Examples:
"prod05.theseus.acme-widgets.com"
"171.26.23.22"
"aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003"
"9c09acd49a25"
severity {string} -- How impacted the affected system is. Displayed to users in lists
and influences the priority of any created incidents. Must be one
of {info, warning, error, critical}
Keyword Arguments:
event_action {str} -- There are three types of events that PagerDuty recognizes, and
are used to represent different types of activity in your
monitored systems. (default: 'trigger')
* trigger: When PagerDuty receives a trigger event, it will either open a new alert,
or add a new trigger log entry to an existing alert, depending on the
provided dedup_key. Your monitoring tools should send PagerDuty a trigger
when a new problem has been detected. You may send additional triggers
when a previously detected problem has occurred again.
* acknowledge: acknowledge events cause the referenced incident to enter the
acknowledged state. While an incident is acknowledged, it won't
generate any additional notifications, even if it receives new
trigger events. Your monitoring tools should send PagerDuty an
acknowledge event when they know someone is presently working on the
problem.
* resolve: resolve events cause the referenced incident to enter the resolved state.
Once an incident is resolved, it won't generate any additional
notifications. New trigger events with the same dedup_key as a resolved
incident won't re-open the incident. Instead, a new incident will be
created. Your monitoring tools should send PagerDuty a resolve event when
the problem that caused the initial trigger event has been fixed.
dedup_key {string} -- Deduplication key for correlating triggers and resolves. The
maximum permitted length of this property is 255 characters.
timestamp {string} -- Timestamp (ISO 8601). When the upstream system detected / created
the event. This is useful if a system batches or holds events
before sending them to PagerDuty.
Optional - Will be auto-generated by PagerDuty if not provided.
Example:
2015-07-17T08:42:58.315+0000
component {string} -- The part or component of the affected system that is broken.
Examples:
"keepalive"
"webping"
"mysql"
"wqueue"
group {string} -- A cluster or grouping of sources. For example, sources
“prod-datapipe-02” and “prod-datapipe-03” might both be part of
“prod-datapipe”
Examples:
"prod-datapipe"
"www"
"web_stack"
event_class {string} -- The class/type of the event.
Examples:
"High CPU"
"Latency"
"500 Error"
custom_details {Dict[str, str]} -- Additional details about the event and affected
system.
Example:
{"ping time": "1500ms", "load avg": 0.75 }
|
python_modules/libraries/dagster-pagerduty/dagster_pagerduty/resources.py
|
def EventV2_create(
    self,
    summary,
    source,
    severity,
    event_action='trigger',
    dedup_key=None,
    timestamp=None,
    component=None,
    group=None,
    event_class=None,
    custom_details=None,
):
    '''Events API v2 enables you to add PagerDuty's advanced event and incident management
    functionality to any system that can make an outbound HTTP connection.
    Arguments:
        summary {string} -- A high-level, text summary message of the event. Will be used to
            construct an alert's description.
            Example: "PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host
            'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN"
        source {string} -- Specific human-readable unique identifier, such as a hostname, for
            the system having the problem.
            Examples:
            "prod05.theseus.acme-widgets.com"
            "171.26.23.22"
            "aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003"
            "9c09acd49a25"
        severity {string} -- How impacted the affected system is. Displayed to users in lists
            and influences the priority of any created incidents. Must be one
            of {info, warning, error, critical}
    Keyword Arguments:
        event_action {str} -- There are three types of events that PagerDuty recognizes, and
            are used to represent different types of activity in your
            monitored systems. (default: 'trigger')
            * trigger: When PagerDuty receives a trigger event, it will either open a new alert,
                or add a new trigger log entry to an existing alert, depending on the
                provided dedup_key. Your monitoring tools should send PagerDuty a trigger
                when a new problem has been detected. You may send additional triggers
                when a previously detected problem has occurred again.
            * acknowledge: acknowledge events cause the referenced incident to enter the
                acknowledged state. While an incident is acknowledged, it won't
                generate any additional notifications, even if it receives new
                trigger events. Your monitoring tools should send PagerDuty an
                acknowledge event when they know someone is presently working on the
                problem.
            * resolve: resolve events cause the referenced incident to enter the resolved state.
                Once an incident is resolved, it won't generate any additional
                notifications. New trigger events with the same dedup_key as a resolved
                incident won't re-open the incident. Instead, a new incident will be
                created. Your monitoring tools should send PagerDuty a resolve event when
                the problem that caused the initial trigger event has been fixed.
        dedup_key {string} -- Deduplication key for correlating triggers and resolves. The
            maximum permitted length of this property is 255 characters.
        timestamp {string} -- Timestamp (ISO 8601). When the upstream system detected / created
            the event. This is useful if a system batches or holds events
            before sending them to PagerDuty.
            Optional - Will be auto-generated by PagerDuty if not provided.
            Example:
            2015-07-17T08:42:58.315+0000
        component {string} -- The part or component of the affected system that is broken.
            Examples:
            "keepalive"
            "webping"
            "mysql"
            "wqueue"
        group {string} -- A cluster or grouping of sources. For example, sources
            “prod-datapipe-02” and “prod-datapipe-03” might both be part of
            “prod-datapipe”
            Examples:
            "prod-datapipe"
            "www"
            "web_stack"
        event_class {string} -- The class/type of the event.
            Examples:
            "High CPU"
            "Latency"
            "500 Error"
        custom_details {Dict[str, str]} -- Additional details about the event and affected
            system.
            Example:
            {"ping time": "1500ms", "load avg": 0.75 }
    '''
    data = {
        'routing_key': self.routing_key,
        'event_action': event_action,
        'payload': {'summary': summary, 'source': source, 'severity': severity},
    }
    if dedup_key is not None:
        data['dedup_key'] = dedup_key
    if timestamp is not None:
        data['payload']['timestamp'] = timestamp
    if component is not None:
        data['payload']['component'] = component
    if group is not None:
        data['payload']['group'] = group
    if event_class is not None:
        data['payload']['class'] = event_class
    if custom_details is not None:
        data['payload']['custom_details'] = custom_details
    return pypd.EventV2.create(data=data)
|
def EventV2_create(
self,
summary,
source,
severity,
event_action='trigger',
dedup_key=None,
timestamp=None,
component=None,
group=None,
event_class=None,
custom_details=None,
):
'''Events API v2 enables you to add PagerDuty's advanced event and incident management
functionality to any system that can make an outbound HTTP connection.
Arguments:
summary {string} -- A high-level, text summary message of the event. Will be used to
construct an alert's description.
Example: "PING OK - Packet loss = 0%, RTA = 1.41 ms" "Host
'acme-andromeda-sv1-c40 :: 179.21.24.50' is DOWN"
source {string} -- Specific human-readable unique identifier, such as a hostname, for
the system having the problem.
Examples:
"prod05.theseus.acme-widgets.com"
"171.26.23.22"
"aws:elasticache:us-east-1:852511987:cluster/api-stats-prod-003"
"9c09acd49a25"
severity {string} -- How impacted the affected system is. Displayed to users in lists
and influences the priority of any created incidents. Must be one
of {info, warning, error, critical}
Keyword Arguments:
event_action {str} -- There are three types of events that PagerDuty recognizes, and
are used to represent different types of activity in your
monitored systems. (default: 'trigger')
* trigger: When PagerDuty receives a trigger event, it will either open a new alert,
or add a new trigger log entry to an existing alert, depending on the
provided dedup_key. Your monitoring tools should send PagerDuty a trigger
when a new problem has been detected. You may send additional triggers
when a previously detected problem has occurred again.
* acknowledge: acknowledge events cause the referenced incident to enter the
acknowledged state. While an incident is acknowledged, it won't
generate any additional notifications, even if it receives new
trigger events. Your monitoring tools should send PagerDuty an
acknowledge event when they know someone is presently working on the
problem.
* resolve: resolve events cause the referenced incident to enter the resolved state.
Once an incident is resolved, it won't generate any additional
notifications. New trigger events with the same dedup_key as a resolved
incident won't re-open the incident. Instead, a new incident will be
created. Your monitoring tools should send PagerDuty a resolve event when
the problem that caused the initial trigger event has been fixed.
dedup_key {string} -- Deduplication key for correlating triggers and resolves. The
maximum permitted length of this property is 255 characters.
timestamp {string} -- Timestamp (ISO 8601). When the upstream system detected / created
the event. This is useful if a system batches or holds events
before sending them to PagerDuty.
Optional - Will be auto-generated by PagerDuty if not provided.
Example:
2015-07-17T08:42:58.315+0000
component {string} -- The part or component of the affected system that is broken.
Examples:
"keepalive"
"webping"
"mysql"
"wqueue"
group {string} -- A cluster or grouping of sources. For example, sources
“prod-datapipe-02” and “prod-datapipe-03” might both be part of
        “prod-datapipe”.
Examples:
"prod-datapipe"
"www"
"web_stack"
event_class {string} -- The class/type of the event.
Examples:
"High CPU"
"Latency"
"500 Error"
custom_details {Dict[str, str]} -- Additional details about the event and affected
system.
Example:
{"ping time": "1500ms", "load avg": 0.75 }
'''
data = {
'routing_key': self.routing_key,
'event_action': event_action,
'payload': {'summary': summary, 'source': source, 'severity': severity},
}
if dedup_key is not None:
data['dedup_key'] = dedup_key
if timestamp is not None:
data['payload']['timestamp'] = timestamp
if component is not None:
data['payload']['component'] = component
if group is not None:
data['payload']['group'] = group
if event_class is not None:
data['payload']['class'] = event_class
if custom_details is not None:
data['payload']['custom_details'] = custom_details
return pypd.EventV2.create(data=data)
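    # --- Editor's usage sketch (not part of the original source) ---
    # A minimal, hypothetical call to the method above. It assumes a configured
    # resource object named `pagerduty` exposing EventV2_create with a valid
    # Events API v2 routing_key; all field values are illustrative only.
    response = pagerduty.EventV2_create(
        summary="Host 'acme-andromeda-sv1-c40' is DOWN",
        source='prod05.theseus.acme-widgets.com',
        severity='critical',
        dedup_key='host-down-prod05',  # lets a later 'resolve' event close the alert
        custom_details={'ping time': '1500ms', 'load avg': '0.75'},
    )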
|
[
"Events",
"API",
"v2",
"enables",
"you",
"to",
"add",
"PagerDuty",
"s",
"advanced",
"event",
"and",
"incident",
"management",
"functionality",
"to",
"any",
"system",
"that",
"can",
"make",
"an",
"outbound",
"HTTP",
"connection",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-pagerduty/dagster_pagerduty/resources.py#L21-L148
|
[
"def",
"EventV2_create",
"(",
"self",
",",
"summary",
",",
"source",
",",
"severity",
",",
"event_action",
"=",
"'trigger'",
",",
"dedup_key",
"=",
"None",
",",
"timestamp",
"=",
"None",
",",
"component",
"=",
"None",
",",
"group",
"=",
"None",
",",
"event_class",
"=",
"None",
",",
"custom_details",
"=",
"None",
",",
")",
":",
"data",
"=",
"{",
"'routing_key'",
":",
"self",
".",
"routing_key",
",",
"'event_action'",
":",
"event_action",
",",
"'payload'",
":",
"{",
"'summary'",
":",
"summary",
",",
"'source'",
":",
"source",
",",
"'severity'",
":",
"severity",
"}",
",",
"}",
"if",
"dedup_key",
"is",
"not",
"None",
":",
"data",
"[",
"'dedup_key'",
"]",
"=",
"dedup_key",
"if",
"timestamp",
"is",
"not",
"None",
":",
"data",
"[",
"'payload'",
"]",
"[",
"'timestamp'",
"]",
"=",
"timestamp",
"if",
"component",
"is",
"not",
"None",
":",
"data",
"[",
"'payload'",
"]",
"[",
"'component'",
"]",
"=",
"component",
"if",
"group",
"is",
"not",
"None",
":",
"data",
"[",
"'payload'",
"]",
"[",
"'group'",
"]",
"=",
"group",
"if",
"event_class",
"is",
"not",
"None",
":",
"data",
"[",
"'payload'",
"]",
"[",
"'class'",
"]",
"=",
"event_class",
"if",
"custom_details",
"is",
"not",
"None",
":",
"data",
"[",
"'payload'",
"]",
"[",
"'custom_details'",
"]",
"=",
"custom_details",
"return",
"pypd",
".",
"EventV2",
".",
"create",
"(",
"data",
"=",
"data",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
coalesce_execution_steps
|
Groups execution steps by solid, in topological order of the solids.
|
python_modules/dagster-airflow/dagster_airflow/compile.py
|
def coalesce_execution_steps(execution_plan):
'''Groups execution steps by solid, in topological order of the solids.'''
solid_order = _coalesce_solid_order(execution_plan)
steps = defaultdict(list)
for solid_name, solid_steps in itertools.groupby(
execution_plan.topological_steps(), lambda x: x.solid_name
):
steps[solid_name] += list(solid_steps)
return OrderedDict([(solid_name, steps[solid_name]) for solid_name in solid_order])
|
def coalesce_execution_steps(execution_plan):
'''Groups execution steps by solid, in topological order of the solids.'''
solid_order = _coalesce_solid_order(execution_plan)
steps = defaultdict(list)
for solid_name, solid_steps in itertools.groupby(
execution_plan.topological_steps(), lambda x: x.solid_name
):
steps[solid_name] += list(solid_steps)
return OrderedDict([(solid_name, steps[solid_name]) for solid_name in solid_order])
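# --- Editor's sketch (not part of the original source) ---
# Self-contained demo of the grouping idiom above, with plain tuples standing
# in for dagster execution steps. itertools.groupby only merges *adjacent*
# equal keys, which is why the steps must already be in topological order;
# the defaultdict then folds non-adjacent runs of the same solid together.
import itertools
from collections import OrderedDict, defaultdict

topo_steps = [('load', 's1'), ('load', 's2'), ('clean', 's3'), ('load', 's4')]
steps = defaultdict(list)
for solid_name, solid_steps in itertools.groupby(topo_steps, lambda x: x[0]):
    steps[solid_name] += list(solid_steps)
# steps['load'] holds s1, s2 and s4 even though s3 interrupted the run
result = OrderedDict((name, steps[name]) for name in ('load', 'clean'))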
|
[
"Groups",
"execution",
"steps",
"by",
"solid",
"in",
"topological",
"order",
"of",
"the",
"solids",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster-airflow/dagster_airflow/compile.py#L16-L28
|
[
"def",
"coalesce_execution_steps",
"(",
"execution_plan",
")",
":",
"solid_order",
"=",
"_coalesce_solid_order",
"(",
"execution_plan",
")",
"steps",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"solid_name",
",",
"solid_steps",
"in",
"itertools",
".",
"groupby",
"(",
"execution_plan",
".",
"topological_steps",
"(",
")",
",",
"lambda",
"x",
":",
"x",
".",
"solid_name",
")",
":",
"steps",
"[",
"solid_name",
"]",
"+=",
"list",
"(",
"solid_steps",
")",
"return",
"OrderedDict",
"(",
"[",
"(",
"solid_name",
",",
"steps",
"[",
"solid_name",
"]",
")",
"for",
"solid_name",
"in",
"solid_order",
"]",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
DatabaseWrapper.get_connection_params
|
Default method to acquire database connection parameters.
Sets connection parameters to match settings.py, and sets
default values to blank fields.
|
djongo/base.py
|
def get_connection_params(self):
"""
Default method to acquire database connection parameters.
Sets connection parameters to match settings.py, and sets
default values to blank fields.
"""
valid_settings = {
'NAME': 'name',
'HOST': 'host',
'PORT': 'port',
'USER': 'username',
'PASSWORD': 'password',
'AUTH_SOURCE': 'authSource',
'AUTH_MECHANISM': 'authMechanism',
'ENFORCE_SCHEMA': 'enforce_schema',
'REPLICASET': 'replicaset',
'SSL': 'ssl',
'SSL_CERTFILE': 'ssl_certfile',
'SSL_CA_CERTS': 'ssl_ca_certs',
'READ_PREFERENCE': 'read_preference'
}
connection_params = {
'name': 'djongo_test',
'enforce_schema': True
}
for setting_name, kwarg in valid_settings.items():
try:
setting = self.settings_dict[setting_name]
except KeyError:
continue
if setting or setting is False:
connection_params[kwarg] = setting
return connection_params
|
def get_connection_params(self):
"""
Default method to acquire database connection parameters.
Sets connection parameters to match settings.py, and sets
default values to blank fields.
"""
valid_settings = {
'NAME': 'name',
'HOST': 'host',
'PORT': 'port',
'USER': 'username',
'PASSWORD': 'password',
'AUTH_SOURCE': 'authSource',
'AUTH_MECHANISM': 'authMechanism',
'ENFORCE_SCHEMA': 'enforce_schema',
'REPLICASET': 'replicaset',
'SSL': 'ssl',
'SSL_CERTFILE': 'ssl_certfile',
'SSL_CA_CERTS': 'ssl_ca_certs',
'READ_PREFERENCE': 'read_preference'
}
connection_params = {
'name': 'djongo_test',
'enforce_schema': True
}
for setting_name, kwarg in valid_settings.items():
try:
setting = self.settings_dict[setting_name]
except KeyError:
continue
if setting or setting is False:
connection_params[kwarg] = setting
return connection_params
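# --- Editor's sketch (not part of the original source) ---
# Stand-alone trace of the settings loop above, with a plain dict in place of
# self.settings_dict (hypothetical values). Blank values such as '' are
# skipped, while an explicit False survives thanks to `setting is False`.
valid_settings = {'NAME': 'name', 'HOST': 'host', 'ENFORCE_SCHEMA': 'enforce_schema'}
settings_dict = {'NAME': 'mydb', 'HOST': '', 'ENFORCE_SCHEMA': False}
connection_params = {'name': 'djongo_test', 'enforce_schema': True}
for setting_name, kwarg in valid_settings.items():
    try:
        setting = settings_dict[setting_name]
    except KeyError:
        continue
    if setting or setting is False:
        connection_params[kwarg] = setting
assert connection_params == {'name': 'mydb', 'enforce_schema': False}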
|
[
"Default",
"method",
"to",
"acquire",
"database",
"connection",
"parameters",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/base.py#L122-L157
|
[
"def",
"get_connection_params",
"(",
"self",
")",
":",
"valid_settings",
"=",
"{",
"'NAME'",
":",
"'name'",
",",
"'HOST'",
":",
"'host'",
",",
"'PORT'",
":",
"'port'",
",",
"'USER'",
":",
"'username'",
",",
"'PASSWORD'",
":",
"'password'",
",",
"'AUTH_SOURCE'",
":",
"'authSource'",
",",
"'AUTH_MECHANISM'",
":",
"'authMechanism'",
",",
"'ENFORCE_SCHEMA'",
":",
"'enforce_schema'",
",",
"'REPLICASET'",
":",
"'replicaset'",
",",
"'SSL'",
":",
"'ssl'",
",",
"'SSL_CERTFILE'",
":",
"'ssl_certfile'",
",",
"'SSL_CA_CERTS'",
":",
"'ssl_ca_certs'",
",",
"'READ_PREFERENCE'",
":",
"'read_preference'",
"}",
"connection_params",
"=",
"{",
"'name'",
":",
"'djongo_test'",
",",
"'enforce_schema'",
":",
"True",
"}",
"for",
"setting_name",
",",
"kwarg",
"in",
"valid_settings",
".",
"items",
"(",
")",
":",
"try",
":",
"setting",
"=",
"self",
".",
"settings_dict",
"[",
"setting_name",
"]",
"except",
"KeyError",
":",
"continue",
"if",
"setting",
"or",
"setting",
"is",
"False",
":",
"connection_params",
"[",
"kwarg",
"]",
"=",
"setting",
"return",
"connection_params"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
DatabaseWrapper.get_new_connection
|
Receives a dictionary connection_params to set up
a connection to the database.
The dictionary is correctly set up through the
get_connection_params method.
TODO: This needs to be made more generic to accept
other MongoClient parameters.
|
djongo/base.py
|
def get_new_connection(self, connection_params):
"""
        Receives a dictionary connection_params to set up
        a connection to the database.
        The dictionary is correctly set up through the
        get_connection_params method.
TODO: This needs to be made more generic to accept
other MongoClient parameters.
"""
name = connection_params.pop('name')
es = connection_params.pop('enforce_schema')
connection_params['document_class'] = OrderedDict
# connection_params['tz_aware'] = True
# To prevent leaving unclosed connections behind,
# client_conn must be closed before a new connection
# is created.
if self.client_connection is not None:
self.client_connection.close()
self.client_connection = Database.connect(**connection_params)
database = self.client_connection[name]
self.djongo_connection = DjongoClient(database, es)
return self.client_connection[name]
|
def get_new_connection(self, connection_params):
"""
        Receives a dictionary connection_params to set up
        a connection to the database.
        The dictionary is correctly set up through the
        get_connection_params method.
TODO: This needs to be made more generic to accept
other MongoClient parameters.
"""
name = connection_params.pop('name')
es = connection_params.pop('enforce_schema')
connection_params['document_class'] = OrderedDict
# connection_params['tz_aware'] = True
# To prevent leaving unclosed connections behind,
# client_conn must be closed before a new connection
# is created.
if self.client_connection is not None:
self.client_connection.close()
self.client_connection = Database.connect(**connection_params)
database = self.client_connection[name]
self.djongo_connection = DjongoClient(database, es)
return self.client_connection[name]
|
[
"Receives",
"a",
"dictionary",
"connection_params",
"to",
"setup",
"a",
"connection",
"to",
"the",
"database",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/base.py#L159-L185
|
[
"def",
"get_new_connection",
"(",
"self",
",",
"connection_params",
")",
":",
"name",
"=",
"connection_params",
".",
"pop",
"(",
"'name'",
")",
"es",
"=",
"connection_params",
".",
"pop",
"(",
"'enforce_schema'",
")",
"connection_params",
"[",
"'document_class'",
"]",
"=",
"OrderedDict",
"# connection_params['tz_aware'] = True",
"# To prevent leaving unclosed connections behind,",
"# client_conn must be closed before a new connection",
"# is created.",
"if",
"self",
".",
"client_connection",
"is",
"not",
"None",
":",
"self",
".",
"client_connection",
".",
"close",
"(",
")",
"self",
".",
"client_connection",
"=",
"Database",
".",
"connect",
"(",
"*",
"*",
"connection_params",
")",
"database",
"=",
"self",
".",
"client_connection",
"[",
"name",
"]",
"self",
".",
"djongo_connection",
"=",
"DjongoClient",
"(",
"database",
",",
"es",
")",
"return",
"self",
".",
"client_connection",
"[",
"name",
"]"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
DatabaseWrapper.create_cursor
|
Returns an active connection cursor to the database.
|
djongo/base.py
|
def create_cursor(self, name=None):
"""
Returns an active connection cursor to the database.
"""
return Cursor(self.client_connection, self.connection, self.djongo_connection)
|
def create_cursor(self, name=None):
"""
Returns an active connection cursor to the database.
"""
return Cursor(self.client_connection, self.connection, self.djongo_connection)
|
[
"Returns",
"an",
"active",
"connection",
"cursor",
"to",
"the",
"database",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/base.py#L199-L203
|
[
"def",
"create_cursor",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"return",
"Cursor",
"(",
"self",
".",
"client_connection",
",",
"self",
".",
"connection",
",",
"self",
".",
"djongo_connection",
")"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
DatabaseWrapper._close
|
Closes the client connection to the database.
|
djongo/base.py
|
def _close(self):
"""
Closes the client connection to the database.
"""
if self.connection:
with self.wrap_database_errors:
self.connection.client.close()
|
def _close(self):
"""
Closes the client connection to the database.
"""
if self.connection:
with self.wrap_database_errors:
self.connection.client.close()
|
[
"Closes",
"the",
"client",
"connection",
"to",
"the",
"database",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/base.py#L205-L211
|
[
"def",
"_close",
"(",
"self",
")",
":",
"if",
"self",
".",
"connection",
":",
"with",
"self",
".",
"wrap_database_errors",
":",
"self",
".",
"connection",
".",
"client",
".",
"close",
"(",
")"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
make_mdl
|
Builds an instance of model from the model_dict.
|
djongo/models/fields.py
|
def make_mdl(model, model_dict):
"""
Builds an instance of model from the model_dict.
"""
for field_name in model_dict:
field = model._meta.get_field(field_name)
model_dict[field_name] = field.to_python(model_dict[field_name])
return model(**model_dict)
|
def make_mdl(model, model_dict):
"""
Builds an instance of model from the model_dict.
"""
for field_name in model_dict:
field = model._meta.get_field(field_name)
model_dict[field_name] = field.to_python(model_dict[field_name])
return model(**model_dict)
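# --- Editor's sketch (not part of the original source) ---
# Duck-typed demo of the conversion loop, with hypothetical stand-ins for the
# Django machinery (FakeField/FakeModel are not djongo classes). make_mdl is
# repeated verbatim so the demo runs without Django or djongo installed.
# Note that the loop mutates model_dict in place before instantiating.
class FakeField:
    def to_python(self, value):
        return int(value)  # stand-in coercion

class FakeMeta:
    def get_field(self, name):
        return FakeField()

class FakeModel:
    _meta = FakeMeta()
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

def make_mdl(model, model_dict):
    for field_name in model_dict:
        field = model._meta.get_field(field_name)
        model_dict[field_name] = field.to_python(model_dict[field_name])
    return model(**model_dict)

instance = make_mdl(FakeModel, {'age': '42'})
assert instance.age == 42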
|
[
"Builds",
"an",
"instance",
"of",
"model",
"from",
"the",
"model_dict",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/models/fields.py#L35-L43
|
[
"def",
"make_mdl",
"(",
"model",
",",
"model_dict",
")",
":",
"for",
"field_name",
"in",
"model_dict",
":",
"field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field_name",
")",
"model_dict",
"[",
"field_name",
"]",
"=",
"field",
".",
"to_python",
"(",
"model_dict",
"[",
"field_name",
"]",
")",
"return",
"model",
"(",
"*",
"*",
"model_dict",
")"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
ArrayModelField.to_python
|
Overrides standard to_python method from django models to allow
correct translation of Mongo array to a python list.
|
djongo/models/fields.py
|
def to_python(self, value):
"""
Overrides standard to_python method from django models to allow
correct translation of Mongo array to a python list.
"""
if value is None:
return value
assert isinstance(value, list)
ret = []
for mdl_dict in value:
if isinstance(mdl_dict, self.model_container):
ret.append(mdl_dict)
continue
mdl = make_mdl(self.model_container, mdl_dict)
ret.append(mdl)
return ret
|
def to_python(self, value):
"""
Overrides standard to_python method from django models to allow
correct translation of Mongo array to a python list.
"""
if value is None:
return value
assert isinstance(value, list)
ret = []
for mdl_dict in value:
if isinstance(mdl_dict, self.model_container):
ret.append(mdl_dict)
continue
mdl = make_mdl(self.model_container, mdl_dict)
ret.append(mdl)
return ret
|
[
"Overrides",
"standard",
"to_python",
"method",
"from",
"django",
"models",
"to",
"allow",
"correct",
"translation",
"of",
"Mongo",
"array",
"to",
"a",
"python",
"list",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/models/fields.py#L224-L241
|
[
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"value",
"assert",
"isinstance",
"(",
"value",
",",
"list",
")",
"ret",
"=",
"[",
"]",
"for",
"mdl_dict",
"in",
"value",
":",
"if",
"isinstance",
"(",
"mdl_dict",
",",
"self",
".",
"model_container",
")",
":",
"ret",
".",
"append",
"(",
"mdl_dict",
")",
"continue",
"mdl",
"=",
"make_mdl",
"(",
"self",
".",
"model_container",
",",
"mdl_dict",
")",
"ret",
".",
"append",
"(",
"mdl",
")",
"return",
"ret"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
ArrayModelField.formfield
|
Returns the formfield for the array.
|
djongo/models/fields.py
|
def formfield(self, **kwargs):
"""
Returns the formfield for the array.
"""
defaults = {
'form_class': ArrayFormField,
'model_container': self.model_container,
'model_form_class': self.model_form_class,
'name': self.attname,
'mdl_form_kw_l': self.model_form_kwargs_l
}
defaults.update(kwargs)
return super().formfield(**defaults)
|
def formfield(self, **kwargs):
"""
Returns the formfield for the array.
"""
defaults = {
'form_class': ArrayFormField,
'model_container': self.model_container,
'model_form_class': self.model_form_class,
'name': self.attname,
'mdl_form_kw_l': self.model_form_kwargs_l
}
defaults.update(kwargs)
return super().formfield(**defaults)
|
[
"Returns",
"the",
"formfield",
"for",
"the",
"array",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/models/fields.py#L243-L256
|
[
"def",
"formfield",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"defaults",
"=",
"{",
"'form_class'",
":",
"ArrayFormField",
",",
"'model_container'",
":",
"self",
".",
"model_container",
",",
"'model_form_class'",
":",
"self",
".",
"model_form_class",
",",
"'name'",
":",
"self",
".",
"attname",
",",
"'mdl_form_kw_l'",
":",
"self",
".",
"model_form_kwargs_l",
"}",
"defaults",
".",
"update",
"(",
"kwargs",
")",
"return",
"super",
"(",
")",
".",
"formfield",
"(",
"*",
"*",
"defaults",
")"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
EmbeddedModelField.to_python
|
Overrides Django's default to_python to allow correct
translation to an instance.
|
djongo/models/fields.py
|
def to_python(self, value):
"""
Overrides Django's default to_python to allow correct
        translation to an instance.
"""
if value is None or isinstance(value, self.model_container):
return value
assert isinstance(value, dict)
instance = make_mdl(self.model_container, value)
return instance
|
def to_python(self, value):
"""
Overrides Django's default to_python to allow correct
        translation to an instance.
"""
if value is None or isinstance(value, self.model_container):
return value
assert isinstance(value, dict)
instance = make_mdl(self.model_container, value)
return instance
|
[
"Overrides",
"Django",
"s",
"default",
"to_python",
"to",
"allow",
"correct",
"translation",
"to",
"instance",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/models/fields.py#L507-L517
|
[
"def",
"to_python",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
"or",
"isinstance",
"(",
"value",
",",
"self",
".",
"model_container",
")",
":",
"return",
"value",
"assert",
"isinstance",
"(",
"value",
",",
"dict",
")",
"instance",
"=",
"make_mdl",
"(",
"self",
".",
"model_container",
",",
"value",
")",
"return",
"instance"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
ArrayReferenceManagerMixin._apply_rel_filters
|
Filter the queryset for the instance this manager is bound to.
|
djongo/models/fields.py
|
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset = queryset.filter(**self.core_filters)
return queryset
|
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
queryset = queryset.filter(**self.core_filters)
return queryset
|
[
"Filter",
"the",
"queryset",
"for",
"the",
"instance",
"this",
"manager",
"is",
"bound",
"to",
"."
] |
nesdis/djongo
|
python
|
https://github.com/nesdis/djongo/blob/7f9d79455cf030cb5eee0b822502c50a0d9d3abb/djongo/models/fields.py#L675-L684
|
[
"def",
"_apply_rel_filters",
"(",
"self",
",",
"queryset",
")",
":",
"queryset",
".",
"_add_hints",
"(",
"instance",
"=",
"self",
".",
"instance",
")",
"if",
"self",
".",
"_db",
":",
"queryset",
"=",
"queryset",
".",
"using",
"(",
"self",
".",
"_db",
")",
"queryset",
"=",
"queryset",
".",
"filter",
"(",
"*",
"*",
"self",
".",
"core_filters",
")",
"return",
"queryset"
] |
7f9d79455cf030cb5eee0b822502c50a0d9d3abb
|
test
|
_compute_nfp_uniform
|
Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], assuming uniform
distribution of set sizes within the interval.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
    cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives.
|
datasketch/lshensemble_partition.py
|
def _compute_nfp_uniform(l, u, cum_counts, sizes):
"""Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], assuming uniform
distribution of set sizes within the interval.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
        cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives.
"""
if l > u:
        raise ValueError("l must be less than or equal to u")
if l == 0:
n = cum_counts[u]
else:
n = cum_counts[u]-cum_counts[l-1]
return n * float(sizes[u] - sizes[l]) / float(2*sizes[u])
|
def _compute_nfp_uniform(l, u, cum_counts, sizes):
"""Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], assuming uniform
distribution of set sizes within the interval.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
        cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives.
"""
if l > u:
        raise ValueError("l must be less than or equal to u")
if l == 0:
n = cum_counts[u]
else:
n = cum_counts[u]-cum_counts[l-1]
return n * float(sizes[u] - sizes[l]) / float(2*sizes[u])
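# --- Editor's worked example (not part of the original source) ---
# Numeric check of the formula above with made-up data. With l == 0 the whole
# cumulative count is used, and each of the n sets is a false positive with
# average probability (sizes[u] - sizes[l]) / (2 * sizes[u]) under the
# uniform model.
import numpy as np

sizes = np.array([10, 20, 30])               # domain of set sizes
cum_counts = np.cumsum(np.array([5, 3, 2]))  # [5, 8, 10]
l, u = 0, 2
n = cum_counts[u] if l == 0 else cum_counts[u] - cum_counts[l - 1]
nfp = n * float(sizes[u] - sizes[l]) / float(2 * sizes[u])
assert abs(nfp - 10 * 20 / 60.0) < 1e-12     # 10 sets * (20 / 60)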
|
[
"Computes",
"the",
"expected",
"number",
"of",
"false",
"positives",
"caused",
"by",
"using",
"u",
"to",
"approximate",
"set",
"sizes",
"in",
"the",
"interval",
"[",
"l",
"u",
"]",
"assuming",
"uniform",
"distribution",
"of",
"set",
"sizes",
"within",
"the",
"interval",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble_partition.py#L13-L32
|
[
"def",
"_compute_nfp_uniform",
"(",
"l",
",",
"u",
",",
"cum_counts",
",",
"sizes",
")",
":",
"if",
"l",
">",
"u",
":",
"raise",
"ValueError",
"(",
"\"l must be less or equal to u\"",
")",
"if",
"l",
"==",
"0",
":",
"n",
"=",
"cum_counts",
"[",
"u",
"]",
"else",
":",
"n",
"=",
"cum_counts",
"[",
"u",
"]",
"-",
"cum_counts",
"[",
"l",
"-",
"1",
"]",
"return",
"n",
"*",
"float",
"(",
"sizes",
"[",
"u",
"]",
"-",
"sizes",
"[",
"l",
"]",
")",
"/",
"float",
"(",
"2",
"*",
"sizes",
"[",
"u",
"]",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
_compute_nfps_uniform
|
Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set_sizes within each sub-intervals.
Args:
    cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
|
datasketch/lshensemble_partition.py
|
def _compute_nfps_uniform(cum_counts, sizes):
"""Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set_sizes within each sub-intervals.
Args:
        cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
"""
nfps = np.zeros((len(sizes), len(sizes)))
    # All u and l are inclusive bounds for intervals.
    # Compute the p = 1 case: the NFPs of single intervals.
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)
return nfps
|
def _compute_nfps_uniform(cum_counts, sizes):
"""Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes, assuming uniform
distribution of set_sizes within each sub-intervals.
Args:
        cum_counts: the complete cumulative distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
"""
nfps = np.zeros((len(sizes), len(sizes)))
    # All u and l are inclusive bounds for intervals.
    # Compute the p = 1 case: the NFPs of single intervals.
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)
return nfps
|
[
"Computes",
"the",
"matrix",
"of",
"expected",
"false",
"positives",
"for",
"all",
"possible",
"sub",
"-",
"intervals",
"of",
"the",
"complete",
"domain",
"of",
"set",
"sizes",
"assuming",
"uniform",
"distribution",
"of",
"set_sizes",
"within",
"each",
"sub",
"-",
"intervals",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble_partition.py#L35-L54
|
[
"def",
"_compute_nfps_uniform",
"(",
"cum_counts",
",",
"sizes",
")",
":",
"nfps",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"sizes",
")",
",",
"len",
"(",
"sizes",
")",
")",
")",
"# All u an l are inclusive bounds for intervals.",
"# Compute p = 1, the NFPs",
"for",
"l",
"in",
"range",
"(",
"len",
"(",
"sizes",
")",
")",
":",
"for",
"u",
"in",
"range",
"(",
"l",
",",
"len",
"(",
"sizes",
")",
")",
":",
"nfps",
"[",
"l",
",",
"u",
"]",
"=",
"_compute_nfp_uniform",
"(",
"l",
",",
"u",
",",
"cum_counts",
",",
"sizes",
")",
"return",
"nfps"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
_compute_nfp_real
|
Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], using the real
set size distribution.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives.
|
datasketch/lshensemble_partition.py
|
def _compute_nfp_real(l, u, counts, sizes):
"""Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], using the real
set size distribution.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives.
"""
if l > u:
        raise ValueError("l must be less than or equal to u")
return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1])
|
def _compute_nfp_real(l, u, counts, sizes):
"""Computes the expected number of false positives caused by using
u to approximate set sizes in the interval [l, u], using the real
set size distribution.
Args:
l: the lower bound on set sizes.
u: the upper bound on set sizes.
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (float): the expected number of false positives.
"""
if l > u:
        raise ValueError("l must be less than or equal to u")
return np.sum((float(sizes[u])-sizes[l:u+1])/float(sizes[u])*counts[l:u+1])
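# --- Editor's worked example (not part of the original source) ---
# The real-distribution variant on the same made-up data: a set of size s in
# [l, u] is a false positive with probability (sizes[u] - s) / sizes[u],
# weighted by its frequency in counts.
import numpy as np

sizes = np.array([10, 20, 30])
counts = np.array([5, 3, 2])
l, u = 0, 2
nfp = np.sum((float(sizes[u]) - sizes[l:u + 1]) / float(sizes[u]) * counts[l:u + 1])
# (30-10)/30 * 5 + (30-20)/30 * 3 + 0 * 2 = 10/3 + 1
assert abs(nfp - (10.0 / 3 + 1)) < 1e-12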
|
[
"Computes",
"the",
"expected",
"number",
"of",
"false",
"positives",
"caused",
"by",
"using",
"u",
"to",
"approximate",
"set",
"sizes",
"in",
"the",
"interval",
"[",
"l",
"u",
"]",
"using",
"the",
"real",
"set",
"size",
"distribution",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble_partition.py#L57-L72
|
[
"def",
"_compute_nfp_real",
"(",
"l",
",",
"u",
",",
"counts",
",",
"sizes",
")",
":",
"if",
"l",
">",
"u",
":",
"raise",
"ValueError",
"(",
"\"l must be less or equal to u\"",
")",
"return",
"np",
".",
"sum",
"(",
"(",
"float",
"(",
"sizes",
"[",
"u",
"]",
")",
"-",
"sizes",
"[",
"l",
":",
"u",
"+",
"1",
"]",
")",
"/",
"float",
"(",
"sizes",
"[",
"u",
"]",
")",
"*",
"counts",
"[",
"l",
":",
"u",
"+",
"1",
"]",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
_compute_nfps_real
|
Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes.
Args:
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
|
datasketch/lshensemble_partition.py
|
def _compute_nfps_real(counts, sizes):
"""Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes.
Args:
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
"""
nfps = np.zeros((len(sizes), len(sizes)))
    # All u and l are inclusive bounds for intervals.
    # Compute the p = 1 case: the NFPs of single intervals.
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_real(l, u, counts, sizes)
return nfps
|
def _compute_nfps_real(counts, sizes):
"""Computes the matrix of expected false positives for all possible
sub-intervals of the complete domain of set sizes.
Args:
counts: the complete distribution of set sizes.
sizes: the complete domain of set sizes.
Return (np.array): the 2-D array of expected number of false positives
for every pair of [l, u] interval, where l is axis-0 and u is
axis-1.
"""
nfps = np.zeros((len(sizes), len(sizes)))
    # All u and l are inclusive bounds for intervals.
    # Compute the p = 1 case: the NFPs of single intervals.
for l in range(len(sizes)):
for u in range(l, len(sizes)):
nfps[l, u] = _compute_nfp_real(l, u, counts, sizes)
return nfps
|
[
"Computes",
"the",
"matrix",
"of",
"expected",
"false",
"positives",
"for",
"all",
"possible",
"sub",
"-",
"intervals",
"of",
"the",
"complete",
"domain",
"of",
"set",
"sizes",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble_partition.py#L75-L93
|
[
"def",
"_compute_nfps_real",
"(",
"counts",
",",
"sizes",
")",
":",
"nfps",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"sizes",
")",
",",
"len",
"(",
"sizes",
")",
")",
")",
"# All u an l are inclusive bounds for intervals.",
"# Compute p = 1, the NFPs",
"for",
"l",
"in",
"range",
"(",
"len",
"(",
"sizes",
")",
")",
":",
"for",
"u",
"in",
"range",
"(",
"l",
",",
"len",
"(",
"sizes",
")",
")",
":",
"nfps",
"[",
"l",
",",
"u",
"]",
"=",
"_compute_nfp_real",
"(",
"l",
",",
"u",
",",
"counts",
",",
"sizes",
")",
"return",
"nfps"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
_compute_best_partitions
|
Computes the optimal partitions given the size distributions
and computed number of expected false positives for all sub-intervals.
Args:
num_part (int): The number of partitions to create.
sizes (numpy.array): The complete domain of set sizes in sorted order.
nfps (numpy.array): The computed number of expected false positives
for all sub-intervals; axis-0 is for the indexes of lower bounds and
axis-1 is for the indexes of upper bounds.
Returns:
partitions (list): list of lower and upper bounds of set sizes for
all partitions.
total_nfps (float): total number of expected false positives from all
partitions.
cost (numpy.array): a N x p-1 matrix of the computed optimal NFPs for
all sub-problems given upper bound set size and number of partitions.
|
datasketch/lshensemble_partition.py
|
def _compute_best_partitions(num_part, sizes, nfps):
"""Computes the optimal partitions given the size distributions
and computed number of expected false positives for all sub-intervals.
Args:
num_part (int): The number of partitions to create.
sizes (numpy.array): The complete domain of set sizes in sorted order.
nfps (numpy.array): The computed number of expected false positives
for all sub-intervals; axis-0 is for the indexes of lower bounds and
axis-1 is for the indexes of upper bounds.
Returns:
partitions (list): list of lower and upper bounds of set sizes for
all partitions.
total_nfps (float): total number of expected false positives from all
partitions.
cost (numpy.array): a N x p-1 matrix of the computed optimal NFPs for
all sub-problems given upper bound set size and number of partitions.
"""
if num_part < 2:
raise ValueError("num_part cannot be less than 2")
if num_part > len(sizes):
raise ValueError("num_part cannot be greater than the domain size of "
"all set sizes")
# If number of partitions is 2, then simply find the upper bound
# of the first partition.
if num_part == 2:
total_nfps, u = min((nfps[0, u1]+nfps[u1+1, len(sizes)-1], u1)
for u1 in range(0, len(sizes)-1))
return [(sizes[0], sizes[u]), (sizes[u+1], sizes[-1]),], \
total_nfps, None
# Initialize subproblem total NFPs.
cost = np.zeros((len(sizes), num_part-2))
# Note: p is the number of partitions in the subproblem.
    # p2i translates the number of partitions into the index in the matrix.
p2i = lambda p : p - 2
# Compute p >= 2 until before p = num_part.
for p in range(2, num_part):
# Compute best partition for subproblems with increasing
# max index u, starting from the smallest possible u given the p.
# The smallest possible u can be considered as the max index that
# generates p partitions each with only one size.
for u in range(p-1, len(sizes)):
if p == 2:
cost[u, p2i(p)] = min(nfps[0, u1]+nfps[u1+1,u]
for u1 in range(u))
else:
cost[u, p2i(p)] = min(cost[u1, p2i(p-1)] + nfps[u1+1, u]
for u1 in range((p-1)-1, u))
p = num_part
# Find the optimal upper bound index of the 2nd right-most partition given
# the number of partitions (p).
total_nfps, u = min((cost[u1, p2i(p-1)]+nfps[u1+1, len(sizes)-1], u1)
for u1 in range((p-1)-1, len(sizes)-1))
partitions = [(sizes[u+1], sizes[-1]),]
p -= 1
# Back track to find the best partitions.
while p > 1:
# Find the optimal upper bound index of the 2nd right-most partition
        # given the number of partitions (p) and upper bound index (u) in this
# sub-problem.
_, u1_best = min((cost[u1, p2i(p)]+nfps[u1+1, u], u1)
for u1 in range((p-1)-1, u))
partitions.insert(0, (sizes[u1_best+1], sizes[u]))
u = u1_best
p -= 1
partitions.insert(0, (sizes[0], sizes[u]))
return [partitions, total_nfps, cost]
|
def _compute_best_partitions(num_part, sizes, nfps):
"""Computes the optimal partitions given the size distributions
and computed number of expected false positives for all sub-intervals.
Args:
num_part (int): The number of partitions to create.
sizes (numpy.array): The complete domain of set sizes in sorted order.
nfps (numpy.array): The computed number of expected false positives
for all sub-intervals; axis-0 is for the indexes of lower bounds and
axis-1 is for the indexes of upper bounds.
Returns:
partitions (list): list of lower and upper bounds of set sizes for
all partitions.
total_nfps (float): total number of expected false positives from all
partitions.
cost (numpy.array): a N x p-1 matrix of the computed optimal NFPs for
all sub-problems given upper bound set size and number of partitions.
"""
if num_part < 2:
raise ValueError("num_part cannot be less than 2")
if num_part > len(sizes):
raise ValueError("num_part cannot be greater than the domain size of "
"all set sizes")
# If number of partitions is 2, then simply find the upper bound
# of the first partition.
if num_part == 2:
total_nfps, u = min((nfps[0, u1]+nfps[u1+1, len(sizes)-1], u1)
for u1 in range(0, len(sizes)-1))
return [(sizes[0], sizes[u]), (sizes[u+1], sizes[-1]),], \
total_nfps, None
# Initialize subproblem total NFPs.
cost = np.zeros((len(sizes), num_part-2))
# Note: p is the number of partitions in the subproblem.
    # p2i translates the number of partitions into the index in the matrix.
p2i = lambda p : p - 2
# Compute p >= 2 until before p = num_part.
for p in range(2, num_part):
# Compute best partition for subproblems with increasing
# max index u, starting from the smallest possible u given the p.
# The smallest possible u can be considered as the max index that
# generates p partitions each with only one size.
for u in range(p-1, len(sizes)):
if p == 2:
cost[u, p2i(p)] = min(nfps[0, u1]+nfps[u1+1,u]
for u1 in range(u))
else:
cost[u, p2i(p)] = min(cost[u1, p2i(p-1)] + nfps[u1+1, u]
for u1 in range((p-1)-1, u))
p = num_part
# Find the optimal upper bound index of the 2nd right-most partition given
# the number of partitions (p).
total_nfps, u = min((cost[u1, p2i(p-1)]+nfps[u1+1, len(sizes)-1], u1)
for u1 in range((p-1)-1, len(sizes)-1))
partitions = [(sizes[u+1], sizes[-1]),]
p -= 1
# Back track to find the best partitions.
while p > 1:
# Find the optimal upper bound index of the 2nd right-most partition
        # given the number of partitions (p) and upper bound index (u) in this
# sub-problem.
_, u1_best = min((cost[u1, p2i(p)]+nfps[u1+1, u], u1)
for u1 in range((p-1)-1, u))
partitions.insert(0, (sizes[u1_best+1], sizes[u]))
u = u1_best
p -= 1
partitions.insert(0, (sizes[0], sizes[u]))
return [partitions, total_nfps, cost]
|
[
"Computes",
"the",
"optimal",
"partitions",
"given",
"the",
"size",
"distributions",
"and",
"computed",
"number",
"of",
"expected",
"false",
"positives",
"for",
"all",
"sub",
"-",
"intervals",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble_partition.py#L96-L168
|
[
"def",
"_compute_best_partitions",
"(",
"num_part",
",",
"sizes",
",",
"nfps",
")",
":",
"if",
"num_part",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"num_part cannot be less than 2\"",
")",
"if",
"num_part",
">",
"len",
"(",
"sizes",
")",
":",
"raise",
"ValueError",
"(",
"\"num_part cannot be greater than the domain size of \"",
"\"all set sizes\"",
")",
"# If number of partitions is 2, then simply find the upper bound",
"# of the first partition.",
"if",
"num_part",
"==",
"2",
":",
"total_nfps",
",",
"u",
"=",
"min",
"(",
"(",
"nfps",
"[",
"0",
",",
"u1",
"]",
"+",
"nfps",
"[",
"u1",
"+",
"1",
",",
"len",
"(",
"sizes",
")",
"-",
"1",
"]",
",",
"u1",
")",
"for",
"u1",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"sizes",
")",
"-",
"1",
")",
")",
"return",
"[",
"(",
"sizes",
"[",
"0",
"]",
",",
"sizes",
"[",
"u",
"]",
")",
",",
"(",
"sizes",
"[",
"u",
"+",
"1",
"]",
",",
"sizes",
"[",
"-",
"1",
"]",
")",
",",
"]",
",",
"total_nfps",
",",
"None",
"# Initialize subproblem total NFPs.",
"cost",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"sizes",
")",
",",
"num_part",
"-",
"2",
")",
")",
"# Note: p is the number of partitions in the subproblem.",
"# p2i translates the number of partition into the index in the matrix.",
"p2i",
"=",
"lambda",
"p",
":",
"p",
"-",
"2",
"# Compute p >= 2 until before p = num_part.",
"for",
"p",
"in",
"range",
"(",
"2",
",",
"num_part",
")",
":",
"# Compute best partition for subproblems with increasing",
"# max index u, starting from the smallest possible u given the p.",
"# The smallest possible u can be considered as the max index that",
"# generates p partitions each with only one size.",
"for",
"u",
"in",
"range",
"(",
"p",
"-",
"1",
",",
"len",
"(",
"sizes",
")",
")",
":",
"if",
"p",
"==",
"2",
":",
"cost",
"[",
"u",
",",
"p2i",
"(",
"p",
")",
"]",
"=",
"min",
"(",
"nfps",
"[",
"0",
",",
"u1",
"]",
"+",
"nfps",
"[",
"u1",
"+",
"1",
",",
"u",
"]",
"for",
"u1",
"in",
"range",
"(",
"u",
")",
")",
"else",
":",
"cost",
"[",
"u",
",",
"p2i",
"(",
"p",
")",
"]",
"=",
"min",
"(",
"cost",
"[",
"u1",
",",
"p2i",
"(",
"p",
"-",
"1",
")",
"]",
"+",
"nfps",
"[",
"u1",
"+",
"1",
",",
"u",
"]",
"for",
"u1",
"in",
"range",
"(",
"(",
"p",
"-",
"1",
")",
"-",
"1",
",",
"u",
")",
")",
"p",
"=",
"num_part",
"# Find the optimal upper bound index of the 2nd right-most partition given",
"# the number of partitions (p).",
"total_nfps",
",",
"u",
"=",
"min",
"(",
"(",
"cost",
"[",
"u1",
",",
"p2i",
"(",
"p",
"-",
"1",
")",
"]",
"+",
"nfps",
"[",
"u1",
"+",
"1",
",",
"len",
"(",
"sizes",
")",
"-",
"1",
"]",
",",
"u1",
")",
"for",
"u1",
"in",
"range",
"(",
"(",
"p",
"-",
"1",
")",
"-",
"1",
",",
"len",
"(",
"sizes",
")",
"-",
"1",
")",
")",
"partitions",
"=",
"[",
"(",
"sizes",
"[",
"u",
"+",
"1",
"]",
",",
"sizes",
"[",
"-",
"1",
"]",
")",
",",
"]",
"p",
"-=",
"1",
"# Back track to find the best partitions.",
"while",
"p",
">",
"1",
":",
"# Find the optimal upper bound index of the 2nd right-most partition",
"# givne the number of partitions (p) and upper bound index (u) in this",
"# sub-problem.",
"_",
",",
"u1_best",
"=",
"min",
"(",
"(",
"cost",
"[",
"u1",
",",
"p2i",
"(",
"p",
")",
"]",
"+",
"nfps",
"[",
"u1",
"+",
"1",
",",
"u",
"]",
",",
"u1",
")",
"for",
"u1",
"in",
"range",
"(",
"(",
"p",
"-",
"1",
")",
"-",
"1",
",",
"u",
")",
")",
"partitions",
".",
"insert",
"(",
"0",
",",
"(",
"sizes",
"[",
"u1_best",
"+",
"1",
"]",
",",
"sizes",
"[",
"u",
"]",
")",
")",
"u",
"=",
"u1_best",
"p",
"-=",
"1",
"partitions",
".",
"insert",
"(",
"0",
",",
"(",
"sizes",
"[",
"0",
"]",
",",
"sizes",
"[",
"u",
"]",
")",
")",
"return",
"[",
"partitions",
",",
"total_nfps",
",",
"cost",
"]"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
optimal_partitions
|
Compute the optimal partitions given a distribution of set sizes.
Args:
sizes (numpy.array): The complete domain of set sizes in ascending
order.
counts (numpy.array): The frequencies of all set sizes in the same
order as `sizes`.
num_part (int): The number of partitions to create.
Returns:
list: A list of partitions in the form of `(lower, upper)` tuples,
where `lower` and `upper` are lower and upper bound (inclusive)
set sizes of each partition.
|
datasketch/lshensemble_partition.py
|
def optimal_partitions(sizes, counts, num_part):
"""Compute the optimal partitions given a distribution of set sizes.
Args:
sizes (numpy.array): The complete domain of set sizes in ascending
order.
counts (numpy.array): The frequencies of all set sizes in the same
order as `sizes`.
num_part (int): The number of partitions to create.
Returns:
list: A list of partitions in the form of `(lower, upper)` tuples,
where `lower` and `upper` are lower and upper bound (inclusive)
set sizes of each partition.
"""
if num_part < 2:
return [(sizes[0], sizes[-1])]
if num_part >= len(sizes):
partitions = [(x, x) for x in sizes]
return partitions
nfps = _compute_nfps_real(counts, sizes)
partitions, _, _ = _compute_best_partitions(num_part, sizes, nfps)
return partitions
|
def optimal_partitions(sizes, counts, num_part):
"""Compute the optimal partitions given a distribution of set sizes.
Args:
sizes (numpy.array): The complete domain of set sizes in ascending
order.
counts (numpy.array): The frequencies of all set sizes in the same
order as `sizes`.
num_part (int): The number of partitions to create.
Returns:
list: A list of partitions in the form of `(lower, upper)` tuples,
where `lower` and `upper` are lower and upper bound (inclusive)
set sizes of each partition.
"""
if num_part < 2:
return [(sizes[0], sizes[-1])]
if num_part >= len(sizes):
partitions = [(x, x) for x in sizes]
return partitions
nfps = _compute_nfps_real(counts, sizes)
partitions, _, _ = _compute_best_partitions(num_part, sizes, nfps)
return partitions
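# --- Editor's usage sketch (not part of the original source) ---
# Assumes the datasketch package is installed, so the module at the path
# shown in this row is importable; the size histogram is synthetic.
import numpy as np
from datasketch.lshensemble_partition import optimal_partitions

sizes = np.array([10, 20, 30, 40, 50])     # ascending domain of set sizes
counts = np.array([100, 10, 10, 10, 100])  # frequency of each size
partitions = optimal_partitions(sizes, counts, num_part=3)
# Three (lower, upper) inclusive bounds tiling [10, 50], chosen to minimize
# the total expected false positives computed by the dynamic program above.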
|
[
"Compute",
"the",
"optimal",
"partitions",
"given",
"a",
"distribution",
"of",
"set",
"sizes",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble_partition.py#L172-L194
|
[
"def",
"optimal_partitions",
"(",
"sizes",
",",
"counts",
",",
"num_part",
")",
":",
"if",
"num_part",
"<",
"2",
":",
"return",
"[",
"(",
"sizes",
"[",
"0",
"]",
",",
"sizes",
"[",
"-",
"1",
"]",
")",
"]",
"if",
"num_part",
">=",
"len",
"(",
"sizes",
")",
":",
"partitions",
"=",
"[",
"(",
"x",
",",
"x",
")",
"for",
"x",
"in",
"sizes",
"]",
"return",
"partitions",
"nfps",
"=",
"_compute_nfps_real",
"(",
"counts",
",",
"sizes",
")",
"partitions",
",",
"_",
",",
"_",
"=",
"_compute_best_partitions",
"(",
"num_part",
",",
"sizes",
",",
"nfps",
")",
"return",
"partitions"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
bBitMinHash.jaccard
|
Estimate the Jaccard similarity (resemblance) between this b-bit
MinHash and the other.
|
datasketch/b_bit_minhash.py
|
def jaccard(self, other):
'''
Estimate the Jaccard similarity (resemblance) between this b-bit
MinHash and the other.
'''
if self.b != other.b:
raise ValueError("Cannot compare two b-bit MinHashes with different\
b values")
if self.seed != other.seed:
raise ValueError("Cannot compare two b-bit MinHashes with different\
set of permutations")
intersection = np.count_nonzero(self.hashvalues==other.hashvalues)
raw_est = float(intersection) / float(self.hashvalues.size)
a1 = self._calc_a(self.r, self.b)
a2 = self._calc_a(other.r, other.b)
c1, c2 = self._calc_c(a1, a2, self.r, other.r)
return (raw_est - c1) / (1 - c2)
|
def jaccard(self, other):
'''
Estimate the Jaccard similarity (resemblance) between this b-bit
MinHash and the other.
'''
if self.b != other.b:
raise ValueError("Cannot compare two b-bit MinHashes with different\
b values")
if self.seed != other.seed:
raise ValueError("Cannot compare two b-bit MinHashes with different\
set of permutations")
intersection = np.count_nonzero(self.hashvalues==other.hashvalues)
raw_est = float(intersection) / float(self.hashvalues.size)
a1 = self._calc_a(self.r, self.b)
a2 = self._calc_a(other.r, other.b)
c1, c2 = self._calc_c(a1, a2, self.r, other.r)
return (raw_est - c1) / (1 - c2)
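# --- Editor's sketch (not part of the original source) ---
# Numeric illustration of the raw estimator only, using made-up bit vectors
# rather than real b-bit MinHash state. The raw collision rate is then
# debiased with the C1/C2 correction defined in the _calc_a/_calc_c rows below.
import numpy as np

a = np.array([1, 0, 1, 1], dtype=np.uint64)  # stand-in 1-bit hash values
b = np.array([1, 1, 1, 0], dtype=np.uint64)
raw_est = float(np.count_nonzero(a == b)) / float(a.size)
assert raw_est == 0.5                        # two matching slots out of four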
|
[
"Estimate",
"the",
"Jaccard",
"similarity",
"(",
"resemblance",
")",
"between",
"this",
"b",
"-",
"bit",
"MinHash",
"and",
"the",
"other",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/b_bit_minhash.py#L57-L73
|
[
"def",
"jaccard",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"b",
"!=",
"other",
".",
"b",
":",
"raise",
"ValueError",
"(",
"\"Cannot compare two b-bit MinHashes with different\\\n b values\"",
")",
"if",
"self",
".",
"seed",
"!=",
"other",
".",
"seed",
":",
"raise",
"ValueError",
"(",
"\"Cannot compare two b-bit MinHashes with different\\\n set of permutations\"",
")",
"intersection",
"=",
"np",
".",
"count_nonzero",
"(",
"self",
".",
"hashvalues",
"==",
"other",
".",
"hashvalues",
")",
"raw_est",
"=",
"float",
"(",
"intersection",
")",
"/",
"float",
"(",
"self",
".",
"hashvalues",
".",
"size",
")",
"a1",
"=",
"self",
".",
"_calc_a",
"(",
"self",
".",
"r",
",",
"self",
".",
"b",
")",
"a2",
"=",
"self",
".",
"_calc_a",
"(",
"other",
".",
"r",
",",
"other",
".",
"b",
")",
"c1",
",",
"c2",
"=",
"self",
".",
"_calc_c",
"(",
"a1",
",",
"a2",
",",
"self",
".",
"r",
",",
"other",
".",
"r",
")",
"return",
"(",
"raw_est",
"-",
"c1",
")",
"/",
"(",
"1",
"-",
"c2",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
bBitMinHash._calc_a
|
Compute the function A(r, b)
|
datasketch/b_bit_minhash.py
|
def _calc_a(self, r, b):
'''
Compute the function A(r, b)
'''
if r == 0.0:
# Find the limit of A(r, b) as r -> 0.
return 1.0 / (1 << b)
return r * (1 - r) ** (2 ** b - 1) / (1 - (1 - r) ** (2 * b))
|
def _calc_a(self, r, b):
'''
Compute the function A(r, b)
'''
if r == 0.0:
# Find the limit of A(r, b) as r -> 0.
return 1.0 / (1 << b)
return r * (1 - r) ** (2 ** b - 1) / (1 - (1 - r) ** (2 * b))
|
[
"Compute",
"the",
"function",
"A",
"(",
"r",
"b",
")"
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/b_bit_minhash.py#L129-L136
|
[
"def",
"_calc_a",
"(",
"self",
",",
"r",
",",
"b",
")",
":",
"if",
"r",
"==",
"0.0",
":",
"# Find the limit of A(r, b) as r -> 0.",
"return",
"1.0",
"/",
"(",
"1",
"<<",
"b",
")",
"return",
"r",
"*",
"(",
"1",
"-",
"r",
")",
"**",
"(",
"2",
"**",
"b",
"-",
"1",
")",
"/",
"(",
"1",
"-",
"(",
"1",
"-",
"r",
")",
"**",
"(",
"2",
"*",
"b",
")",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
bBitMinHash._calc_c
|
Compute the functions C1 and C2
|
datasketch/b_bit_minhash.py
|
def _calc_c(self, a1, a2, r1, r2):
'''
Compute the functions C1 and C2
'''
if r1 == 0.0 and r2 == 0.0:
# Find the limits of C1 and C2 as r1 -> 0 and r2 -> 0
# Since the b-value must be the same and r1 = r2,
# we have A1(r1, b1) = A2(r2, b2) = A,
# then the limits for both C1 and C2 are A.
return a1, a2
div = 1 / (r1 + r2)
c1 = (a1 * r2 + a2 * r1) * div
c2 = (a1 * r1 + a2 * r2) * div
return c1, c2
|
def _calc_c(self, a1, a2, r1, r2):
'''
Compute the functions C1 and C2
'''
if r1 == 0.0 and r2 == 0.0:
# Find the limits of C1 and C2 as r1 -> 0 and r2 -> 0
# Since the b-value must be the same and r1 = r2,
# we have A1(r1, b1) = A2(r2, b2) = A,
# then the limits for both C1 and C2 are A.
return a1, a2
div = 1 / (r1 + r2)
c1 = (a1 * r2 + a2 * r1) * div
c2 = (a1 * r1 + a2 * r2) * div
return c1, c2
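# --- Editor's worked example (not part of the original source) ---
# Evaluates A(r, b) and the C1/C2 correction for hypothetical inputs
# (b = 1, r1 = r2 = 0.2), mirroring the formulas above.
b = 1
r1 = r2 = 0.2
A = lambda r: r * (1 - r) ** (2 ** b - 1) / (1 - (1 - r) ** (2 * b))
a1, a2 = A(r1), A(r2)              # both equal 0.2 * 0.8 / 0.36 = 4/9
div = 1 / (r1 + r2)
c1 = (a1 * r2 + a2 * r1) * div
c2 = (a1 * r1 + a2 * r2) * div
est = (0.5 - c1) / (1 - c2)        # debiased estimate for a raw rate of 0.5
assert abs(est - 0.1) < 1e-12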
|
[
"Compute",
"the",
"functions",
"C1",
"and",
"C2"
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/b_bit_minhash.py#L138-L151
|
[
"def",
"_calc_c",
"(",
"self",
",",
"a1",
",",
"a2",
",",
"r1",
",",
"r2",
")",
":",
"if",
"r1",
"==",
"0.0",
"and",
"r2",
"==",
"0.0",
":",
"# Find the limits of C1 and C2 as r1 -> 0 and r2 -> 0",
"# Since the b-value must be the same and r1 = r2,",
"# we have A1(r1, b1) = A2(r2, b2) = A,",
"# then the limits for both C1 and C2 are A.",
"return",
"a1",
",",
"a2",
"div",
"=",
"1",
"/",
"(",
"r1",
"+",
"r2",
")",
"c1",
"=",
"(",
"a1",
"*",
"r2",
"+",
"a2",
"*",
"r1",
")",
"*",
"div",
"c2",
"=",
"(",
"a1",
"*",
"r1",
"+",
"a2",
"*",
"r2",
")",
"*",
"div",
"return",
"c1",
",",
"c2"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
LeanMinHash._initialize_slots
|
Initialize the slots of the LeanMinHash.
Args:
seed (int): The random seed controls the set of random
permutation functions generated for this LeanMinHash.
    hashvalues: The hash values are the internal state of the LeanMinHash.
|
datasketch/lean_minhash.py
|
def _initialize_slots(self, seed, hashvalues):
'''Initialize the slots of the LeanMinHash.
Args:
seed (int): The random seed controls the set of random
permutation functions generated for this LeanMinHash.
            hashvalues: The hash values are the internal state of the LeanMinHash.
'''
self.seed = seed
self.hashvalues = self._parse_hashvalues(hashvalues)
|
def _initialize_slots(self, seed, hashvalues):
'''Initialize the slots of the LeanMinHash.
Args:
seed (int): The random seed controls the set of random
permutation functions generated for this LeanMinHash.
            hashvalues: The hash values are the internal state of the LeanMinHash.
'''
self.seed = seed
self.hashvalues = self._parse_hashvalues(hashvalues)
|
[
"Initialize",
"the",
"slots",
"of",
"the",
"LeanMinHash",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lean_minhash.py#L51-L60
|
[
"def",
"_initialize_slots",
"(",
"self",
",",
"seed",
",",
"hashvalues",
")",
":",
"self",
".",
"seed",
"=",
"seed",
"self",
".",
"hashvalues",
"=",
"self",
".",
"_parse_hashvalues",
"(",
"hashvalues",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
LeanMinHash.bytesize
|
Compute the byte size after serialization.
Args:
    byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Returns:
int: Size in number of bytes after serialization.
|
datasketch/lean_minhash.py
|
def bytesize(self, byteorder='@'):
'''Compute the byte size after serialization.
Args:
            byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Returns:
int: Size in number of bytes after serialization.
'''
# Use 8 bytes to store the seed integer
seed_size = struct.calcsize(byteorder+'q')
# Use 4 bytes to store the number of hash values
length_size = struct.calcsize(byteorder+'i')
        # Use 4 bytes to store each hash value as we are using the lower 32 bits
hashvalue_size = struct.calcsize(byteorder+'I')
return seed_size + length_size + len(self) * hashvalue_size
|
def bytesize(self, byteorder='@'):
'''Compute the byte size after serialization.
Args:
            byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Returns:
int: Size in number of bytes after serialization.
'''
# Use 8 bytes to store the seed integer
seed_size = struct.calcsize(byteorder+'q')
# Use 4 bytes to store the number of hash values
length_size = struct.calcsize(byteorder+'i')
        # Use 4 bytes to store each hash value as we are using the lower 32 bits
hashvalue_size = struct.calcsize(byteorder+'I')
return seed_size + length_size + len(self) * hashvalue_size
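# --- Editor's sanity check (not part of the original source) ---
# Reproduces the size formula with the struct module. On a typical 64-bit
# platform with native order, 'q' is 8 bytes and 'i'/'I' are 4 bytes each;
# the permutation count is arbitrary.
import struct

num_perm = 128
size = (struct.calcsize('@q')           # seed
        + struct.calcsize('@i')         # number of hash values
        + num_perm * struct.calcsize('@I'))
assert size == 8 + 4 + 128 * 4          # 524 bytes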
|
[
"Compute",
"the",
"byte",
"size",
"after",
"serialization",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lean_minhash.py#L76-L95
|
[
"def",
"bytesize",
"(",
"self",
",",
"byteorder",
"=",
"'@'",
")",
":",
"# Use 8 bytes to store the seed integer",
"seed_size",
"=",
"struct",
".",
"calcsize",
"(",
"byteorder",
"+",
"'q'",
")",
"# Use 4 bytes to store the number of hash values",
"length_size",
"=",
"struct",
".",
"calcsize",
"(",
"byteorder",
"+",
"'i'",
")",
"# Use 4 bytes to store each hash value as we are using the lower 32 bit",
"hashvalue_size",
"=",
"struct",
".",
"calcsize",
"(",
"byteorder",
"+",
"'I'",
")",
"return",
"seed_size",
"+",
"length_size",
"+",
"len",
"(",
"self",
")",
"*",
"hashvalue_size"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
LeanMinHash.serialize
|
Serialize this lean MinHash and store the result in an allocated buffer.
Args:
buf (buffer): `buf` must implement the `buffer`_ interface.
One such example is the built-in `bytearray`_ class.
    byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
This is preferred over using `pickle`_ if the serialized lean MinHash needs
to be used by another program in a different programming language.
The serialization schema:
1. The first 8 bytes are the seed integer
2. The next 4 bytes are the number of hash values
3. The rest is the serialized hash values, each using 4 bytes
Example:
To serialize a single lean MinHash into a `bytearray`_ buffer.
.. code-block:: python
buf = bytearray(lean_minhash.bytesize())
lean_minhash.serialize(buf)
To serialize multiple lean MinHash into a `bytearray`_ buffer.
.. code-block:: python
# assuming lean_minhashs is a list of LeanMinHash with the same size
size = lean_minhashs[0].bytesize()
buf = bytearray(size*len(lean_minhashs))
for i, lean_minhash in enumerate(lean_minhashs):
lean_minhash.serialize(buf[i*size:])
.. _`buffer`: https://docs.python.org/3/c-api/buffer.html
.. _`bytearray`: https://docs.python.org/3.6/library/functions.html#bytearray
.. _`byteorder`: https://docs.python.org/3/library/struct.html
|
datasketch/lean_minhash.py
|
def serialize(self, buf, byteorder='@'):
'''
Serialize this lean MinHash and store the result in an allocated buffer.
Args:
buf (buffer): `buf` must implement the `buffer`_ interface.
One such example is the built-in `bytearray`_ class.
byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
This is preferred over using `pickle`_ if the serialized lean MinHash needs
to be used by another program in a different programming language.
The serialization schema:
1. The first 8 bytes is the seed integer
2. The next 4 bytes is the number of hash values
3. The rest is the serialized hash values, each uses 4 bytes
Example:
To serialize a single lean MinHash into a `bytearray`_ buffer.
.. code-block:: python
buf = bytearray(lean_minhash.bytesize())
lean_minhash.serialize(buf)
To serialize multiple lean MinHash into a `bytearray`_ buffer.
.. code-block:: python
# assuming lean_minhashs is a list of LeanMinHash with the same size
size = lean_minhashs[0].bytesize()
buf = bytearray(size*len(lean_minhashs))
for i, lean_minhash in enumerate(lean_minhashs):
lean_minhash.serialize(buf[i*size:])
.. _`buffer`: https://docs.python.org/3/c-api/buffer.html
.. _`bytearray`: https://docs.python.org/3.6/library/functions.html#bytearray
.. _`byteorder`: https://docs.python.org/3/library/struct.html
'''
if len(buf) < self.bytesize():
raise ValueError("The buffer does not have enough space\
for holding this MinHash.")
fmt = "%sqi%dI" % (byteorder, len(self))
struct.pack_into(fmt, buf, 0,
self.seed, len(self), *self.hashvalues)
|
def serialize(self, buf, byteorder='@'):
'''
Serialize this lean MinHash and store the result in an allocated buffer.
Args:
buf (buffer): `buf` must implement the `buffer`_ interface.
One such example is the built-in `bytearray`_ class.
byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
This is preferred over using `pickle`_ if the serialized lean MinHash needs
to be used by another program in a different programming language.
The serialization schema:
1. The first 8 bytes is the seed integer
2. The next 4 bytes is the number of hash values
3. The rest is the serialized hash values, each uses 4 bytes
Example:
To serialize a single lean MinHash into a `bytearray`_ buffer.
.. code-block:: python
buf = bytearray(lean_minhash.bytesize())
lean_minhash.serialize(buf)
To serialize multiple lean MinHash into a `bytearray`_ buffer.
.. code-block:: python
# assuming lean_minhashs is a list of LeanMinHash with the same size
size = lean_minhashs[0].bytesize()
buf = bytearray(size*len(lean_minhashs))
for i, lean_minhash in enumerate(lean_minhashs):
lean_minhash.serialize(buf[i*size:])
.. _`buffer`: https://docs.python.org/3/c-api/buffer.html
.. _`bytearray`: https://docs.python.org/3.6/library/functions.html#bytearray
.. _`byteorder`: https://docs.python.org/3/library/struct.html
'''
if len(buf) < self.bytesize():
raise ValueError("The buffer does not have enough space\
for holding this MinHash.")
fmt = "%sqi%dI" % (byteorder, len(self))
struct.pack_into(fmt, buf, 0,
self.seed, len(self), *self.hashvalues)
|
[
"Serialize",
"this",
"lean",
"MinHash",
"and",
"store",
"the",
"result",
"in",
"an",
"allocated",
"buffer",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lean_minhash.py#L97-L145
|
[
"def",
"serialize",
"(",
"self",
",",
"buf",
",",
"byteorder",
"=",
"'@'",
")",
":",
"if",
"len",
"(",
"buf",
")",
"<",
"self",
".",
"bytesize",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"The buffer does not have enough space\\\n for holding this MinHash.\"",
")",
"fmt",
"=",
"\"%sqi%dI\"",
"%",
"(",
"byteorder",
",",
"len",
"(",
"self",
")",
")",
"struct",
".",
"pack_into",
"(",
"fmt",
",",
"buf",
",",
"0",
",",
"self",
".",
"seed",
",",
"len",
"(",
"self",
")",
",",
"*",
"self",
".",
"hashvalues",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
LeanMinHash.deserialize
|
Deserialize a lean MinHash from a buffer.
Args:
buf (buffer): `buf` must implement the `buffer`_ interface.
One such example is the built-in `bytearray`_ class.
byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Return:
datasketch.LeanMinHash: The deserialized lean MinHash
Example:
To deserialize a lean MinHash from a buffer.
.. code-block:: python
lean_minhash = LeanMinHash.deserialize(buf)
|
datasketch/lean_minhash.py
|
def deserialize(cls, buf, byteorder='@'):
'''
Deserialize a lean MinHash from a buffer.
Args:
buf (buffer): `buf` must implement the `buffer`_ interface.
One such example is the built-in `bytearray`_ class.
byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Return:
datasketch.LeanMinHash: The deserialized lean MinHash
Example:
To deserialize a lean MinHash from a buffer.
.. code-block:: python
lean_minhash = LeanMinHash.deserialize(buf)
'''
fmt_seed_size = "%sqi" % byteorder
fmt_hash = byteorder + "%dI"
try:
seed, num_perm = struct.unpack_from(fmt_seed_size, buf, 0)
except TypeError:
seed, num_perm = struct.unpack_from(fmt_seed_size, buffer(buf), 0)
offset = struct.calcsize(fmt_seed_size)
try:
hashvalues = struct.unpack_from(fmt_hash % num_perm, buf, offset)
except TypeError:
hashvalues = struct.unpack_from(fmt_hash % num_perm, buffer(buf), offset)
lmh = object.__new__(LeanMinHash)
lmh._initialize_slots(seed, hashvalues)
return lmh
|
def deserialize(cls, buf, byteorder='@'):
'''
Deserialize a lean MinHash from a buffer.
Args:
buf (buffer): `buf` must implement the `buffer`_ interface.
One such example is the built-in `bytearray`_ class.
byteorder (str, optional): This is the byte order of the serialized data. Use one
of the `byte order characters
<https://docs.python.org/3/library/struct.html#byte-order-size-and-alignment>`_:
``@``, ``=``, ``<``, ``>``, and ``!``.
Default is ``@`` -- the native order.
Return:
datasketch.LeanMinHash: The deserialized lean MinHash
Example:
To deserialize a lean MinHash from a buffer.
.. code-block:: python
lean_minhash = LeanMinHash.deserialize(buf)
'''
fmt_seed_size = "%sqi" % byteorder
fmt_hash = byteorder + "%dI"
try:
seed, num_perm = struct.unpack_from(fmt_seed_size, buf, 0)
except TypeError:
seed, num_perm = struct.unpack_from(fmt_seed_size, buffer(buf), 0)
offset = struct.calcsize(fmt_seed_size)
try:
hashvalues = struct.unpack_from(fmt_hash % num_perm, buf, offset)
except TypeError:
hashvalues = struct.unpack_from(fmt_hash % num_perm, buffer(buf), offset)
lmh = object.__new__(LeanMinHash)
lmh._initialize_slots(seed, hashvalues)
return lmh
|
[
"Deserialize",
"a",
"lean",
"MinHash",
"from",
"a",
"buffer",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lean_minhash.py#L148-L184
|
[
"def",
"deserialize",
"(",
"cls",
",",
"buf",
",",
"byteorder",
"=",
"'@'",
")",
":",
"fmt_seed_size",
"=",
"\"%sqi\"",
"%",
"byteorder",
"fmt_hash",
"=",
"byteorder",
"+",
"\"%dI\"",
"try",
":",
"seed",
",",
"num_perm",
"=",
"struct",
".",
"unpack_from",
"(",
"fmt_seed_size",
",",
"buf",
",",
"0",
")",
"except",
"TypeError",
":",
"seed",
",",
"num_perm",
"=",
"struct",
".",
"unpack_from",
"(",
"fmt_seed_size",
",",
"buffer",
"(",
"buf",
")",
",",
"0",
")",
"offset",
"=",
"struct",
".",
"calcsize",
"(",
"fmt_seed_size",
")",
"try",
":",
"hashvalues",
"=",
"struct",
".",
"unpack_from",
"(",
"fmt_hash",
"%",
"num_perm",
",",
"buf",
",",
"offset",
")",
"except",
"TypeError",
":",
"hashvalues",
"=",
"struct",
".",
"unpack_from",
"(",
"fmt_hash",
"%",
"num_perm",
",",
"buffer",
"(",
"buf",
")",
",",
"offset",
")",
"lmh",
"=",
"object",
".",
"__new__",
"(",
"LeanMinHash",
")",
"lmh",
".",
"_initialize_slots",
"(",
"seed",
",",
"hashvalues",
")",
"return",
"lmh"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
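A round-trip sketch for the two methods above (assumes datasketch is installed): serialize into a bytearray, deserialize, and confirm the restored sketch is identical. Note that the try/except TypeError with buffer(buf) in the snapshot above is a Python 2 fallback; buffer does not exist in Python 3.

from datasketch import MinHash, LeanMinHash

m = MinHash(num_perm=128)
for token in (b"a", b"b", b"c"):
    m.update(token)
lean = LeanMinHash(m)
buf = bytearray(lean.bytesize())
lean.serialize(buf)
restored = LeanMinHash.deserialize(buf)
# identical seed and hash values => Jaccard estimate of exactly 1.0
assert restored.jaccard(lean) == 1.0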
test
|
MinHash.update
|
Update this MinHash with a new value.
The value will be hashed using the hash function specified by
the `hashfunc` argument in the constructor.
Args:
b: The value to be hashed using the hash function specified.
Example:
To update with a new string value (using the default SHA1 hash
function, which requires bytes as input):
.. code-block:: python
minhash = MinHash()
minhash.update("new value".encode('utf-8'))
We can also use a different hash function, for example, `pyfarmhash`:
.. code-block:: python
import farmhash
def _hash_32(b):
return farmhash.hash32(b)
minhash = MinHash(hashfunc=_hash_32)
minhash.update("new value")
|
datasketch/minhash.py
|
def update(self, b):
'''Update this MinHash with a new value.
The value will be hashed using the hash function specified by
the `hashfunc` argument in the constructor.
Args:
b: The value to be hashed using the hash function specified.
Example:
To update with a new string value (using the default SHA1 hash
function, which requires bytes as input):
.. code-block:: python
minhash = MinHash()
minhash.update("new value".encode('utf-8'))
We can also use a different hash function, for example, `pyfarmhash`:
.. code-block:: python
import farmhash
def _hash_32(b):
return farmhash.hash32(b)
minhash = MinHash(hashfunc=_hash_32)
minhash.update("new value")
'''
hv = self.hashfunc(b)
a, b = self.permutations
phv = np.bitwise_and((a * hv + b) % _mersenne_prime, np.uint64(_max_hash))
self.hashvalues = np.minimum(phv, self.hashvalues)
|
def update(self, b):
'''Update this MinHash with a new value.
The value will be hashed using the hash function specified by
the `hashfunc` argument in the constructor.
Args:
b: The value to be hashed using the hash function specified.
Example:
To update with a new string value (using the default SHA1 hash
function, which requires bytes as input):
.. code-block:: python
minhash = MinHash()
minhash.update("new value".encode('utf-8'))
We can also use a different hash function, for example, `pyfarmhash`:
.. code-block:: python
import farmhash
def _hash_32(b):
return farmhash.hash32(b)
minhash = MinHash(hashfunc=_hash_32)
minhash.update("new value")
'''
hv = self.hashfunc(b)
a, b = self.permutations
phv = np.bitwise_and((a * hv + b) % _mersenne_prime, np.uint64(_max_hash))
self.hashvalues = np.minimum(phv, self.hashvalues)
|
[
"Update",
"this",
"MinHash",
"with",
"a",
"new",
"value",
".",
"The",
"value",
"will",
"be",
"hashed",
"using",
"the",
"hash",
"function",
"specified",
"by",
"the",
"hashfunc",
"argument",
"in",
"the",
"constructor",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/minhash.py#L105-L135
|
[
"def",
"update",
"(",
"self",
",",
"b",
")",
":",
"hv",
"=",
"self",
".",
"hashfunc",
"(",
"b",
")",
"a",
",",
"b",
"=",
"self",
".",
"permutations",
"phv",
"=",
"np",
".",
"bitwise_and",
"(",
"(",
"a",
"*",
"hv",
"+",
"b",
")",
"%",
"_mersenne_prime",
",",
"np",
".",
"uint64",
"(",
"_max_hash",
")",
")",
"self",
".",
"hashvalues",
"=",
"np",
".",
"minimum",
"(",
"phv",
",",
"self",
".",
"hashvalues",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
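A short usage sketch for update() (assumes datasketch is installed). Each call applies all num_perm universal hash functions (a * h + b modulo a Mersenne prime, masked to 32 bits, as in the body above) and keeps the element-wise minimum, so the order of updates does not matter.

from datasketch import MinHash

m = MinHash(num_perm=128)
for token in "minhash estimates jaccard similarity".split():
    # the default SHA1-based hash function expects bytes
    m.update(token.encode("utf-8"))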
test
|
MinHash.jaccard
|
Estimate the `Jaccard similarity`_ (resemblance) between the sets
represented by this MinHash and the other.
Args:
other (datasketch.MinHash): The other MinHash.
Returns:
float: The Jaccard similarity, which is between 0.0 and 1.0.
|
datasketch/minhash.py
|
def jaccard(self, other):
'''Estimate the `Jaccard similarity`_ (resemblance) between the sets
represented by this MinHash and the other.
Args:
other (datasketch.MinHash): The other MinHash.
Returns:
float: The Jaccard similarity, which is between 0.0 and 1.0.
'''
if other.seed != self.seed:
raise ValueError("Cannot compute Jaccard given MinHash with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot compute Jaccard given MinHash with\
different numbers of permutation functions")
return np.float(np.count_nonzero(self.hashvalues==other.hashvalues)) /\
np.float(len(self))
|
def jaccard(self, other):
'''Estimate the `Jaccard similarity`_ (resemblance) between the sets
represented by this MinHash and the other.
Args:
other (datasketch.MinHash): The other MinHash.
Returns:
float: The Jaccard similarity, which is between 0.0 and 1.0.
'''
if other.seed != self.seed:
raise ValueError("Cannot compute Jaccard given MinHash with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot compute Jaccard given MinHash with\
different numbers of permutation functions")
return np.float(np.count_nonzero(self.hashvalues==other.hashvalues)) /\
np.float(len(self))
|
[
"Estimate",
"the",
"Jaccard",
"similarity",
"_",
"(",
"resemblance",
")",
"between",
"the",
"sets",
"represented",
"by",
"this",
"MinHash",
"and",
"the",
"other",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/minhash.py#L137-L154
|
[
"def",
"jaccard",
"(",
"self",
",",
"other",
")",
":",
"if",
"other",
".",
"seed",
"!=",
"self",
".",
"seed",
":",
"raise",
"ValueError",
"(",
"\"Cannot compute Jaccard given MinHash with\\\n different seeds\"",
")",
"if",
"len",
"(",
"self",
")",
"!=",
"len",
"(",
"other",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot compute Jaccard given MinHash with\\\n different numbers of permutation functions\"",
")",
"return",
"np",
".",
"float",
"(",
"np",
".",
"count_nonzero",
"(",
"self",
".",
"hashvalues",
"==",
"other",
".",
"hashvalues",
")",
")",
"/",
"np",
".",
"float",
"(",
"len",
"(",
"self",
")",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
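A sketch comparing the estimate against the exact Jaccard of two small token sets (assumes datasketch is installed); the estimate converges to the exact value as num_perm grows.

from datasketch import MinHash

s1 = set("estimating jaccard similarity between datasets".split())
s2 = set("estimating jaccard distance between documents".split())
m1, m2 = MinHash(num_perm=256), MinHash(num_perm=256)
for t in s1:
    m1.update(t.encode("utf-8"))
for t in s2:
    m2.update(t.encode("utf-8"))
exact = len(s1 & s2) / len(s1 | s2)
print(m1.jaccard(m2), exact)  # the two values should be close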
test
|
MinHash.count
|
Estimate the cardinality count based on the technique described in
`this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_.
Returns:
int: The estimated cardinality of the set represented by this MinHash.
|
datasketch/minhash.py
|
def count(self):
'''Estimate the cardinality count based on the technique described in
`this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_.
Returns:
int: The estimated cardinality of the set represented by this MinHash.
'''
k = len(self)
return np.float(k) / np.sum(self.hashvalues / np.float(_max_hash)) - 1.0
|
def count(self):
'''Estimate the cardinality count based on the technique described in
`this paper <http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=365694>`_.
Returns:
int: The estimated cardinality of the set represented by this MinHash.
'''
k = len(self)
return np.float(k) / np.sum(self.hashvalues / np.float(_max_hash)) - 1.0
|
[
"Estimate",
"the",
"cardinality",
"count",
"based",
"on",
"the",
"technique",
"described",
"in",
"this",
"paper",
"<http",
":",
"//",
"ieeexplore",
".",
"ieee",
".",
"org",
"/",
"stamp",
"/",
"stamp",
".",
"jsp?arnumber",
"=",
"365694",
">",
"_",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/minhash.py#L156-L164
|
[
"def",
"count",
"(",
"self",
")",
":",
"k",
"=",
"len",
"(",
"self",
")",
"return",
"np",
".",
"float",
"(",
"k",
")",
"/",
"np",
".",
"sum",
"(",
"self",
".",
"hashvalues",
"/",
"np",
".",
"float",
"(",
"_max_hash",
")",
")",
"-",
"1.0"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
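A sketch of the estimator above (assumes datasketch is installed): with k permutations the estimate is roughly k / sum(h_i / H) - 1, where H is the maximum 32-bit hash value.

from datasketch import MinHash

m = MinHash(num_perm=256)
for i in range(1000):
    m.update(str(i).encode("utf-8"))
print(m.count())  # should be close to the true cardinality, 1000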
test
|
MinHash.merge
|
Merge the other MinHash with this one, making this one the union
of both.
Args:
other (datasketch.MinHash): The other MinHash.
|
datasketch/minhash.py
|
def merge(self, other):
'''Merge the other MinHash with this one, making this one the union
of both.
Args:
other (datasketch.MinHash): The other MinHash.
'''
if other.seed != self.seed:
raise ValueError("Cannot merge MinHash with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot merge MinHash with\
different numbers of permutation functions")
self.hashvalues = np.minimum(other.hashvalues, self.hashvalues)
|
def merge(self, other):
'''Merge the other MinHash with this one, making this one the union
of both.
Args:
other (datasketch.MinHash): The other MinHash.
'''
if other.seed != self.seed:
raise ValueError("Cannot merge MinHash with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot merge MinHash with\
different numbers of permutation functions")
self.hashvalues = np.minimum(other.hashvalues, self.hashvalues)
|
[
"Merge",
"the",
"other",
"MinHash",
"with",
"this",
"one",
"making",
"this",
"one",
"the",
"union",
"of",
"both",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/minhash.py#L166-L179
|
[
"def",
"merge",
"(",
"self",
",",
"other",
")",
":",
"if",
"other",
".",
"seed",
"!=",
"self",
".",
"seed",
":",
"raise",
"ValueError",
"(",
"\"Cannot merge MinHash with\\\n different seeds\"",
")",
"if",
"len",
"(",
"self",
")",
"!=",
"len",
"(",
"other",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot merge MinHash with\\\n different numbers of permutation functions\"",
")",
"self",
".",
"hashvalues",
"=",
"np",
".",
"minimum",
"(",
"other",
".",
"hashvalues",
",",
"self",
".",
"hashvalues",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
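Because merging takes element-wise minima, merge() is equivalent to minhashing the union of the two underlying sets. A minimal sketch (assumes datasketch is installed):

from datasketch import MinHash

m1, m2 = MinHash(num_perm=128), MinHash(num_perm=128)
for t in (b"a", b"b"):
    m1.update(t)
for t in (b"b", b"c"):
    m2.update(t)
m1.merge(m2)  # m1 now sketches the union {a, b, c}; m2 is unchanged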
test
|
MinHash.copy
|
:returns: datasketch.MinHash -- A copy of this MinHash by exporting its state.
|
datasketch/minhash.py
|
def copy(self):
'''
:returns: datasketch.MinHash -- A copy of this MinHash by exporting its state.
'''
return MinHash(seed=self.seed, hashfunc=self.hashfunc,
hashvalues=self.digest(),
permutations=self.permutations)
|
def copy(self):
'''
:returns: datasketch.MinHash -- A copy of this MinHash by exporting its state.
'''
return MinHash(seed=self.seed, hashfunc=self.hashfunc,
hashvalues=self.digest(),
permutations=self.permutations)
|
[
":",
"returns",
":",
"datasketch",
".",
"MinHash",
"--",
"A",
"copy",
"of",
"this",
"MinHash",
"by",
"exporting",
"its",
"state",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/minhash.py#L207-L213
|
[
"def",
"copy",
"(",
"self",
")",
":",
"return",
"MinHash",
"(",
"seed",
"=",
"self",
".",
"seed",
",",
"hashfunc",
"=",
"self",
".",
"hashfunc",
",",
"hashvalues",
"=",
"self",
".",
"digest",
"(",
")",
",",
"permutations",
"=",
"self",
".",
"permutations",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
MinHash.union
|
Create a MinHash which is the union of the MinHash objects passed as arguments.
Args:
*mhs: The MinHash objects to be united. The argument list length is variable,
but must be at least 2.
Returns:
datasketch.MinHash: A new union MinHash.
|
datasketch/minhash.py
|
def union(cls, *mhs):
'''Create a MinHash which is the union of the MinHash objects passed as arguments.
Args:
*mhs: The MinHash objects to be united. The argument list length is variable,
but must be at least 2.
Returns:
datasketch.MinHash: A new union MinHash.
'''
if len(mhs) < 2:
raise ValueError("Cannot union less than 2 MinHash")
num_perm = len(mhs[0])
seed = mhs[0].seed
if any((seed != m.seed or num_perm != len(m)) for m in mhs):
raise ValueError("The unioning MinHash must have the\
same seed and number of permutation functions")
hashvalues = np.minimum.reduce([m.hashvalues for m in mhs])
permutations = mhs[0].permutations
return cls(num_perm=num_perm, seed=seed, hashvalues=hashvalues,
permutations=permutations)
|
def union(cls, *mhs):
'''Create a MinHash which is the union of the MinHash objects passed as arguments.
Args:
*mhs: The MinHash objects to be united. The argument list length is variable,
but must be at least 2.
Returns:
datasketch.MinHash: A new union MinHash.
'''
if len(mhs) < 2:
raise ValueError("Cannot union less than 2 MinHash")
num_perm = len(mhs[0])
seed = mhs[0].seed
if any((seed != m.seed or num_perm != len(m)) for m in mhs):
raise ValueError("The unioning MinHash must have the\
same seed and number of permutation functions")
hashvalues = np.minimum.reduce([m.hashvalues for m in mhs])
permutations = mhs[0].permutations
return cls(num_perm=num_perm, seed=seed, hashvalues=hashvalues,
permutations=permutations)
|
[
"Create",
"a",
"MinHash",
"which",
"is",
"the",
"union",
"of",
"the",
"MinHash",
"objects",
"passed",
"as",
"arguments",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/minhash.py#L230-L250
|
[
"def",
"union",
"(",
"cls",
",",
"*",
"mhs",
")",
":",
"if",
"len",
"(",
"mhs",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Cannot union less than 2 MinHash\"",
")",
"num_perm",
"=",
"len",
"(",
"mhs",
"[",
"0",
"]",
")",
"seed",
"=",
"mhs",
"[",
"0",
"]",
".",
"seed",
"if",
"any",
"(",
"(",
"seed",
"!=",
"m",
".",
"seed",
"or",
"num_perm",
"!=",
"len",
"(",
"m",
")",
")",
"for",
"m",
"in",
"mhs",
")",
":",
"raise",
"ValueError",
"(",
"\"The unioning MinHash must have the\\\n same seed and number of permutation functions\"",
")",
"hashvalues",
"=",
"np",
".",
"minimum",
".",
"reduce",
"(",
"[",
"m",
".",
"hashvalues",
"for",
"m",
"in",
"mhs",
"]",
")",
"permutations",
"=",
"mhs",
"[",
"0",
"]",
".",
"permutations",
"return",
"cls",
"(",
"num_perm",
"=",
"num_perm",
",",
"seed",
"=",
"seed",
",",
"hashvalues",
"=",
"hashvalues",
",",
"permutations",
"=",
"permutations",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
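Unlike the in-place merge() above, union() is a classmethod that leaves its inputs intact and returns a new sketch. A minimal sketch (assumes datasketch is installed):

from datasketch import MinHash

ms = [MinHash(num_perm=128) for _ in range(3)]
for m, t in zip(ms, (b"x", b"y", b"z")):
    m.update(t)
u = MinHash.union(*ms)  # a new MinHash sketching {x, y, z}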
test
|
_false_positive_probability
|
Compute the false positive probability given the containment threshold.
xq is the ratio of x/q.
|
datasketch/lshensemble.py
|
def _false_positive_probability(threshold, b, r, xq):
'''
Compute the false positive probability given the containment threshold.
xq is the ratio of x/q.
'''
_probability = lambda t : 1 - (1 - (t/(1 + xq - t))**float(r))**float(b)
if xq >= threshold:
a, err = integrate(_probability, 0.0, threshold)
return a
a, err = integrate(_probability, 0.0, xq)
return a
|
def _false_positive_probability(threshold, b, r, xq):
'''
Compute the false positive probability given the containment threshold.
xq is the ratio of x/q.
'''
_probability = lambda t : 1 - (1 - (t/(1 + xq - t))**float(r))**float(b)
if xq >= threshold:
a, err = integrate(_probability, 0.0, threshold)
return a
a, err = integrate(_probability, 0.0, xq)
return a
|
[
"Compute",
"the",
"false",
"positive",
"probability",
"given",
"the",
"containment",
"threshold",
".",
"xq",
"is",
"the",
"ratio",
"of",
"x",
"/",
"q",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble.py#L7-L17
|
[
"def",
"_false_positive_probability",
"(",
"threshold",
",",
"b",
",",
"r",
",",
"xq",
")",
":",
"_probability",
"=",
"lambda",
"t",
":",
"1",
"-",
"(",
"1",
"-",
"(",
"t",
"/",
"(",
"1",
"+",
"xq",
"-",
"t",
")",
")",
"**",
"float",
"(",
"r",
")",
")",
"**",
"float",
"(",
"b",
")",
"if",
"xq",
">=",
"threshold",
":",
"a",
",",
"err",
"=",
"integrate",
"(",
"_probability",
",",
"0.0",
",",
"threshold",
")",
"return",
"a",
"a",
",",
"err",
"=",
"integrate",
"(",
"_probability",
",",
"0.0",
",",
"xq",
")",
"return",
"a"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
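In the integrand, t / (1 + xq - t) converts a containment value t into the corresponding Jaccard similarity given the size ratio xq, and 1 - (1 - s**r)**b is the collision probability of a (b, r)-banded LSH at similarity s; integrating below the threshold gives the false positive mass. A standalone sketch using scipy.integrate.quad (the `integrate` in the module above is assumed to be a quadrature routine with the same (func, lower, upper) signature):

from scipy.integrate import quad

def false_positive_probability(threshold, b, r, xq):
    prob = lambda t: 1 - (1 - (t / (1 + xq - t)) ** float(r)) ** float(b)
    # integrate from 0 to min(threshold, xq), mirroring the branch above
    upper = threshold if xq >= threshold else xq
    area, _err = quad(prob, 0.0, upper)
    return area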
test
|
_optimal_param
|
Compute the optimal parameters that minimize the weighted sum
of probabilities of false positive and false negative.
xq is the ratio of x/q.
|
datasketch/lshensemble.py
|
def _optimal_param(threshold, num_perm, max_r, xq, false_positive_weight,
false_negative_weight):
'''
Compute the optimal parameters that minimize the weighted sum
of probabilities of false positive and false negative.
xq is the ratio of x/q.
'''
min_error = float("inf")
opt = (0, 0)
for b in range(1, num_perm+1):
for r in range(1, max_r+1):
if b*r > num_perm:
continue
fp = _false_positive_probability(threshold, b, r, xq)
fn = _false_negative_probability(threshold, b, r, xq)
error = fp*false_positive_weight + fn*false_negative_weight
if error < min_error:
min_error = error
opt = (b, r)
return opt
|
def _optimal_param(threshold, num_perm, max_r, xq, false_positive_weight,
false_negative_weight):
'''
Compute the optimal parameters that minimize the weighted sum
of probabilities of false positive and false negative.
xq is the ratio of x/q.
'''
min_error = float("inf")
opt = (0, 0)
for b in range(1, num_perm+1):
for r in range(1, max_r+1):
if b*r > num_perm:
continue
fp = _false_positive_probability(threshold, b, r, xq)
fn = _false_negative_probability(threshold, b, r, xq)
error = fp*false_positive_weight + fn*false_negative_weight
if error < min_error:
min_error = error
opt = (b, r)
return opt
|
[
"Compute",
"the",
"optimal",
"parameters",
"that",
"minimizes",
"the",
"weighted",
"sum",
"of",
"probabilities",
"of",
"false",
"positive",
"and",
"false",
"negative",
".",
"xq",
"is",
"the",
"ratio",
"of",
"x",
"/",
"q",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble.py#L34-L53
|
[
"def",
"_optimal_param",
"(",
"threshold",
",",
"num_perm",
",",
"max_r",
",",
"xq",
",",
"false_positive_weight",
",",
"false_negative_weight",
")",
":",
"min_error",
"=",
"float",
"(",
"\"inf\"",
")",
"opt",
"=",
"(",
"0",
",",
"0",
")",
"for",
"b",
"in",
"range",
"(",
"1",
",",
"num_perm",
"+",
"1",
")",
":",
"for",
"r",
"in",
"range",
"(",
"1",
",",
"max_r",
"+",
"1",
")",
":",
"if",
"b",
"*",
"r",
">",
"num_perm",
":",
"continue",
"fp",
"=",
"_false_positive_probability",
"(",
"threshold",
",",
"b",
",",
"r",
",",
"xq",
")",
"fn",
"=",
"_false_negative_probability",
"(",
"threshold",
",",
"b",
",",
"r",
",",
"xq",
")",
"error",
"=",
"fp",
"*",
"false_positive_weight",
"+",
"fn",
"*",
"false_negative_weight",
"if",
"error",
"<",
"min_error",
":",
"min_error",
"=",
"error",
"opt",
"=",
"(",
"b",
",",
"r",
")",
"return",
"opt"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
MinHashLSHEnsemble.index
|
Index all sets given their keys, MinHashes, and sizes.
It can be called only once after the index is created.
Args:
entries (`iterable` of `tuple`): An iterable of tuples, each must be
in the form of `(key, minhash, size)`, where `key` is the unique
identifier of a set, `minhash` is the MinHash of the set,
and `size` is the size or number of unique items in the set.
Note:
`size` must be positive.
|
datasketch/lshensemble.py
|
def index(self, entries):
'''
Index all sets given their keys, MinHashes, and sizes.
It can be called only once after the index is created.
Args:
entries (`iterable` of `tuple`): An iterable of tuples, each must be
in the form of `(key, minhash, size)`, where `key` is the unique
identifier of a set, `minhash` is the MinHash of the set,
and `size` is the size or number of unique items in the set.
Note:
`size` must be positive.
'''
if not self.is_empty():
raise ValueError("Cannot call index again on a non-empty index")
if not isinstance(entries, list):
queue = deque([])
for key, minhash, size in entries:
if size <= 0:
raise ValueError("Set size must be positive")
queue.append((key, minhash, size))
entries = list(queue)
if len(entries) == 0:
raise ValueError("entries is empty")
# Create optimal partitions.
sizes, counts = np.array(sorted(
Counter(e[2] for e in entries).most_common())).T
partitions = optimal_partitions(sizes, counts, len(self.indexes))
for i, (lower, upper) in enumerate(partitions):
self.lowers[i], self.uppers[i] = lower, upper
# Insert into partitions.
entries.sort(key=lambda e : e[2])
curr_part = 0
for key, minhash, size in entries:
if size > self.uppers[curr_part]:
curr_part += 1
for r in self.indexes[curr_part]:
self.indexes[curr_part][r].insert(key, minhash)
|
def index(self, entries):
'''
Index all sets given their keys, MinHashes, and sizes.
It can be called only once after the index is created.
Args:
entries (`iterable` of `tuple`): An iterable of tuples, each must be
in the form of `(key, minhash, size)`, where `key` is the unique
identifier of a set, `minhash` is the MinHash of the set,
and `size` is the size or number of unique items in the set.
Note:
`size` must be positive.
'''
if not self.is_empty():
raise ValueError("Cannot call index again on a non-empty index")
if not isinstance(entries, list):
queue = deque([])
for key, minhash, size in entries:
if size <= 0:
raise ValueError("Set size must be positive")
queue.append((key, minhash, size))
entries = list(queue)
if len(entries) == 0:
raise ValueError("entries is empty")
# Create optimal partitions.
sizes, counts = np.array(sorted(
Counter(e[2] for e in entries).most_common())).T
partitions = optimal_partitions(sizes, counts, len(self.indexes))
for i, (lower, upper) in enumerate(partitions):
self.lowers[i], self.uppers[i] = lower, upper
# Insert into partitions.
entries.sort(key=lambda e : e[2])
curr_part = 0
for key, minhash, size in entries:
if size > self.uppers[curr_part]:
curr_part += 1
for r in self.indexes[curr_part]:
self.indexes[curr_part][r].insert(key, minhash)
|
[
"Index",
"all",
"sets",
"given",
"their",
"keys",
"MinHashes",
"and",
"sizes",
".",
"It",
"can",
"be",
"called",
"only",
"once",
"after",
"the",
"index",
"is",
"created",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble.py#L139-L177
|
[
"def",
"index",
"(",
"self",
",",
"entries",
")",
":",
"if",
"not",
"self",
".",
"is_empty",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot call index again on a non-empty index\"",
")",
"if",
"not",
"isinstance",
"(",
"entries",
",",
"list",
")",
":",
"queue",
"=",
"deque",
"(",
"[",
"]",
")",
"for",
"key",
",",
"minhash",
",",
"size",
"in",
"entries",
":",
"if",
"size",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Set size must be positive\"",
")",
"queue",
".",
"append",
"(",
"(",
"key",
",",
"minhash",
",",
"size",
")",
")",
"entries",
"=",
"list",
"(",
"queue",
")",
"if",
"len",
"(",
"entries",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"entries is empty\"",
")",
"# Create optimal partitions.",
"sizes",
",",
"counts",
"=",
"np",
".",
"array",
"(",
"sorted",
"(",
"Counter",
"(",
"e",
"[",
"2",
"]",
"for",
"e",
"in",
"entries",
")",
".",
"most_common",
"(",
")",
")",
")",
".",
"T",
"partitions",
"=",
"optimal_partitions",
"(",
"sizes",
",",
"counts",
",",
"len",
"(",
"self",
".",
"indexes",
")",
")",
"for",
"i",
",",
"(",
"lower",
",",
"upper",
")",
"in",
"enumerate",
"(",
"partitions",
")",
":",
"self",
".",
"lowers",
"[",
"i",
"]",
",",
"self",
".",
"uppers",
"[",
"i",
"]",
"=",
"lower",
",",
"upper",
"# Insert into partitions.",
"entries",
".",
"sort",
"(",
"key",
"=",
"lambda",
"e",
":",
"e",
"[",
"2",
"]",
")",
"curr_part",
"=",
"0",
"for",
"key",
",",
"minhash",
",",
"size",
"in",
"entries",
":",
"if",
"size",
">",
"self",
".",
"uppers",
"[",
"curr_part",
"]",
":",
"curr_part",
"+=",
"1",
"for",
"r",
"in",
"self",
".",
"indexes",
"[",
"curr_part",
"]",
":",
"self",
".",
"indexes",
"[",
"curr_part",
"]",
"[",
"r",
"]",
".",
"insert",
"(",
"key",
",",
"minhash",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
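A usage sketch for the ensemble (assumes datasketch is installed): collect (key, MinHash, size) triples and build the partitioned index in one shot; per the docstring, index() may be called only once.

from datasketch import MinHash, MinHashLSHEnsemble

sets = {"s1": {"a", "b", "c"}, "s2": {"b", "c", "d", "e"}}
entries = []
for key, s in sets.items():
    m = MinHash(num_perm=128)
    for t in s:
        m.update(t.encode("utf-8"))
    entries.append((key, m, len(s)))
lshensemble = MinHashLSHEnsemble(threshold=0.8, num_perm=128)
lshensemble.index(entries)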
test
|
MinHashLSHEnsemble.query
|
Given the MinHash and size of the query set, retrieve
keys that reference sets with containment with respect to
the query set greater than the threshold.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
size (int): The size (number of unique items) of the query set.
Returns:
`iterator` of keys.
|
datasketch/lshensemble.py
|
def query(self, minhash, size):
'''
Given the MinHash and size of the query set, retrieve
keys that reference sets with containment with respect to
the query set greater than the threshold.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
size (int): The size (number of unique items) of the query set.
Returns:
`iterator` of keys.
'''
for i, index in enumerate(self.indexes):
u = self.uppers[i]
if u is None:
continue
b, r = self._get_optimal_param(u, size)
for key in index[r]._query_b(minhash, b):
yield key
|
def query(self, minhash, size):
'''
Given the MinHash and size of the query set, retrieve
keys that reference sets with containment with respect to
the query set greater than the threshold.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
size (int): The size (number of unique items) of the query set.
Returns:
`iterator` of keys.
'''
for i, index in enumerate(self.indexes):
u = self.uppers[i]
if u is None:
continue
b, r = self._get_optimal_param(u, size)
for key in index[r]._query_b(minhash, b):
yield key
|
[
"Giving",
"the",
"MinHash",
"and",
"size",
"of",
"the",
"query",
"set",
"retrieve",
"keys",
"that",
"references",
"sets",
"with",
"containment",
"with",
"respect",
"to",
"the",
"query",
"set",
"greater",
"than",
"the",
"threshold",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble.py#L179-L198
|
[
"def",
"query",
"(",
"self",
",",
"minhash",
",",
"size",
")",
":",
"for",
"i",
",",
"index",
"in",
"enumerate",
"(",
"self",
".",
"indexes",
")",
":",
"u",
"=",
"self",
".",
"uppers",
"[",
"i",
"]",
"if",
"u",
"is",
"None",
":",
"continue",
"b",
",",
"r",
"=",
"self",
".",
"_get_optimal_param",
"(",
"u",
",",
"size",
")",
"for",
"key",
"in",
"index",
"[",
"r",
"]",
".",
"_query_b",
"(",
"minhash",
",",
"b",
")",
":",
"yield",
"key"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
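A query sketch continuing the example above: containment of q in x is |q ∩ x| / |q|, which is why the query needs both the MinHash and the size of the query set.

q = {"b", "c"}
mq = MinHash(num_perm=128)
for t in q:
    mq.update(t.encode("utf-8"))
for key in lshensemble.query(mq, len(q)):
    print(key)  # keys whose sets likely contain >= 80% of q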
test
|
MinHashLSHEnsemble.is_empty
|
Returns:
bool: Check if the index is empty.
|
datasketch/lshensemble.py
|
def is_empty(self):
'''
Returns:
bool: Check if the index is empty.
'''
return all(all(index[r].is_empty() for r in index)
for index in self.indexes)
|
def is_empty(self):
'''
Returns:
bool: Check if the index is empty.
'''
return all(all(index[r].is_empty() for r in index)
for index in self.indexes)
|
[
"Returns",
":",
"bool",
":",
"Check",
"if",
"the",
"index",
"is",
"empty",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshensemble.py#L211-L217
|
[
"def",
"is_empty",
"(",
"self",
")",
":",
"return",
"all",
"(",
"all",
"(",
"index",
"[",
"r",
"]",
".",
"is_empty",
"(",
")",
"for",
"r",
"in",
"index",
")",
"for",
"index",
"in",
"self",
".",
"indexes",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
WeightedMinHash.jaccard
|
Estimate the `weighted Jaccard similarity`_ between the
multi-sets represented by this weighted MinHash and the other.
Args:
other (datasketch.WeightedMinHash): The other weighted MinHash.
Returns:
float: The weighted Jaccard similarity between 0.0 and 1.0.
.. _`weighted Jaccard similarity`: http://mathoverflow.net/questions/123339/weighted-jaccard-similarity
|
datasketch/weighted_minhash.py
|
def jaccard(self, other):
'''Estimate the `weighted Jaccard similarity`_ between the
multi-sets represented by this weighted MinHash and the other.
Args:
other (datasketch.WeightedMinHash): The other weighted MinHash.
Returns:
float: The weighted Jaccard similarity between 0.0 and 1.0.
.. _`weighted Jaccard similarity`: http://mathoverflow.net/questions/123339/weighted-jaccard-similarity
'''
if other.seed != self.seed:
raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\
different numbers of hash values")
# Check how many pairs of (k, t) hashvalues are equal
intersection = 0
for this, that in zip(self.hashvalues, other.hashvalues):
if np.array_equal(this, that):
intersection += 1
return float(intersection) / float(len(self))
|
def jaccard(self, other):
'''Estimate the `weighted Jaccard similarity`_ between the
multi-sets represented by this weighted MinHash and the other.
Args:
other (datasketch.WeightedMinHash): The other weighted MinHash.
Returns:
float: The weighted Jaccard similarity between 0.0 and 1.0.
.. _`weighted Jaccard similarity`: http://mathoverflow.net/questions/123339/weighted-jaccard-similarity
'''
if other.seed != self.seed:
raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\
different seeds")
if len(self) != len(other):
raise ValueError("Cannot compute Jaccard given WeightedMinHash objects with\
different numbers of hash values")
# Check how many pairs of (k, t) hashvalues are equal
intersection = 0
for this, that in zip(self.hashvalues, other.hashvalues):
if np.array_equal(this, that):
intersection += 1
return float(intersection) / float(len(self))
|
[
"Estimate",
"the",
"weighted",
"Jaccard",
"similarity",
"_",
"between",
"the",
"multi",
"-",
"sets",
"represented",
"by",
"this",
"weighted",
"MinHash",
"and",
"the",
"other",
".",
"Args",
":",
"other",
"(",
"datasketch",
".",
"WeightedMinHash",
")",
":",
"The",
"other",
"weighted",
"MinHash",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/weighted_minhash.py#L22-L45
|
[
"def",
"jaccard",
"(",
"self",
",",
"other",
")",
":",
"if",
"other",
".",
"seed",
"!=",
"self",
".",
"seed",
":",
"raise",
"ValueError",
"(",
"\"Cannot compute Jaccard given WeightedMinHash objects with\\\n different seeds\"",
")",
"if",
"len",
"(",
"self",
")",
"!=",
"len",
"(",
"other",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot compute Jaccard given WeightedMinHash objects with\\\n different numbers of hash values\"",
")",
"# Check how many pairs of (k, t) hashvalues are equal",
"intersection",
"=",
"0",
"for",
"this",
",",
"that",
"in",
"zip",
"(",
"self",
".",
"hashvalues",
",",
"other",
".",
"hashvalues",
")",
":",
"if",
"np",
".",
"array_equal",
"(",
"this",
",",
"that",
")",
":",
"intersection",
"+=",
"1",
"return",
"float",
"(",
"intersection",
")",
"/",
"float",
"(",
"len",
"(",
"self",
")",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
WeightedMinHashGenerator.minhash
|
Create a new weighted MinHash given a weighted Jaccard vector.
Each dimension is an integer
frequency of the corresponding element in the multi-set represented
by the vector.
Args:
v (numpy.array): The Jaccard vector.
|
datasketch/weighted_minhash.py
|
def minhash(self, v):
'''Create a new weighted MinHash given a weighted Jaccard vector.
Each dimension is an integer
frequency of the corresponding element in the multi-set represented
by the vector.
Args:
v (numpy.array): The Jaccard vector.
'''
if not isinstance(v, collections.Iterable):
raise TypeError("Input vector must be an iterable")
if not len(v) == self.dim:
raise ValueError("Input dimension mismatch, expecting %d" % self.dim)
if not isinstance(v, np.ndarray):
v = np.array(v, dtype=np.float32)
elif v.dtype != np.float32:
v = v.astype(np.float32)
hashvalues = np.zeros((self.sample_size, 2), dtype=np.int)
vzeros = (v == 0)
if vzeros.all():
raise ValueError("Input is all zeros")
v[vzeros] = np.nan
vlog = np.log(v)
for i in range(self.sample_size):
t = np.floor((vlog / self.rs[i]) + self.betas[i])
ln_y = (t - self.betas[i]) * self.rs[i]
ln_a = self.ln_cs[i] - ln_y - self.rs[i]
k = np.nanargmin(ln_a)
hashvalues[i][0], hashvalues[i][1] = k, int(t[k])
return WeightedMinHash(self.seed, hashvalues)
|
def minhash(self, v):
'''Create a new weighted MinHash given a weighted Jaccard vector.
Each dimension is an integer
frequency of the corresponding element in the multi-set represented
by the vector.
Args:
v (numpy.array): The Jaccard vector.
'''
if not isinstance(v, collections.Iterable):
raise TypeError("Input vector must be an iterable")
if not len(v) == self.dim:
raise ValueError("Input dimension mismatch, expecting %d" % self.dim)
if not isinstance(v, np.ndarray):
v = np.array(v, dtype=np.float32)
elif v.dtype != np.float32:
v = v.astype(np.float32)
hashvalues = np.zeros((self.sample_size, 2), dtype=np.int)
vzeros = (v == 0)
if vzeros.all():
raise ValueError("Input is all zeros")
v[vzeros] = np.nan
vlog = np.log(v)
for i in range(self.sample_size):
t = np.floor((vlog / self.rs[i]) + self.betas[i])
ln_y = (t - self.betas[i]) * self.rs[i]
ln_a = self.ln_cs[i] - ln_y - self.rs[i]
k = np.nanargmin(ln_a)
hashvalues[i][0], hashvalues[i][1] = k, int(t[k])
return WeightedMinHash(self.seed, hashvalues)
|
[
"Create",
"a",
"new",
"weighted",
"MinHash",
"given",
"a",
"weighted",
"Jaccard",
"vector",
".",
"Each",
"dimension",
"is",
"an",
"integer",
"frequency",
"of",
"the",
"corresponding",
"element",
"in",
"the",
"multi",
"-",
"set",
"represented",
"by",
"the",
"vector",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/weighted_minhash.py#L107-L136
|
[
"def",
"minhash",
"(",
"self",
",",
"v",
")",
":",
"if",
"not",
"isinstance",
"(",
"v",
",",
"collections",
".",
"Iterable",
")",
":",
"raise",
"TypeError",
"(",
"\"Input vector must be an iterable\"",
")",
"if",
"not",
"len",
"(",
"v",
")",
"==",
"self",
".",
"dim",
":",
"raise",
"ValueError",
"(",
"\"Input dimension mismatch, expecting %d\"",
"%",
"self",
".",
"dim",
")",
"if",
"not",
"isinstance",
"(",
"v",
",",
"np",
".",
"ndarray",
")",
":",
"v",
"=",
"np",
".",
"array",
"(",
"v",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"elif",
"v",
".",
"dtype",
"!=",
"np",
".",
"float32",
":",
"v",
"=",
"v",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"hashvalues",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"sample_size",
",",
"2",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"vzeros",
"=",
"(",
"v",
"==",
"0",
")",
"if",
"vzeros",
".",
"all",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Input is all zeros\"",
")",
"v",
"[",
"vzeros",
"]",
"=",
"np",
".",
"nan",
"vlog",
"=",
"np",
".",
"log",
"(",
"v",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"sample_size",
")",
":",
"t",
"=",
"np",
".",
"floor",
"(",
"(",
"vlog",
"/",
"self",
".",
"rs",
"[",
"i",
"]",
")",
"+",
"self",
".",
"betas",
"[",
"i",
"]",
")",
"ln_y",
"=",
"(",
"t",
"-",
"self",
".",
"betas",
"[",
"i",
"]",
")",
"*",
"self",
".",
"rs",
"[",
"i",
"]",
"ln_a",
"=",
"self",
".",
"ln_cs",
"[",
"i",
"]",
"-",
"ln_y",
"-",
"self",
".",
"rs",
"[",
"i",
"]",
"k",
"=",
"np",
".",
"nanargmin",
"(",
"ln_a",
")",
"hashvalues",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"hashvalues",
"[",
"i",
"]",
"[",
"1",
"]",
"=",
"k",
",",
"int",
"(",
"t",
"[",
"k",
"]",
")",
"return",
"WeightedMinHash",
"(",
"self",
".",
"seed",
",",
"hashvalues",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
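An end-to-end sketch for weighted MinHash (assumes datasketch is installed). Each sample is a (k, t) pair, and jaccard() above estimates similarity as the fraction of equal pairs. Note that the snapshot uses collections.Iterable and np.int, both removed in newer Python/NumPy; current versions would need collections.abc.Iterable and a concrete integer dtype.

import numpy as np
from datasketch import WeightedMinHashGenerator

gen = WeightedMinHashGenerator(dim=10, sample_size=256, seed=1)
v1 = np.random.uniform(1, 10, 10)
v2 = np.random.uniform(1, 10, 10)
wm1, wm2 = gen.minhash(v1), gen.minhash(v2)
print(wm1.jaccard(wm2))  # estimates the weighted Jaccard between v1 and v2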
test
|
MinHashLSH.insert
|
Insert a key into the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`.
|
datasketch/lsh.py
|
def insert(self, key, minhash, check_duplication=True):
'''
Insert a key into the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`.
'''
self._insert(key, minhash, check_duplication=check_duplication, buffer=False)
|
def insert(self, key, minhash, check_duplication=True):
'''
Insert a key into the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`.
'''
self._insert(key, minhash, check_duplication=check_duplication, buffer=False)
|
[
"Insert",
"a",
"key",
"to",
"the",
"index",
"together",
"with",
"a",
"MinHash",
"(",
"or",
"weighted",
"MinHash",
")",
"of",
"the",
"set",
"referenced",
"by",
"the",
"key",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lsh.py#L136-L149
|
[
"def",
"insert",
"(",
"self",
",",
"key",
",",
"minhash",
",",
"check_duplication",
"=",
"True",
")",
":",
"self",
".",
"_insert",
"(",
"key",
",",
"minhash",
",",
"check_duplication",
"=",
"check_duplication",
",",
"buffer",
"=",
"False",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
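A usage sketch (assumes datasketch is installed): build a Jaccard-threshold LSH index and insert keyed MinHashes; duplicate-key checking can be disabled for bulk loads at the caller's risk.

from datasketch import MinHash, MinHashLSH

lsh = MinHashLSH(threshold=0.5, num_perm=128)
for key, text in [("d1", "a b c"), ("d2", "b c d")]:
    m = MinHash(num_perm=128)
    for t in text.split():
        m.update(t.encode("utf-8"))
    lsh.insert(key, m)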
test
|
MinHashLSH.remove
|
Remove the key from the index.
Args:
key (hashable): The unique identifier of a set.
|
datasketch/lsh.py
|
def remove(self, key):
'''
Remove the key from the index.
Args:
key (hashable): The unique identifier of a set.
'''
if self.prepickle:
key = pickle.dumps(key)
if key not in self.keys:
raise ValueError("The given key does not exist")
for H, hashtable in zip(self.keys[key], self.hashtables):
hashtable.remove_val(H, key)
if not hashtable.get(H):
hashtable.remove(H)
self.keys.remove(key)
|
def remove(self, key):
'''
Remove the key from the index.
Args:
key (hashable): The unique identifier of a set.
'''
if self.prepickle:
key = pickle.dumps(key)
if key not in self.keys:
raise ValueError("The given key does not exist")
for H, hashtable in zip(self.keys[key], self.hashtables):
hashtable.remove_val(H, key)
if not hashtable.get(H):
hashtable.remove(H)
self.keys.remove(key)
|
[
"Remove",
"the",
"key",
"from",
"the",
"index",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lsh.py#L213-L229
|
[
"def",
"remove",
"(",
"self",
",",
"key",
")",
":",
"if",
"self",
".",
"prepickle",
":",
"key",
"=",
"pickle",
".",
"dumps",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"self",
".",
"keys",
":",
"raise",
"ValueError",
"(",
"\"The given key does not exist\"",
")",
"for",
"H",
",",
"hashtable",
"in",
"zip",
"(",
"self",
".",
"keys",
"[",
"key",
"]",
",",
"self",
".",
"hashtables",
")",
":",
"hashtable",
".",
"remove_val",
"(",
"H",
",",
"key",
")",
"if",
"not",
"hashtable",
".",
"get",
"(",
"H",
")",
":",
"hashtable",
".",
"remove",
"(",
"H",
")",
"self",
".",
"keys",
".",
"remove",
"(",
"key",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
MinHashLSH.get_subset_counts
|
Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above)
restricted to the list of keys given.
Args:
keys (hashable) : the keys for which to get the bucket allocation
counts
|
datasketch/lsh.py
|
def get_subset_counts(self, *keys):
'''
Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above)
restricted to the list of keys given.
Args:
keys (hashable) : the keys for which to get the bucket allocation
counts
'''
if self.prepickle:
key_set = [pickle.dumps(key) for key in set(keys)]
else:
key_set = list(set(keys))
hashtables = [unordered_storage({'type': 'dict'}) for _ in
range(self.b)]
Hss = self.keys.getmany(*key_set)
for key, Hs in zip(key_set, Hss):
for H, hashtable in zip(Hs, hashtables):
hashtable.insert(H, key)
return [hashtable.itemcounts() for hashtable in hashtables]
|
def get_subset_counts(self, *keys):
'''
Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above)
restricted to the list of keys given.
Args:
keys (hashable) : the keys for which to get the bucket allocation
counts
'''
if self.prepickle:
key_set = [pickle.dumps(key) for key in set(keys)]
else:
key_set = list(set(keys))
hashtables = [unordered_storage({'type': 'dict'}) for _ in
range(self.b)]
Hss = self.keys.getmany(*key_set)
for key, Hs in zip(key_set, Hss):
for H, hashtable in zip(Hs, hashtables):
hashtable.insert(H, key)
return [hashtable.itemcounts() for hashtable in hashtables]
|
[
"Returns",
"the",
"bucket",
"allocation",
"counts",
"(",
"see",
":",
"func",
":",
"~datasketch",
".",
"MinHashLSH",
".",
"get_counts",
"above",
")",
"restricted",
"to",
"the",
"list",
"of",
"keys",
"given",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lsh.py#L268-L287
|
[
"def",
"get_subset_counts",
"(",
"self",
",",
"*",
"keys",
")",
":",
"if",
"self",
".",
"prepickle",
":",
"key_set",
"=",
"[",
"pickle",
".",
"dumps",
"(",
"key",
")",
"for",
"key",
"in",
"set",
"(",
"keys",
")",
"]",
"else",
":",
"key_set",
"=",
"list",
"(",
"set",
"(",
"keys",
")",
")",
"hashtables",
"=",
"[",
"unordered_storage",
"(",
"{",
"'type'",
":",
"'dict'",
"}",
")",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"b",
")",
"]",
"Hss",
"=",
"self",
".",
"keys",
".",
"getmany",
"(",
"*",
"key_set",
")",
"for",
"key",
",",
"Hs",
"in",
"zip",
"(",
"key_set",
",",
"Hss",
")",
":",
"for",
"H",
",",
"hashtable",
"in",
"zip",
"(",
"Hs",
",",
"hashtables",
")",
":",
"hashtable",
".",
"insert",
"(",
"H",
",",
"key",
")",
"return",
"[",
"hashtable",
".",
"itemcounts",
"(",
")",
"for",
"hashtable",
"in",
"hashtables",
"]"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
HyperLogLog.update
|
Update the HyperLogLog with a new data value in bytes.
The value will be hashed using the hash function specified by
the `hashfunc` argument in the constructor.
Args:
b: The value to be hashed using the hash function specified.
Example:
To update with a new string value (using the default SHA1 hash
function, which requires bytes as input):
.. code-block:: python
hll = HyperLogLog()
hll.update("new value".encode('utf-8'))
We can also use a different hash function, for example, `pyfarmhash`:
.. code-block:: python
import farmhash
def _hash_32(b):
return farmhash.hash32(b)
hll = HyperLogLog(hashfunc=_hash_32)
hll.update("new value")
|
datasketch/hyperloglog.py
|
def update(self, b):
'''
Update the HyperLogLog with a new data value in bytes.
The value will be hashed using the hash function specified by
the `hashfunc` argument in the constructor.
Args:
b: The value to be hashed using the hash function specified.
Example:
To update with a new string value (using the default SHA1 hash
function, which requires bytes as input):
.. code-block:: python
hll = HyperLogLog()
hll.update("new value".encode('utf-8'))
We can also use a different hash function, for example, `pyfarmhash`:
.. code-block:: python
import farmhash
def _hash_32(b):
return farmhash.hash32(b)
hll = HyperLogLog(hashfunc=_hash_32)
hll.update("new value")
'''
# Digest the hash object to get the hash value
hv = self.hashfunc(b)
        # Get the index of the register using the lowest p bits of the hash
reg_index = hv & (self.m - 1)
# Get the rest of the hash
bits = hv >> self.p
# Update the register
self.reg[reg_index] = max(self.reg[reg_index], self._get_rank(bits))
|
def update(self, b):
'''
Update the HyperLogLog with a new data value in bytes.
The value will be hashed using the hash function specified by
the `hashfunc` argument in the constructor.
Args:
b: The value to be hashed using the hash function specified.
Example:
To update with a new string value (using the default SHA1 hash
function, which requires bytes as input):
.. code-block:: python
hll = HyperLogLog()
hll.update("new value".encode('utf-8'))
We can also use a different hash function, for example, `pyfarmhash`:
.. code-block:: python
import farmhash
def _hash_32(b):
return farmhash.hash32(b)
hll = HyperLogLog(hashfunc=_hash_32)
hll.update("new value")
'''
# Digest the hash object to get the hash value
hv = self.hashfunc(b)
        # Get the index of the register using the lowest p bits of the hash
reg_index = hv & (self.m - 1)
# Get the rest of the hash
bits = hv >> self.p
# Update the register
self.reg[reg_index] = max(self.reg[reg_index], self._get_rank(bits))
|
[
"Update",
"the",
"HyperLogLog",
"with",
"a",
"new",
"data",
"value",
"in",
"bytes",
".",
"The",
"value",
"will",
"be",
"hashed",
"using",
"the",
"hash",
"function",
"specified",
"by",
"the",
"hashfunc",
"argument",
"in",
"the",
"constructor",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/hyperloglog.py#L89-L124
|
[
"def",
"update",
"(",
"self",
",",
"b",
")",
":",
"# Digest the hash object to get the hash value",
"hv",
"=",
"self",
".",
"hashfunc",
"(",
"b",
")",
"# Get the index of the register using the first p bits of the hash",
"reg_index",
"=",
"hv",
"&",
"(",
"self",
".",
"m",
"-",
"1",
")",
"# Get the rest of the hash",
"bits",
"=",
"hv",
">>",
"self",
".",
"p",
"# Update the register",
"self",
".",
"reg",
"[",
"reg_index",
"]",
"=",
"max",
"(",
"self",
".",
"reg",
"[",
"reg_index",
"]",
",",
"self",
".",
"_get_rank",
"(",
"bits",
")",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
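A usage sketch (assumes datasketch is installed). Each update routes the lowest p bits of the hash to one of m = 2**p registers and stores the maximum rank (position of the leftmost set bit) seen in the remaining bits.

from datasketch import HyperLogLog

hll = HyperLogLog(p=8)  # 2**8 = 256 registers
for i in range(10000):
    hll.update(str(i).encode("utf-8"))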
test
|
HyperLogLog.count
|
Estimate the cardinality of the data values seen so far.
Returns:
int: The estimated cardinality.
|
datasketch/hyperloglog.py
|
def count(self):
'''
Estimate the cardinality of the data values seen so far.
Returns:
int: The estimated cardinality.
'''
# Use HyperLogLog estimation function
e = self.alpha * float(self.m ** 2) / np.sum(2.0**(-self.reg))
# Small range correction
if e <= (5.0 / 2.0) * self.m:
num_zero = self.m - np.count_nonzero(self.reg)
return self._linearcounting(num_zero)
# Normal range, no correction
if e <= (1.0 / 30.0) * (1 << 32):
return e
# Large range correction
return self._largerange_correction(e)
|
def count(self):
'''
Estimate the cardinality of the data values seen so far.
Returns:
int: The estimated cardinality.
'''
# Use HyperLogLog estimation function
e = self.alpha * float(self.m ** 2) / np.sum(2.0**(-self.reg))
# Small range correction
if e <= (5.0 / 2.0) * self.m:
num_zero = self.m - np.count_nonzero(self.reg)
return self._linearcounting(num_zero)
# Normal range, no correction
if e <= (1.0 / 30.0) * (1 << 32):
return e
# Large range correction
return self._largerange_correction(e)
|
[
"Estimate",
"the",
"cardinality",
"of",
"the",
"data",
"values",
"seen",
"so",
"far",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/hyperloglog.py#L126-L143
|
[
"def",
"count",
"(",
"self",
")",
":",
"# Use HyperLogLog estimation function",
"e",
"=",
"self",
".",
"alpha",
"*",
"float",
"(",
"self",
".",
"m",
"**",
"2",
")",
"/",
"np",
".",
"sum",
"(",
"2.0",
"**",
"(",
"-",
"self",
".",
"reg",
")",
")",
"# Small range correction",
"if",
"e",
"<=",
"(",
"5.0",
"/",
"2.0",
")",
"*",
"self",
".",
"m",
":",
"num_zero",
"=",
"self",
".",
"m",
"-",
"np",
".",
"count_nonzero",
"(",
"self",
".",
"reg",
")",
"return",
"self",
".",
"_linearcounting",
"(",
"num_zero",
")",
"# Normal range, no correction",
"if",
"e",
"<=",
"(",
"1.0",
"/",
"30.0",
")",
"*",
"(",
"1",
"<<",
"32",
")",
":",
"return",
"e",
"# Large range correction",
"return",
"self",
".",
"_largerange_correction",
"(",
"e",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
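Continuing the sketch above: count() applies the raw HyperLogLog estimator alpha * m**2 / sum(2.0**-reg), switching to linear counting when the estimate is small and to a large-range correction near 2**32.

print(hll.count())  # should be within a few percent of 10000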
test
|
HyperLogLog.merge
|
Merge the other HyperLogLog with this one, making this the union of the
two.
Args:
other (datasketch.HyperLogLog):
|
datasketch/hyperloglog.py
|
def merge(self, other):
'''
Merge the other HyperLogLog with this one, making this the union of the
two.
Args:
other (datasketch.HyperLogLog):
'''
if self.m != other.m or self.p != other.p:
raise ValueError("Cannot merge HyperLogLog with different\
precisions.")
self.reg = np.maximum(self.reg, other.reg)
|
def merge(self, other):
'''
Merge the other HyperLogLog with this one, making this the union of the
two.
Args:
other (datasketch.HyperLogLog):
'''
if self.m != other.m or self.p != other.p:
raise ValueError("Cannot merge HyperLogLog with different\
precisions.")
self.reg = np.maximum(self.reg, other.reg)
|
[
"Merge",
"the",
"other",
"HyperLogLog",
"with",
"this",
"one",
"making",
"this",
"the",
"union",
"of",
"the",
"two",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/hyperloglog.py#L145-L156
|
[
"def",
"merge",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"m",
"!=",
"other",
".",
"m",
"or",
"self",
".",
"p",
"!=",
"other",
".",
"p",
":",
"raise",
"ValueError",
"(",
"\"Cannot merge HyperLogLog with different\\\n precisions.\"",
")",
"self",
".",
"reg",
"=",
"np",
".",
"maximum",
"(",
"self",
".",
"reg",
",",
"other",
".",
"reg",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
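A sketch of merge() computing a union cardinality; both sketches must share the same precision, otherwise the ValueError above is raised:

from datasketch import HyperLogLog

h1, h2 = HyperLogLog(p=8), HyperLogLog(p=8)
for w in ("a", "b", "c"):
    h1.update(w.encode("utf8"))
for w in ("c", "d"):
    h2.update(w.encode("utf8"))
h1.merge(h2)          # registers become the element-wise maximum
print(h1.count())     # approximates |{a, b, c, d}| = 4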
test
|
HyperLogLog.clear
|
Reset the current HyperLogLog to empty.
|
datasketch/hyperloglog.py
|
def clear(self):
'''
Reset the current HyperLogLog to empty.
'''
self.reg = np.zeros((self.m,), dtype=np.int8)
|
def clear(self):
'''
Reset the current HyperLogLog to empty.
'''
self.reg = np.zeros((self.m,), dtype=np.int8)
|
[
"Reset",
"the",
"current",
"HyperLogLog",
"to",
"empty",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/hyperloglog.py#L184-L188
|
[
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"reg",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"m",
",",
")",
",",
"dtype",
"=",
"np",
".",
"int8",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
apk
|
Computes the average precision at k.
This function computes the average precision at k between two lists of
items.
Parameters
----------
actual : list
A list of elements that are to be predicted (order doesn't matter)
predicted : list
A list of predicted elements (order does matter)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The average precision at k over the input lists
|
benchmark/average_precision.py
|
def apk(actual, predicted, k=10):
"""
Computes the average precision at k.
This function computes the average precision at k between two lists of
items.
Parameters
----------
actual : list
A list of elements that are to be predicted (order doesn't matter)
predicted : list
A list of predicted elements (order does matter)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The average precision at k over the input lists
"""
if len(predicted)>k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i,p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if len(actual) == 0:
return 0.0
return score / min(len(actual), k)
|
def apk(actual, predicted, k=10):
"""
Computes the average precision at k.
This function computes the average precision at k between two lists of
items.
Parameters
----------
actual : list
A list of elements that are to be predicted (order doesn't matter)
predicted : list
A list of predicted elements (order does matter)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The average precision at k over the input lists
"""
if len(predicted)>k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i,p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if len(actual) == 0:
return 0.0
return score / min(len(actual), k)
|
[
"Computes",
"the",
"average",
"precision",
"at",
"k",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/benchmark/average_precision.py#L3-L39
|
[
"def",
"apk",
"(",
"actual",
",",
"predicted",
",",
"k",
"=",
"10",
")",
":",
"if",
"len",
"(",
"predicted",
")",
">",
"k",
":",
"predicted",
"=",
"predicted",
"[",
":",
"k",
"]",
"score",
"=",
"0.0",
"num_hits",
"=",
"0.0",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"predicted",
")",
":",
"if",
"p",
"in",
"actual",
"and",
"p",
"not",
"in",
"predicted",
"[",
":",
"i",
"]",
":",
"num_hits",
"+=",
"1.0",
"score",
"+=",
"num_hits",
"/",
"(",
"i",
"+",
"1.0",
")",
"if",
"len",
"(",
"actual",
")",
"==",
"0",
":",
"return",
"0.0",
"return",
"score",
"/",
"min",
"(",
"len",
"(",
"actual",
")",
",",
"k",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
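A worked example of apk(): hits at ranks 1 and 3 contribute precisions 1/1 and 2/3, and the sum is divided by min(len(actual), k):

actual = [1, 2, 3]
predicted = [1, 4, 2]
# rank 1: hit  -> precision 1/1
# rank 2: miss -> no contribution
# rank 3: hit  -> precision 2/3
print(apk(actual, predicted, k=10))   # (1.0 + 2/3) / 3 = 0.5555...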
test
|
mapk
|
Computes the mean average precision at k.
This function computes the mean average precision at k between two lists
of lists of items.
Parameters
----------
actual : list
A list of lists of elements that are to be predicted
(order doesn't matter in the lists)
predicted : list
A list of lists of predicted elements
(order matters in the lists)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The mean average precision at k over the input lists
|
benchmark/average_precision.py
|
def mapk(actual, predicted, k=10):
"""
Computes the mean average precision at k.
This function computes the mean average precision at k between two lists
of lists of items.
Parameters
----------
actual : list
A list of lists of elements that are to be predicted
(order doesn't matter in the lists)
predicted : list
A list of lists of predicted elements
(order matters in the lists)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The mean average precision at k over the input lists
"""
return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)])
|
def mapk(actual, predicted, k=10):
"""
Computes the mean average precision at k.
This function computes the mean average precision at k between two lists
of lists of items.
Parameters
----------
actual : list
A list of lists of elements that are to be predicted
(order doesn't matter in the lists)
predicted : list
A list of lists of predicted elements
(order matters in the lists)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The mean average precision at k over the input lists
"""
return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)])
|
[
"Computes",
"the",
"mean",
"average",
"precision",
"at",
"k",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/benchmark/average_precision.py#L41-L65
|
[
"def",
"mapk",
"(",
"actual",
",",
"predicted",
",",
"k",
"=",
"10",
")",
":",
"return",
"np",
".",
"mean",
"(",
"[",
"apk",
"(",
"a",
",",
"p",
",",
"k",
")",
"for",
"a",
",",
"p",
"in",
"zip",
"(",
"actual",
",",
"predicted",
")",
"]",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
MinHashLSHForest.add
|
Add a unique key, together
with a MinHash (or weighted MinHash) of the set referenced by the key.
Note:
The key won't be searchable until the
:func:`datasketch.MinHashLSHForest.index` method is called.
Args:
key (hashable): The unique identifier of the set.
minhash (datasketch.MinHash): The MinHash of the set.
|
datasketch/lshforest.py
|
def add(self, key, minhash):
'''
Add a unique key, together
with a MinHash (or weighted MinHash) of the set referenced by the key.
Note:
The key won't be searchable until the
:func:`datasketch.MinHashLSHForest.index` method is called.
Args:
key (hashable): The unique identifier of the set.
minhash (datasketch.MinHash): The MinHash of the set.
'''
if len(minhash) < self.k*self.l:
raise ValueError("The num_perm of MinHash out of range")
if key in self.keys:
raise ValueError("The given key has already been added")
self.keys[key] = [self._H(minhash.hashvalues[start:end])
for start, end in self.hashranges]
for H, hashtable in zip(self.keys[key], self.hashtables):
hashtable[H].append(key)
|
def add(self, key, minhash):
'''
Add a unique key, together
with a MinHash (or weighted MinHash) of the set referenced by the key.
Note:
The key won't be searchable until the
:func:`datasketch.MinHashLSHForest.index` method is called.
Args:
key (hashable): The unique identifier of the set.
minhash (datasketch.MinHash): The MinHash of the set.
'''
if len(minhash) < self.k*self.l:
raise ValueError("The num_perm of MinHash out of range")
if key in self.keys:
raise ValueError("The given key has already been added")
self.keys[key] = [self._H(minhash.hashvalues[start:end])
for start, end in self.hashranges]
for H, hashtable in zip(self.keys[key], self.hashtables):
hashtable[H].append(key)
|
[
"Add",
"a",
"unique",
"key",
"together",
"with",
"a",
"MinHash",
"(",
"or",
"weighted",
"MinHash",
")",
"of",
"the",
"set",
"referenced",
"by",
"the",
"key",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshforest.py#L40-L60
|
[
"def",
"add",
"(",
"self",
",",
"key",
",",
"minhash",
")",
":",
"if",
"len",
"(",
"minhash",
")",
"<",
"self",
".",
"k",
"*",
"self",
".",
"l",
":",
"raise",
"ValueError",
"(",
"\"The num_perm of MinHash out of range\"",
")",
"if",
"key",
"in",
"self",
".",
"keys",
":",
"raise",
"ValueError",
"(",
"\"The given key has already been added\"",
")",
"self",
".",
"keys",
"[",
"key",
"]",
"=",
"[",
"self",
".",
"_H",
"(",
"minhash",
".",
"hashvalues",
"[",
"start",
":",
"end",
"]",
")",
"for",
"start",
",",
"end",
"in",
"self",
".",
"hashranges",
"]",
"for",
"H",
",",
"hashtable",
"in",
"zip",
"(",
"self",
".",
"keys",
"[",
"key",
"]",
",",
"self",
".",
"hashtables",
")",
":",
"hashtable",
"[",
"H",
"]",
".",
"append",
"(",
"key",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
MinHashLSHForest.index
|
Index all the keys added so far and make them searchable.
|
datasketch/lshforest.py
|
def index(self):
'''
Index all the keys added so far and make them searchable.
'''
for i, hashtable in enumerate(self.hashtables):
self.sorted_hashtables[i] = [H for H in hashtable.keys()]
self.sorted_hashtables[i].sort()
|
def index(self):
'''
Index all the keys added so far and make them searchable.
'''
for i, hashtable in enumerate(self.hashtables):
self.sorted_hashtables[i] = [H for H in hashtable.keys()]
self.sorted_hashtables[i].sort()
|
[
"Index",
"all",
"the",
"keys",
"added",
"so",
"far",
"and",
"make",
"them",
"searchable",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshforest.py#L62-L68
|
[
"def",
"index",
"(",
"self",
")",
":",
"for",
"i",
",",
"hashtable",
"in",
"enumerate",
"(",
"self",
".",
"hashtables",
")",
":",
"self",
".",
"sorted_hashtables",
"[",
"i",
"]",
"=",
"[",
"H",
"for",
"H",
"in",
"hashtable",
".",
"keys",
"(",
")",
"]",
"self",
".",
"sorted_hashtables",
"[",
"i",
"]",
".",
"sort",
"(",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
MinHashLSHForest.query
|
Return the approximate top-k keys that have the highest
Jaccard similarities to the query set.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
k (int): The maximum number of keys to return.
Returns:
`list` of at most k keys.
|
datasketch/lshforest.py
|
def query(self, minhash, k):
'''
Return the approximate top-k keys that have the highest
Jaccard similarities to the query set.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
k (int): The maximum number of keys to return.
Returns:
`list` of at most k keys.
'''
if k <= 0:
raise ValueError("k must be positive")
if len(minhash) < self.k*self.l:
raise ValueError("The num_perm of MinHash out of range")
results = set()
r = self.k
while r > 0:
for key in self._query(minhash, r, self.l):
results.add(key)
if len(results) >= k:
return list(results)
r -= 1
return list(results)
|
def query(self, minhash, k):
'''
Return the approximate top-k keys that have the highest
Jaccard similarities to the query set.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
k (int): The maximum number of keys to return.
Returns:
`list` of at most k keys.
'''
if k <= 0:
raise ValueError("k must be positive")
if len(minhash) < self.k*self.l:
raise ValueError("The num_perm of MinHash out of range")
results = set()
r = self.k
while r > 0:
for key in self._query(minhash, r, self.l):
results.add(key)
if len(results) >= k:
return list(results)
r -= 1
return list(results)
|
[
"Return",
"the",
"approximate",
"top",
"-",
"k",
"keys",
"that",
"have",
"the",
"highest",
"Jaccard",
"similarities",
"to",
"the",
"query",
"set",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshforest.py#L87-L111
|
[
"def",
"query",
"(",
"self",
",",
"minhash",
",",
"k",
")",
":",
"if",
"k",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"k must be positive\"",
")",
"if",
"len",
"(",
"minhash",
")",
"<",
"self",
".",
"k",
"*",
"self",
".",
"l",
":",
"raise",
"ValueError",
"(",
"\"The num_perm of MinHash out of range\"",
")",
"results",
"=",
"set",
"(",
")",
"r",
"=",
"self",
".",
"k",
"while",
"r",
">",
"0",
":",
"for",
"key",
"in",
"self",
".",
"_query",
"(",
"minhash",
",",
"r",
",",
"self",
".",
"l",
")",
":",
"results",
".",
"add",
"(",
"key",
")",
"if",
"len",
"(",
"results",
")",
">=",
"k",
":",
"return",
"list",
"(",
"results",
")",
"r",
"-=",
"1",
"return",
"list",
"(",
"results",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
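The add(), index() and query() methods above form a fixed workflow; a minimal sketch using the documented datasketch API (num_perm=128 matches the MinHash default):

from datasketch import MinHash, MinHashLSHForest

def make_minhash(words, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for w in words:
        m.update(w.encode("utf8"))
    return m

forest = MinHashLSHForest(num_perm=128)
forest.add("doc1", make_minhash(["cat", "dog", "fish"]))
forest.add("doc2", make_minhash(["cat", "dog", "bird"]))
forest.index()   # keys are not searchable until this is called
print(forest.query(make_minhash(["cat", "dog"]), k=2))   # approximate top-2 keys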
test
|
MinHashLSHForest._binary_search
|
https://golang.org/src/sort/search.go?s=2247:2287#L49
|
datasketch/lshforest.py
|
def _binary_search(self, n, func):
'''
https://golang.org/src/sort/search.go?s=2247:2287#L49
'''
i, j = 0, n
while i < j:
h = int(i + (j - i) / 2)
if not func(h):
i = h + 1
else:
j = h
return i
|
def _binary_search(self, n, func):
'''
https://golang.org/src/sort/search.go?s=2247:2287#L49
'''
i, j = 0, n
while i < j:
h = int(i + (j - i) / 2)
if not func(h):
i = h + 1
else:
j = h
return i
|
[
"https",
":",
"//",
"golang",
".",
"org",
"/",
"src",
"/",
"sort",
"/",
"search",
".",
"go?s",
"=",
"2247",
":",
"2287#L49"
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/lshforest.py#L113-L124
|
[
"def",
"_binary_search",
"(",
"self",
",",
"n",
",",
"func",
")",
":",
"i",
",",
"j",
"=",
"0",
",",
"n",
"while",
"i",
"<",
"j",
":",
"h",
"=",
"int",
"(",
"i",
"+",
"(",
"j",
"-",
"i",
")",
"/",
"2",
")",
"if",
"not",
"func",
"(",
"h",
")",
":",
"i",
"=",
"h",
"+",
"1",
"else",
":",
"j",
"=",
"h",
"return",
"i"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
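_binary_search() mirrors Go's sort.Search: given a predicate that is false then true over [0, n), it returns the smallest index where the predicate holds (or n if it never does). A standalone sketch of the same invariant:

def binary_search(n, func):
    i, j = 0, n
    while i < j:
        h = int(i + (j - i) / 2)   # midpoint; the i + (j-i)/2 form avoids overflow in other languages
        if not func(h):
            i = h + 1              # answer lies strictly right of h
        else:
            j = h                  # h itself may be the answer
    return i

data = [1, 3, 5, 7, 9]
print(binary_search(len(data), lambda i: data[i] >= 6))   # 3 (first value >= 6)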
test
|
AsyncMinHashLSH.close
|
Cleanup client resources and disconnect from AsyncMinHashLSH storage.
|
datasketch/experimental/aio/lsh.py
|
async def close(self):
"""
Cleanup client resources and disconnect from AsyncMinHashLSH storage.
"""
async with self._lock:
for t in self.hashtables:
await t.close()
if self.keys is not None:
await self.keys.close()
self._initialized = False
|
async def close(self):
"""
Cleanup client resources and disconnect from AsyncMinHashLSH storage.
"""
async with self._lock:
for t in self.hashtables:
await t.close()
if self.keys is not None:
await self.keys.close()
self._initialized = False
|
[
"Cleanup",
"client",
"resources",
"and",
"disconnect",
"from",
"AsyncMinHashLSH",
"storage",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/experimental/aio/lsh.py#L167-L178
|
[
"async",
"def",
"close",
"(",
"self",
")",
":",
"async",
"with",
"self",
".",
"_lock",
":",
"for",
"t",
"in",
"self",
".",
"hashtables",
":",
"await",
"t",
".",
"close",
"(",
")",
"if",
"self",
".",
"keys",
"is",
"not",
"None",
":",
"await",
"self",
".",
"keys",
".",
"close",
"(",
")",
"self",
".",
"_initialized",
"=",
"False"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
AsyncMinHashLSH.query
|
see :class:`datasketch.MinHashLSH`.
|
datasketch/experimental/aio/lsh.py
|
async def query(self, minhash):
"""
see :class:`datasketch.MinHashLSH`.
"""
if len(minhash) != self.h:
raise ValueError("Expecting minhash with length %d, "
"got %d" % (self.h, len(minhash)))
fs = (hashtable.get(self._H(minhash.hashvalues[start:end]))
for (start, end), hashtable in zip(self.hashranges, self.hashtables))
candidates = frozenset(chain.from_iterable(await asyncio.gather(*fs)))
return list(candidates)
|
async def query(self, minhash):
"""
see :class:`datasketch.MinHashLSH`.
"""
if len(minhash) != self.h:
raise ValueError("Expecting minhash with length %d, "
"got %d" % (self.h, len(minhash)))
fs = (hashtable.get(self._H(minhash.hashvalues[start:end]))
for (start, end), hashtable in zip(self.hashranges, self.hashtables))
candidates = frozenset(chain.from_iterable(await asyncio.gather(*fs)))
return list(candidates)
|
[
"see",
":",
"class",
":",
"datasketch",
".",
"MinHashLSH",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/experimental/aio/lsh.py#L275-L287
|
[
"async",
"def",
"query",
"(",
"self",
",",
"minhash",
")",
":",
"if",
"len",
"(",
"minhash",
")",
"!=",
"self",
".",
"h",
":",
"raise",
"ValueError",
"(",
"\"Expecting minhash with length %d, \"",
"\"got %d\"",
"%",
"(",
"self",
".",
"h",
",",
"len",
"(",
"minhash",
")",
")",
")",
"fs",
"=",
"(",
"hashtable",
".",
"get",
"(",
"self",
".",
"_H",
"(",
"minhash",
".",
"hashvalues",
"[",
"start",
":",
"end",
"]",
")",
")",
"for",
"(",
"start",
",",
"end",
")",
",",
"hashtable",
"in",
"zip",
"(",
"self",
".",
"hashranges",
",",
"self",
".",
"hashtables",
")",
")",
"candidates",
"=",
"frozenset",
"(",
"chain",
".",
"from_iterable",
"(",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"fs",
")",
")",
")",
"return",
"list",
"(",
"candidates",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
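AsyncMinHashLSH.query() fans one lookup per hash table out through asyncio.gather and flattens the per-table hits with chain.from_iterable. A self-contained sketch of that pattern (the dict-backed tables here are illustrative stand-ins for the storage layer):

import asyncio
from itertools import chain

async def lookup(table, key):
    return table.get(key, [])       # stand-in for hashtable.get(...)

async def main():
    tables = [{"h": ["a", "b"]}, {"h": ["b", "c"]}]
    fs = (lookup(t, "h") for t in tables)        # one coroutine per table
    hits = await asyncio.gather(*fs)             # run the lookups concurrently
    print(list(frozenset(chain.from_iterable(hits))))   # deduplicated candidates

asyncio.run(main())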
test
|
AsyncMinHashLSH.get_counts
|
see :class:`datasketch.MinHashLSH`.
|
datasketch/experimental/aio/lsh.py
|
async def get_counts(self):
"""
see :class:`datasketch.MinHashLSH`.
"""
fs = (hashtable.itemcounts() for hashtable in self.hashtables)
return await asyncio.gather(*fs)
|
async def get_counts(self):
"""
see :class:`datasketch.MinHashLSH`.
"""
fs = (hashtable.itemcounts() for hashtable in self.hashtables)
return await asyncio.gather(*fs)
|
[
"see",
":",
"class",
":",
"datasketch",
".",
"MinHashLSH",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/experimental/aio/lsh.py#L341-L346
|
[
"async",
"def",
"get_counts",
"(",
"self",
")",
":",
"fs",
"=",
"(",
"hashtable",
".",
"itemcounts",
"(",
")",
"for",
"hashtable",
"in",
"self",
".",
"hashtables",
")",
"return",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"fs",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
ordered_storage
|
Return ordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(list)``. Thus, the return value of this method contains
keys and values. The values are ordered lists with the last added
item at the end.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
|
datasketch/storage.py
|
def ordered_storage(config, name=None):
'''Return ordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(list)``. Thus, the return value of this method contains
keys and values. The values are ordered lists with the last added
item at the end.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
'''
tp = config['type']
if tp == 'dict':
return DictListStorage(config)
if tp == 'redis':
return RedisListStorage(config, name=name)
|
def ordered_storage(config, name=None):
'''Return ordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(list)``. Thus, the return value of this method contains
keys and values. The values are ordered lists with the last added
item at the end.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
'''
tp = config['type']
if tp == 'dict':
return DictListStorage(config)
if tp == 'redis':
return RedisListStorage(config, name=name)
|
[
"Return",
"ordered",
"storage",
"system",
"based",
"on",
"the",
"specified",
"config",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/storage.py#L13-L46
|
[
"def",
"ordered_storage",
"(",
"config",
",",
"name",
"=",
"None",
")",
":",
"tp",
"=",
"config",
"[",
"'type'",
"]",
"if",
"tp",
"==",
"'dict'",
":",
"return",
"DictListStorage",
"(",
"config",
")",
"if",
"tp",
"==",
"'redis'",
":",
"return",
"RedisListStorage",
"(",
"config",
",",
"name",
"=",
"name",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
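Both configurations documented above, in one sketch; the Redis variant needs a live server and is left commented out:

storage = ordered_storage({'type': 'dict'})   # in-memory, defaultdict(list)-backed
# Redis-backed; keys in the database are prefixed with `name`:
# storage = ordered_storage(
#     {'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}},
#     name=b'my_lsh')
print(storage.itemcounts())   # {} while empty (see itemcounts further below)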
test
|
unordered_storage
|
Return an unordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(set)``. Thus, the return value of this method contains
keys and values. The values are unordered sets.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
|
datasketch/storage.py
|
def unordered_storage(config, name=None):
'''Return an unordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(set)``. Thus, the return value of this method contains
keys and values. The values are unordered sets.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
'''
tp = config['type']
if tp == 'dict':
return DictSetStorage(config)
if tp == 'redis':
return RedisSetStorage(config, name=name)
|
def unordered_storage(config, name=None):
'''Return an unordered storage system based on the specified config.
The canonical example of such a storage container is
``defaultdict(set)``. Thus, the return value of this method contains
keys and values. The values are unordered sets.
Args:
config (dict): Defines the configurations for the storage.
For in-memory storage, the config ``{'type': 'dict'}`` will
suffice. For Redis storage, the type should be ``'redis'`` and
the configurations for the Redis database should be supplied
under the key ``'redis'``. These parameters should be in a form
suitable for `redis.Redis`. The parameters may alternatively
contain references to environment variables, in which case
literal configuration values should be replaced by dicts of
the form::
{'env': 'REDIS_HOSTNAME',
'default': 'localhost'}
For a full example, see :ref:`minhash_lsh_at_scale`
name (bytes, optional): A reference name for this storage container.
For dict-type containers, this is ignored. For Redis containers,
this name is used to prefix keys pertaining to this storage
container within the database.
'''
tp = config['type']
if tp == 'dict':
return DictSetStorage(config)
if tp == 'redis':
return RedisSetStorage(config, name=name)
|
[
"Return",
"an",
"unordered",
"storage",
"system",
"based",
"on",
"the",
"specified",
"config",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/storage.py#L49-L81
|
[
"def",
"unordered_storage",
"(",
"config",
",",
"name",
"=",
"None",
")",
":",
"tp",
"=",
"config",
"[",
"'type'",
"]",
"if",
"tp",
"==",
"'dict'",
":",
"return",
"DictSetStorage",
"(",
"config",
")",
"if",
"tp",
"==",
"'redis'",
":",
"return",
"RedisSetStorage",
"(",
"config",
",",
"name",
"=",
"name",
")"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
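unordered_storage() accepts the same config shape; this sketch shows the documented environment-variable form, which defers the hostname lookup to runtime:

config = {
    'type': 'redis',
    'redis': {
        'host': {'env': 'REDIS_HOSTNAME', 'default': 'localhost'},
        'port': 6379,
    },
}
# storage = unordered_storage(config, name=b'my_lsh')   # needs a live Redis server
in_memory = unordered_storage({'type': 'dict'})          # defaultdict(set)-backed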
test
|
DictListStorage.itemcounts
|
Returns a dict where the keys are the keys of the container.
The values are the *lengths* of the value sequences stored
in this container.
|
datasketch/storage.py
|
def itemcounts(self, **kwargs):
'''Returns a dict where the keys are the keys of the container.
The values are the *lengths* of the value sequences stored
in this container.
'''
return {k: len(v) for k, v in self._dict.items()}
|
def itemcounts(self, **kwargs):
'''Returns a dict where the keys are the keys of the container.
The values are the *lengths* of the value sequences stored
in this container.
'''
return {k: len(v) for k, v in self._dict.items()}
|
[
"Returns",
"a",
"dict",
"where",
"the",
"keys",
"are",
"the",
"keys",
"of",
"the",
"container",
".",
"The",
"values",
"are",
"the",
"*",
"lengths",
"*",
"of",
"the",
"value",
"sequences",
"stored",
"in",
"this",
"container",
"."
] |
ekzhu/datasketch
|
python
|
https://github.com/ekzhu/datasketch/blob/b3e4129987890a2beb04f2c0b6dc618ae35f2e14/datasketch/storage.py#L191-L196
|
[
"def",
"itemcounts",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"{",
"k",
":",
"len",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_dict",
".",
"items",
"(",
")",
"}"
] |
b3e4129987890a2beb04f2c0b6dc618ae35f2e14
|
test
|
TwitterLoginSerializer.get_social_login
|
:param adapter: allauth.socialaccount Adapter subclass.
Usually OAuthAdapter or Auth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
:param response: Provider's response for OAuth1. Not used in the
:returns: A populated instance of the
`allauth.socialaccount.SocialLoginView` instance
|
rest_auth/social_serializers.py
|
def get_social_login(self, adapter, app, token, response):
"""
:param adapter: allauth.socialaccount Adapter subclass.
Usually OAuthAdapter or Auth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
:param response: Provider's response for OAuth1. Not used in the
:returns: A populated instance of the
`allauth.socialaccount.SocialLoginView` instance
"""
request = self._get_request()
social_login = adapter.complete_login(request, app, token,
response=response)
social_login.token = token
return social_login
|
def get_social_login(self, adapter, app, token, response):
"""
:param adapter: allauth.socialaccount Adapter subclass.
Usually OAuthAdapter or Auth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
:param response: Provider's response for OAuth1. Not used in the
:returns: A populated instance of the
`allauth.socialaccount.SocialLoginView` instance
"""
request = self._get_request()
social_login = adapter.complete_login(request, app, token,
response=response)
social_login.token = token
return social_login
|
[
":",
"param",
"adapter",
":",
"allauth",
".",
"socialaccount",
"Adapter",
"subclass",
".",
"Usually",
"OAuthAdapter",
"or",
"Auth2Adapter",
":",
"param",
"app",
":",
"allauth",
".",
"socialaccount",
".",
"SocialApp",
"instance",
":",
"param",
"token",
":",
"allauth",
".",
"socialaccount",
".",
"SocialToken",
"instance",
":",
"param",
"response",
":",
"Provider",
"s",
"response",
"for",
"OAuth1",
".",
"Not",
"used",
"in",
"the",
":",
"returns",
":",
"A",
"populated",
"instance",
"of",
"the",
"allauth",
".",
"socialaccount",
".",
"SocialLoginView",
"instance"
] |
Tivix/django-rest-auth
|
python
|
https://github.com/Tivix/django-rest-auth/blob/624ad01afbc86fa15b4e652406f3bdcd01f36e00/rest_auth/social_serializers.py#L24-L38
|
[
"def",
"get_social_login",
"(",
"self",
",",
"adapter",
",",
"app",
",",
"token",
",",
"response",
")",
":",
"request",
"=",
"self",
".",
"_get_request",
"(",
")",
"social_login",
"=",
"adapter",
".",
"complete_login",
"(",
"request",
",",
"app",
",",
"token",
",",
"response",
"=",
"response",
")",
"social_login",
".",
"token",
"=",
"token",
"return",
"social_login"
] |
624ad01afbc86fa15b4e652406f3bdcd01f36e00
|
test
|
JWTSerializer.get_user
|
Required to allow using custom USER_DETAILS_SERIALIZER in
JWTSerializer. Defining it here to avoid circular imports
|
rest_auth/serializers.py
|
def get_user(self, obj):
"""
Required to allow using custom USER_DETAILS_SERIALIZER in
JWTSerializer. Defining it here to avoid circular imports
"""
rest_auth_serializers = getattr(settings, 'REST_AUTH_SERIALIZERS', {})
JWTUserDetailsSerializer = import_callable(
rest_auth_serializers.get('USER_DETAILS_SERIALIZER', UserDetailsSerializer)
)
user_data = JWTUserDetailsSerializer(obj['user'], context=self.context).data
return user_data
|
def get_user(self, obj):
"""
Required to allow using custom USER_DETAILS_SERIALIZER in
JWTSerializer. Defining it here to avoid circular imports
"""
rest_auth_serializers = getattr(settings, 'REST_AUTH_SERIALIZERS', {})
JWTUserDetailsSerializer = import_callable(
rest_auth_serializers.get('USER_DETAILS_SERIALIZER', UserDetailsSerializer)
)
user_data = JWTUserDetailsSerializer(obj['user'], context=self.context).data
return user_data
|
[
"Required",
"to",
"allow",
"using",
"custom",
"USER_DETAILS_SERIALIZER",
"in",
"JWTSerializer",
".",
"Defining",
"it",
"here",
"to",
"avoid",
"circular",
"imports"
] |
Tivix/django-rest-auth
|
python
|
https://github.com/Tivix/django-rest-auth/blob/624ad01afbc86fa15b4e652406f3bdcd01f36e00/rest_auth/serializers.py#L143-L153
|
[
"def",
"get_user",
"(",
"self",
",",
"obj",
")",
":",
"rest_auth_serializers",
"=",
"getattr",
"(",
"settings",
",",
"'REST_AUTH_SERIALIZERS'",
",",
"{",
"}",
")",
"JWTUserDetailsSerializer",
"=",
"import_callable",
"(",
"rest_auth_serializers",
".",
"get",
"(",
"'USER_DETAILS_SERIALIZER'",
",",
"UserDetailsSerializer",
")",
")",
"user_data",
"=",
"JWTUserDetailsSerializer",
"(",
"obj",
"[",
"'user'",
"]",
",",
"context",
"=",
"self",
".",
"context",
")",
".",
"data",
"return",
"user_data"
] |
624ad01afbc86fa15b4e652406f3bdcd01f36e00
|
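get_user() consults the optional REST_AUTH_SERIALIZERS setting; a hedged sketch of the corresponding Django settings.py entry (the dotted path is a hypothetical example, not part of django-rest-auth):

# settings.py
REST_AUTH_SERIALIZERS = {
    # hypothetical custom serializer path, for illustration only
    'USER_DETAILS_SERIALIZER': 'myapp.serializers.MyUserDetailsSerializer',
}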
test
|
SocialConnectMixin.get_social_login
|
Set the social login process state to connect rather than login
Refer to the implementation of get_social_login in base class and to the
allauth.socialaccount.helpers module complete_social_login function.
|
rest_auth/registration/serializers.py
|
def get_social_login(self, *args, **kwargs):
"""
Set the social login process state to connect rather than login
Refer to the implementation of get_social_login in base class and to the
allauth.socialaccount.helpers module complete_social_login function.
"""
social_login = super(SocialConnectMixin, self).get_social_login(*args, **kwargs)
social_login.state['process'] = AuthProcess.CONNECT
return social_login
|
def get_social_login(self, *args, **kwargs):
"""
Set the social login process state to connect rather than login
Refer to the implementation of get_social_login in base class and to the
allauth.socialaccount.helpers module complete_social_login function.
"""
social_login = super(SocialConnectMixin, self).get_social_login(*args, **kwargs)
social_login.state['process'] = AuthProcess.CONNECT
return social_login
|
[
"Set",
"the",
"social",
"login",
"process",
"state",
"to",
"connect",
"rather",
"than",
"login",
"Refer",
"to",
"the",
"implementation",
"of",
"get_social_login",
"in",
"base",
"class",
"and",
"to",
"the",
"allauth",
".",
"socialaccount",
".",
"helpers",
"module",
"complete_social_login",
"function",
"."
] |
Tivix/django-rest-auth
|
python
|
https://github.com/Tivix/django-rest-auth/blob/624ad01afbc86fa15b4e652406f3bdcd01f36e00/rest_auth/registration/serializers.py#L151-L159
|
[
"def",
"get_social_login",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"social_login",
"=",
"super",
"(",
"SocialConnectMixin",
",",
"self",
")",
".",
"get_social_login",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"social_login",
".",
"state",
"[",
"'process'",
"]",
"=",
"AuthProcess",
".",
"CONNECT",
"return",
"social_login"
] |
624ad01afbc86fa15b4e652406f3bdcd01f36e00
|
test
|
select_text
|
Select the correct text from the Japanese number, reading and
alternatives
|
num2words/lang_JA.py
|
def select_text(text, reading=False, prefer=None):
"""Select the correct text from the Japanese number, reading and
alternatives"""
# select kanji number or kana reading
if reading:
text = text[1]
else:
text = text[0]
# select the preferred one or the first one from multiple alternatives
if not isinstance(text, strtype):
common = set(text) & set(prefer or set())
if len(common) == 1:
text = common.pop()
else:
text = text[0]
return text
|
def select_text(text, reading=False, prefer=None):
"""Select the correct text from the Japanese number, reading and
alternatives"""
# select kanji number or kana reading
if reading:
text = text[1]
else:
text = text[0]
# select the preferred one or the first one from multiple alternatives
if not isinstance(text, strtype):
common = set(text) & set(prefer or set())
if len(common) == 1:
text = common.pop()
else:
text = text[0]
return text
|
[
"Select",
"the",
"correct",
"text",
"from",
"the",
"Japanese",
"number",
"reading",
"and",
"alternatives"
] |
savoirfairelinux/num2words
|
python
|
https://github.com/savoirfairelinux/num2words/blob/f4b2bac098ae8e4850cf2f185f6ff52a5979641f/num2words/lang_JA.py#L25-L42
|
[
"def",
"select_text",
"(",
"text",
",",
"reading",
"=",
"False",
",",
"prefer",
"=",
"None",
")",
":",
"# select kanji number or kana reading",
"if",
"reading",
":",
"text",
"=",
"text",
"[",
"1",
"]",
"else",
":",
"text",
"=",
"text",
"[",
"0",
"]",
"# select the preferred one or the first one from multiple alternatives",
"if",
"not",
"isinstance",
"(",
"text",
",",
"strtype",
")",
":",
"common",
"=",
"set",
"(",
"text",
")",
"&",
"set",
"(",
"prefer",
"or",
"set",
"(",
")",
")",
"if",
"len",
"(",
"common",
")",
"==",
"1",
":",
"text",
"=",
"common",
".",
"pop",
"(",
")",
"else",
":",
"text",
"=",
"text",
"[",
"0",
"]",
"return",
"text"
] |
f4b2bac098ae8e4850cf2f185f6ff52a5979641f
|
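Two calls traced through select_text(), assuming strtype is the module's str alias and that an entry pairs a kanji form with a tuple of readings (the sample entry is illustrative, not taken from num2words data):

entry = ("四", ("し", "よん"))   # (kanji, (readings, ...)) -- illustrative shape
print(select_text(entry, reading=False))                  # 四 (the kanji form)
print(select_text(entry, reading=True, prefer={"よん"}))   # よん (the preferred reading)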
test
|
rendaku_merge_pairs
|
Merge lpair < rpair while applying semi-irregular rendaku rules
|
num2words/lang_JA.py
|
def rendaku_merge_pairs(lpair, rpair):
"""Merge lpair < rpair while applying semi-irregular rendaku rules"""
ltext, lnum = lpair
rtext, rnum = rpair
if lnum > rnum:
raise ValueError
if rpair == ("ひゃく", 100):
if lpair == ("さん", 3):
rtext = "びゃく"
elif lpair == ("ろく", 6):
ltext = "ろっ"
rtext = "ぴゃく"
elif lpair == ("はち", 8):
ltext = "はっ"
rtext = "ぴゃく"
elif rpair == ("せん", 1000):
if lpair == ("さん", 3):
rtext = "ぜん"
elif lpair == ("はち", 8):
ltext = "はっ"
elif rpair == ("ちょう", 10**12):
if lpair == ("いち", 1):
ltext = "いっ"
elif lpair == ("はち", 8):
ltext = "はっ"
elif lpair == ("じゅう", 10):
ltext = "じゅっ"
elif rpair == ("けい", 10**16):
if lpair == ("いち", 1):
ltext = "いっ"
elif lpair == ("ろく", 6):
ltext = "ろっ"
elif lpair == ("はち", 8):
ltext = "はっ"
elif lpair == ("じゅう", 10):
ltext = "じゅっ"
elif lpair == ("ひゃく", 100):
ltext = "ひゃっ"
return ("%s%s" % (ltext, rtext), lnum * rnum)
|
def rendaku_merge_pairs(lpair, rpair):
"""Merge lpair < rpair while applying semi-irregular rendaku rules"""
ltext, lnum = lpair
rtext, rnum = rpair
if lnum > rnum:
raise ValueError
if rpair == ("ひゃく", 100):
if lpair == ("さん", 3):
rtext = "びゃく"
elif lpair == ("ろく", 6):
ltext = "ろっ"
rtext = "ぴゃく"
elif lpair == ("はち", 8):
ltext = "はっ"
rtext = "ぴゃく"
elif rpair == ("せん", 1000):
if lpair == ("さん", 3):
rtext = "ぜん"
elif lpair == ("はち", 8):
ltext = "はっ"
elif rpair == ("ちょう", 10**12):
if lpair == ("いち", 1):
ltext = "いっ"
elif lpair == ("はち", 8):
ltext = "はっ"
elif lpair == ("じゅう", 10):
ltext = "じゅっ"
elif rpair == ("けい", 10**16):
if lpair == ("いち", 1):
ltext = "いっ"
elif lpair == ("ろく", 6):
ltext = "ろっ"
elif lpair == ("はち", 8):
ltext = "はっ"
elif lpair == ("じゅう", 10):
ltext = "じゅっ"
elif lpair == ("ひゃく", 100):
ltext = "ひゃっ"
return ("%s%s" % (ltext, rtext), lnum * rnum)
|
[
"Merge",
"lpair",
"<",
"rpair",
"while",
"applying",
"semi",
"-",
"irregular",
"rendaku",
"rules"
] |
savoirfairelinux/num2words
|
python
|
https://github.com/savoirfairelinux/num2words/blob/f4b2bac098ae8e4850cf2f185f6ff52a5979641f/num2words/lang_JA.py#L45-L85
|
[
"def",
"rendaku_merge_pairs",
"(",
"lpair",
",",
"rpair",
")",
":",
"ltext",
",",
"lnum",
"=",
"lpair",
"rtext",
",",
"rnum",
"=",
"rpair",
"if",
"lnum",
">",
"rnum",
":",
"raise",
"ValueError",
"if",
"rpair",
"==",
"(",
"\"ひゃく\", 100)",
":",
"",
"",
"",
"if",
"lpair",
"==",
"(",
"\"さん\", 3)",
":",
"",
"",
"",
"rtext",
"=",
"\"びゃく\"",
"elif",
"lpair",
"==",
"(",
"\"ろく\", 6)",
":",
"",
"",
"",
"ltext",
"=",
"\"ろっ\"",
"rtext",
"=",
"\"ぴゃく\"",
"elif",
"lpair",
"==",
"(",
"\"はち\", 8)",
":",
"",
"",
"",
"ltext",
"=",
"\"はっ\"",
"rtext",
"=",
"\"ぴゃく\"",
"elif",
"rpair",
"==",
"(",
"\"せん\", 10",
"0",
"):",
"",
"",
"if",
"lpair",
"==",
"(",
"\"さん\", 3)",
":",
"",
"",
"",
"rtext",
"=",
"\"ぜん\"",
"elif",
"lpair",
"==",
"(",
"\"はち\", 8)",
":",
"",
"",
"",
"ltext",
"=",
"\"はっ\"",
"elif",
"rpair",
"==",
"(",
"\"ちょう\", 10**",
"1",
"):",
"",
"",
"",
"",
"if",
"lpair",
"==",
"(",
"\"いち\", 1)",
":",
"",
"",
"",
"ltext",
"=",
"\"いっ\"",
"elif",
"lpair",
"==",
"(",
"\"はち\", 8)",
":",
"",
"",
"",
"ltext",
"=",
"\"はっ\"",
"elif",
"lpair",
"==",
"(",
"\"じゅう\", 10):",
"",
"",
"",
"",
"ltext",
"=",
"\"じゅっ\"",
"elif",
"rpair",
"==",
"(",
"\"けい\", 10",
"*",
"16",
"):",
"",
"",
"",
"if",
"lpair",
"==",
"(",
"\"いち\", 1)",
":",
"",
"",
"",
"ltext",
"=",
"\"いっ\"",
"elif",
"lpair",
"==",
"(",
"\"ろく\", 6)",
":",
"",
"",
"",
"ltext",
"=",
"\"ろっ\"",
"elif",
"lpair",
"==",
"(",
"\"はち\", 8)",
":",
"",
"",
"",
"ltext",
"=",
"\"はっ\"",
"elif",
"lpair",
"==",
"(",
"\"じゅう\", 10):",
"",
"",
"",
"",
"ltext",
"=",
"\"じゅっ\"",
"elif",
"lpair",
"==",
"(",
"\"ひゃく\", 100)",
":",
"",
"",
"",
"ltext",
"=",
"\"ひゃっ\"",
"return",
"(",
"\"%s%s\"",
"%",
"(",
"ltext",
",",
"rtext",
")",
",",
"lnum",
"*",
"rnum",
")"
] |
f4b2bac098ae8e4850cf2f185f6ff52a5979641f
|
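Two calls traced through the branches above, showing a plain merge with consonant voicing and one that also rewrites the left element:

print(rendaku_merge_pairs(("さん", 3), ("ひゃく", 100)))   # ('さんびゃく', 300)
print(rendaku_merge_pairs(("ろく", 6), ("ひゃく", 100)))   # ('ろっぴゃく', 600)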
test
|
Num2Word_ID.split_by_3
|
starting here, it groups the number by three from the tail
'1234567' -> (('1',),('234',),('567',))
:param number:str
:rtype:tuple
|
num2words/lang_ID.py
|
def split_by_3(self, number):
"""
starting here, it groups the number by three from the tail
'1234567' -> (('1',),('234',),('567',))
:param number:str
:rtype:tuple
"""
blocks = ()
length = len(number)
if length < 3:
blocks += ((number,),)
else:
len_of_first_block = length % 3
if len_of_first_block > 0:
first_block = number[0:len_of_first_block],
blocks += first_block,
for i in range(len_of_first_block, length, 3):
next_block = (number[i:i + 3],),
blocks += next_block
return blocks
|
def split_by_3(self, number):
"""
starting here, it groups the number by three from the tail
'1234567' -> (('1',),('234',),('567',))
:param number:str
:rtype:tuple
"""
blocks = ()
length = len(number)
if length < 3:
blocks += ((number,),)
else:
len_of_first_block = length % 3
if len_of_first_block > 0:
first_block = number[0:len_of_first_block],
blocks += first_block,
for i in range(len_of_first_block, length, 3):
next_block = (number[i:i + 3],),
blocks += next_block
return blocks
|
[
"starting",
"here",
"it",
"groups",
"the",
"number",
"by",
"three",
"from",
"the",
"tail",
"1234567",
"-",
">",
"((",
"1",
")",
"(",
"234",
")",
"(",
"567",
"))",
":",
"param",
"number",
":",
"str",
":",
"rtype",
":",
"tuple"
] |
savoirfairelinux/num2words
|
python
|
https://github.com/savoirfairelinux/num2words/blob/f4b2bac098ae8e4850cf2f185f6ff52a5979641f/num2words/lang_ID.py#L53-L76
|
[
"def",
"split_by_3",
"(",
"self",
",",
"number",
")",
":",
"blocks",
"=",
"(",
")",
"length",
"=",
"len",
"(",
"number",
")",
"if",
"length",
"<",
"3",
":",
"blocks",
"+=",
"(",
"(",
"number",
",",
")",
",",
")",
"else",
":",
"len_of_first_block",
"=",
"length",
"%",
"3",
"if",
"len_of_first_block",
">",
"0",
":",
"first_block",
"=",
"number",
"[",
"0",
":",
"len_of_first_block",
"]",
",",
"blocks",
"+=",
"first_block",
",",
"for",
"i",
"in",
"range",
"(",
"len_of_first_block",
",",
"length",
",",
"3",
")",
":",
"next_block",
"=",
"(",
"number",
"[",
"i",
":",
"i",
"+",
"3",
"]",
",",
")",
",",
"blocks",
"+=",
"next_block",
"return",
"blocks"
] |
f4b2bac098ae8e4850cf2f185f6ff52a5979641f
|
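The grouping logic above never touches self, so it can be sketched as a standalone function and checked against the docstring's own example:

def split_by_3(number):
    blocks = ()
    length = len(number)
    if length < 3:
        blocks += ((number,),)
    else:
        head = length % 3            # size of the leading partial block
        if head > 0:
            blocks += (number[0:head],),
        for i in range(head, length, 3):
            blocks += (number[i:i + 3],),
    return blocks

print(split_by_3('1234567'))   # (('1',), ('234',), ('567',))
print(split_by_3('34'))        # (('34',),)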
test
|
Num2Word_ID.spell
|
it adds the list of spelling to the blocks
(
('1',),('034',)) -> (('1',['satu']),('034',['tiga', 'puluh', 'empat'])
)
:param blocks: tuple
:rtype: tuple
|
num2words/lang_ID.py
|
def spell(self, blocks):
"""
it adds the list of spelling to the blocks
(
('1',),('034',)) -> (('1',['satu']),('034',['tiga', 'puluh', 'empat'])
)
:param blocks: tuple
:rtype: tuple
"""
word_blocks = ()
first_block = blocks[0]
if len(first_block[0]) == 1:
if first_block[0] == '0':
spelling = ['nol']
else:
spelling = self.BASE[int(first_block[0])]
elif len(first_block[0]) == 2:
spelling = self.puluh(first_block[0])
else:
spelling = (
self.ratus(first_block[0][0]) + self.puluh(first_block[0][1:3])
)
word_blocks += (first_block[0], spelling),
for block in blocks[1:]:
spelling = self.ratus(block[0][0]) + self.puluh(block[0][1:3])
block += spelling,
word_blocks += block,
return word_blocks
|
def spell(self, blocks):
"""
it adds the list of spelling to the blocks
(
('1',),('034',)) -> (('1',['satu']),('034',['tiga', 'puluh', 'empat'])
)
:param blocks: tuple
:rtype: tuple
"""
word_blocks = ()
first_block = blocks[0]
if len(first_block[0]) == 1:
if first_block[0] == '0':
spelling = ['nol']
else:
spelling = self.BASE[int(first_block[0])]
elif len(first_block[0]) == 2:
spelling = self.puluh(first_block[0])
else:
spelling = (
self.ratus(first_block[0][0]) + self.puluh(first_block[0][1:3])
)
word_blocks += (first_block[0], spelling),
for block in blocks[1:]:
spelling = self.ratus(block[0][0]) + self.puluh(block[0][1:3])
block += spelling,
word_blocks += block,
return word_blocks
|
[
"it",
"adds",
"the",
"list",
"of",
"spelling",
"to",
"the",
"blocks",
"(",
"(",
"1",
")",
"(",
"034",
"))",
"-",
">",
"((",
"1",
"[",
"satu",
"]",
")",
"(",
"234",
"[",
"tiga",
"puluh",
"empat",
"]",
")",
")",
":",
"param",
"blocks",
":",
"tuple",
":",
"rtype",
":",
"tuple"
] |
savoirfairelinux/num2words
|
python
|
https://github.com/savoirfairelinux/num2words/blob/f4b2bac098ae8e4850cf2f185f6ff52a5979641f/num2words/lang_ID.py#L78-L108
|
[
"def",
"spell",
"(",
"self",
",",
"blocks",
")",
":",
"word_blocks",
"=",
"(",
")",
"first_block",
"=",
"blocks",
"[",
"0",
"]",
"if",
"len",
"(",
"first_block",
"[",
"0",
"]",
")",
"==",
"1",
":",
"if",
"first_block",
"[",
"0",
"]",
"==",
"'0'",
":",
"spelling",
"=",
"[",
"'nol'",
"]",
"else",
":",
"spelling",
"=",
"self",
".",
"BASE",
"[",
"int",
"(",
"first_block",
"[",
"0",
"]",
")",
"]",
"elif",
"len",
"(",
"first_block",
"[",
"0",
"]",
")",
"==",
"2",
":",
"spelling",
"=",
"self",
".",
"puluh",
"(",
"first_block",
"[",
"0",
"]",
")",
"else",
":",
"spelling",
"=",
"(",
"self",
".",
"ratus",
"(",
"first_block",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"+",
"self",
".",
"puluh",
"(",
"first_block",
"[",
"0",
"]",
"[",
"1",
":",
"3",
"]",
")",
")",
"word_blocks",
"+=",
"(",
"first_block",
"[",
"0",
"]",
",",
"spelling",
")",
",",
"for",
"block",
"in",
"blocks",
"[",
"1",
":",
"]",
":",
"spelling",
"=",
"self",
".",
"ratus",
"(",
"block",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"+",
"self",
".",
"puluh",
"(",
"block",
"[",
"0",
"]",
"[",
"1",
":",
"3",
"]",
")",
"block",
"+=",
"spelling",
",",
"word_blocks",
"+=",
"block",
",",
"return",
"word_blocks"
] |
f4b2bac098ae8e4850cf2f185f6ff52a5979641f
|
test
|
Num2Word_ID.join
|
join the words by first join lists in the tuple
:param word_blocks: tuple
:rtype: str
|
num2words/lang_ID.py
|
def join(self, word_blocks, float_part):
"""
join the words by first join lists in the tuple
:param word_blocks: tuple
:rtype: str
"""
word_list = []
length = len(word_blocks) - 1
first_block = word_blocks[0],
start = 0
if length == 1 and first_block[0][0] == '1':
word_list += ['seribu']
start = 1
for i in range(start, length + 1, 1):
word_list += word_blocks[i][1]
if not word_blocks[i][1]:
continue
if i == length:
break
word_list += [self.TENS_TO[(length - i) * 3]]
return ' '.join(word_list) + float_part
|
def join(self, word_blocks, float_part):
"""
join the words by first join lists in the tuple
:param word_blocks: tuple
:rtype: str
"""
word_list = []
length = len(word_blocks) - 1
first_block = word_blocks[0],
start = 0
if length == 1 and first_block[0][0] == '1':
word_list += ['seribu']
start = 1
for i in range(start, length + 1, 1):
word_list += word_blocks[i][1]
if not word_blocks[i][1]:
continue
if i == length:
break
word_list += [self.TENS_TO[(length - i) * 3]]
return ' '.join(word_list) + float_part
|
[
"join",
"the",
"words",
"by",
"first",
"join",
"lists",
"in",
"the",
"tuple",
":",
"param",
"word_blocks",
":",
"tuple",
":",
"rtype",
":",
"str"
] |
savoirfairelinux/num2words
|
python
|
https://github.com/savoirfairelinux/num2words/blob/f4b2bac098ae8e4850cf2f185f6ff52a5979641f/num2words/lang_ID.py#L146-L169
|
[
"def",
"join",
"(",
"self",
",",
"word_blocks",
",",
"float_part",
")",
":",
"word_list",
"=",
"[",
"]",
"length",
"=",
"len",
"(",
"word_blocks",
")",
"-",
"1",
"first_block",
"=",
"word_blocks",
"[",
"0",
"]",
",",
"start",
"=",
"0",
"if",
"length",
"==",
"1",
"and",
"first_block",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"'1'",
":",
"word_list",
"+=",
"[",
"'seribu'",
"]",
"start",
"=",
"1",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"length",
"+",
"1",
",",
"1",
")",
":",
"word_list",
"+=",
"word_blocks",
"[",
"i",
"]",
"[",
"1",
"]",
"if",
"not",
"word_blocks",
"[",
"i",
"]",
"[",
"1",
"]",
":",
"continue",
"if",
"i",
"==",
"length",
":",
"break",
"word_list",
"+=",
"[",
"self",
".",
"TENS_TO",
"[",
"(",
"length",
"-",
"i",
")",
"*",
"3",
"]",
"]",
"return",
"' '",
".",
"join",
"(",
"word_list",
")",
"+",
"float_part"
] |
f4b2bac098ae8e4850cf2f185f6ff52a5979641f
|
test
|
Num2Word_Base.to_currency
|
Args:
val: Numeric value
currency (str): Currency code
cents (bool): Verbose cents
separator (str): Cent separator
adjective (bool): Prefix currency name with adjective
Returns:
str: Formatted string
|
num2words/base.py
|
def to_currency(self, val, currency='EUR', cents=True, separator=',',
adjective=False):
"""
Args:
val: Numeric value
currency (str): Currency code
cents (bool): Verbose cents
separator (str): Cent separator
adjective (bool): Prefix currency name with adjective
Returns:
str: Formatted string
"""
left, right, is_negative = parse_currency_parts(val)
try:
cr1, cr2 = self.CURRENCY_FORMS[currency]
except KeyError:
raise NotImplementedError(
'Currency code "%s" not implemented for "%s"' %
(currency, self.__class__.__name__))
if adjective and currency in self.CURRENCY_ADJECTIVES:
cr1 = prefix_currency(self.CURRENCY_ADJECTIVES[currency], cr1)
minus_str = "%s " % self.negword if is_negative else ""
cents_str = self._cents_verbose(right, currency) \
if cents else self._cents_terse(right, currency)
return u'%s%s %s%s %s %s' % (
minus_str,
self.to_cardinal(left),
self.pluralize(left, cr1),
separator,
cents_str,
self.pluralize(right, cr2)
)
|
def to_currency(self, val, currency='EUR', cents=True, separator=',',
adjective=False):
"""
Args:
val: Numeric value
currency (str): Currency code
cents (bool): Verbose cents
separator (str): Cent separator
adjective (bool): Prefix currency name with adjective
Returns:
str: Formatted string
"""
left, right, is_negative = parse_currency_parts(val)
try:
cr1, cr2 = self.CURRENCY_FORMS[currency]
except KeyError:
raise NotImplementedError(
'Currency code "%s" not implemented for "%s"' %
(currency, self.__class__.__name__))
if adjective and currency in self.CURRENCY_ADJECTIVES:
cr1 = prefix_currency(self.CURRENCY_ADJECTIVES[currency], cr1)
minus_str = "%s " % self.negword if is_negative else ""
cents_str = self._cents_verbose(right, currency) \
if cents else self._cents_terse(right, currency)
return u'%s%s %s%s %s %s' % (
minus_str,
self.to_cardinal(left),
self.pluralize(left, cr1),
separator,
cents_str,
self.pluralize(right, cr2)
)
|
[
"Args",
":",
"val",
":",
"Numeric",
"value",
"currency",
"(",
"str",
")",
":",
"Currency",
"code",
"cents",
"(",
"bool",
")",
":",
"Verbose",
"cents",
"separator",
"(",
"str",
")",
":",
"Cent",
"separator",
"adjective",
"(",
"bool",
")",
":",
"Prefix",
"currency",
"name",
"with",
"adjective",
"Returns",
":",
"str",
":",
"Formatted",
"string"
] |
savoirfairelinux/num2words
|
python
|
https://github.com/savoirfairelinux/num2words/blob/f4b2bac098ae8e4850cf2f185f6ff52a5979641f/num2words/base.py#L266-L303
|
[
"def",
"to_currency",
"(",
"self",
",",
"val",
",",
"currency",
"=",
"'EUR'",
",",
"cents",
"=",
"True",
",",
"separator",
"=",
"','",
",",
"adjective",
"=",
"False",
")",
":",
"left",
",",
"right",
",",
"is_negative",
"=",
"parse_currency_parts",
"(",
"val",
")",
"try",
":",
"cr1",
",",
"cr2",
"=",
"self",
".",
"CURRENCY_FORMS",
"[",
"currency",
"]",
"except",
"KeyError",
":",
"raise",
"NotImplementedError",
"(",
"'Currency code \"%s\" not implemented for \"%s\"'",
"%",
"(",
"currency",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"if",
"adjective",
"and",
"currency",
"in",
"self",
".",
"CURRENCY_ADJECTIVES",
":",
"cr1",
"=",
"prefix_currency",
"(",
"self",
".",
"CURRENCY_ADJECTIVES",
"[",
"currency",
"]",
",",
"cr1",
")",
"minus_str",
"=",
"\"%s \"",
"%",
"self",
".",
"negword",
"if",
"is_negative",
"else",
"\"\"",
"cents_str",
"=",
"self",
".",
"_cents_verbose",
"(",
"right",
",",
"currency",
")",
"if",
"cents",
"else",
"self",
".",
"_cents_terse",
"(",
"right",
",",
"currency",
")",
"return",
"u'%s%s %s%s %s %s'",
"%",
"(",
"minus_str",
",",
"self",
".",
"to_cardinal",
"(",
"left",
")",
",",
"self",
".",
"pluralize",
"(",
"left",
",",
"cr1",
")",
",",
"separator",
",",
"cents_str",
",",
"self",
".",
"pluralize",
"(",
"right",
",",
"cr2",
")",
")"
] |
f4b2bac098ae8e4850cf2f185f6ff52a5979641f
|
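to_currency() is normally reached through the package-level num2words() helper; a sketch assuming the installed num2words package (the exact wording can differ between languages and versions):

from num2words import num2words

# `to` selects the converter; to_currency() above formats the two sides
print(num2words(42.50, lang='en', to='currency', currency='EUR'))
# e.g. 'forty-two euro, fifty cents'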
test
|
parse_scoped_selector
|
Parse scoped selector.
|
gin/config_parser.py
|
def parse_scoped_selector(scoped_selector):
"""Parse scoped selector."""
# Convert Macro (%scope/name) to (scope/name/macro.value)
if scoped_selector[0] == '%':
if scoped_selector.endswith('.value'):
err_str = '{} is invalid cannot use % and end with .value'
raise ValueError(err_str.format(scoped_selector))
scoped_selector = scoped_selector[1:] + '/macro.value'
scope_selector_list = scoped_selector.rsplit('/', 1)
scope = ''.join(scope_selector_list[:-1])
selector = scope_selector_list[-1]
return scope, selector
|
def parse_scoped_selector(scoped_selector):
"""Parse scoped selector."""
# Convert Macro (%scope/name) to (scope/name/macro.value)
if scoped_selector[0] == '%':
if scoped_selector.endswith('.value'):
err_str = '{} is invalid cannot use % and end with .value'
raise ValueError(err_str.format(scoped_selector))
scoped_selector = scoped_selector[1:] + '/macro.value'
scope_selector_list = scoped_selector.rsplit('/', 1)
scope = ''.join(scope_selector_list[:-1])
selector = scope_selector_list[-1]
return scope, selector
|
[
"Parse",
"scoped",
"selector",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L455-L466
|
[
"def",
"parse_scoped_selector",
"(",
"scoped_selector",
")",
":",
"# Conver Macro (%scope/name) to (scope/name/macro.value)",
"if",
"scoped_selector",
"[",
"0",
"]",
"==",
"'%'",
":",
"if",
"scoped_selector",
".",
"endswith",
"(",
"'.value'",
")",
":",
"err_str",
"=",
"'{} is invalid cannot use % and end with .value'",
"raise",
"ValueError",
"(",
"err_str",
".",
"format",
"(",
"scoped_selector",
")",
")",
"scoped_selector",
"=",
"scoped_selector",
"[",
"1",
":",
"]",
"+",
"'/macro.value'",
"scope_selector_list",
"=",
"scoped_selector",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"scope",
"=",
"''",
".",
"join",
"(",
"scope_selector_list",
"[",
":",
"-",
"1",
"]",
")",
"selector",
"=",
"scope_selector_list",
"[",
"-",
"1",
"]",
"return",
"scope",
",",
"selector"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
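Three calls traced through parse_scoped_selector(), including the macro rewrite in the first branch:

print(parse_scoped_selector('outer/inner/my_fn'))   # ('outer/inner', 'my_fn')
print(parse_scoped_selector('my_fn'))               # ('', 'my_fn')
print(parse_scoped_selector('%scope/my_macro'))     # ('scope/my_macro', 'macro.value')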
test
|
ConfigParser.parse_statement
|
Parse a single statement.
Returns:
Either a `BindingStatement`, `ImportStatement`, `IncludeStatement`, or
`None` if no more statements can be parsed (EOF reached).
|
gin/config_parser.py
|
def parse_statement(self):
"""Parse a single statement.
Returns:
Either a `BindingStatement`, `ImportStatement`, `IncludeStatement`, or
`None` if no more statements can be parsed (EOF reached).
"""
self._skip_whitespace_and_comments()
if self._current_token.kind == tokenize.ENDMARKER:
return None
# Save off location, but ignore char_num for any statement-level errors.
stmt_loc = self._current_location(ignore_char_num=True)
binding_key_or_keyword = self._parse_selector()
statement = None
if self._current_token.value != '=':
if binding_key_or_keyword == 'import':
module = self._parse_selector(scoped=False)
statement = ImportStatement(module, stmt_loc)
elif binding_key_or_keyword == 'include':
str_loc = self._current_location()
success, filename = self._maybe_parse_basic_type()
if not success or not isinstance(filename, str):
self._raise_syntax_error('Expected file path as string.', str_loc)
statement = IncludeStatement(filename, stmt_loc)
else:
self._raise_syntax_error("Expected '='.")
else: # We saw an '='.
self._advance_one_token()
value = self.parse_value()
scope, selector, arg_name = parse_binding_key(binding_key_or_keyword)
statement = BindingStatement(scope, selector, arg_name, value, stmt_loc)
assert statement, 'Internal parsing error.'
if (self._current_token.kind != tokenize.NEWLINE and
self._current_token.kind != tokenize.ENDMARKER):
self._raise_syntax_error('Expected newline.')
elif self._current_token.kind == tokenize.NEWLINE:
self._advance_one_token()
return statement
|
def parse_statement(self):
"""Parse a single statement.
Returns:
Either a `BindingStatement`, `ImportStatement`, `IncludeStatement`, or
`None` if no more statements can be parsed (EOF reached).
"""
self._skip_whitespace_and_comments()
if self._current_token.kind == tokenize.ENDMARKER:
return None
# Save off location, but ignore char_num for any statement-level errors.
stmt_loc = self._current_location(ignore_char_num=True)
binding_key_or_keyword = self._parse_selector()
statement = None
if self._current_token.value != '=':
if binding_key_or_keyword == 'import':
module = self._parse_selector(scoped=False)
statement = ImportStatement(module, stmt_loc)
elif binding_key_or_keyword == 'include':
str_loc = self._current_location()
success, filename = self._maybe_parse_basic_type()
if not success or not isinstance(filename, str):
self._raise_syntax_error('Expected file path as string.', str_loc)
statement = IncludeStatement(filename, stmt_loc)
else:
self._raise_syntax_error("Expected '='.")
else: # We saw an '='.
self._advance_one_token()
value = self.parse_value()
scope, selector, arg_name = parse_binding_key(binding_key_or_keyword)
statement = BindingStatement(scope, selector, arg_name, value, stmt_loc)
assert statement, 'Internal parsing error.'
if (self._current_token.kind != tokenize.NEWLINE and
self._current_token.kind != tokenize.ENDMARKER):
self._raise_syntax_error('Expected newline.')
elif self._current_token.kind == tokenize.NEWLINE:
self._advance_one_token()
return statement
|
[
"Parse",
"a",
"single",
"statement",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L193-L234
|
[
"def",
"parse_statement",
"(",
"self",
")",
":",
"self",
".",
"_skip_whitespace_and_comments",
"(",
")",
"if",
"self",
".",
"_current_token",
".",
"kind",
"==",
"tokenize",
".",
"ENDMARKER",
":",
"return",
"None",
"# Save off location, but ignore char_num for any statement-level errors.",
"stmt_loc",
"=",
"self",
".",
"_current_location",
"(",
"ignore_char_num",
"=",
"True",
")",
"binding_key_or_keyword",
"=",
"self",
".",
"_parse_selector",
"(",
")",
"statement",
"=",
"None",
"if",
"self",
".",
"_current_token",
".",
"value",
"!=",
"'='",
":",
"if",
"binding_key_or_keyword",
"==",
"'import'",
":",
"module",
"=",
"self",
".",
"_parse_selector",
"(",
"scoped",
"=",
"False",
")",
"statement",
"=",
"ImportStatement",
"(",
"module",
",",
"stmt_loc",
")",
"elif",
"binding_key_or_keyword",
"==",
"'include'",
":",
"str_loc",
"=",
"self",
".",
"_current_location",
"(",
")",
"success",
",",
"filename",
"=",
"self",
".",
"_maybe_parse_basic_type",
"(",
")",
"if",
"not",
"success",
"or",
"not",
"isinstance",
"(",
"filename",
",",
"str",
")",
":",
"self",
".",
"_raise_syntax_error",
"(",
"'Expected file path as string.'",
",",
"str_loc",
")",
"statement",
"=",
"IncludeStatement",
"(",
"filename",
",",
"stmt_loc",
")",
"else",
":",
"self",
".",
"_raise_syntax_error",
"(",
"\"Expected '='.\"",
")",
"else",
":",
"# We saw an '='.",
"self",
".",
"_advance_one_token",
"(",
")",
"value",
"=",
"self",
".",
"parse_value",
"(",
")",
"scope",
",",
"selector",
",",
"arg_name",
"=",
"parse_binding_key",
"(",
"binding_key_or_keyword",
")",
"statement",
"=",
"BindingStatement",
"(",
"scope",
",",
"selector",
",",
"arg_name",
",",
"value",
",",
"stmt_loc",
")",
"assert",
"statement",
",",
"'Internal parsing error.'",
"if",
"(",
"self",
".",
"_current_token",
".",
"kind",
"!=",
"tokenize",
".",
"NEWLINE",
"and",
"self",
".",
"_current_token",
".",
"kind",
"!=",
"tokenize",
".",
"ENDMARKER",
")",
":",
"self",
".",
"_raise_syntax_error",
"(",
"'Expected newline.'",
")",
"elif",
"self",
".",
"_current_token",
".",
"kind",
"==",
"tokenize",
".",
"NEWLINE",
":",
"self",
".",
"_advance_one_token",
"(",
")",
"return",
"statement"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
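For context, a hedged end-to-end example of the three statement forms this method recognizes (import, include, and binding) as they appear to a user of the library. It assumes the gin-config package is installed; the function and parameter names are made up for illustration.

import gin

@gin.configurable
def build_model(hidden_units=64, activation='relu'):
    return hidden_units, activation

# Each non-empty line below is one statement in the grammar parse_statement
# handles: 'import module', 'include "file.gin"', or 'selector.arg = value'.
gin.parse_config("""
build_model.hidden_units = 128
build_model.activation = 'tanh'
""")
assert build_model() == (128, 'tanh')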
test
|
ConfigParser.parse_value
|
Parse a single literal value.
Returns:
The parsed value.
|
gin/config_parser.py
|
def parse_value(self):
"""Parse a single literal value.
Returns:
The parsed value.
"""
parsers = [
self._maybe_parse_container, self._maybe_parse_basic_type,
self._maybe_parse_configurable_reference, self._maybe_parse_macro
]
for parser in parsers:
success, value = parser()
if success:
return value
self._raise_syntax_error('Unable to parse value.')
|
def parse_value(self):
"""Parse a single literal value.
Returns:
The parsed value.
"""
parsers = [
self._maybe_parse_container, self._maybe_parse_basic_type,
self._maybe_parse_configurable_reference, self._maybe_parse_macro
]
for parser in parsers:
success, value = parser()
if success:
return value
self._raise_syntax_error('Unable to parse value.')
|
[
"Parse",
"a",
"single",
"literal",
"value",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L236-L250
|
[
"def",
"parse_value",
"(",
"self",
")",
":",
"parsers",
"=",
"[",
"self",
".",
"_maybe_parse_container",
",",
"self",
".",
"_maybe_parse_basic_type",
",",
"self",
".",
"_maybe_parse_configurable_reference",
",",
"self",
".",
"_maybe_parse_macro",
"]",
"for",
"parser",
"in",
"parsers",
":",
"success",
",",
"value",
"=",
"parser",
"(",
")",
"if",
"success",
":",
"return",
"value",
"self",
".",
"_raise_syntax_error",
"(",
"'Unable to parse value.'",
")"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
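The method above is an instance of a common ordered-alternatives pattern: each sub-parser returns a (success, value) pair and the first success wins. A generic standalone sketch of that pattern (the parser names are hypothetical):

def chain(*parsers):
    def parse(text):
        for parser in parsers:
            success, value = parser(text)
            if success:
                return value
        raise SyntaxError('Unable to parse value: %r' % text)
    return parse

def maybe_int(text):
    try:
        return True, int(text)
    except ValueError:
        return False, None

def maybe_float(text):
    try:
        return True, float(text)
    except ValueError:
        return False, None

parse = chain(maybe_int, maybe_float)
assert parse('3') == 3 and parse('3.5') == 3.5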
test
|
ConfigParser.advance_one_line
|
Advances to next line.
|
gin/config_parser.py
|
def advance_one_line(self):
"""Advances to next line."""
current_line = self._current_token.line_number
while current_line == self._current_token.line_number:
self._current_token = ConfigParser.Token(*next(self._token_generator))
|
def advance_one_line(self):
"""Advances to next line."""
current_line = self._current_token.line_number
while current_line == self._current_token.line_number:
self._current_token = ConfigParser.Token(*next(self._token_generator))
|
[
"Advances",
"to",
"next",
"line",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L260-L265
|
[
"def",
"advance_one_line",
"(",
"self",
")",
":",
"current_line",
"=",
"self",
".",
"_current_token",
".",
"line_number",
"while",
"current_line",
"==",
"self",
".",
"_current_token",
".",
"line_number",
":",
"self",
".",
"_current_token",
"=",
"ConfigParser",
".",
"Token",
"(",
"*",
"next",
"(",
"self",
".",
"_token_generator",
")",
")"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
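A standalone illustration of the same idea using the standard tokenize module directly: consume tokens until the starting line number changes. The Token wrapper above stores a line_number field; here the raw TokenInfo.start tuple is used instead.

import io
import tokenize

src = 'x = 1\ny = 2\n'
tokens = tokenize.generate_tokens(io.StringIO(src).readline)
tok = next(tokens)
line = tok.start[0]
while tok.start[0] == line:  # skip everything on the current line
    tok = next(tokens)
assert tok.start[0] == 2 and tok.string == 'y'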
test
|
ConfigParser._parse_selector
|
Parse a (possibly scoped) selector.
A selector is a sequence of one or more valid Python-style identifiers
separated by periods (see also `SelectorMap`). A scoped selector is a
selector that may be preceded by scope names (separated by slashes).
Args:
scoped: Whether scopes are allowed.
allow_periods_in_scope: Whether to allow period characters in the scope
names preceding the selector.
Returns:
The parsed selector (as a string).
Raises:
SyntaxError: If the scope or selector is malformatted.
|
gin/config_parser.py
|
def _parse_selector(self, scoped=True, allow_periods_in_scope=False):
"""Parse a (possibly scoped) selector.
A selector is a sequence of one or more valid Python-style identifiers
separated by periods (see also `SelectorMap`). A scoped selector is a
selector that may be preceded by scope names (separated by slashes).
Args:
scoped: Whether scopes are allowed.
allow_periods_in_scope: Whether to allow period characters in the scope
names preceding the selector.
Returns:
The parsed selector (as a string).
Raises:
SyntaxError: If the scope or selector is malformatted.
"""
if self._current_token.kind != tokenize.NAME:
self._raise_syntax_error('Unexpected token.')
begin_line_num = self._current_token.begin[0]
begin_char_num = self._current_token.begin[1]
end_char_num = self._current_token.end[1]
line = self._current_token.line
selector_parts = []
# This accepts an alternating sequence of NAME and '/' or '.' tokens.
step_parity = 0
while (step_parity == 0 and self._current_token.kind == tokenize.NAME or
step_parity == 1 and self._current_token.value in ('/', '.')):
selector_parts.append(self._current_token.value)
step_parity = not step_parity
end_char_num = self._current_token.end[1]
self._advance_one_token()
self._skip_whitespace_and_comments()
# Due to tokenization, most whitespace has been stripped already. To prevent
# whitespace inside the scoped selector, we verify that it matches an
# untokenized version of the selector obtained from the first through last
# character positions of the consumed tokens in the line being parsed.
scoped_selector = ''.join(selector_parts)
untokenized_scoped_selector = line[begin_char_num:end_char_num]
# Also check that it's properly formatted (e.g., no consecutive slashes).
scope_re = IDENTIFIER_RE
if allow_periods_in_scope:
scope_re = MODULE_RE
selector_re = MODULE_RE
scope_parts = scoped_selector.split('/')
valid_format = all(scope_re.match(scope) for scope in scope_parts[:-1])
valid_format &= bool(selector_re.match(scope_parts[-1]))
valid_format &= bool(scoped or len(scope_parts) == 1)
if untokenized_scoped_selector != scoped_selector or not valid_format:
location = (self._filename, begin_line_num, begin_char_num + 1, line)
self._raise_syntax_error('Malformatted scope or selector.', location)
return scoped_selector
|
def _parse_selector(self, scoped=True, allow_periods_in_scope=False):
"""Parse a (possibly scoped) selector.
A selector is a sequence of one or more valid Python-style identifiers
separated by periods (see also `SelectorMap`). A scoped selector is a
selector that may be preceded by scope names (separated by slashes).
Args:
scoped: Whether scopes are allowed.
allow_periods_in_scope: Whether to allow period characters in the scope
names preceding the selector.
Returns:
The parsed selector (as a string).
Raises:
SyntaxError: If the scope or selector is malformatted.
"""
if self._current_token.kind != tokenize.NAME:
self._raise_syntax_error('Unexpected token.')
begin_line_num = self._current_token.begin[0]
begin_char_num = self._current_token.begin[1]
end_char_num = self._current_token.end[1]
line = self._current_token.line
selector_parts = []
# This accepts an alternating sequence of NAME and '/' or '.' tokens.
step_parity = 0
while (step_parity == 0 and self._current_token.kind == tokenize.NAME or
step_parity == 1 and self._current_token.value in ('/', '.')):
selector_parts.append(self._current_token.value)
step_parity = not step_parity
end_char_num = self._current_token.end[1]
self._advance_one_token()
self._skip_whitespace_and_comments()
# Due to tokenization, most whitespace has been stripped already. To prevent
# whitespace inside the scoped selector, we verify that it matches an
# untokenized version of the selector obtained from the first through last
# character positions of the consumed tokens in the line being parsed.
scoped_selector = ''.join(selector_parts)
untokenized_scoped_selector = line[begin_char_num:end_char_num]
# Also check that it's properly formatted (e.g., no consecutive slashes).
scope_re = IDENTIFIER_RE
if allow_periods_in_scope:
scope_re = MODULE_RE
selector_re = MODULE_RE
scope_parts = scoped_selector.split('/')
valid_format = all(scope_re.match(scope) for scope in scope_parts[:-1])
valid_format &= bool(selector_re.match(scope_parts[-1]))
valid_format &= bool(scoped or len(scope_parts) == 1)
if untokenized_scoped_selector != scoped_selector or not valid_format:
location = (self._filename, begin_line_num, begin_char_num + 1, line)
self._raise_syntax_error('Malformatted scope or selector.', location)
return scoped_selector
|
[
"Parse",
"a",
"(",
"possibly",
"scoped",
")",
"selector",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L297-L354
|
[
"def",
"_parse_selector",
"(",
"self",
",",
"scoped",
"=",
"True",
",",
"allow_periods_in_scope",
"=",
"False",
")",
":",
"if",
"self",
".",
"_current_token",
".",
"kind",
"!=",
"tokenize",
".",
"NAME",
":",
"self",
".",
"_raise_syntax_error",
"(",
"'Unexpected token.'",
")",
"begin_line_num",
"=",
"self",
".",
"_current_token",
".",
"begin",
"[",
"0",
"]",
"begin_char_num",
"=",
"self",
".",
"_current_token",
".",
"begin",
"[",
"1",
"]",
"end_char_num",
"=",
"self",
".",
"_current_token",
".",
"end",
"[",
"1",
"]",
"line",
"=",
"self",
".",
"_current_token",
".",
"line",
"selector_parts",
"=",
"[",
"]",
"# This accepts an alternating sequence of NAME and '/' or '.' tokens.",
"step_parity",
"=",
"0",
"while",
"(",
"step_parity",
"==",
"0",
"and",
"self",
".",
"_current_token",
".",
"kind",
"==",
"tokenize",
".",
"NAME",
"or",
"step_parity",
"==",
"1",
"and",
"self",
".",
"_current_token",
".",
"value",
"in",
"(",
"'/'",
",",
"'.'",
")",
")",
":",
"selector_parts",
".",
"append",
"(",
"self",
".",
"_current_token",
".",
"value",
")",
"step_parity",
"=",
"not",
"step_parity",
"end_char_num",
"=",
"self",
".",
"_current_token",
".",
"end",
"[",
"1",
"]",
"self",
".",
"_advance_one_token",
"(",
")",
"self",
".",
"_skip_whitespace_and_comments",
"(",
")",
"# Due to tokenization, most whitespace has been stripped already. To prevent",
"# whitespace inside the scoped selector, we verify that it matches an",
"# untokenized version of the selector obtained from the first through last",
"# character positions of the consumed tokens in the line being parsed.",
"scoped_selector",
"=",
"''",
".",
"join",
"(",
"selector_parts",
")",
"untokenized_scoped_selector",
"=",
"line",
"[",
"begin_char_num",
":",
"end_char_num",
"]",
"# Also check that it's properly formatted (e.g., no consecutive slashes).",
"scope_re",
"=",
"IDENTIFIER_RE",
"if",
"allow_periods_in_scope",
":",
"scope_re",
"=",
"MODULE_RE",
"selector_re",
"=",
"MODULE_RE",
"scope_parts",
"=",
"scoped_selector",
".",
"split",
"(",
"'/'",
")",
"valid_format",
"=",
"all",
"(",
"scope_re",
".",
"match",
"(",
"scope",
")",
"for",
"scope",
"in",
"scope_parts",
"[",
":",
"-",
"1",
"]",
")",
"valid_format",
"&=",
"bool",
"(",
"selector_re",
".",
"match",
"(",
"scope_parts",
"[",
"-",
"1",
"]",
")",
")",
"valid_format",
"&=",
"bool",
"(",
"scoped",
"or",
"len",
"(",
"scope_parts",
")",
"==",
"1",
")",
"if",
"untokenized_scoped_selector",
"!=",
"scoped_selector",
"or",
"not",
"valid_format",
":",
"location",
"=",
"(",
"self",
".",
"_filename",
",",
"begin_line_num",
",",
"begin_char_num",
"+",
"1",
",",
"line",
")",
"self",
".",
"_raise_syntax_error",
"(",
"'Malformatted scope or selector.'",
",",
"location",
")",
"return",
"scoped_selector"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
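The format check at the end of _parse_selector can be exercised in isolation. A sketch of just the validation step; the IDENTIFIER_RE and MODULE_RE patterns below are plausible stand-ins, not necessarily the library's exact definitions.

import re

IDENTIFIER_RE = re.compile(r'^[A-Za-z_]\w*$')               # assumed pattern
MODULE_RE = re.compile(r'^[A-Za-z_]\w*(\.[A-Za-z_]\w*)*$')  # assumed pattern

def is_valid_scoped_selector(scoped_selector, scoped=True,
                             allow_periods_in_scope=False):
    scope_re = MODULE_RE if allow_periods_in_scope else IDENTIFIER_RE
    parts = scoped_selector.split('/')
    ok = all(scope_re.match(p) for p in parts[:-1])
    ok &= bool(MODULE_RE.match(parts[-1]))
    ok &= bool(scoped or len(parts) == 1)
    return bool(ok)

assert is_valid_scoped_selector('train/model.fn')
assert not is_valid_scoped_selector('train//model.fn')  # consecutive slashes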
test
|
ConfigParser._maybe_parse_container
|
Try to parse a container type (dict, list, or tuple).
|
gin/config_parser.py
|
def _maybe_parse_container(self):
"""Try to parse a container type (dict, list, or tuple)."""
bracket_types = {
'{': ('}', dict, self._parse_dict_item),
'(': (')', tuple, self.parse_value),
'[': (']', list, self.parse_value)
}
if self._current_token.value in bracket_types:
open_bracket = self._current_token.value
close_bracket, type_fn, parse_item = bracket_types[open_bracket]
self._advance()
values = []
saw_comma = False
while self._current_token.value != close_bracket:
values.append(parse_item())
if self._current_token.value == ',':
saw_comma = True
self._advance()
elif self._current_token.value != close_bracket:
self._raise_syntax_error("Expected ',' or '%s'." % close_bracket)
# If it's just a single value enclosed in parentheses without a trailing
# comma, it's not a tuple, so just grab the value.
if type_fn is tuple and len(values) == 1 and not saw_comma:
type_fn = lambda x: x[0]
self._advance()
return True, type_fn(values)
return False, None
|
def _maybe_parse_container(self):
"""Try to parse a container type (dict, list, or tuple)."""
bracket_types = {
'{': ('}', dict, self._parse_dict_item),
'(': (')', tuple, self.parse_value),
'[': (']', list, self.parse_value)
}
if self._current_token.value in bracket_types:
open_bracket = self._current_token.value
close_bracket, type_fn, parse_item = bracket_types[open_bracket]
self._advance()
values = []
saw_comma = False
while self._current_token.value != close_bracket:
values.append(parse_item())
if self._current_token.value == ',':
saw_comma = True
self._advance()
elif self._current_token.value != close_bracket:
self._raise_syntax_error("Expected ',' or '%s'." % close_bracket)
# If it's just a single value enclosed in parentheses without a trailing
# comma, it's not a tuple, so just grab the value.
if type_fn is tuple and len(values) == 1 and not saw_comma:
type_fn = lambda x: x[0]
self._advance()
return True, type_fn(values)
return False, None
|
[
"Try",
"to",
"parse",
"a",
"container",
"type",
"(",
"dict",
"list",
"or",
"tuple",
")",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L356-L386
|
[
"def",
"_maybe_parse_container",
"(",
"self",
")",
":",
"bracket_types",
"=",
"{",
"'{'",
":",
"(",
"'}'",
",",
"dict",
",",
"self",
".",
"_parse_dict_item",
")",
",",
"'('",
":",
"(",
"')'",
",",
"tuple",
",",
"self",
".",
"parse_value",
")",
",",
"'['",
":",
"(",
"']'",
",",
"list",
",",
"self",
".",
"parse_value",
")",
"}",
"if",
"self",
".",
"_current_token",
".",
"value",
"in",
"bracket_types",
":",
"open_bracket",
"=",
"self",
".",
"_current_token",
".",
"value",
"close_bracket",
",",
"type_fn",
",",
"parse_item",
"=",
"bracket_types",
"[",
"open_bracket",
"]",
"self",
".",
"_advance",
"(",
")",
"values",
"=",
"[",
"]",
"saw_comma",
"=",
"False",
"while",
"self",
".",
"_current_token",
".",
"value",
"!=",
"close_bracket",
":",
"values",
".",
"append",
"(",
"parse_item",
"(",
")",
")",
"if",
"self",
".",
"_current_token",
".",
"value",
"==",
"','",
":",
"saw_comma",
"=",
"True",
"self",
".",
"_advance",
"(",
")",
"elif",
"self",
".",
"_current_token",
".",
"value",
"!=",
"close_bracket",
":",
"self",
".",
"_raise_syntax_error",
"(",
"\"Expected ',' or '%s'.\"",
"%",
"close_bracket",
")",
"# If it's just a single value enclosed in parentheses without a trailing",
"# comma, it's not a tuple, so just grab the value.",
"if",
"type_fn",
"is",
"tuple",
"and",
"len",
"(",
"values",
")",
"==",
"1",
"and",
"not",
"saw_comma",
":",
"type_fn",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
"self",
".",
"_advance",
"(",
")",
"return",
"True",
",",
"type_fn",
"(",
"values",
")",
"return",
"False",
",",
"None"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
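One subtlety worth calling out: a single parenthesized value without a trailing comma is not a tuple, which is why type_fn is swapped for an unwrapping lambda. A tiny standalone check of that rule:

def finalize(values, type_fn, saw_comma):
    # Mirrors the tuple special case in _maybe_parse_container.
    if type_fn is tuple and len(values) == 1 and not saw_comma:
        type_fn = lambda x: x[0]
    return type_fn(values)

assert finalize([42], tuple, saw_comma=False) == 42       # '(42)'   -> 42
assert finalize([42], tuple, saw_comma=True) == (42,)     # '(42,)'  -> (42,)
assert finalize([1, 2], tuple, saw_comma=True) == (1, 2)  # '(1, 2)'
assert finalize([1, 2], list, saw_comma=True) == [1, 2]   # '[1, 2]'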
test
|
ConfigParser._maybe_parse_basic_type
|
Try to parse a basic type (str, bool, number).
|
gin/config_parser.py
|
def _maybe_parse_basic_type(self):
"""Try to parse a basic type (str, bool, number)."""
token_value = ''
# Allow a leading dash to handle negative numbers.
if self._current_token.value == '-':
token_value += self._current_token.value
self._advance()
basic_type_tokens = [tokenize.NAME, tokenize.NUMBER, tokenize.STRING]
continue_parsing = self._current_token.kind in basic_type_tokens
if not continue_parsing:
return False, None
while continue_parsing:
token_value += self._current_token.value
try:
value = ast.literal_eval(token_value)
except Exception as e: # pylint: disable=broad-except
err_str = "{}\n Failed to parse token '{}'"
self._raise_syntax_error(err_str.format(e, token_value))
was_string = self._current_token.kind == tokenize.STRING
self._advance()
is_string = self._current_token.kind == tokenize.STRING
continue_parsing = was_string and is_string
return True, value
|
def _maybe_parse_basic_type(self):
"""Try to parse a basic type (str, bool, number)."""
token_value = ''
# Allow a leading dash to handle negative numbers.
if self._current_token.value == '-':
token_value += self._current_token.value
self._advance()
basic_type_tokens = [tokenize.NAME, tokenize.NUMBER, tokenize.STRING]
continue_parsing = self._current_token.kind in basic_type_tokens
if not continue_parsing:
return False, None
while continue_parsing:
token_value += self._current_token.value
try:
value = ast.literal_eval(token_value)
except Exception as e: # pylint: disable=broad-except
err_str = "{}\n Failed to parse token '{}'"
self._raise_syntax_error(err_str.format(e, token_value))
was_string = self._current_token.kind == tokenize.STRING
self._advance()
is_string = self._current_token.kind == tokenize.STRING
continue_parsing = was_string and is_string
return True, value
|
[
"Try",
"to",
"parse",
"a",
"basic",
"type",
"(",
"str",
"bool",
"number",
")",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L388-L415
|
[
"def",
"_maybe_parse_basic_type",
"(",
"self",
")",
":",
"token_value",
"=",
"''",
"# Allow a leading dash to handle negative numbers.",
"if",
"self",
".",
"_current_token",
".",
"value",
"==",
"'-'",
":",
"token_value",
"+=",
"self",
".",
"_current_token",
".",
"value",
"self",
".",
"_advance",
"(",
")",
"basic_type_tokens",
"=",
"[",
"tokenize",
".",
"NAME",
",",
"tokenize",
".",
"NUMBER",
",",
"tokenize",
".",
"STRING",
"]",
"continue_parsing",
"=",
"self",
".",
"_current_token",
".",
"kind",
"in",
"basic_type_tokens",
"if",
"not",
"continue_parsing",
":",
"return",
"False",
",",
"None",
"while",
"continue_parsing",
":",
"token_value",
"+=",
"self",
".",
"_current_token",
".",
"value",
"try",
":",
"value",
"=",
"ast",
".",
"literal_eval",
"(",
"token_value",
")",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"err_str",
"=",
"\"{}\\n Failed to parse token '{}'\"",
"self",
".",
"_raise_syntax_error",
"(",
"err_str",
".",
"format",
"(",
"e",
",",
"token_value",
")",
")",
"was_string",
"=",
"self",
".",
"_current_token",
".",
"kind",
"==",
"tokenize",
".",
"STRING",
"self",
".",
"_advance",
"(",
")",
"is_string",
"=",
"self",
".",
"_current_token",
".",
"kind",
"==",
"tokenize",
".",
"STRING",
"continue_parsing",
"=",
"was_string",
"and",
"is_string",
"return",
"True",
",",
"value"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
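The heavy lifting here is delegated to ast.literal_eval, with two wrinkles handled by hand: a leading '-' for negative numbers and Python's implicit concatenation of adjacent string literals. Both behaviors can be verified directly:

import ast

assert ast.literal_eval('-' + '3.5') == -3.5        # leading dash prepended
assert ast.literal_eval("'foo' 'bar'") == 'foobar'  # adjacent STRING tokens
assert ast.literal_eval('True') is True             # NAME tokens: bools/None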
test
|
ConfigParser._maybe_parse_configurable_reference
|
Try to parse a configurable reference (@[scope/name/]fn_name[()]).
|
gin/config_parser.py
|
def _maybe_parse_configurable_reference(self):
"""Try to parse a configurable reference (@[scope/name/]fn_name[()])."""
if self._current_token.value != '@':
return False, None
location = self._current_location()
self._advance_one_token()
scoped_name = self._parse_selector(allow_periods_in_scope=True)
evaluate = False
if self._current_token.value == '(':
evaluate = True
self._advance()
if self._current_token.value != ')':
self._raise_syntax_error("Expected ')'.")
self._advance_one_token()
self._skip_whitespace_and_comments()
with utils.try_with_location(location):
reference = self._delegate.configurable_reference(scoped_name, evaluate)
return True, reference
|
def _maybe_parse_configurable_reference(self):
"""Try to parse a configurable reference (@[scope/name/]fn_name[()])."""
if self._current_token.value != '@':
return False, None
location = self._current_location()
self._advance_one_token()
scoped_name = self._parse_selector(allow_periods_in_scope=True)
evaluate = False
if self._current_token.value == '(':
evaluate = True
self._advance()
if self._current_token.value != ')':
self._raise_syntax_error("Expected ')'.")
self._advance_one_token()
self._skip_whitespace_and_comments()
with utils.try_with_location(location):
reference = self._delegate.configurable_reference(scoped_name, evaluate)
return True, reference
|
[
"Try",
"to",
"parse",
"a",
"configurable",
"reference",
"("
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L417-L438
|
[
"def",
"_maybe_parse_configurable_reference",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current_token",
".",
"value",
"!=",
"'@'",
":",
"return",
"False",
",",
"None",
"location",
"=",
"self",
".",
"_current_location",
"(",
")",
"self",
".",
"_advance_one_token",
"(",
")",
"scoped_name",
"=",
"self",
".",
"_parse_selector",
"(",
"allow_periods_in_scope",
"=",
"True",
")",
"evaluate",
"=",
"False",
"if",
"self",
".",
"_current_token",
".",
"value",
"==",
"'('",
":",
"evaluate",
"=",
"True",
"self",
".",
"_advance",
"(",
")",
"if",
"self",
".",
"_current_token",
".",
"value",
"!=",
"')'",
":",
"self",
".",
"_raise_syntax_error",
"(",
"\"Expected ')'.\"",
")",
"self",
".",
"_advance_one_token",
"(",
")",
"self",
".",
"_skip_whitespace_and_comments",
"(",
")",
"with",
"utils",
".",
"try_with_location",
"(",
"location",
")",
":",
"reference",
"=",
"self",
".",
"_delegate",
".",
"configurable_reference",
"(",
"scoped_name",
",",
"evaluate",
")",
"return",
"True",
",",
"reference"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
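From the user's side, the '@' syntax parsed here binds a configurable (or, with trailing parentheses, its result) to a parameter. A hedged usage sketch with made-up configurable names, assuming gin-config is installed:

import gin

@gin.configurable
def cell(size=32):
    return size

@gin.configurable
def model(make_cell=None):
    return make_cell() if callable(make_cell) else make_cell

# '@cell' passes the configurable itself; '@cell()' would pass its result.
gin.parse_config('model.make_cell = @cell')
assert model() == 32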
test
|
ConfigParser._maybe_parse_macro
|
Try to parse a macro (%scope/name).
|
gin/config_parser.py
|
def _maybe_parse_macro(self):
"""Try to parse an macro (%scope/name)."""
if self._current_token.value != '%':
return False, None
location = self._current_location()
self._advance_one_token()
scoped_name = self._parse_selector(allow_periods_in_scope=True)
with utils.try_with_location(location):
macro = self._delegate.macro(scoped_name)
return True, macro
|
def _maybe_parse_macro(self):
"""Try to parse an macro (%scope/name)."""
if self._current_token.value != '%':
return False, None
location = self._current_location()
self._advance_one_token()
scoped_name = self._parse_selector(allow_periods_in_scope=True)
with utils.try_with_location(location):
macro = self._delegate.macro(scoped_name)
return True, macro
|
[
"Try",
"to",
"parse",
"an",
"macro",
"(",
"%scope",
"/",
"name",
")",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config_parser.py#L440-L452
|
[
"def",
"_maybe_parse_macro",
"(",
"self",
")",
":",
"if",
"self",
".",
"_current_token",
".",
"value",
"!=",
"'%'",
":",
"return",
"False",
",",
"None",
"location",
"=",
"self",
".",
"_current_location",
"(",
")",
"self",
".",
"_advance_one_token",
"(",
")",
"scoped_name",
"=",
"self",
".",
"_parse_selector",
"(",
"allow_periods_in_scope",
"=",
"True",
")",
"with",
"utils",
".",
"try_with_location",
"(",
"location",
")",
":",
"macro",
"=",
"self",
".",
"_delegate",
".",
"macro",
"(",
"scoped_name",
")",
"return",
"True",
",",
"macro"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
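And the '%' syntax parsed here lets one binding reference another. A hedged usage sketch (the names are made up), again assuming gin-config is installed:

import gin

@gin.configurable
def trainer(lr=None):
    return lr

gin.parse_config("""
learning_rate = 3e-4
trainer.lr = %learning_rate
""")
assert trainer() == 3e-4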
test
|
augment_exception_message_and_reraise
|
Reraises `exception`, appending `message` to its string representation.
|
gin/utils.py
|
def augment_exception_message_and_reraise(exception, message):
"""Reraises `exception`, appending `message` to its string representation."""
class ExceptionProxy(type(exception)):
"""Acts as a proxy for an exception with an augmented message."""
__module__ = type(exception).__module__
def __init__(self):
pass
def __getattr__(self, attr_name):
return getattr(exception, attr_name)
def __str__(self):
return str(exception) + message
ExceptionProxy.__name__ = type(exception).__name__
proxy = ExceptionProxy()
if six.PY3:
ExceptionProxy.__qualname__ = type(exception).__qualname__
six.raise_from(proxy.with_traceback(exception.__traceback__), None)
else:
six.reraise(proxy, None, sys.exc_info()[2])
|
def augment_exception_message_and_reraise(exception, message):
"""Reraises `exception`, appending `message` to its string representation."""
class ExceptionProxy(type(exception)):
"""Acts as a proxy for an exception with an augmented message."""
__module__ = type(exception).__module__
def __init__(self):
pass
def __getattr__(self, attr_name):
return getattr(exception, attr_name)
def __str__(self):
return str(exception) + message
ExceptionProxy.__name__ = type(exception).__name__
proxy = ExceptionProxy()
if six.PY3:
ExceptionProxy.__qualname__ = type(exception).__qualname__
six.raise_from(proxy.with_traceback(exception.__traceback__), None)
else:
six.reraise(proxy, None, sys.exc_info()[2])
|
[
"Reraises",
"exception",
"appending",
"message",
"to",
"its",
"string",
"representation",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/utils.py#L28-L51
|
[
"def",
"augment_exception_message_and_reraise",
"(",
"exception",
",",
"message",
")",
":",
"class",
"ExceptionProxy",
"(",
"type",
"(",
"exception",
")",
")",
":",
"\"\"\"Acts as a proxy for an exception with an augmented message.\"\"\"",
"__module__",
"=",
"type",
"(",
"exception",
")",
".",
"__module__",
"def",
"__init__",
"(",
"self",
")",
":",
"pass",
"def",
"__getattr__",
"(",
"self",
",",
"attr_name",
")",
":",
"return",
"getattr",
"(",
"exception",
",",
"attr_name",
")",
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"str",
"(",
"exception",
")",
"+",
"message",
"ExceptionProxy",
".",
"__name__",
"=",
"type",
"(",
"exception",
")",
".",
"__name__",
"proxy",
"=",
"ExceptionProxy",
"(",
")",
"if",
"six",
".",
"PY3",
":",
"ExceptionProxy",
".",
"__qualname__",
"=",
"type",
"(",
"exception",
")",
".",
"__qualname__",
"six",
".",
"raise_from",
"(",
"proxy",
".",
"with_traceback",
"(",
"exception",
".",
"__traceback__",
")",
",",
"None",
")",
"else",
":",
"six",
".",
"reraise",
"(",
"proxy",
",",
"None",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
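A Python-3-only condensation of the proxy trick above: subclass the original exception's type, override __str__ to append the message, and re-raise with the original traceback. The try/except around int() is just a hypothetical trigger.

def augment_and_reraise(exception, message):
    class ExceptionProxy(type(exception)):
        def __init__(self):
            pass
        def __str__(self):
            return str(exception) + message
    ExceptionProxy.__name__ = type(exception).__name__
    raise ExceptionProxy().with_traceback(exception.__traceback__) from None

try:
    try:
        int('not a number')
    except ValueError as e:
        augment_and_reraise(e, '\n  In binding for hypothetical_fn.param')
except ValueError as e:
    assert 'In binding for' in str(e)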
test
|
GinConfigSaverHook._markdownify_operative_config_str
|
Convert an operative config string to markdown format.
|
gin/tf/utils.py
|
def _markdownify_operative_config_str(self, string):
"""Convert an operative config string to markdown format."""
# TODO: Total hack below. Implement more principled formatting.
def process(line):
"""Convert a single line to markdown format."""
if not line.startswith('#'):
return ' ' + line
line = line[2:]
if line.startswith('===='):
return ''
if line.startswith('None'):
return ' # None.'
if line.endswith(':'):
return '#### ' + line
return line
output_lines = []
for line in string.splitlines():
procd_line = process(line)
if procd_line is not None:
output_lines.append(procd_line)
return '\n'.join(output_lines)
|
def _markdownify_operative_config_str(self, string):
"""Convert an operative config string to markdown format."""
# TODO: Total hack below. Implement more principled formatting.
def process(line):
"""Convert a single line to markdown format."""
if not line.startswith('#'):
return ' ' + line
line = line[2:]
if line.startswith('===='):
return ''
if line.startswith('None'):
return ' # None.'
if line.endswith(':'):
return '#### ' + line
return line
output_lines = []
for line in string.splitlines():
procd_line = process(line)
if procd_line is not None:
output_lines.append(procd_line)
return '\n'.join(output_lines)
|
[
"Convert",
"an",
"operative",
"config",
"string",
"to",
"markdown",
"format",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/tf/utils.py#L82-L106
|
[
"def",
"_markdownify_operative_config_str",
"(",
"self",
",",
"string",
")",
":",
"# TODO: Total hack below. Implement more principled formatting.",
"def",
"process",
"(",
"line",
")",
":",
"\"\"\"Convert a single line to markdown format.\"\"\"",
"if",
"not",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"return",
"' '",
"+",
"line",
"line",
"=",
"line",
"[",
"2",
":",
"]",
"if",
"line",
".",
"startswith",
"(",
"'===='",
")",
":",
"return",
"''",
"if",
"line",
".",
"startswith",
"(",
"'None'",
")",
":",
"return",
"' # None.'",
"if",
"line",
".",
"endswith",
"(",
"':'",
")",
":",
"return",
"'#### '",
"+",
"line",
"return",
"line",
"output_lines",
"=",
"[",
"]",
"for",
"line",
"in",
"string",
".",
"splitlines",
"(",
")",
":",
"procd_line",
"=",
"process",
"(",
"line",
")",
"if",
"procd_line",
"is",
"not",
"None",
":",
"output_lines",
".",
"append",
"(",
"procd_line",
")",
"return",
"'\\n'",
".",
"join",
"(",
"output_lines",
")"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
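The inner process() function can be exercised on sample lines to see the rewriting rules. The sample operative-config lines below are hypothetical, and the literal space widths follow the dump above (they were likely wider indents in the original source before whitespace was collapsed).

def process(line):
    if not line.startswith('#'):
        return ' ' + line
    line = line[2:]
    if line.startswith('===='):
        return ''
    if line.startswith('None'):
        return ' # None.'
    if line.endswith(':'):
        return '#### ' + line
    return line

assert process('# Parameters for train:') == '#### Parameters for train:'
assert process('train.lr = 0.001') == ' train.lr = 0.001'
assert process('# ====================') == ''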
test
|
GinConfigSaverHook.after_create_session
|
Writes out Gin's operative config, and maybe adds a summary of it.
|
gin/tf/utils.py
|
def after_create_session(self, session=None, coord=None):
"""Writes out Gin's operative config, and maybe adds a summary of it."""
config_str = config.operative_config_str()
if not tf.gfile.IsDirectory(self._output_dir):
tf.gfile.MakeDirs(self._output_dir)
global_step_val = 0
if session is not None:
global_step = tf.train.get_global_step()
if global_step is not None:
global_step_val = session.run(global_step)
filename = '%s-%s.gin' % (self._base_name, global_step_val)
config_path = os.path.join(self._output_dir, filename)
with tf.gfile.GFile(config_path, 'w') as f:
f.write(config_str)
if self._summarize_config:
md_config_str = self._markdownify_operative_config_str(config_str)
summary_metadata = summary_pb2.SummaryMetadata()
summary_metadata.plugin_data.plugin_name = 'text'
summary_metadata.plugin_data.content = b'{}'
text_tensor = tf.make_tensor_proto(md_config_str)
summary = summary_pb2.Summary()
summary.value.add(
tag='gin/' + self._base_name,
tensor=text_tensor,
metadata=summary_metadata)
if not self._summary_writer:
# Creating the FileWriter also creates the events file, so it should be
# done here (where it is most likely to only occur on chief workers), as
# opposed to in the constructor.
self._summary_writer = tf.summary.FileWriterCache.get(self._output_dir)
self._summary_writer.add_summary(summary, global_step_val)
self._summary_writer.flush()
|
def after_create_session(self, session=None, coord=None):
"""Writes out Gin's operative config, and maybe adds a summary of it."""
config_str = config.operative_config_str()
if not tf.gfile.IsDirectory(self._output_dir):
tf.gfile.MakeDirs(self._output_dir)
global_step_val = 0
if session is not None:
global_step = tf.train.get_global_step()
if global_step is not None:
global_step_val = session.run(global_step)
filename = '%s-%s.gin' % (self._base_name, global_step_val)
config_path = os.path.join(self._output_dir, filename)
with tf.gfile.GFile(config_path, 'w') as f:
f.write(config_str)
if self._summarize_config:
md_config_str = self._markdownify_operative_config_str(config_str)
summary_metadata = summary_pb2.SummaryMetadata()
summary_metadata.plugin_data.plugin_name = 'text'
summary_metadata.plugin_data.content = b'{}'
text_tensor = tf.make_tensor_proto(md_config_str)
summary = summary_pb2.Summary()
summary.value.add(
tag='gin/' + self._base_name,
tensor=text_tensor,
metadata=summary_metadata)
if not self._summary_writer:
# Creating the FileWriter also creates the events file, so it should be
# done here (where it is most likely to only occur on chief workers), as
# opposed to in the constructor.
self._summary_writer = tf.summary.FileWriterCache.get(self._output_dir)
self._summary_writer.add_summary(summary, global_step_val)
self._summary_writer.flush()
|
[
"Writes",
"out",
"Gin",
"s",
"operative",
"config",
"and",
"maybe",
"adds",
"a",
"summary",
"of",
"it",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/tf/utils.py#L108-L140
|
[
"def",
"after_create_session",
"(",
"self",
",",
"session",
"=",
"None",
",",
"coord",
"=",
"None",
")",
":",
"config_str",
"=",
"config",
".",
"operative_config_str",
"(",
")",
"if",
"not",
"tf",
".",
"gfile",
".",
"IsDirectory",
"(",
"self",
".",
"_output_dir",
")",
":",
"tf",
".",
"gfile",
".",
"MakeDirs",
"(",
"self",
".",
"_output_dir",
")",
"global_step_val",
"=",
"0",
"if",
"session",
"is",
"not",
"None",
":",
"global_step",
"=",
"tf",
".",
"train",
".",
"get_global_step",
"(",
")",
"if",
"global_step",
"is",
"not",
"None",
":",
"global_step_val",
"=",
"session",
".",
"run",
"(",
"global_step",
")",
"filename",
"=",
"'%s-%s.gin'",
"%",
"(",
"self",
".",
"_base_name",
",",
"global_step_val",
")",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_output_dir",
",",
"filename",
")",
"with",
"tf",
".",
"gfile",
".",
"GFile",
"(",
"config_path",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"config_str",
")",
"if",
"self",
".",
"_summarize_config",
":",
"md_config_str",
"=",
"self",
".",
"_markdownify_operative_config_str",
"(",
"config_str",
")",
"summary_metadata",
"=",
"summary_pb2",
".",
"SummaryMetadata",
"(",
")",
"summary_metadata",
".",
"plugin_data",
".",
"plugin_name",
"=",
"'text'",
"summary_metadata",
".",
"plugin_data",
".",
"content",
"=",
"b'{}'",
"text_tensor",
"=",
"tf",
".",
"make_tensor_proto",
"(",
"md_config_str",
")",
"summary",
"=",
"summary_pb2",
".",
"Summary",
"(",
")",
"summary",
".",
"value",
".",
"add",
"(",
"tag",
"=",
"'gin/'",
"+",
"self",
".",
"_base_name",
",",
"tensor",
"=",
"text_tensor",
",",
"metadata",
"=",
"summary_metadata",
")",
"if",
"not",
"self",
".",
"_summary_writer",
":",
"# Creating the FileWriter also creates the events file, so it should be",
"# done here (where it is most likely to only occur on chief workers), as",
"# opposed to in the constructor.",
"self",
".",
"_summary_writer",
"=",
"tf",
".",
"summary",
".",
"FileWriterCache",
".",
"get",
"(",
"self",
".",
"_output_dir",
")",
"self",
".",
"_summary_writer",
".",
"add_summary",
"(",
"summary",
",",
"global_step_val",
")",
"self",
".",
"_summary_writer",
".",
"flush",
"(",
")"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
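A hedged sketch of how this hook might be attached in a TF1-era training loop. The constructor arguments are inferred from the attributes the method reads (_output_dir, _summarize_config) and may not match the actual signature; treat this as an illustration, not a reference.

import gin.tf
import tensorflow as tf  # TF1-style API, matching the tf.gfile usage above

# Assumed signature, inferred from self._output_dir / self._summarize_config.
hook = gin.tf.GinConfigSaverHook('/tmp/model_dir', summarize_config=True)
with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
    pass  # training steps would go here; the hook fires after session creation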
test
|
_find_class_construction_fn
|
Find the first __init__ or __new__ method in the given class's MRO.
|
gin/config.py
|
def _find_class_construction_fn(cls):
"""Find the first __init__ or __new__ method in the given class's MRO."""
for base in type.mro(cls):
if '__init__' in base.__dict__:
return base.__init__
if '__new__' in base.__dict__:
return base.__new__
|
def _find_class_construction_fn(cls):
"""Find the first __init__ or __new__ method in the given class's MRO."""
for base in type.mro(cls):
if '__init__' in base.__dict__:
return base.__init__
if '__new__' in base.__dict__:
return base.__new__
|
[
"Find",
"the",
"first",
"__init__",
"or",
"__new__",
"method",
"in",
"the",
"given",
"class",
"s",
"MRO",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config.py#L155-L161
|
[
"def",
"_find_class_construction_fn",
"(",
"cls",
")",
":",
"for",
"base",
"in",
"type",
".",
"mro",
"(",
"cls",
")",
":",
"if",
"'__init__'",
"in",
"base",
".",
"__dict__",
":",
"return",
"base",
".",
"__init__",
"if",
"'__new__'",
"in",
"base",
".",
"__dict__",
":",
"return",
"base",
".",
"__new__"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
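A standalone check of the MRO walk: for a class that defines neither __init__ nor __new__, the nearest ancestor's constructor is returned.

def find_class_construction_fn(cls):
    for base in type.mro(cls):
        if '__init__' in base.__dict__:
            return base.__init__
        if '__new__' in base.__dict__:
            return base.__new__

class Base(object):
    def __init__(self):
        pass

class Child(Base):  # defines neither __init__ nor __new__
    pass

assert find_class_construction_fn(Child) is Base.__init__
assert find_class_construction_fn(object) is object.__init__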
test
|
_ensure_wrappability
|
Make sure `fn` can be wrapped cleanly by functools.wraps.
|
gin/config.py
|
def _ensure_wrappability(fn):
"""Make sure `fn` can be wrapped cleanly by functools.wraps."""
# Handle "wrapped_descriptor" and "method-wrapper" types.
if isinstance(fn, (type(object.__init__), type(object.__call__))):
# pylint: disable=unnecessary-lambda
wrappable_fn = lambda *args, **kwargs: fn(*args, **kwargs)
wrappable_fn.__name__ = fn.__name__
wrappable_fn.__doc__ = fn.__doc__
wrappable_fn.__module__ = '' # These types have no __module__, sigh.
wrappable_fn.__wrapped__ = fn
return wrappable_fn
# Otherwise we're good to go...
return fn
|
def _ensure_wrappability(fn):
"""Make sure `fn` can be wrapped cleanly by functools.wraps."""
# Handle "wrapped_descriptor" and "method-wrapper" types.
if isinstance(fn, (type(object.__init__), type(object.__call__))):
# pylint: disable=unnecessary-lambda
wrappable_fn = lambda *args, **kwargs: fn(*args, **kwargs)
wrappable_fn.__name__ = fn.__name__
wrappable_fn.__doc__ = fn.__doc__
wrappable_fn.__module__ = '' # These types have no __module__, sigh.
wrappable_fn.__wrapped__ = fn
return wrappable_fn
# Otherwise we're good to go...
return fn
|
[
"Make",
"sure",
"fn",
"can",
"be",
"wrapped",
"cleanly",
"by",
"functools",
".",
"wraps",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config.py#L164-L177
|
[
"def",
"_ensure_wrappability",
"(",
"fn",
")",
":",
"# Handle \"wrapped_descriptor\" and \"method-wrapper\" types.",
"if",
"isinstance",
"(",
"fn",
",",
"(",
"type",
"(",
"object",
".",
"__init__",
")",
",",
"type",
"(",
"object",
".",
"__call__",
")",
")",
")",
":",
"# pylint: disable=unnecessary-lambda",
"wrappable_fn",
"=",
"lambda",
"*",
"args",
",",
"*",
"*",
"kwargs",
":",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"wrappable_fn",
".",
"__name__",
"=",
"fn",
".",
"__name__",
"wrappable_fn",
".",
"__doc__",
"=",
"fn",
".",
"__doc__",
"wrappable_fn",
".",
"__module__",
"=",
"''",
"# These types have no __module__, sigh.",
"wrappable_fn",
".",
"__wrapped__",
"=",
"fn",
"return",
"wrappable_fn",
"# Otherwise we're good to go...",
"return",
"fn"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
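The types being special-cased are C-level callables such as object.__init__, which historically could not be decorated cleanly by functools.wraps because they lack attributes like __module__; wrapping them in a lambda restores the attributes wraps expects. A quick standalone probe of the type check:

# 'wrapper_descriptor' and 'method-wrapper' are the C-level callables the
# isinstance check above targets:
assert type(object.__init__).__name__ == 'wrapper_descriptor'
assert type(object.__call__).__name__ == 'method-wrapper'

def plain():
    pass

# Plain Python functions fail the isinstance check and pass through unchanged.
assert not isinstance(plain, (type(object.__init__), type(object.__call__)))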
test
|
_decorate_fn_or_cls
|
Decorate a function or class with the given decorator.
When `fn_or_cls` is a function, applies `decorator` to the function and
returns the (decorated) result.
When `fn_or_cls` is a class and the `subclass` parameter is `False`, this will
replace `fn_or_cls.__init__` with the result of applying `decorator` to it.
When `fn_or_cls` is a class and `subclass` is `True`, this will subclass the
class, but with `__init__` defined to be the result of applying `decorator` to
`fn_or_cls.__init__`. The decorated class has metadata (docstring, name, and
module information) copied over from `fn_or_cls`. The goal is to provide a
decorated class that behaves as much like the original as possible, without
modifying it (for example, inspection operations using `isinstance` or
`issubclass` should behave the same way as on the original class).
Args:
decorator: The decorator to use.
fn_or_cls: The function or class to decorate.
subclass: Whether to decorate classes by subclassing. This argument is
ignored if `fn_or_cls` is not a class.
Returns:
The decorated function or class.
|
gin/config.py
|
def _decorate_fn_or_cls(decorator, fn_or_cls, subclass=False):
"""Decorate a function or class with the given decorator.
When `fn_or_cls` is a function, applies `decorator` to the function and
returns the (decorated) result.
When `fn_or_cls` is a class and the `subclass` parameter is `False`, this will
replace `fn_or_cls.__init__` with the result of applying `decorator` to it.
When `fn_or_cls` is a class and `subclass` is `True`, this will subclass the
class, but with `__init__` defined to be the result of applying `decorator` to
`fn_or_cls.__init__`. The decorated class has metadata (docstring, name, and
module information) copied over from `fn_or_cls`. The goal is to provide a
decorated class that behaves as much like the original as possible, without
modifying it (for example, inspection operations using `isinstance` or
`issubclass` should behave the same way as on the original class).
Args:
decorator: The decorator to use.
fn_or_cls: The function or class to decorate.
subclass: Whether to decorate classes by subclassing. This argument is
ignored if `fn_or_cls` is not a class.
Returns:
The decorated function or class.
"""
if not inspect.isclass(fn_or_cls):
return decorator(_ensure_wrappability(fn_or_cls))
construction_fn = _find_class_construction_fn(fn_or_cls)
if subclass:
class DecoratedClass(fn_or_cls):
__doc__ = fn_or_cls.__doc__
__module__ = fn_or_cls.__module__
DecoratedClass.__name__ = fn_or_cls.__name__
if six.PY3:
DecoratedClass.__qualname__ = fn_or_cls.__qualname__
cls = DecoratedClass
else:
cls = fn_or_cls
decorated_fn = decorator(_ensure_wrappability(construction_fn))
if construction_fn.__name__ == '__new__':
decorated_fn = staticmethod(decorated_fn)
setattr(cls, construction_fn.__name__, decorated_fn)
return cls
|
def _decorate_fn_or_cls(decorator, fn_or_cls, subclass=False):
"""Decorate a function or class with the given decorator.
When `fn_or_cls` is a function, applies `decorator` to the function and
returns the (decorated) result.
When `fn_or_cls` is a class and the `subclass` parameter is `False`, this will
replace `fn_or_cls.__init__` with the result of applying `decorator` to it.
When `fn_or_cls` is a class and `subclass` is `True`, this will subclass the
class, but with `__init__` defined to be the result of applying `decorator` to
`fn_or_cls.__init__`. The decorated class has metadata (docstring, name, and
module information) copied over from `fn_or_cls`. The goal is to provide a
decorated class that behaves as much like the original as possible, without
modifying it (for example, inspection operations using `isinstance` or
`issubclass` should behave the same way as on the original class).
Args:
decorator: The decorator to use.
fn_or_cls: The function or class to decorate.
subclass: Whether to decorate classes by subclassing. This argument is
ignored if `fn_or_cls` is not a class.
Returns:
The decorated function or class.
"""
if not inspect.isclass(fn_or_cls):
return decorator(_ensure_wrappability(fn_or_cls))
construction_fn = _find_class_construction_fn(fn_or_cls)
if subclass:
class DecoratedClass(fn_or_cls):
__doc__ = fn_or_cls.__doc__
__module__ = fn_or_cls.__module__
DecoratedClass.__name__ = fn_or_cls.__name__
if six.PY3:
DecoratedClass.__qualname__ = fn_or_cls.__qualname__
cls = DecoratedClass
else:
cls = fn_or_cls
decorated_fn = decorator(_ensure_wrappability(construction_fn))
if construction_fn.__name__ == '__new__':
decorated_fn = staticmethod(decorated_fn)
setattr(cls, construction_fn.__name__, decorated_fn)
return cls
|
[
"Decorate",
"a",
"function",
"or",
"class",
"with",
"the",
"given",
"decorator",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config.py#L180-L226
|
[
"def",
"_decorate_fn_or_cls",
"(",
"decorator",
",",
"fn_or_cls",
",",
"subclass",
"=",
"False",
")",
":",
"if",
"not",
"inspect",
".",
"isclass",
"(",
"fn_or_cls",
")",
":",
"return",
"decorator",
"(",
"_ensure_wrappability",
"(",
"fn_or_cls",
")",
")",
"construction_fn",
"=",
"_find_class_construction_fn",
"(",
"fn_or_cls",
")",
"if",
"subclass",
":",
"class",
"DecoratedClass",
"(",
"fn_or_cls",
")",
":",
"__doc__",
"=",
"fn_or_cls",
".",
"__doc__",
"__module__",
"=",
"fn_or_cls",
".",
"__module__",
"DecoratedClass",
".",
"__name__",
"=",
"fn_or_cls",
".",
"__name__",
"if",
"six",
".",
"PY3",
":",
"DecoratedClass",
".",
"__qualname__",
"=",
"fn_or_cls",
".",
"__qualname__",
"cls",
"=",
"DecoratedClass",
"else",
":",
"cls",
"=",
"fn_or_cls",
"decorated_fn",
"=",
"decorator",
"(",
"_ensure_wrappability",
"(",
"construction_fn",
")",
")",
"if",
"construction_fn",
".",
"__name__",
"==",
"'__new__'",
":",
"decorated_fn",
"=",
"staticmethod",
"(",
"decorated_fn",
")",
"setattr",
"(",
"cls",
",",
"construction_fn",
".",
"__name__",
",",
"decorated_fn",
")",
"return",
"cls"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
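A condensed standalone sketch of the subclass=True path: the original class is left untouched, and a subclass carries the decorated __init__, so isinstance checks against the original still pass. The logging decorator and Point class are made up.

def log_calls(fn):
    def wrapper(*args, **kwargs):
        print('calling %s' % fn.__name__)
        return fn(*args, **kwargs)
    return wrapper

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

class DecoratedPoint(Point):
    __doc__ = Point.__doc__
    __module__ = Point.__module__
DecoratedPoint.__name__ = Point.__name__
DecoratedPoint.__init__ = log_calls(Point.__init__)

p = DecoratedPoint(1, 2)  # prints 'calling __init__'
assert isinstance(p, Point) and (p.x, p.y) == (1, 2)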
test
|
_should_skip
|
Checks whether `selector` should be skipped (if unknown).
|
gin/config.py
|
def _should_skip(selector, skip_unknown):
"""Checks whether `selector` should be skipped (if unknown)."""
_validate_skip_unknown(skip_unknown)
if _REGISTRY.matching_selectors(selector):
return False # Never skip known configurables.
if isinstance(skip_unknown, (list, tuple, set)):
return selector in skip_unknown
return skip_unknown
|
def _should_skip(selector, skip_unknown):
"""Checks whether `selector` should be skipped (if unknown)."""
_validate_skip_unknown(skip_unknown)
if _REGISTRY.matching_selectors(selector):
return False # Never skip known configurables.
if isinstance(skip_unknown, (list, tuple, set)):
return selector in skip_unknown
return skip_unknown
|
[
"Checks",
"whether",
"selector",
"should",
"be",
"skipped",
"(",
"if",
"unknown",
")",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config.py#L382-L389
|
[
"def",
"_should_skip",
"(",
"selector",
",",
"skip_unknown",
")",
":",
"_validate_skip_unknown",
"(",
"skip_unknown",
")",
"if",
"_REGISTRY",
".",
"matching_selectors",
"(",
"selector",
")",
":",
"return",
"False",
"# Never skip known configurables.",
"if",
"isinstance",
"(",
"skip_unknown",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"return",
"selector",
"in",
"skip_unknown",
"return",
"skip_unknown"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
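In user-facing terms, this check backs the skip_unknown argument of parse_config: True tolerates any unknown selector, while a collection tolerates only the listed ones. A hedged sketch (the selector name is made up), assuming gin-config is installed:

import gin

# Tolerate any unknown configurable mentioned in the bindings:
gin.parse_config('some_unregistered_fn.param = 1', skip_unknown=True)

# Or tolerate only an explicit allowlist of selectors:
gin.parse_config('some_unregistered_fn.param = 1',
                 skip_unknown=['some_unregistered_fn'])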
test
|
_format_value
|
Returns `value` in a format parseable by `parse_value`, or `None`.
Simply put, this function ensures that when it returns a string value, the
following will hold:
parse_value(_format_value(value)) == value
Args:
value: The value to format.
Returns:
A string representation of `value` when `value` is literally representable,
or `None`.
|
gin/config.py
|
def _format_value(value):
"""Returns `value` in a format parseable by `parse_value`, or `None`.
Simply put, this function ensures that when it returns a string value, the
following will hold:
parse_value(_format_value(value)) == value
Args:
value: The value to format.
Returns:
A string representation of `value` when `value` is literally representable,
or `None`.
"""
literal = repr(value)
try:
if parse_value(literal) == value:
return literal
except SyntaxError:
pass
return None
|
def _format_value(value):
"""Returns `value` in a format parseable by `parse_value`, or `None`.
Simply put, this function ensures that when it returns a string value, the
following will hold:
parse_value(_format_value(value)) == value
Args:
value: The value to format.
Returns:
A string representation of `value` when `value` is literally representable,
or `None`.
"""
literal = repr(value)
try:
if parse_value(literal) == value:
return literal
except SyntaxError:
pass
return None
|
[
"Returns",
"value",
"in",
"a",
"format",
"parseable",
"by",
"parse_value",
"or",
"None",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config.py#L503-L524
|
[
"def",
"_format_value",
"(",
"value",
")",
":",
"literal",
"=",
"repr",
"(",
"value",
")",
"try",
":",
"if",
"parse_value",
"(",
"literal",
")",
"==",
"value",
":",
"return",
"literal",
"except",
"SyntaxError",
":",
"pass",
"return",
"None"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
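The round-trip property in the docstring can be reproduced with ast.literal_eval standing in for the library's parse_value (an approximation: gin's parser also accepts references and macros that literal_eval does not):

import ast

def format_value(value):
    literal = repr(value)
    try:
        if ast.literal_eval(literal) == value:
            return literal
    except (SyntaxError, ValueError):
        pass
    return None

assert format_value([1, 'a', (2.5,)]) == "[1, 'a', (2.5,)]"
assert format_value(object()) is None  # repr is not literally representable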
test
|
clear_config
|
Clears the global configuration.
This clears any parameter values set by `bind_parameter` or `parse_config`, as
well as the set of dynamically imported modules. It does not remove any
configurable functions or classes from the registry of configurables.
Args:
clear_constants: Whether to clear constants created by `constant`. Defaults
to False.
|
gin/config.py
|
def clear_config(clear_constants=False):
"""Clears the global configuration.
This clears any parameter values set by `bind_parameter` or `parse_config`, as
well as the set of dynamically imported modules. It does not remove any
configurable functions or classes from the registry of configurables.
Args:
clear_constants: Whether to clear constants created by `constant`. Defaults
to False.
"""
_set_config_is_locked(False)
_CONFIG.clear()
_SINGLETONS.clear()
if clear_constants:
_CONSTANTS.clear()
else:
saved_constants = _CONSTANTS.copy()
_CONSTANTS.clear() # Clear then redefine constants (re-adding bindings).
for name, value in six.iteritems(saved_constants):
constant(name, value)
_IMPORTED_MODULES.clear()
_OPERATIVE_CONFIG.clear()
|
def clear_config(clear_constants=False):
"""Clears the global configuration.
This clears any parameter values set by `bind_parameter` or `parse_config`, as
well as the set of dynamically imported modules. It does not remove any
configurable functions or classes from the registry of configurables.
Args:
clear_constants: Whether to clear constants created by `constant`. Defaults
to False.
"""
_set_config_is_locked(False)
_CONFIG.clear()
_SINGLETONS.clear()
if clear_constants:
_CONSTANTS.clear()
else:
saved_constants = _CONSTANTS.copy()
_CONSTANTS.clear() # Clear then redefine constants (re-adding bindings).
for name, value in six.iteritems(saved_constants):
constant(name, value)
_IMPORTED_MODULES.clear()
_OPERATIVE_CONFIG.clear()
|
[
"Clears",
"the",
"global",
"configuration",
"."
] |
google/gin-config
|
python
|
https://github.com/google/gin-config/blob/17a170e0a6711005d1c78e67cf493dc44674d44f/gin/config.py#L540-L562
|
[
"def",
"clear_config",
"(",
"clear_constants",
"=",
"False",
")",
":",
"_set_config_is_locked",
"(",
"False",
")",
"_CONFIG",
".",
"clear",
"(",
")",
"_SINGLETONS",
".",
"clear",
"(",
")",
"if",
"clear_constants",
":",
"_CONSTANTS",
".",
"clear",
"(",
")",
"else",
":",
"saved_constants",
"=",
"_CONSTANTS",
".",
"copy",
"(",
")",
"_CONSTANTS",
".",
"clear",
"(",
")",
"# Clear then redefine constants (re-adding bindings).",
"for",
"name",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"saved_constants",
")",
":",
"constant",
"(",
"name",
",",
"value",
")",
"_IMPORTED_MODULES",
".",
"clear",
"(",
")",
"_OPERATIVE_CONFIG",
".",
"clear",
"(",
")"
] |
17a170e0a6711005d1c78e67cf493dc44674d44f
|
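A hedged usage note: constants survive an ordinary clear and are only dropped when explicitly requested. Assumes gin-config is installed; the constant name is made up.

import gin

gin.constant('SEED', 42)
gin.clear_config()                      # bindings cleared; 'SEED' survives
gin.clear_config(clear_constants=True)  # now 'SEED' is gone too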