| partition | func_name | docstring | path | original_string | code | docstring_tokens | repo | language | url | code_tokens | sha |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
remove_nodes
|
Create DataFrames of nodes and edges that do not include specified nodes.
Parameters
----------
network : pandana.Network
rm_nodes : array_like
A list, array, Index, or Series of node IDs that should *not*
be saved as part of the Network.
Returns
-------
nodes, edges : pandas.DataFrame
|
pandana/loaders/pandash5.py
|
def remove_nodes(network, rm_nodes):
    """
    Create DataFrames of nodes and edges that do not include specified nodes.

    Parameters
    ----------
    network : pandana.Network
    rm_nodes : array_like
        A list, array, Index, or Series of node IDs that should *not*
        be saved as part of the Network.

    Returns
    -------
    nodes, edges : pandas.DataFrame

    """
    rm_nodes = set(rm_nodes)

    ndf = network.nodes_df
    edf = network.edges_df

    nodes_to_keep = ~ndf.index.isin(rm_nodes)
    edges_to_keep = ~(edf['from'].isin(rm_nodes) | edf['to'].isin(rm_nodes))

    return ndf.loc[nodes_to_keep], edf.loc[edges_to_keep]
|
def remove_nodes(network, rm_nodes):
    """
    Create DataFrames of nodes and edges that do not include specified nodes.

    Parameters
    ----------
    network : pandana.Network
    rm_nodes : array_like
        A list, array, Index, or Series of node IDs that should *not*
        be saved as part of the Network.

    Returns
    -------
    nodes, edges : pandas.DataFrame

    """
    rm_nodes = set(rm_nodes)

    ndf = network.nodes_df
    edf = network.edges_df

    nodes_to_keep = ~ndf.index.isin(rm_nodes)
    edges_to_keep = ~(edf['from'].isin(rm_nodes) | edf['to'].isin(rm_nodes))

    return ndf.loc[nodes_to_keep], edf.loc[edges_to_keep]
|
[
"Create",
"DataFrames",
"of",
"nodes",
"and",
"edges",
"that",
"do",
"not",
"include",
"specified",
"nodes",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/pandash5.py#L4-L27
|
[
"def",
"remove_nodes",
"(",
"network",
",",
"rm_nodes",
")",
":",
"rm_nodes",
"=",
"set",
"(",
"rm_nodes",
")",
"ndf",
"=",
"network",
".",
"nodes_df",
"edf",
"=",
"network",
".",
"edges_df",
"nodes_to_keep",
"=",
"~",
"ndf",
".",
"index",
".",
"isin",
"(",
"rm_nodes",
")",
"edges_to_keep",
"=",
"~",
"(",
"edf",
"[",
"'from'",
"]",
".",
"isin",
"(",
"rm_nodes",
")",
"|",
"edf",
"[",
"'to'",
"]",
".",
"isin",
"(",
"rm_nodes",
")",
")",
"return",
"ndf",
".",
"loc",
"[",
"nodes_to_keep",
"]",
",",
"edf",
".",
"loc",
"[",
"edges_to_keep",
"]"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
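A minimal usage sketch for this record's function (not part of the dataset row). It assumes an existing pandana.Network instance `net`; the node IDs below are placeholders.

```python
# Hypothetical example: drop two nodes (and any edges touching them)
# before saving a network. `net` is an assumed pandana.Network instance.
from pandana.loaders.pandash5 import remove_nodes

nodes, edges = remove_nodes(net, [1608697090, 1608697091])
```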
test
|
network_to_pandas_hdf5
|
Save a Network's data to a Pandas HDFStore.
Parameters
----------
network : pandana.Network
filename : str
rm_nodes : array_like
A list, array, Index, or Series of node IDs that should *not*
be saved as part of the Network.
|
pandana/loaders/pandash5.py
|
def network_to_pandas_hdf5(network, filename, rm_nodes=None):
    """
    Save a Network's data to a Pandas HDFStore.

    Parameters
    ----------
    network : pandana.Network
    filename : str
    rm_nodes : array_like
        A list, array, Index, or Series of node IDs that should *not*
        be saved as part of the Network.

    """
    if rm_nodes is not None:
        nodes, edges = remove_nodes(network, rm_nodes)
    else:
        nodes, edges = network.nodes_df, network.edges_df

    with pd.HDFStore(filename, mode='w') as store:
        store['nodes'] = nodes
        store['edges'] = edges
        store['two_way'] = pd.Series([network._twoway])
        store['impedance_names'] = pd.Series(network.impedance_names)
|
def network_to_pandas_hdf5(network, filename, rm_nodes=None):
    """
    Save a Network's data to a Pandas HDFStore.

    Parameters
    ----------
    network : pandana.Network
    filename : str
    rm_nodes : array_like
        A list, array, Index, or Series of node IDs that should *not*
        be saved as part of the Network.

    """
    if rm_nodes is not None:
        nodes, edges = remove_nodes(network, rm_nodes)
    else:
        nodes, edges = network.nodes_df, network.edges_df

    with pd.HDFStore(filename, mode='w') as store:
        store['nodes'] = nodes
        store['edges'] = edges
        store['two_way'] = pd.Series([network._twoway])
        store['impedance_names'] = pd.Series(network.impedance_names)
|
[
"Save",
"a",
"Network",
"s",
"data",
"to",
"a",
"Pandas",
"HDFStore",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/pandash5.py#L30-L53
|
[
"def",
"network_to_pandas_hdf5",
"(",
"network",
",",
"filename",
",",
"rm_nodes",
"=",
"None",
")",
":",
"if",
"rm_nodes",
"is",
"not",
"None",
":",
"nodes",
",",
"edges",
"=",
"remove_nodes",
"(",
"network",
",",
"rm_nodes",
")",
"else",
":",
"nodes",
",",
"edges",
"=",
"network",
".",
"nodes_df",
",",
"network",
".",
"edges_df",
"with",
"pd",
".",
"HDFStore",
"(",
"filename",
",",
"mode",
"=",
"'w'",
")",
"as",
"store",
":",
"store",
"[",
"'nodes'",
"]",
"=",
"nodes",
"store",
"[",
"'edges'",
"]",
"=",
"edges",
"store",
"[",
"'two_way'",
"]",
"=",
"pd",
".",
"Series",
"(",
"[",
"network",
".",
"_twoway",
"]",
")",
"store",
"[",
"'impedance_names'",
"]",
"=",
"pd",
".",
"Series",
"(",
"network",
".",
"impedance_names",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
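A minimal usage sketch (not part of the dataset row), assuming an existing pandana.Network instance `net`; the filename and node ID are placeholders.

```python
# Hypothetical example: write `net` to an HDF5 file, excluding one node.
from pandana.loaders.pandash5 import network_to_pandas_hdf5

network_to_pandas_hdf5(net, 'network.h5', rm_nodes=[1608697090])
```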
test
|
network_from_pandas_hdf5
|
Build a Network from data in a Pandas HDFStore.
Parameters
----------
cls : class
Class to instantiate, usually pandana.Network.
filename : str
Returns
-------
network : pandana.Network
|
pandana/loaders/pandash5.py
|
def network_from_pandas_hdf5(cls, filename):
    """
    Build a Network from data in a Pandas HDFStore.

    Parameters
    ----------
    cls : class
        Class to instantiate, usually pandana.Network.
    filename : str

    Returns
    -------
    network : pandana.Network

    """
    with pd.HDFStore(filename) as store:
        nodes = store['nodes']
        edges = store['edges']
        two_way = store['two_way'][0]
        imp_names = store['impedance_names'].tolist()

    return cls(
        nodes['x'], nodes['y'], edges['from'], edges['to'],
        edges[imp_names], twoway=two_way)
|
def network_from_pandas_hdf5(cls, filename):
    """
    Build a Network from data in a Pandas HDFStore.

    Parameters
    ----------
    cls : class
        Class to instantiate, usually pandana.Network.
    filename : str

    Returns
    -------
    network : pandana.Network

    """
    with pd.HDFStore(filename) as store:
        nodes = store['nodes']
        edges = store['edges']
        two_way = store['two_way'][0]
        imp_names = store['impedance_names'].tolist()

    return cls(
        nodes['x'], nodes['y'], edges['from'], edges['to'],
        edges[imp_names], twoway=two_way)
|
[
"Build",
"a",
"Network",
"from",
"data",
"in",
"a",
"Pandas",
"HDFStore",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/pandash5.py#L56-L79
|
[
"def",
"network_from_pandas_hdf5",
"(",
"cls",
",",
"filename",
")",
":",
"with",
"pd",
".",
"HDFStore",
"(",
"filename",
")",
"as",
"store",
":",
"nodes",
"=",
"store",
"[",
"'nodes'",
"]",
"edges",
"=",
"store",
"[",
"'edges'",
"]",
"two_way",
"=",
"store",
"[",
"'two_way'",
"]",
"[",
"0",
"]",
"imp_names",
"=",
"store",
"[",
"'impedance_names'",
"]",
".",
"tolist",
"(",
")",
"return",
"cls",
"(",
"nodes",
"[",
"'x'",
"]",
",",
"nodes",
"[",
"'y'",
"]",
",",
"edges",
"[",
"'from'",
"]",
",",
"edges",
"[",
"'to'",
"]",
",",
"edges",
"[",
"imp_names",
"]",
",",
"twoway",
"=",
"two_way",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
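A minimal usage sketch (not part of the dataset row); it assumes the file written in the previous record's example exists.

```python
# Hypothetical example: rebuild a Network from the HDF5 file above.
from pandana import Network
from pandana.loaders.pandash5 import network_from_pandas_hdf5

net2 = network_from_pandas_hdf5(Network, 'network.h5')
```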
test
|
Network.bbox
|
The bounding box for nodes in this network [xmin, ymin, xmax, ymax]
|
pandana/network.py
|
def bbox(self):
    """
    The bounding box for nodes in this network [xmin, ymin, xmax, ymax]
    """
    return [self.nodes_df.x.min(), self.nodes_df.y.min(),
            self.nodes_df.x.max(), self.nodes_df.y.max()]
|
def bbox(self):
    """
    The bounding box for nodes in this network [xmin, ymin, xmax, ymax]
    """
    return [self.nodes_df.x.min(), self.nodes_df.y.min(),
            self.nodes_df.x.max(), self.nodes_df.y.max()]
|
[
"The",
"bounding",
"box",
"for",
"nodes",
"in",
"this",
"network",
"[",
"xmin",
"ymin",
"xmax",
"ymax",
"]"
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/network.py#L151-L156
|
[
"def",
"bbox",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"nodes_df",
".",
"x",
".",
"min",
"(",
")",
",",
"self",
".",
"nodes_df",
".",
"y",
".",
"min",
"(",
")",
",",
"self",
".",
"nodes_df",
".",
"x",
".",
"max",
"(",
")",
",",
"self",
".",
"nodes_df",
".",
"y",
".",
"max",
"(",
")",
"]"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
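A one-line usage sketch (not part of the dataset row), assuming `bbox` is exposed as a property on an existing Network instance `net`, as in pandana's Network class.

```python
# Hypothetical example: unpack the node bounding box of `net`.
xmin, ymin, xmax, ymax = net.bbox
```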
test
|
Network.set
|
Characterize urban space with a variable that is related to nodes in
the network.
Parameters
----------
node_ids : Pandas Series, int
A series of node_ids which are usually computed using
get_node_ids on this object.
variable : Pandas Series, numeric, optional
A series which represents some variable defined in urban space.
It could be the location of buildings, or the income of all
households - just about anything can be aggregated using the
network queries provided here and this provides the api to set
the variable at its disaggregate locations. Note that node_id
and variable should have the same index (although the index is
not actually used). If variable is not set, then it is assumed
that the variable is all "ones" at the location specified by
node_ids. This could be, for instance, the location of all
coffee shops which don't really have a variable to aggregate. The
variable is connected to the closest node in the Pandana network
which assumes no impedance between the location of the variable
and the location of the closest network node.
name : string, optional
Name the variable. This is optional in the sense that if you don't
specify it, the default name will be used. Since the same
default name is used by aggregate on this object, you can
alternate between characterize and aggregate calls without
setting names.
Returns
-------
Nothing
|
pandana/network.py
|
def set(self, node_ids, variable=None, name="tmp"):
    """
    Characterize urban space with a variable that is related to nodes in
    the network.

    Parameters
    ----------
    node_ids : Pandas Series, int
        A series of node_ids which are usually computed using
        get_node_ids on this object.
    variable : Pandas Series, numeric, optional
        A series which represents some variable defined in urban space.
        It could be the location of buildings, or the income of all
        households - just about anything can be aggregated using the
        network queries provided here and this provides the api to set
        the variable at its disaggregate locations. Note that node_id
        and variable should have the same index (although the index is
        not actually used). If variable is not set, then it is assumed
        that the variable is all "ones" at the location specified by
        node_ids. This could be, for instance, the location of all
        coffee shops which don't really have a variable to aggregate. The
        variable is connected to the closest node in the Pandana network
        which assumes no impedance between the location of the variable
        and the location of the closest network node.
    name : string, optional
        Name the variable. This is optional in the sense that if you don't
        specify it, the default name will be used. Since the same
        default name is used by aggregate on this object, you can
        alternate between characterize and aggregate calls without
        setting names.

    Returns
    -------
    Nothing

    """
    if variable is None:
        variable = pd.Series(np.ones(len(node_ids)), index=node_ids.index)

    df = pd.DataFrame({name: variable,
                       "node_idx": self._node_indexes(node_ids)})

    length = len(df)
    df = df.dropna(how="any")
    newl = len(df)
    if length - newl > 0:
        print(
            "Removed %d rows because they contain missing values" %
            (length - newl))

    self.variable_names.add(name)
    self.net.initialize_access_var(name.encode('utf-8'),
                                   df.node_idx.values.astype('int'),
                                   df[name].values.astype('double'))
|
def set(self, node_ids, variable=None, name="tmp"):
    """
    Characterize urban space with a variable that is related to nodes in
    the network.

    Parameters
    ----------
    node_ids : Pandas Series, int
        A series of node_ids which are usually computed using
        get_node_ids on this object.
    variable : Pandas Series, numeric, optional
        A series which represents some variable defined in urban space.
        It could be the location of buildings, or the income of all
        households - just about anything can be aggregated using the
        network queries provided here and this provides the api to set
        the variable at its disaggregate locations. Note that node_id
        and variable should have the same index (although the index is
        not actually used). If variable is not set, then it is assumed
        that the variable is all "ones" at the location specified by
        node_ids. This could be, for instance, the location of all
        coffee shops which don't really have a variable to aggregate. The
        variable is connected to the closest node in the Pandana network
        which assumes no impedance between the location of the variable
        and the location of the closest network node.
    name : string, optional
        Name the variable. This is optional in the sense that if you don't
        specify it, the default name will be used. Since the same
        default name is used by aggregate on this object, you can
        alternate between characterize and aggregate calls without
        setting names.

    Returns
    -------
    Nothing

    """
    if variable is None:
        variable = pd.Series(np.ones(len(node_ids)), index=node_ids.index)

    df = pd.DataFrame({name: variable,
                       "node_idx": self._node_indexes(node_ids)})

    length = len(df)
    df = df.dropna(how="any")
    newl = len(df)
    if length - newl > 0:
        print(
            "Removed %d rows because they contain missing values" %
            (length - newl))

    self.variable_names.add(name)
    self.net.initialize_access_var(name.encode('utf-8'),
                                   df.node_idx.values.astype('int'),
                                   df[name].values.astype('double'))
|
[
"Characterize",
"urban",
"space",
"with",
"a",
"variable",
"that",
"is",
"related",
"to",
"nodes",
"in",
"the",
"network",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/network.py#L188-L242
|
[
"def",
"set",
"(",
"self",
",",
"node_ids",
",",
"variable",
"=",
"None",
",",
"name",
"=",
"\"tmp\"",
")",
":",
"if",
"variable",
"is",
"None",
":",
"variable",
"=",
"pd",
".",
"Series",
"(",
"np",
".",
"ones",
"(",
"len",
"(",
"node_ids",
")",
")",
",",
"index",
"=",
"node_ids",
".",
"index",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"name",
":",
"variable",
",",
"\"node_idx\"",
":",
"self",
".",
"_node_indexes",
"(",
"node_ids",
")",
"}",
")",
"length",
"=",
"len",
"(",
"df",
")",
"df",
"=",
"df",
".",
"dropna",
"(",
"how",
"=",
"\"any\"",
")",
"newl",
"=",
"len",
"(",
"df",
")",
"if",
"length",
"-",
"newl",
">",
"0",
":",
"print",
"(",
"\"Removed %d rows because they contain missing values\"",
"%",
"(",
"length",
"-",
"newl",
")",
")",
"self",
".",
"variable_names",
".",
"add",
"(",
"name",
")",
"self",
".",
"net",
".",
"initialize_access_var",
"(",
"name",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"df",
".",
"node_idx",
".",
"values",
".",
"astype",
"(",
"'int'",
")",
",",
"df",
"[",
"name",
"]",
".",
"values",
".",
"astype",
"(",
"'double'",
")",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
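A minimal usage sketch (not part of the dataset row). `net` is an assumed Network instance and `households` an assumed DataFrame with x/y coordinate columns and an `income` column.

```python
# Hypothetical example: attach household incomes to their nearest nodes
# so they can later be aggregated over the network.
node_ids = net.get_node_ids(households.x, households.y)
net.set(node_ids, variable=households.income, name='income')
```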
test
|
Network.aggregate
|
Aggregate information for every source node in the network - this is
really the main purpose of this library. This allows you to touch
the data specified by calling set and perform some aggregation on it
within the specified distance. For instance, summing the population
within 1000 meters.
Parameters
----------
distance : float
The maximum distance to aggregate data within. 'distance' can
represent any impedance unit that you have set as your edge
weight. This will usually be a distance unit in meters however
if you have customized the impedance this could be in other
units such as utility or time etc.
type : string
The type of aggregation, can be one of "ave", "sum", "std",
"count", and now "min", "25pct", "median", "75pct", and "max" will
compute the associated quantiles. (Quantiles are computed by
sorting so might be slower than the others.)
decay : string
The type of decay to apply, which makes things that are further
away count less in the aggregation - must be one of "linear",
"exponential" or "flat" (which means no decay). Linear is the
fastest computation to perform. When performing an "ave",
the decay is typically "flat"
imp_name : string, optional
The impedance name to use for the aggregation on this network.
Must be one of the impedance names passed in the constructor of
this object. If not specified, there must be only one impedance
passed in the constructor, which will be used.
name : string, optional
The variable to aggregate. This variable will have been created
and named by a call to set. If not specified, the default
variable name will be used so that the most recent call to set
without giving a name will be the variable used.
Returns
-------
agg : Pandas Series
Returns a Pandas Series for every origin node in the network,
with the index which is the same as the node_ids passed to the
init method and the values are the aggregations for each source
node in the network.
|
pandana/network.py
|
def aggregate(self, distance, type="sum", decay="linear", imp_name=None,
              name="tmp"):
    """
    Aggregate information for every source node in the network - this is
    really the main purpose of this library. This allows you to touch
    the data specified by calling set and perform some aggregation on it
    within the specified distance. For instance, summing the population
    within 1000 meters.

    Parameters
    ----------
    distance : float
        The maximum distance to aggregate data within. 'distance' can
        represent any impedance unit that you have set as your edge
        weight. This will usually be a distance unit in meters however
        if you have customized the impedance this could be in other
        units such as utility or time etc.
    type : string
        The type of aggregation, can be one of "ave", "sum", "std",
        "count", and now "min", "25pct", "median", "75pct", and "max" will
        compute the associated quantiles. (Quantiles are computed by
        sorting so might be slower than the others.)
    decay : string
        The type of decay to apply, which makes things that are further
        away count less in the aggregation - must be one of "linear",
        "exponential" or "flat" (which means no decay). Linear is the
        fastest computation to perform. When performing an "ave",
        the decay is typically "flat"
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.
    name : string, optional
        The variable to aggregate. This variable will have been created
        and named by a call to set. If not specified, the default
        variable name will be used so that the most recent call to set
        without giving a name will be the variable used.

    Returns
    -------
    agg : Pandas Series
        Returns a Pandas Series for every origin node in the network,
        with the index which is the same as the node_ids passed to the
        init method and the values are the aggregations for each source
        node in the network.

    """
    imp_num = self._imp_name_to_num(imp_name)
    type = type.lower()

    if type == "ave":
        type = "mean"  # changed generic ave to mean

    assert name in self.variable_names, "A variable with that name " \
        "has not yet been initialized"

    res = self.net.get_all_aggregate_accessibility_variables(
        distance, name.encode('utf-8'), type.encode('utf-8'),
        decay.encode('utf-8'), imp_num)

    return pd.Series(res, index=self.node_ids)
|
def aggregate(self, distance, type="sum", decay="linear", imp_name=None,
              name="tmp"):
    """
    Aggregate information for every source node in the network - this is
    really the main purpose of this library. This allows you to touch
    the data specified by calling set and perform some aggregation on it
    within the specified distance. For instance, summing the population
    within 1000 meters.

    Parameters
    ----------
    distance : float
        The maximum distance to aggregate data within. 'distance' can
        represent any impedance unit that you have set as your edge
        weight. This will usually be a distance unit in meters however
        if you have customized the impedance this could be in other
        units such as utility or time etc.
    type : string
        The type of aggregation, can be one of "ave", "sum", "std",
        "count", and now "min", "25pct", "median", "75pct", and "max" will
        compute the associated quantiles. (Quantiles are computed by
        sorting so might be slower than the others.)
    decay : string
        The type of decay to apply, which makes things that are further
        away count less in the aggregation - must be one of "linear",
        "exponential" or "flat" (which means no decay). Linear is the
        fastest computation to perform. When performing an "ave",
        the decay is typically "flat"
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.
    name : string, optional
        The variable to aggregate. This variable will have been created
        and named by a call to set. If not specified, the default
        variable name will be used so that the most recent call to set
        without giving a name will be the variable used.

    Returns
    -------
    agg : Pandas Series
        Returns a Pandas Series for every origin node in the network,
        with the index which is the same as the node_ids passed to the
        init method and the values are the aggregations for each source
        node in the network.

    """
    imp_num = self._imp_name_to_num(imp_name)
    type = type.lower()

    if type == "ave":
        type = "mean"  # changed generic ave to mean

    assert name in self.variable_names, "A variable with that name " \
        "has not yet been initialized"

    res = self.net.get_all_aggregate_accessibility_variables(
        distance, name.encode('utf-8'), type.encode('utf-8'),
        decay.encode('utf-8'), imp_num)

    return pd.Series(res, index=self.node_ids)
|
[
"Aggregate",
"information",
"for",
"every",
"source",
"node",
"in",
"the",
"network",
"-",
"this",
"is",
"really",
"the",
"main",
"purpose",
"of",
"this",
"library",
".",
"This",
"allows",
"you",
"to",
"touch",
"the",
"data",
"specified",
"by",
"calling",
"set",
"and",
"perform",
"some",
"aggregation",
"on",
"it",
"within",
"the",
"specified",
"distance",
".",
"For",
"instance",
"summing",
"the",
"population",
"within",
"1000",
"meters",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/network.py#L274-L336
|
[
"def",
"aggregate",
"(",
"self",
",",
"distance",
",",
"type",
"=",
"\"sum\"",
",",
"decay",
"=",
"\"linear\"",
",",
"imp_name",
"=",
"None",
",",
"name",
"=",
"\"tmp\"",
")",
":",
"imp_num",
"=",
"self",
".",
"_imp_name_to_num",
"(",
"imp_name",
")",
"type",
"=",
"type",
".",
"lower",
"(",
")",
"if",
"type",
"==",
"\"ave\"",
":",
"type",
"=",
"\"mean\"",
"# changed generic ave to mean",
"assert",
"name",
"in",
"self",
".",
"variable_names",
",",
"\"A variable with that name \"",
"\"has not yet been initialized\"",
"res",
"=",
"self",
".",
"net",
".",
"get_all_aggregate_accessibility_variables",
"(",
"distance",
",",
"name",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"type",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"decay",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"imp_num",
")",
"return",
"pd",
".",
"Series",
"(",
"res",
",",
"index",
"=",
"self",
".",
"node_ids",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
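A minimal usage sketch (not part of the dataset row), continuing the `income` variable set in the earlier example; the 500-unit radius is a placeholder.

```python
# Hypothetical example: sum the `income` variable within 500 impedance
# units of every node, with linear distance decay.
sums = net.aggregate(500, type='sum', decay='linear', name='income')
# `sums` is a Series indexed by the network's node IDs.
```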
test
|
Network.get_node_ids
|
Assign node_ids to data specified by x_col and y_col
Parameters
----------
x_col : Pandas series (float)
A Pandas Series where values specify the x (e.g. longitude)
location of dataset.
y_col : Pandas series (float)
A Pandas Series where values specify the y (e.g. latitude)
location of dataset. x_col and y_col should use the same index.
mapping_distance : float, optional
The maximum distance that will be considered a match between the
x, y data and the nearest node in the network. This will usually
be a distance unit in meters however if you have customized the
impedance this could be in other units such as utility or time
etc. If not specified, every x, y coordinate will be mapped to
the nearest node.
Returns
-------
node_ids : Pandas series (int)
Returns a Pandas Series of node_ids for each x, y in the
input data. The index is the same as the indexes of the
x, y input data, and the values are the mapped node_ids.
If mapping distance is not passed and if there are no nans in the
x, y data, this will be the same length as the x, y data.
If the mapping is imperfect, this function returns all the
input x, y's that were successfully mapped to node_ids.
|
pandana/network.py
|
def get_node_ids(self, x_col, y_col, mapping_distance=None):
    """
    Assign node_ids to data specified by x_col and y_col

    Parameters
    ----------
    x_col : Pandas series (float)
        A Pandas Series where values specify the x (e.g. longitude)
        location of dataset.
    y_col : Pandas series (float)
        A Pandas Series where values specify the y (e.g. latitude)
        location of dataset. x_col and y_col should use the same index.
    mapping_distance : float, optional
        The maximum distance that will be considered a match between the
        x, y data and the nearest node in the network. This will usually
        be a distance unit in meters however if you have customized the
        impedance this could be in other units such as utility or time
        etc. If not specified, every x, y coordinate will be mapped to
        the nearest node.

    Returns
    -------
    node_ids : Pandas series (int)
        Returns a Pandas Series of node_ids for each x, y in the
        input data. The index is the same as the indexes of the
        x, y input data, and the values are the mapped node_ids.
        If mapping distance is not passed and if there are no nans in the
        x, y data, this will be the same length as the x, y data.
        If the mapping is imperfect, this function returns all the
        input x, y's that were successfully mapped to node_ids.

    """
    xys = pd.DataFrame({'x': x_col, 'y': y_col})

    distances, indexes = self.kdtree.query(xys.as_matrix())
    indexes = np.transpose(indexes)[0]
    distances = np.transpose(distances)[0]

    node_ids = self.nodes_df.iloc[indexes].index

    df = pd.DataFrame({"node_id": node_ids, "distance": distances},
                      index=xys.index)

    if mapping_distance is not None:
        df = df[df.distance <= mapping_distance]

    return df.node_id
|
def get_node_ids(self, x_col, y_col, mapping_distance=None):
    """
    Assign node_ids to data specified by x_col and y_col

    Parameters
    ----------
    x_col : Pandas series (float)
        A Pandas Series where values specify the x (e.g. longitude)
        location of dataset.
    y_col : Pandas series (float)
        A Pandas Series where values specify the y (e.g. latitude)
        location of dataset. x_col and y_col should use the same index.
    mapping_distance : float, optional
        The maximum distance that will be considered a match between the
        x, y data and the nearest node in the network. This will usually
        be a distance unit in meters however if you have customized the
        impedance this could be in other units such as utility or time
        etc. If not specified, every x, y coordinate will be mapped to
        the nearest node.

    Returns
    -------
    node_ids : Pandas series (int)
        Returns a Pandas Series of node_ids for each x, y in the
        input data. The index is the same as the indexes of the
        x, y input data, and the values are the mapped node_ids.
        If mapping distance is not passed and if there are no nans in the
        x, y data, this will be the same length as the x, y data.
        If the mapping is imperfect, this function returns all the
        input x, y's that were successfully mapped to node_ids.

    """
    xys = pd.DataFrame({'x': x_col, 'y': y_col})

    distances, indexes = self.kdtree.query(xys.as_matrix())
    indexes = np.transpose(indexes)[0]
    distances = np.transpose(distances)[0]

    node_ids = self.nodes_df.iloc[indexes].index

    df = pd.DataFrame({"node_id": node_ids, "distance": distances},
                      index=xys.index)

    if mapping_distance is not None:
        df = df[df.distance <= mapping_distance]

    return df.node_id
|
[
"Assign",
"node_ids",
"to",
"data",
"specified",
"by",
"x_col",
"and",
"y_col"
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/network.py#L338-L383
|
[
"def",
"get_node_ids",
"(",
"self",
",",
"x_col",
",",
"y_col",
",",
"mapping_distance",
"=",
"None",
")",
":",
"xys",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'x'",
":",
"x_col",
",",
"'y'",
":",
"y_col",
"}",
")",
"distances",
",",
"indexes",
"=",
"self",
".",
"kdtree",
".",
"query",
"(",
"xys",
".",
"as_matrix",
"(",
")",
")",
"indexes",
"=",
"np",
".",
"transpose",
"(",
"indexes",
")",
"[",
"0",
"]",
"distances",
"=",
"np",
".",
"transpose",
"(",
"distances",
")",
"[",
"0",
"]",
"node_ids",
"=",
"self",
".",
"nodes_df",
".",
"iloc",
"[",
"indexes",
"]",
".",
"index",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"\"node_id\"",
":",
"node_ids",
",",
"\"distance\"",
":",
"distances",
"}",
",",
"index",
"=",
"xys",
".",
"index",
")",
"if",
"mapping_distance",
"is",
"not",
"None",
":",
"df",
"=",
"df",
"[",
"df",
".",
"distance",
"<=",
"mapping_distance",
"]",
"return",
"df",
".",
"node_id"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
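A minimal usage sketch (not part of the dataset row); `df` is an assumed DataFrame of point data and 100 is a placeholder cutoff. Note that this snapshot of the code calls `DataFrame.as_matrix()`, which was removed in pandas 1.0, so running it as-is requires an older pandas.

```python
# Hypothetical example: map point data to network nodes, discarding
# points more than 100 impedance units from their nearest node.
node_ids = net.get_node_ids(df.x, df.y, mapping_distance=100)
```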
test
|
Network.plot
|
Plot an array of data on a map using matplotlib and Basemap,
automatically matching the data to the Pandana network node positions.
Keyword arguments are passed to the plotting routine.
Parameters
----------
data : pandas.Series
Numeric data with the same length and index as the nodes
in the network.
bbox : tuple, optional
(lat_min, lng_min, lat_max, lng_max)
plot_type : {'hexbin', 'scatter'}, optional
fig_kwargs : dict, optional
Keyword arguments that will be passed to
matplotlib.pyplot.subplots. Use this to specify things like
figure size or background color.
bmap_kwargs : dict, optional
Keyword arguments that will be passed to the Basemap constructor.
This can be used to specify a projection or coastline resolution.
plot_kwargs : dict, optional
Keyword arguments that will be passed to the matplotlib plotting
command used. Use this to control plot styles and color maps used.
cbar_kwargs : dict, optional
Keyword arguments passed to the Basemap.colorbar method.
Use this to control color bar location and label.
Returns
-------
bmap : Basemap
fig : matplotlib.Figure
ax : matplotlib.Axes
|
pandana/network.py
|
def plot(
        self, data, bbox=None, plot_type='scatter',
        fig_kwargs=None, bmap_kwargs=None, plot_kwargs=None,
        cbar_kwargs=None):
    """
    Plot an array of data on a map using matplotlib and Basemap,
    automatically matching the data to the Pandana network node positions.

    Keyword arguments are passed to the plotting routine.

    Parameters
    ----------
    data : pandas.Series
        Numeric data with the same length and index as the nodes
        in the network.
    bbox : tuple, optional
        (lat_min, lng_min, lat_max, lng_max)
    plot_type : {'hexbin', 'scatter'}, optional
    fig_kwargs : dict, optional
        Keyword arguments that will be passed to
        matplotlib.pyplot.subplots. Use this to specify things like
        figure size or background color.
    bmap_kwargs : dict, optional
        Keyword arguments that will be passed to the Basemap constructor.
        This can be used to specify a projection or coastline resolution.
    plot_kwargs : dict, optional
        Keyword arguments that will be passed to the matplotlib plotting
        command used. Use this to control plot styles and color maps used.
    cbar_kwargs : dict, optional
        Keyword arguments passed to the Basemap.colorbar method.
        Use this to control color bar location and label.

    Returns
    -------
    bmap : Basemap
    fig : matplotlib.Figure
    ax : matplotlib.Axes

    """
    from mpl_toolkits.basemap import Basemap

    fig_kwargs = fig_kwargs or {}
    bmap_kwargs = bmap_kwargs or {}
    plot_kwargs = plot_kwargs or {}
    cbar_kwargs = cbar_kwargs or {}

    if not bbox:
        bbox = (
            self.nodes_df.y.min(),
            self.nodes_df.x.min(),
            self.nodes_df.y.max(),
            self.nodes_df.x.max())

    fig, ax = plt.subplots(**fig_kwargs)

    bmap = Basemap(
        bbox[1], bbox[0], bbox[3], bbox[2], ax=ax, **bmap_kwargs)
    bmap.drawcoastlines()
    bmap.drawmapboundary()

    x, y = bmap(self.nodes_df.x.values, self.nodes_df.y.values)

    if plot_type == 'scatter':
        plot = bmap.scatter(
            x, y, c=data.values, **plot_kwargs)
    elif plot_type == 'hexbin':
        plot = bmap.hexbin(
            x, y, C=data.values, **plot_kwargs)

    bmap.colorbar(plot, **cbar_kwargs)

    return bmap, fig, ax
|
def plot(
        self, data, bbox=None, plot_type='scatter',
        fig_kwargs=None, bmap_kwargs=None, plot_kwargs=None,
        cbar_kwargs=None):
    """
    Plot an array of data on a map using matplotlib and Basemap,
    automatically matching the data to the Pandana network node positions.

    Keyword arguments are passed to the plotting routine.

    Parameters
    ----------
    data : pandas.Series
        Numeric data with the same length and index as the nodes
        in the network.
    bbox : tuple, optional
        (lat_min, lng_min, lat_max, lng_max)
    plot_type : {'hexbin', 'scatter'}, optional
    fig_kwargs : dict, optional
        Keyword arguments that will be passed to
        matplotlib.pyplot.subplots. Use this to specify things like
        figure size or background color.
    bmap_kwargs : dict, optional
        Keyword arguments that will be passed to the Basemap constructor.
        This can be used to specify a projection or coastline resolution.
    plot_kwargs : dict, optional
        Keyword arguments that will be passed to the matplotlib plotting
        command used. Use this to control plot styles and color maps used.
    cbar_kwargs : dict, optional
        Keyword arguments passed to the Basemap.colorbar method.
        Use this to control color bar location and label.

    Returns
    -------
    bmap : Basemap
    fig : matplotlib.Figure
    ax : matplotlib.Axes

    """
    from mpl_toolkits.basemap import Basemap

    fig_kwargs = fig_kwargs or {}
    bmap_kwargs = bmap_kwargs or {}
    plot_kwargs = plot_kwargs or {}
    cbar_kwargs = cbar_kwargs or {}

    if not bbox:
        bbox = (
            self.nodes_df.y.min(),
            self.nodes_df.x.min(),
            self.nodes_df.y.max(),
            self.nodes_df.x.max())

    fig, ax = plt.subplots(**fig_kwargs)

    bmap = Basemap(
        bbox[1], bbox[0], bbox[3], bbox[2], ax=ax, **bmap_kwargs)
    bmap.drawcoastlines()
    bmap.drawmapboundary()

    x, y = bmap(self.nodes_df.x.values, self.nodes_df.y.values)

    if plot_type == 'scatter':
        plot = bmap.scatter(
            x, y, c=data.values, **plot_kwargs)
    elif plot_type == 'hexbin':
        plot = bmap.hexbin(
            x, y, C=data.values, **plot_kwargs)

    bmap.colorbar(plot, **cbar_kwargs)

    return bmap, fig, ax
|
[
"Plot",
"an",
"array",
"of",
"data",
"on",
"a",
"map",
"using",
"matplotlib",
"and",
"Basemap",
"automatically",
"matching",
"the",
"data",
"to",
"the",
"Pandana",
"network",
"node",
"positions",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/network.py#L385-L456
|
[
"def",
"plot",
"(",
"self",
",",
"data",
",",
"bbox",
"=",
"None",
",",
"plot_type",
"=",
"'scatter'",
",",
"fig_kwargs",
"=",
"None",
",",
"bmap_kwargs",
"=",
"None",
",",
"plot_kwargs",
"=",
"None",
",",
"cbar_kwargs",
"=",
"None",
")",
":",
"from",
"mpl_toolkits",
".",
"basemap",
"import",
"Basemap",
"fig_kwargs",
"=",
"fig_kwargs",
"or",
"{",
"}",
"bmap_kwargs",
"=",
"bmap_kwargs",
"or",
"{",
"}",
"plot_kwargs",
"=",
"plot_kwargs",
"or",
"{",
"}",
"cbar_kwargs",
"=",
"cbar_kwargs",
"or",
"{",
"}",
"if",
"not",
"bbox",
":",
"bbox",
"=",
"(",
"self",
".",
"nodes_df",
".",
"y",
".",
"min",
"(",
")",
",",
"self",
".",
"nodes_df",
".",
"x",
".",
"min",
"(",
")",
",",
"self",
".",
"nodes_df",
".",
"y",
".",
"max",
"(",
")",
",",
"self",
".",
"nodes_df",
".",
"x",
".",
"max",
"(",
")",
")",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"*",
"*",
"fig_kwargs",
")",
"bmap",
"=",
"Basemap",
"(",
"bbox",
"[",
"1",
"]",
",",
"bbox",
"[",
"0",
"]",
",",
"bbox",
"[",
"3",
"]",
",",
"bbox",
"[",
"2",
"]",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"bmap_kwargs",
")",
"bmap",
".",
"drawcoastlines",
"(",
")",
"bmap",
".",
"drawmapboundary",
"(",
")",
"x",
",",
"y",
"=",
"bmap",
"(",
"self",
".",
"nodes_df",
".",
"x",
".",
"values",
",",
"self",
".",
"nodes_df",
".",
"y",
".",
"values",
")",
"if",
"plot_type",
"==",
"'scatter'",
":",
"plot",
"=",
"bmap",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"c",
"=",
"data",
".",
"values",
",",
"*",
"*",
"plot_kwargs",
")",
"elif",
"plot_type",
"==",
"'hexbin'",
":",
"plot",
"=",
"bmap",
".",
"hexbin",
"(",
"x",
",",
"y",
",",
"C",
"=",
"data",
".",
"values",
",",
"*",
"*",
"plot_kwargs",
")",
"bmap",
".",
"colorbar",
"(",
"plot",
",",
"*",
"*",
"cbar_kwargs",
")",
"return",
"bmap",
",",
"fig",
",",
"ax"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
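A minimal usage sketch (not part of the dataset row), assuming Basemap is installed and `sums` is the aggregation Series from the earlier example; the figure size, grid size, and colormap are placeholders.

```python
# Hypothetical example: hexbin map of the aggregation results.
bmap, fig, ax = net.plot(
    sums, plot_type='hexbin',
    fig_kwargs={'figsize': (10, 8)},
    plot_kwargs={'gridsize': 60, 'cmap': 'viridis'})
```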
test
|
Network.set_pois
|
Set the location of all the pois of this category. The pois are
connected to the closest node in the Pandana network which assumes
no impedance between the location of the variable and the location
of the closest network node.
Parameters
----------
category : string
The name of the category for this set of pois
maxdist : float
The maximum distance that will later be used in
find_all_nearest_pois
maxitems : int
The maximum number of items that will later be requested
in find_all_nearest_pois
x_col : Pandas Series (float)
The x location (longitude) of pois in this category
y_col : Pandas Series (float)
The y location (latitude) of pois in this category
Returns
-------
Nothing
|
pandana/network.py
|
def set_pois(self, category, maxdist, maxitems, x_col, y_col):
    """
    Set the location of all the pois of this category. The pois are
    connected to the closest node in the Pandana network which assumes
    no impedance between the location of the variable and the location
    of the closest network node.

    Parameters
    ----------
    category : string
        The name of the category for this set of pois
    maxdist : float
        The maximum distance that will later be used in
        find_all_nearest_pois
    maxitems : int
        The maximum number of items that will later be requested
        in find_all_nearest_pois
    x_col : Pandas Series (float)
        The x location (longitude) of pois in this category
    y_col : Pandas Series (float)
        The y location (latitude) of pois in this category

    Returns
    -------
    Nothing

    """
    if category not in self.poi_category_names:
        self.poi_category_names.append(category)

    self.max_pois = maxitems

    node_ids = self.get_node_ids(x_col, y_col)

    self.poi_category_indexes[category] = node_ids.index

    node_idx = self._node_indexes(node_ids)

    self.net.initialize_category(maxdist, maxitems,
                                 category.encode('utf-8'),
                                 node_idx.values)
|
def set_pois(self, category, maxdist, maxitems, x_col, y_col):
    """
    Set the location of all the pois of this category. The pois are
    connected to the closest node in the Pandana network which assumes
    no impedance between the location of the variable and the location
    of the closest network node.

    Parameters
    ----------
    category : string
        The name of the category for this set of pois
    maxdist : float
        The maximum distance that will later be used in
        find_all_nearest_pois
    maxitems : int
        The maximum number of items that will later be requested
        in find_all_nearest_pois
    x_col : Pandas Series (float)
        The x location (longitude) of pois in this category
    y_col : Pandas Series (float)
        The y location (latitude) of pois in this category

    Returns
    -------
    Nothing

    """
    if category not in self.poi_category_names:
        self.poi_category_names.append(category)

    self.max_pois = maxitems

    node_ids = self.get_node_ids(x_col, y_col)

    self.poi_category_indexes[category] = node_ids.index

    node_idx = self._node_indexes(node_ids)

    self.net.initialize_category(maxdist, maxitems,
                                 category.encode('utf-8'),
                                 node_idx.values)
|
[
"Set",
"the",
"location",
"of",
"all",
"the",
"pois",
"of",
"this",
"category",
".",
"The",
"pois",
"are",
"connected",
"to",
"the",
"closest",
"node",
"in",
"the",
"Pandana",
"network",
"which",
"assumes",
"no",
"impedance",
"between",
"the",
"location",
"of",
"the",
"variable",
"and",
"the",
"location",
"of",
"the",
"closest",
"network",
"node",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/network.py#L458-L493
|
[
"def",
"set_pois",
"(",
"self",
",",
"category",
",",
"maxdist",
",",
"maxitems",
",",
"x_col",
",",
"y_col",
")",
":",
"if",
"category",
"not",
"in",
"self",
".",
"poi_category_names",
":",
"self",
".",
"poi_category_names",
".",
"append",
"(",
"category",
")",
"self",
".",
"max_pois",
"=",
"maxitems",
"node_ids",
"=",
"self",
".",
"get_node_ids",
"(",
"x_col",
",",
"y_col",
")",
"self",
".",
"poi_category_indexes",
"[",
"category",
"]",
"=",
"node_ids",
".",
"index",
"node_idx",
"=",
"self",
".",
"_node_indexes",
"(",
"node_ids",
")",
"self",
".",
"net",
".",
"initialize_category",
"(",
"maxdist",
",",
"maxitems",
",",
"category",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"node_idx",
".",
"values",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
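A minimal usage sketch (not part of the dataset row); `restaurants` is an assumed DataFrame of POI coordinates, and the distance and item limits are placeholders.

```python
# Hypothetical example: register restaurant locations as a POI category,
# searchable up to 2000 impedance units and 10 nearest items per query.
net.set_pois('restaurants', 2000, 10, restaurants.x, restaurants.y)
```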
test
|
Network.nearest_pois
|
Find the distance to the nearest pois from each source node. The
bigger values in this case mean less accessibility.
Parameters
----------
distance : float
The maximum distance to look for pois. This will usually be a
distance unit in meters however if you have customized the
impedance this could be in other units such as utility or time
etc.
category : string
The name of the category of poi to look for
num_pois : int
The number of pois to look for, this also sets the number of
columns in the DataFrame that gets returned
max_distance : float, optional
The value to set the distance to if there is NO poi within the
specified distance - if not specified, gets set to distance. This
will usually be a distance unit in meters however if you have
customized the impedance this could be in other units such as
utility or time etc.
imp_name : string, optional
The impedance name to use for the aggregation on this network.
Must be one of the impedance names passed in the constructor of
this object. If not specified, there must be only one impedance
passed in the constructor, which will be used.
include_poi_ids : bool, optional
If this flag is set to true, the call will add columns to the
return DataFrame - instead of just returning the distance for
the nth POI, it will also return the id of that POI. The names
of the columns with the poi ids will be poi1, poi2, etc - it
will take roughly twice as long to include these ids as to not
include them
Returns
-------
d : Pandas DataFrame
Like aggregate, this series has an index of all the node ids for
the network. Unlike aggregate, this method returns a dataframe
with the number of columns equal to the distances to the Nth
closest poi. For instance, if you ask for the 10 closest poi to
each node, column d[1] will be the distance to the 1st closest poi
of that category while column d[2] will be the distance to the 2nd
closest poi, and so on.
|
pandana/network.py
|
def nearest_pois(self, distance, category, num_pois=1, max_distance=None,
                 imp_name=None, include_poi_ids=False):
    """
    Find the distance to the nearest pois from each source node. The
    bigger values in this case mean less accessibility.

    Parameters
    ----------
    distance : float
        The maximum distance to look for pois. This will usually be a
        distance unit in meters however if you have customized the
        impedance this could be in other units such as utility or time
        etc.
    category : string
        The name of the category of poi to look for
    num_pois : int
        The number of pois to look for, this also sets the number of
        columns in the DataFrame that gets returned
    max_distance : float, optional
        The value to set the distance to if there is NO poi within the
        specified distance - if not specified, gets set to distance. This
        will usually be a distance unit in meters however if you have
        customized the impedance this could be in other units such as
        utility or time etc.
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.
    include_poi_ids : bool, optional
        If this flag is set to true, the call will add columns to the
        return DataFrame - instead of just returning the distance for
        the nth POI, it will also return the id of that POI. The names
        of the columns with the poi ids will be poi1, poi2, etc - it
        will take roughly twice as long to include these ids as to not
        include them

    Returns
    -------
    d : Pandas DataFrame
        Like aggregate, this series has an index of all the node ids for
        the network. Unlike aggregate, this method returns a dataframe
        with the number of columns equal to the distances to the Nth
        closest poi. For instance, if you ask for the 10 closest poi to
        each node, column d[1] will be the distance to the 1st closest poi
        of that category while column d[2] will be the distance to the 2nd
        closest poi, and so on.

    """
    if max_distance is None:
        max_distance = distance

    if category not in self.poi_category_names:
        assert 0, "Need to call set_pois for this category"

    if num_pois > self.max_pois:
        assert 0, "Asking for more pois than set in init_pois"

    imp_num = self._imp_name_to_num(imp_name)

    dists, poi_ids = self.net.find_all_nearest_pois(
        distance,
        num_pois,
        category.encode('utf-8'),
        imp_num)

    dists[dists == -1] = max_distance

    df = pd.DataFrame(dists, index=self.node_ids)
    df.columns = list(range(1, num_pois + 1))

    if include_poi_ids:
        df2 = pd.DataFrame(poi_ids, index=self.node_ids)
        df2.columns = ["poi%d" % i for i in range(1, num_pois + 1)]
        for col in df2.columns:
            # turn the integer position of the poi, which is returned
            # from the C++ code, back into the index that was used when
            # the category was initialized as a pandas series
            s = df2[col].astype('int')
            df2[col] = self.poi_category_indexes[category].values[s]
            df2.loc[s == -1, col] = np.nan

        df = pd.concat([df, df2], axis=1)

    return df
|
def nearest_pois(self, distance, category, num_pois=1, max_distance=None,
                 imp_name=None, include_poi_ids=False):
    """
    Find the distance to the nearest pois from each source node. The
    bigger values in this case mean less accessibility.

    Parameters
    ----------
    distance : float
        The maximum distance to look for pois. This will usually be a
        distance unit in meters however if you have customized the
        impedance this could be in other units such as utility or time
        etc.
    category : string
        The name of the category of poi to look for
    num_pois : int
        The number of pois to look for, this also sets the number of
        columns in the DataFrame that gets returned
    max_distance : float, optional
        The value to set the distance to if there is NO poi within the
        specified distance - if not specified, gets set to distance. This
        will usually be a distance unit in meters however if you have
        customized the impedance this could be in other units such as
        utility or time etc.
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.
    include_poi_ids : bool, optional
        If this flag is set to true, the call will add columns to the
        return DataFrame - instead of just returning the distance for
        the nth POI, it will also return the id of that POI. The names
        of the columns with the poi ids will be poi1, poi2, etc - it
        will take roughly twice as long to include these ids as to not
        include them

    Returns
    -------
    d : Pandas DataFrame
        Like aggregate, this series has an index of all the node ids for
        the network. Unlike aggregate, this method returns a dataframe
        with the number of columns equal to the distances to the Nth
        closest poi. For instance, if you ask for the 10 closest poi to
        each node, column d[1] will be the distance to the 1st closest poi
        of that category while column d[2] will be the distance to the 2nd
        closest poi, and so on.

    """
    if max_distance is None:
        max_distance = distance

    if category not in self.poi_category_names:
        assert 0, "Need to call set_pois for this category"

    if num_pois > self.max_pois:
        assert 0, "Asking for more pois than set in init_pois"

    imp_num = self._imp_name_to_num(imp_name)

    dists, poi_ids = self.net.find_all_nearest_pois(
        distance,
        num_pois,
        category.encode('utf-8'),
        imp_num)

    dists[dists == -1] = max_distance

    df = pd.DataFrame(dists, index=self.node_ids)
    df.columns = list(range(1, num_pois + 1))

    if include_poi_ids:
        df2 = pd.DataFrame(poi_ids, index=self.node_ids)
        df2.columns = ["poi%d" % i for i in range(1, num_pois + 1)]
        for col in df2.columns:
            # turn the integer position of the poi, which is returned
            # from the C++ code, back into the index that was used when
            # the category was initialized as a pandas series
            s = df2[col].astype('int')
            df2[col] = self.poi_category_indexes[category].values[s]
            df2.loc[s == -1, col] = np.nan

        df = pd.concat([df, df2], axis=1)

    return df
|
[
"Find",
"the",
"distance",
"to",
"the",
"nearest",
"pois",
"from",
"each",
"source",
"node",
".",
"The",
"bigger",
"values",
"in",
"this",
"case",
"mean",
"less",
"accessibility",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/network.py#L495-L581
|
[
"def",
"nearest_pois",
"(",
"self",
",",
"distance",
",",
"category",
",",
"num_pois",
"=",
"1",
",",
"max_distance",
"=",
"None",
",",
"imp_name",
"=",
"None",
",",
"include_poi_ids",
"=",
"False",
")",
":",
"if",
"max_distance",
"is",
"None",
":",
"max_distance",
"=",
"distance",
"if",
"category",
"not",
"in",
"self",
".",
"poi_category_names",
":",
"assert",
"0",
",",
"\"Need to call set_pois for this category\"",
"if",
"num_pois",
">",
"self",
".",
"max_pois",
":",
"assert",
"0",
",",
"\"Asking for more pois than set in init_pois\"",
"imp_num",
"=",
"self",
".",
"_imp_name_to_num",
"(",
"imp_name",
")",
"dists",
",",
"poi_ids",
"=",
"self",
".",
"net",
".",
"find_all_nearest_pois",
"(",
"distance",
",",
"num_pois",
",",
"category",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"imp_num",
")",
"dists",
"[",
"dists",
"==",
"-",
"1",
"]",
"=",
"max_distance",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"dists",
",",
"index",
"=",
"self",
".",
"node_ids",
")",
"df",
".",
"columns",
"=",
"list",
"(",
"range",
"(",
"1",
",",
"num_pois",
"+",
"1",
")",
")",
"if",
"include_poi_ids",
":",
"df2",
"=",
"pd",
".",
"DataFrame",
"(",
"poi_ids",
",",
"index",
"=",
"self",
".",
"node_ids",
")",
"df2",
".",
"columns",
"=",
"[",
"\"poi%d\"",
"%",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"num_pois",
"+",
"1",
")",
"]",
"for",
"col",
"in",
"df2",
".",
"columns",
":",
"# if this is still all working according to plan at this point",
"# the great magic trick is now to turn the integer position of",
"# the poi, which is painstakingly returned from the c++ code,",
"# and turn it into the actual index that was used when it was",
"# initialized as a pandas series - this really is pandas-like",
"# thinking. it's complicated on the inside, but quite",
"# intuitive to the user I think",
"s",
"=",
"df2",
"[",
"col",
"]",
".",
"astype",
"(",
"'int'",
")",
"df2",
"[",
"col",
"]",
"=",
"self",
".",
"poi_category_indexes",
"[",
"category",
"]",
".",
"values",
"[",
"s",
"]",
"df2",
".",
"loc",
"[",
"s",
"==",
"-",
"1",
",",
"col",
"]",
"=",
"np",
".",
"nan",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df",
",",
"df2",
"]",
",",
"axis",
"=",
"1",
")",
"return",
"df"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
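A minimal usage sketch (not part of the dataset row), continuing the 'restaurants' category registered above; the 2000-unit radius is a placeholder.

```python
# Hypothetical example: distance from every node to its 3 nearest
# restaurants, plus the matching POI identifiers.
d = net.nearest_pois(2000, 'restaurants', num_pois=3,
                     include_poi_ids=True)
# Columns 1..3 hold distances; poi1..poi3 hold the original POI indexes.
```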
test
|
Network.low_connectivity_nodes
|
Identify nodes that are connected to fewer than some threshold
of other nodes within a given distance.
Parameters
----------
impedance : float
Distance within which to search for other connected nodes. This
will usually be a distance unit in meters however if you have
customized the impedance this could be in other units such as
utility or time etc.
count : int
Threshold for connectivity. If a node is connected to fewer
than this many nodes within `impedance` it will be identified
as "low connectivity".
imp_name : string, optional
The impedance name to use for the aggregation on this network.
Must be one of the impedance names passed in the constructor of
this object. If not specified, there must be only one impedance
passed in the constructor, which will be used.
Returns
-------
node_ids : array
List of "low connectivity" node IDs.
|
pandana/network.py
|
def low_connectivity_nodes(self, impedance, count, imp_name=None):
    """
    Identify nodes that are connected to fewer than some threshold
    of other nodes within a given distance.

    Parameters
    ----------
    impedance : float
        Distance within which to search for other connected nodes. This
        will usually be a distance unit in meters however if you have
        customized the impedance this could be in other units such as
        utility or time etc.
    count : int
        Threshold for connectivity. If a node is connected to fewer
        than this many nodes within `impedance` it will be identified
        as "low connectivity".
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.

    Returns
    -------
    node_ids : array
        List of "low connectivity" node IDs.

    """
    # set a counter variable on all nodes
    self.set(self.node_ids.to_series(), name='counter')

    # count nodes within impedance range
    agg = self.aggregate(
        impedance, type='count', imp_name=imp_name, name='counter')

    return np.array(agg[agg < count].index)
|
def low_connectivity_nodes(self, impedance, count, imp_name=None):
    """
    Identify nodes that are connected to fewer than some threshold
    of other nodes within a given distance.

    Parameters
    ----------
    impedance : float
        Distance within which to search for other connected nodes. This
        will usually be a distance unit in meters however if you have
        customized the impedance this could be in other units such as
        utility or time etc.
    count : int
        Threshold for connectivity. If a node is connected to fewer
        than this many nodes within `impedance` it will be identified
        as "low connectivity".
    imp_name : string, optional
        The impedance name to use for the aggregation on this network.
        Must be one of the impedance names passed in the constructor of
        this object. If not specified, there must be only one impedance
        passed in the constructor, which will be used.

    Returns
    -------
    node_ids : array
        List of "low connectivity" node IDs.

    """
    # set a counter variable on all nodes
    self.set(self.node_ids.to_series(), name='counter')

    # count nodes within impedance range
    agg = self.aggregate(
        impedance, type='count', imp_name=imp_name, name='counter')

    return np.array(agg[agg < count].index)
|
[
"Identify",
"nodes",
"that",
"are",
"connected",
"to",
"fewer",
"than",
"some",
"threshold",
"of",
"other",
"nodes",
"within",
"a",
"given",
"distance",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/network.py#L583-L618
|
[
"def",
"low_connectivity_nodes",
"(",
"self",
",",
"impedance",
",",
"count",
",",
"imp_name",
"=",
"None",
")",
":",
"# set a counter variable on all nodes",
"self",
".",
"set",
"(",
"self",
".",
"node_ids",
".",
"to_series",
"(",
")",
",",
"name",
"=",
"'counter'",
")",
"# count nodes within impedance range",
"agg",
"=",
"self",
".",
"aggregate",
"(",
"impedance",
",",
"type",
"=",
"'count'",
",",
"imp_name",
"=",
"imp_name",
",",
"name",
"=",
"'counter'",
")",
"return",
"np",
".",
"array",
"(",
"agg",
"[",
"agg",
"<",
"count",
"]",
".",
"index",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
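A minimal usage sketch (not part of the dataset row), assuming `net` has a single impedance; the threshold values and filename are placeholders.

```python
# Hypothetical example: flag nodes that can reach fewer than 10 other
# nodes within 500 impedance units, then exclude them when saving.
from pandana.loaders.pandash5 import network_to_pandas_hdf5

lcn = net.low_connectivity_nodes(500, 10)
network_to_pandas_hdf5(net, 'network.h5', rm_nodes=lcn)
```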
test
|
pdna_network_from_bbox
|
Make a Pandana network from a bounding lat/lon box
request to the Overpass API. Distance will be in the default units meters.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
bbox : tuple
Bounding box formatted as a 4 element tuple:
(lng_max, lat_min, lng_min, lat_max)
network_type : {'walk', 'drive'}, optional
Specify whether the network will be used for walking or driving.
A value of 'walk' attempts to exclude things like freeways,
while a value of 'drive' attempts to exclude things like
bike and walking paths.
two_way : bool, optional
Whether the routes are two-way. If True, node pairs will only
occur once.
timeout : int, optional
the timeout interval for requests and to pass to Overpass API
memory : int, optional
server memory allocation size for the query, in bytes.
If none, server will use its default allocation size
max_query_area_size : float, optional
max area for any part of the geometry, in the units the geometry is in
Returns
-------
network : pandana.Network
|
pandana/loaders/osm.py
|
def pdna_network_from_bbox(
lat_min=None, lng_min=None, lat_max=None, lng_max=None, bbox=None,
network_type='walk', two_way=True,
timeout=180, memory=None, max_query_area_size=50 * 1000 * 50 * 1000):
"""
Make a Pandana network from a bounding lat/lon box
request to the Overpass API. Distance will be in the default units meters.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
bbox : tuple
Bounding box formatted as a 4 element tuple:
(lng_max, lat_min, lng_min, lat_max)
network_type : {'walk', 'drive'}, optional
Specify whether the network will be used for walking or driving.
A value of 'walk' attempts to exclude things like freeways,
while a value of 'drive' attempts to exclude things like
bike and walking paths.
two_way : bool, optional
Whether the routes are two-way. If True, node pairs will only
occur once.
timeout : int, optional
the timeout interval for requests, also passed to the Overpass API
memory : int, optional
server memory allocation size for the query, in bytes.
If None, the server will use its default allocation size
max_query_area_size : float, optional
max area for any part of the geometry, in the units the geometry is in
Returns
-------
network : pandana.Network
"""
nodes, edges = network_from_bbox(lat_min=lat_min, lng_min=lng_min,
lat_max=lat_max, lng_max=lng_max,
bbox=bbox, network_type=network_type,
two_way=two_way, timeout=timeout,
memory=memory,
max_query_area_size=max_query_area_size)
return Network(
nodes['x'], nodes['y'],
edges['from'], edges['to'], edges[['distance']])
|
def pdna_network_from_bbox(
lat_min=None, lng_min=None, lat_max=None, lng_max=None, bbox=None,
network_type='walk', two_way=True,
timeout=180, memory=None, max_query_area_size=50 * 1000 * 50 * 1000):
"""
Make a Pandana network from a bounding lat/lon box
request to the Overpass API. Distance will be in the default units meters.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
bbox : tuple
Bounding box formatted as a 4 element tuple:
(lng_max, lat_min, lng_min, lat_max)
network_type : {'walk', 'drive'}, optional
Specify whether the network will be used for walking or driving.
A value of 'walk' attempts to exclude things like freeways,
while a value of 'drive' attempts to exclude things like
bike and walking paths.
two_way : bool, optional
Whether the routes are two-way. If True, node pairs will only
occur once.
timeout : int, optional
the timeout interval for requests, also passed to the Overpass API
memory : int, optional
server memory allocation size for the query, in bytes.
If None, the server will use its default allocation size
max_query_area_size : float, optional
max area for any part of the geometry, in the units the geometry is in
Returns
-------
network : pandana.Network
"""
nodes, edges = network_from_bbox(lat_min=lat_min, lng_min=lng_min,
lat_max=lat_max, lng_max=lng_max,
bbox=bbox, network_type=network_type,
two_way=two_way, timeout=timeout,
memory=memory,
max_query_area_size=max_query_area_size)
return Network(
nodes['x'], nodes['y'],
edges['from'], edges['to'], edges[['distance']])
|
[
"Make",
"a",
"Pandana",
"network",
"from",
"a",
"bounding",
"lat",
"/",
"lon",
"box",
"request",
"to",
"the",
"Overpass",
"API",
".",
"Distance",
"will",
"be",
"in",
"the",
"default",
"units",
"meters",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/osm.py#L13-L58
|
[
"def",
"pdna_network_from_bbox",
"(",
"lat_min",
"=",
"None",
",",
"lng_min",
"=",
"None",
",",
"lat_max",
"=",
"None",
",",
"lng_max",
"=",
"None",
",",
"bbox",
"=",
"None",
",",
"network_type",
"=",
"'walk'",
",",
"two_way",
"=",
"True",
",",
"timeout",
"=",
"180",
",",
"memory",
"=",
"None",
",",
"max_query_area_size",
"=",
"50",
"*",
"1000",
"*",
"50",
"*",
"1000",
")",
":",
"nodes",
",",
"edges",
"=",
"network_from_bbox",
"(",
"lat_min",
"=",
"lat_min",
",",
"lng_min",
"=",
"lng_min",
",",
"lat_max",
"=",
"lat_max",
",",
"lng_max",
"=",
"lng_max",
",",
"bbox",
"=",
"bbox",
",",
"network_type",
"=",
"network_type",
",",
"two_way",
"=",
"two_way",
",",
"timeout",
"=",
"timeout",
",",
"memory",
"=",
"memory",
",",
"max_query_area_size",
"=",
"max_query_area_size",
")",
"return",
"Network",
"(",
"nodes",
"[",
"'x'",
"]",
",",
"nodes",
"[",
"'y'",
"]",
",",
"edges",
"[",
"'from'",
"]",
",",
"edges",
"[",
"'to'",
"]",
",",
"edges",
"[",
"[",
"'distance'",
"]",
"]",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
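A usage sketch for the loader above; it performs a live Overpass API request, and the coordinates are an illustrative box around downtown Oakland.

from pandana.loaders import osm

# Build a walking network for a small bounding box; note the
# (lat_min, lng_min, lat_max, lng_max) positional argument order.
net = osm.pdna_network_from_bbox(37.79, -122.28, 37.81, -122.26,
                                 network_type='walk')
print(len(net.node_ids), 'nodes downloaded')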
test
|
process_node
|
Process a node element entry into a dict suitable for going into
a Pandas DataFrame.
Parameters
----------
e : dict
Returns
-------
node : dict
|
pandana/loaders/osm.py
|
def process_node(e):
"""
Process a node element entry into a dict suitable for going into
a Pandas DataFrame.
Parameters
----------
e : dict
Returns
-------
node : dict
"""
uninteresting_tags = {
'source',
'source_ref',
'source:ref',
'history',
'attribution',
'created_by',
'tiger:tlid',
'tiger:upload_uuid',
}
node = {
'id': e['id'],
'lat': e['lat'],
'lon': e['lon']
}
if 'tags' in e:
for t, v in list(e['tags'].items()):
if t not in uninteresting_tags:
node[t] = v
return node
|
def process_node(e):
"""
Process a node element entry into a dict suitable for going into
a Pandas DataFrame.
Parameters
----------
e : dict
Returns
-------
node : dict
"""
uninteresting_tags = {
'source',
'source_ref',
'source:ref',
'history',
'attribution',
'created_by',
'tiger:tlid',
'tiger:upload_uuid',
}
node = {
'id': e['id'],
'lat': e['lat'],
'lon': e['lon']
}
if 'tags' in e:
for t, v in list(e['tags'].items()):
if t not in uninteresting_tags:
node[t] = v
return node
|
[
"Process",
"a",
"node",
"element",
"entry",
"into",
"a",
"dict",
"suitable",
"for",
"going",
"into",
"a",
"Pandas",
"DataFrame",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/osm.py#L61-L96
|
[
"def",
"process_node",
"(",
"e",
")",
":",
"uninteresting_tags",
"=",
"{",
"'source'",
",",
"'source_ref'",
",",
"'source:ref'",
",",
"'history'",
",",
"'attribution'",
",",
"'created_by'",
",",
"'tiger:tlid'",
",",
"'tiger:upload_uuid'",
",",
"}",
"node",
"=",
"{",
"'id'",
":",
"e",
"[",
"'id'",
"]",
",",
"'lat'",
":",
"e",
"[",
"'lat'",
"]",
",",
"'lon'",
":",
"e",
"[",
"'lon'",
"]",
"}",
"if",
"'tags'",
"in",
"e",
":",
"for",
"t",
",",
"v",
"in",
"list",
"(",
"e",
"[",
"'tags'",
"]",
".",
"items",
"(",
")",
")",
":",
"if",
"t",
"not",
"in",
"uninteresting_tags",
":",
"node",
"[",
"t",
"]",
"=",
"v",
"return",
"node"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
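A small sketch of the tag filtering above, using a hand-made element dict shaped like Overpass output (values are illustrative):

element = {
    'id': 123456,
    'lat': 37.80,
    'lon': -122.27,
    'tags': {'amenity': 'cafe', 'source': 'survey'},
}

node = process_node(element)
# 'amenity' survives while 'source' is dropped as an uninteresting tag:
# {'id': 123456, 'lat': 37.8, 'lon': -122.27, 'amenity': 'cafe'}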
test
|
make_osm_query
|
Make a request to OSM and return the parsed JSON.
Parameters
----------
query : str
A string in the Overpass QL format.
Returns
-------
data : dict
|
pandana/loaders/osm.py
|
def make_osm_query(query):
"""
Make a request to OSM and return the parsed JSON.
Parameters
----------
query : str
A string in the Overpass QL format.
Returns
-------
data : dict
"""
osm_url = 'http://www.overpass-api.de/api/interpreter'
req = requests.get(osm_url, params={'data': query})
req.raise_for_status()
return req.json()
|
def make_osm_query(query):
"""
Make a request to OSM and return the parsed JSON.
Parameters
----------
query : str
A string in the Overpass QL format.
Returns
-------
data : dict
"""
osm_url = 'http://www.overpass-api.de/api/interpreter'
req = requests.get(osm_url, params={'data': query})
req.raise_for_status()
return req.json()
|
[
"Make",
"a",
"request",
"to",
"OSM",
"and",
"return",
"the",
"parsed",
"JSON",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/osm.py#L99-L117
|
[
"def",
"make_osm_query",
"(",
"query",
")",
":",
"osm_url",
"=",
"'http://www.overpass-api.de/api/interpreter'",
"req",
"=",
"requests",
".",
"get",
"(",
"osm_url",
",",
"params",
"=",
"{",
"'data'",
":",
"query",
"}",
")",
"req",
".",
"raise_for_status",
"(",
")",
"return",
"req",
".",
"json",
"(",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
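An illustrative call with a hand-written Overpass QL query; this makes a live HTTP request, and the query string itself is an assumption for demonstration.

# Count cafe nodes inside a small bounding box.
query = ('[out:json];'
         '(node["amenity"="cafe"](37.79,-122.28,37.81,-122.26););'
         'out;')
data = make_osm_query(query)
print(len(data['elements']), 'elements returned')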
test
|
build_node_query
|
Build the string for a node-based OSM query.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
query : str
|
pandana/loaders/osm.py
|
def build_node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
"""
Build the string for a node-based OSM query.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
query : str
"""
if tags is not None:
if isinstance(tags, str):
tags = [tags]
tags = ''.join('[{}]'.format(t) for t in tags)
else:
tags = ''
query_fmt = (
'[out:json];'
'('
' node'
' {tags}'
' ({lat_min},{lng_min},{lat_max},{lng_max});'
');'
'out;')
return query_fmt.format(
lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
tags=tags)
|
def build_node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
"""
Build the string for a node-based OSM query.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
query : str
"""
if tags is not None:
if isinstance(tags, str):
tags = [tags]
tags = ''.join('[{}]'.format(t) for t in tags)
else:
tags = ''
query_fmt = (
'[out:json];'
'('
' node'
' {tags}'
' ({lat_min},{lng_min},{lat_max},{lng_max});'
');'
'out;')
return query_fmt.format(
lat_min=lat_min, lng_min=lng_min, lat_max=lat_max, lng_max=lng_max,
tags=tags)
|
[
"Build",
"the",
"string",
"for",
"a",
"node",
"-",
"based",
"OSM",
"query",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/osm.py#L120-L157
|
[
"def",
"build_node_query",
"(",
"lat_min",
",",
"lng_min",
",",
"lat_max",
",",
"lng_max",
",",
"tags",
"=",
"None",
")",
":",
"if",
"tags",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"tags",
",",
"str",
")",
":",
"tags",
"=",
"[",
"tags",
"]",
"tags",
"=",
"''",
".",
"join",
"(",
"'[{}]'",
".",
"format",
"(",
"t",
")",
"for",
"t",
"in",
"tags",
")",
"else",
":",
"tags",
"=",
"''",
"query_fmt",
"=",
"(",
"'[out:json];'",
"'('",
"' node'",
"' {tags}'",
"' ({lat_min},{lng_min},{lat_max},{lng_max});'",
"');'",
"'out;'",
")",
"return",
"query_fmt",
".",
"format",
"(",
"lat_min",
"=",
"lat_min",
",",
"lng_min",
"=",
"lng_min",
",",
"lat_max",
"=",
"lat_max",
",",
"lng_max",
"=",
"lng_max",
",",
"tags",
"=",
"tags",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
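A quick sketch of the query string the builder above produces for an illustrative tag filter:

q = build_node_query(37.79, -122.28, 37.81, -122.26,
                     tags='"amenity"="cafe"')
# q is now:
# '[out:json];( node ["amenity"="cafe"] (37.79,-122.28,37.81,-122.26););out;'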
test
|
node_query
|
Search for OSM nodes within a bounding box that match given tags.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
nodes : pandas.DataFrame
Will have 'lat' and 'lon' columns, plus other columns for the
tags associated with the node (these will vary based on the query).
Index will be the OSM node IDs.
|
pandana/loaders/osm.py
|
def node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
"""
Search for OSM nodes within a bounding box that match given tags.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
nodes : pandas.DataFrame
Will have 'lat' and 'lon' columns, plus other columns for the
tags associated with the node (these will vary based on the query).
Index will be the OSM node IDs.
"""
node_data = make_osm_query(build_node_query(
lat_min, lng_min, lat_max, lng_max, tags=tags))
if len(node_data['elements']) == 0:
raise RuntimeError('OSM query results contain no data.')
nodes = [process_node(n) for n in node_data['elements']]
return pd.DataFrame.from_records(nodes, index='id')
|
def node_query(lat_min, lng_min, lat_max, lng_max, tags=None):
"""
Search for OSM nodes within a bounding box that match given tags.
Parameters
----------
lat_min, lng_min, lat_max, lng_max : float
tags : str or list of str, optional
Node tags that will be used to filter the search.
See http://wiki.openstreetmap.org/wiki/Overpass_API/Language_Guide
for information about OSM Overpass queries
and http://wiki.openstreetmap.org/wiki/Map_Features
for a list of tags.
Returns
-------
nodes : pandas.DataFrame
Will have 'lat' and 'lon' columns, plus other columns for the
tags associated with the node (these will vary based on the query).
Index will be the OSM node IDs.
"""
node_data = make_osm_query(build_node_query(
lat_min, lng_min, lat_max, lng_max, tags=tags))
if len(node_data['elements']) == 0:
raise RuntimeError('OSM query results contain no data.')
nodes = [process_node(n) for n in node_data['elements']]
return pd.DataFrame.from_records(nodes, index='id')
|
[
"Search",
"for",
"OSM",
"nodes",
"within",
"a",
"bounding",
"box",
"that",
"match",
"given",
"tags",
"."
] |
UDST/pandana
|
python
|
https://github.com/UDST/pandana/blob/961a7ef8d3b0144b190cb60bbd61845fca6fb314/pandana/loaders/osm.py#L160-L189
|
[
"def",
"node_query",
"(",
"lat_min",
",",
"lng_min",
",",
"lat_max",
",",
"lng_max",
",",
"tags",
"=",
"None",
")",
":",
"node_data",
"=",
"make_osm_query",
"(",
"build_node_query",
"(",
"lat_min",
",",
"lng_min",
",",
"lat_max",
",",
"lng_max",
",",
"tags",
"=",
"tags",
")",
")",
"if",
"len",
"(",
"node_data",
"[",
"'elements'",
"]",
")",
"==",
"0",
":",
"raise",
"RuntimeError",
"(",
"'OSM query results contain no data.'",
")",
"nodes",
"=",
"[",
"process_node",
"(",
"n",
")",
"for",
"n",
"in",
"node_data",
"[",
"'elements'",
"]",
"]",
"return",
"pd",
".",
"DataFrame",
".",
"from_records",
"(",
"nodes",
",",
"index",
"=",
"'id'",
")"
] |
961a7ef8d3b0144b190cb60bbd61845fca6fb314
|
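An end-to-end sketch tying the helpers above together; the tag filter and bounding box are illustrative, and a live Overpass request is made.

# Cafe nodes in a small bounding box, returned as a DataFrame
# indexed by OSM node ID.
cafes = node_query(37.79, -122.28, 37.81, -122.26,
                   tags='"amenity"="cafe"')
print(cafes[['lat', 'lon']].head())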
test
|
equal
|
Shortcut function for ``unittest.TestCase.assertEqual()``.
Arguments:
x (mixed)
y (mixed)
Raises:
AssertionError: in case of assertion error.
Returns:
bool
|
pook/assertion.py
|
def equal(x, y):
"""
Shortcut function for ``unittest.TestCase.assertEqual()``.
Arguments:
x (mixed)
y (mixed)
Raises:
AssertionError: in case of assertion error.
Returns:
bool
"""
if PY_3:
return test_case().assertEqual(x, y) or True
assert x == y
|
def equal(x, y):
"""
Shortcut function for ``unittest.TestCase.assertEqual()``.
Arguments:
x (mixed)
y (mixed)
Raises:
AssertionError: in case of assertion error.
Returns:
bool
"""
if PY_3:
return test_case().assertEqual(x, y) or True
assert x == y
|
[
"Shortcut",
"function",
"for",
"unittest",
".",
"TestCase",
".",
"assertEqual",
"()",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/assertion.py#L22-L39
|
[
"def",
"equal",
"(",
"x",
",",
"y",
")",
":",
"if",
"PY_3",
":",
"return",
"test_case",
"(",
")",
".",
"assertEqual",
"(",
"x",
",",
"y",
")",
"or",
"True",
"assert",
"x",
"==",
"y"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
matches
|
Tries to match a regular expression value ``x`` against ``y``.
Alias to ``unittest.TestCase.assertRegex()``
Arguments:
x (regex|str): regular expression to test.
y (str): value to match.
regex_expr (bool): enables regex string based expression matching.
Raises:
AssertionError: in case of mismatching.
Returns:
bool
|
pook/assertion.py
|
def matches(x, y, regex_expr=False):
"""
Tries to match a regular expression value ``x`` against ``y``.
Alias to ``unittest.TestCase.assertRegex()``
Arguments:
x (regex|str): regular expression to test.
y (str): value to match.
regex_expr (bool): enables regex string based expression matching.
Raises:
AssertionError: in case of mismatching.
Returns:
bool
"""
# Parse regex expression, if needed
x = strip_regex(x) if regex_expr and isregex_expr(x) else x
# Run regex assertion
if PY_3:
# Retrieve original regex pattern
x = x.pattern if isregex(x) else x
# Assert regular expression via unittest matchers
return test_case().assertRegex(y, x) or True
# Primitive regex matching for Python 2.7
if isinstance(x, str):
x = re.compile(x, re.IGNORECASE)
assert x.match(y) is not None
|
def matches(x, y, regex_expr=False):
"""
Tries to match a regular expression value ``x`` against ``y``.
Alias to ``unittest.TestCase.assertRegex()``
Arguments:
x (regex|str): regular expression to test.
y (str): value to match.
regex_expr (bool): enables regex string based expression matching.
Raises:
AssertionError: in case of mismatching.
Returns:
bool
"""
# Parse regex expression, if needed
x = strip_regex(x) if regex_expr and isregex_expr(x) else x
# Run regex assertion
if PY_3:
# Retrieve original regex pattern
x = x.pattern if isregex(x) else x
# Assert regular expression via unittest matchers
return test_case().assertRegex(y, x) or True
# Primitive regex matching for Python 2.7
if isinstance(x, str):
x = re.compile(x, re.IGNORECASE)
assert x.match(y) is not None
|
[
"Tries",
"to",
"match",
"a",
"regular",
"expression",
"value",
"x",
"against",
"y",
".",
"Alias",
"to",
"unittest",
".",
"TestCase",
".",
"assertRegex",
"()"
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/assertion.py#L42-L72
|
[
"def",
"matches",
"(",
"x",
",",
"y",
",",
"regex_expr",
"=",
"False",
")",
":",
"# Parse regex expression, if needed",
"x",
"=",
"strip_regex",
"(",
"x",
")",
"if",
"regex_expr",
"and",
"isregex_expr",
"(",
"x",
")",
"else",
"x",
"# Run regex assertion",
"if",
"PY_3",
":",
"# Retrieve original regex pattern",
"x",
"=",
"x",
".",
"pattern",
"if",
"isregex",
"(",
"x",
")",
"else",
"x",
"# Assert regular expression via unittest matchers",
"return",
"test_case",
"(",
")",
".",
"assertRegex",
"(",
"y",
",",
"x",
")",
"or",
"True",
"# Primitive regex matching for Python 2.7",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"x",
"=",
"re",
".",
"compile",
"(",
"x",
",",
"re",
".",
"IGNORECASE",
")",
"assert",
"x",
".",
"match",
"(",
"y",
")",
"is",
"not",
"None"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
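A sketch of the two accepted pattern forms, assuming the Python 3 (`PY_3`) branch:

import re

# Both a compiled pattern and the 're/.../' string expression form work.
assert matches(re.compile('[a-z]+'), 'hello')
assert matches('re/[a-z]+/', 'hello', regex_expr=True)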
test
|
isregex_expr
|
Returns ``True`` if the given expression value is a regular expression
like string with prefix ``re/`` and suffix ``/``, otherwise ``False``.
Arguments:
expr (mixed): expression value to test.
Returns:
bool
|
pook/regex.py
|
def isregex_expr(expr):
"""
Returns ``True`` if the given expression value is a regular expression
like string with prefix ``re/`` and suffix ``/``, otherwise ``False``.
Arguments:
expr (mixed): expression value to test.
Returns:
bool
"""
if not isinstance(expr, str):
return False
return all([
len(expr) > 3,
expr.startswith('re/'),
expr.endswith('/')
])
|
def isregex_expr(expr):
"""
Returns ``True`` if the given expression value is a regular expression
like string with prefix ``re/`` and suffix ``/``, otherwise ``False``.
Arguments:
expr (mixed): expression value to test.
Returns:
bool
"""
if not isinstance(expr, str):
return False
return all([
len(expr) > 3,
expr.startswith('re/'),
expr.endswith('/')
])
|
[
"Returns",
"True",
"if",
"the",
"given",
"expression",
"value",
"is",
"a",
"regular",
"expression",
"like",
"string",
"with",
"prefix",
"re",
"/",
"and",
"suffix",
"/",
"otherwise",
"False",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/regex.py#L7-L25
|
[
"def",
"isregex_expr",
"(",
"expr",
")",
":",
"if",
"not",
"isinstance",
"(",
"expr",
",",
"str",
")",
":",
"return",
"False",
"return",
"all",
"(",
"[",
"len",
"(",
"expr",
")",
">",
"3",
",",
"expr",
".",
"startswith",
"(",
"'re/'",
")",
",",
"expr",
".",
"endswith",
"(",
"'/'",
")",
"]",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
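The predicate's behavior on a few representative inputs, as a minimal sketch:

import re

assert isregex_expr('re/[0-9]+/')         # prefixed and suffixed: True
assert not isregex_expr('[0-9]+')         # missing the re/ prefix
assert not isregex_expr('re/')            # too short: length must exceed 3
assert not isregex_expr(re.compile('x'))  # not a string at all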
test
|
isregex
|
Returns ``True`` if the input argument object is a native
regular expression object, otherwise ``False``.
Arguments:
value (mixed): input value to test.
Returns:
bool
|
pook/regex.py
|
def isregex(value):
"""
Returns ``True`` if the input argument object is a native
regular expression object, otherwise ``False``.
Arguments:
value (mixed): input value to test.
Returns:
bool
"""
if not value:
return False
return any((isregex_expr(value), isinstance(value, retype)))
|
def isregex(value):
"""
Returns ``True`` if the input argument object is a native
regular expression object, otherwise ``False``.
Arguments:
value (mixed): input value to test.
Returns:
bool
"""
if not value:
return False
return any((isregex_expr(value), isinstance(value, retype)))
|
[
"Returns",
"True",
"if",
"the",
"input",
"argument",
"object",
"is",
"a",
"native",
"regular",
"expression",
"object",
"otherwise",
"False",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/regex.py#L28-L41
|
[
"def",
"isregex",
"(",
"value",
")",
":",
"if",
"not",
"value",
":",
"return",
"False",
"return",
"any",
"(",
"(",
"isregex_expr",
"(",
"value",
")",
",",
"isinstance",
"(",
"value",
",",
"retype",
")",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
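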
test
|
BaseMatcher.compare
|
Compares two values with regular expression matching support.
Arguments:
value (mixed): value to compare.
expectation (mixed): value to match.
regex_expr (bool, optional): enables string based regex matching.
Returns:
bool
|
pook/matchers/base.py
|
def compare(self, value, expectation, regex_expr=False):
"""
Compares two values with regular expression matching support.
Arguments:
value (mixed): value to compare.
expectation (mixed): value to match.
regex_expr (bool, optional): enables string based regex matching.
Returns:
bool
"""
return compare(value, expectation, regex_expr=regex_expr)
|
def compare(self, value, expectation, regex_expr=False):
"""
Compares two values with regular expression matching support.
Arguments:
value (mixed): value to compare.
expectation (mixed): value to match.
regex_expr (bool, optional): enables string based regex matching.
Returns:
bool
"""
return compare(value, expectation, regex_expr=regex_expr)
|
[
"Compares",
"two",
"values",
"with",
"regular",
"expression",
"matching",
"support",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/matchers/base.py#L47-L59
|
[
"def",
"compare",
"(",
"self",
",",
"value",
",",
"expectation",
",",
"regex_expr",
"=",
"False",
")",
":",
"return",
"compare",
"(",
"value",
",",
"expectation",
",",
"regex_expr",
"=",
"regex_expr",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
fluent
|
Simple function decorator allowing easy method chaining.
Arguments:
fn (function): target function to decorate.
|
pook/decorators.py
|
def fluent(fn):
"""
Simple function decorator allowing easy method chaining.
Arguments:
fn (function): target function to decorate.
"""
@functools.wraps(fn)
def wrapper(self, *args, **kw):
# Trigger method proxy
result = fn(self, *args, **kw)
# Return self instance or method result
return self if result is None else result
return wrapper
|
def fluent(fn):
"""
Simple function decorator allowing easy method chaining.
Arguments:
fn (function): target function to decorate.
"""
@functools.wraps(fn)
def wrapper(self, *args, **kw):
# Trigger method proxy
result = fn(self, *args, **kw)
# Return self instance or method result
return self if result is None else result
return wrapper
|
[
"Simple",
"function",
"decorator",
"allowing",
"easy",
"method",
"chaining",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/decorators.py#L4-L17
|
[
"def",
"fluent",
"(",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"# Trigger method proxy",
"result",
"=",
"fn",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"# Return self instance or method result",
"return",
"self",
"if",
"result",
"is",
"None",
"else",
"result",
"return",
"wrapper"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
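A minimal sketch of the chaining this decorator enables, on a hypothetical toy class:

class Builder:
    def __init__(self):
        self.parts = []

    @fluent
    def add(self, part):
        self.parts.append(part)  # returns None, so the wrapper returns self

b = Builder().add('a').add('b')  # chaining works: each add() yields self
print(b.parts)                   # ['a', 'b']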
test
|
compare
|
Compares a string or regular expression against a given value.
Arguments:
expr (str|regex): string or regular expression value to compare.
value (str): value to compare against to.
regex_expr (bool, optional): enables string based regex matching.
Raises:
AssertionError: in case of assertion error.
Returns:
bool
|
pook/compare.py
|
def compare(expr, value, regex_expr=False):
"""
Compares a string or regular expression against a given value.
Arguments:
expr (str|regex): string or regular expression value to compare.
value (str): value to compare against to.
regex_expr (bool, optional): enables string based regex matching.
Raises:
AssertionError: in case of assertion error.
Returns:
bool
"""
# Strict equality comparison
if expr == value:
return True
# Infer negate expression to match, if needed
negate = False
if isinstance(expr, str):
negate = expr.startswith(NEGATE)
expr = strip_negate(expr) if negate else expr
try:
# RegExp or strict equality comparison
test(expr, value, regex_expr=regex_expr)
except Exception as err:
if negate:
return True
else:
raise err
return True
|
def compare(expr, value, regex_expr=False):
"""
Compares a string or regular expression against a given value.
Arguments:
expr (str|regex): string or regular expression value to compare.
value (str): value to compare against to.
regex_expr (bool, optional): enables string based regex matching.
Raises:
AssertionError: in case of assertion error.
Returns:
bool
"""
# Strict equality comparison
if expr == value:
return True
# Infer negate expression to match, if needed
negate = False
if isinstance(expr, str):
negate = expr.startswith(NEGATE)
expr = strip_negate(expr) if negate else expr
try:
# RegExp or strict equality comparison
test(expr, value, regex_expr=regex_expr)
except Exception as err:
if negate:
return True
else:
raise err
return True
|
[
"Compares",
"a",
"string",
"or",
"regular",
"expression",
"against",
"a",
"given",
"value",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/compare.py#L26-L60
|
[
"def",
"compare",
"(",
"expr",
",",
"value",
",",
"regex_expr",
"=",
"False",
")",
":",
"# Strict equality comparison",
"if",
"expr",
"==",
"value",
":",
"return",
"True",
"# Infer negate expression to match, if needed",
"negate",
"=",
"False",
"if",
"isinstance",
"(",
"expr",
",",
"str",
")",
":",
"negate",
"=",
"expr",
".",
"startswith",
"(",
"NEGATE",
")",
"expr",
"=",
"strip_negate",
"(",
"expr",
")",
"if",
"negate",
"else",
"expr",
"try",
":",
"# RegExp or strict equality comparison",
"test",
"(",
"expr",
",",
"value",
",",
"regex_expr",
"=",
"regex_expr",
")",
"except",
"Exception",
"as",
"err",
":",
"if",
"negate",
":",
"return",
"True",
"else",
":",
"raise",
"err",
"return",
"True"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
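Two illustrative calls, assuming Python 3: a strict-equality hit that short-circuits, and a string-based regex expression match.

# Equal values pass before any regex machinery runs.
assert compare('hello', 'hello')

# 're/.../' expressions are honored when regex_expr=True.
assert compare('re/[a-z]+/', 'hello', regex_expr=True)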
test
|
trigger_methods
|
Triggers specific class methods using a simple reflection
mechanism based on the given input dictionary params.
Arguments:
instance (object): target instance to dynamically trigger methods.
args (iterable): input arguments used to trigger methods on the instance.
Returns:
None
|
pook/helpers.py
|
def trigger_methods(instance, args):
"""
Triggers specific class methods using a simple reflection
mechanism based on the given input dictionary params.
Arguments:
instance (object): target instance to dynamically trigger methods.
args (iterable): input arguments used to trigger methods on the instance.
Returns:
None
"""
# Start the magic
for name in sorted(args):
value = args[name]
target = instance
# If response attributes
if name.startswith('response_') or name.startswith('reply_'):
name = name.replace('response_', '').replace('reply_', '')
# If instance has response attribute, use it
if hasattr(instance, '_response'):
target = instance._response
# Retrieve class member for inspection and future use
member = getattr(target, name, None)
# Is attribute
isattr = name in dir(target)
iscallable = ismethod(member) and not isfunction(member)
if not iscallable and not isattr:
raise PookInvalidArgument('Unsupported argument: {}'.format(name))
# Set attribute or trigger method
if iscallable:
member(value)
else:
setattr(target, name, value)
|
def trigger_methods(instance, args):
"""
Triggers specific class methods using a simple reflection
mechanism based on the given input dictionary params.
Arguments:
instance (object): target instance to dynamically trigger methods.
args (iterable): input arguments used to trigger methods on the instance.
Returns:
None
"""
# Start the magic
for name in sorted(args):
value = args[name]
target = instance
# If response attributes
if name.startswith('response_') or name.startswith('reply_'):
name = name.replace('response_', '').replace('reply_', '')
# If instance has response attribute, use it
if hasattr(instance, '_response'):
target = instance._response
# Retrieve class member for inspection and future use
member = getattr(target, name, None)
# Is attribute
isattr = name in dir(target)
iscallable = ismethod(member) and not isfunction(member)
if not iscallable and not isattr:
raise PookInvalidArgument('Unsupported argument: {}'.format(name))
# Set attribute or trigger method
if iscallable:
member(value)
else:
setattr(target, name, value)
|
[
"Triggers",
"specific",
"class",
"methods",
"using",
"a",
"simple",
"reflection",
"mechanism",
"based",
"on",
"the",
"given",
"input",
"dictionary",
"params",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/helpers.py#L5-L43
|
[
"def",
"trigger_methods",
"(",
"instance",
",",
"args",
")",
":",
"# Start the magic",
"for",
"name",
"in",
"sorted",
"(",
"args",
")",
":",
"value",
"=",
"args",
"[",
"name",
"]",
"target",
"=",
"instance",
"# If response attibutes",
"if",
"name",
".",
"startswith",
"(",
"'response_'",
")",
"or",
"name",
".",
"startswith",
"(",
"'reply_'",
")",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"'response_'",
",",
"''",
")",
".",
"replace",
"(",
"'reply_'",
",",
"''",
")",
"# If instance has response attribute, use it",
"if",
"hasattr",
"(",
"instance",
",",
"'_response'",
")",
":",
"target",
"=",
"instance",
".",
"_response",
"# Retrieve class member for inspection and future use",
"member",
"=",
"getattr",
"(",
"target",
",",
"name",
",",
"None",
")",
"# Is attribute",
"isattr",
"=",
"name",
"in",
"dir",
"(",
"target",
")",
"iscallable",
"=",
"ismethod",
"(",
"member",
")",
"and",
"not",
"isfunction",
"(",
"member",
")",
"if",
"not",
"iscallable",
"and",
"not",
"isattr",
":",
"raise",
"PookInvalidArgument",
"(",
"'Unsupported argument: {}'",
".",
"format",
"(",
"name",
")",
")",
"# Set attribute or trigger method",
"if",
"iscallable",
":",
"member",
"(",
"value",
")",
"else",
":",
"setattr",
"(",
"target",
",",
"name",
",",
"value",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
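A sketch of the reflection dispatch above on a hypothetical toy class; the attribute and method names are illustrative.

class Toy:
    def __init__(self):
        self.level = 0

    def shout(self, msg):
        print(msg.upper())

toy = Toy()
# 'level' is a plain attribute, so it is set directly; 'shout' is a bound
# method, so it is called with the value.
trigger_methods(toy, {'level': 3, 'shout': 'hi'})  # prints: HI
print(toy.level)  # 3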
test
|
MatcherEngine.match
|
Match the given HTTP request instance against the registered
matcher functions in the current engine.
Arguments:
request (pook.Request): outgoing request to match.
Returns:
tuple(bool, list[Exception]): ``True`` if all matcher tests
pass, otherwise ``False``. Also returns an optional list
of error exceptions.
|
pook/matcher.py
|
def match(self, request):
"""
Match the given HTTP request instance against the registered
matcher functions in the current engine.
Arguments:
request (pook.Request): outgoing request to match.
Returns:
tuple(bool, list[Exception]): ``True`` if all matcher tests
pass, otherwise ``False``. Also returns an optional list
of error exceptions.
"""
errors = []
def match(matcher):
try:
return matcher.match(request)
except Exception as err:
err = '{}: {}'.format(type(matcher).__name__, err)
errors.append(err)
return False
return all([match(matcher) for matcher in self]), errors
|
def match(self, request):
"""
Match the given HTTP request instance against the registered
matcher functions in the current engine.
Arguments:
request (pook.Request): outgoing request to match.
Returns:
tuple(bool, list[Exception]): ``True`` if all matcher tests
pass, otherwise ``False``. Also returns an optional list
of error exceptions.
"""
errors = []
def match(matcher):
try:
return matcher.match(request)
except Exception as err:
err = '{}: {}'.format(type(matcher).__name__, err)
errors.append(err)
return False
return all([match(matcher) for matcher in self]), errors
|
[
"Match",
"the",
"given",
"HTTP",
"request",
"instance",
"against",
"the",
"registered",
"matcher",
"functions",
"in",
"the",
"current",
"engine",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/matcher.py#L23-L46
|
[
"def",
"match",
"(",
"self",
",",
"request",
")",
":",
"errors",
"=",
"[",
"]",
"def",
"match",
"(",
"matcher",
")",
":",
"try",
":",
"return",
"matcher",
".",
"match",
"(",
"request",
")",
"except",
"Exception",
"as",
"err",
":",
"err",
"=",
"'{}: {}'",
".",
"format",
"(",
"type",
"(",
"matcher",
")",
".",
"__name__",
",",
"err",
")",
"errors",
".",
"append",
"(",
"err",
")",
"return",
"False",
"return",
"all",
"(",
"[",
"match",
"(",
"matcher",
")",
"for",
"matcher",
"in",
"self",
"]",
")",
",",
"errors"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
get
|
Returns a matcher instance by class or alias name.
Arguments:
name (str): matcher class name or alias.
Returns:
matcher: found matcher instance, otherwise ``None``.
|
pook/matchers/api.py
|
def get(name):
"""
Returns a matcher instance by class or alias name.
Arguments:
name (str): matcher class name or alias.
Returns:
matcher: found matcher instance, otherwise ``None``.
"""
for matcher in matchers:
if matcher.__name__ == name or getattr(matcher, 'name', None) == name:
return matcher
|
def get(name):
"""
Returns a matcher instance by class or alias name.
Arguments:
name (str): matcher class name or alias.
Returns:
matcher: found matcher instance, otherwise ``None``.
"""
for matcher in matchers:
if matcher.__name__ == name or getattr(matcher, 'name', None) == name:
return matcher
|
[
"Returns",
"a",
"matcher",
"instance",
"by",
"class",
"or",
"alias",
"name",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/matchers/api.py#L58-L70
|
[
"def",
"get",
"(",
"name",
")",
":",
"for",
"matcher",
"in",
"matchers",
":",
"if",
"matcher",
".",
"__name__",
"==",
"name",
"or",
"getattr",
"(",
"matcher",
",",
"'name'",
",",
"None",
")",
"==",
"name",
":",
"return",
"matcher"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
init
|
Initializes a matcher instance passing variadic arguments to
its constructor. Acts as a delegator proxy.
Arguments:
name (str): matcher class name or alias to execute.
*args (mixed): variadic argument
Returns:
matcher: matcher instance.
Raises:
ValueError: if matcher was not found.
|
pook/matchers/api.py
|
def init(name, *args):
"""
Initializes a matcher instance passing variadic arguments to
its constructor. Acts as a delegator proxy.
Arguments:
name (str): matcher class name or alias to execute.
*args (mixed): variadic argument
Returns:
matcher: matcher instance.
Raises:
ValueError: if matcher was not found.
"""
matcher = get(name)
if not matcher:
raise ValueError('Cannot find matcher: {}'.format(name))
return matcher(*args)
|
def init(name, *args):
"""
Initializes a matcher instance passing variadic arguments to
its constructor. Acts as a delegator proxy.
Arguments:
name (str): matcher class name or alias to execute.
*args (mixed): variadic argument
Returns:
matcher: matcher instance.
Raises:
ValueError: if matcher was not found.
"""
matcher = get(name)
if not matcher:
raise ValueError('Cannot find matcher: {}'.format(name))
return matcher(*args)
|
[
"Initializes",
"a",
"matcher",
"instance",
"passing",
"variadic",
"arguments",
"to",
"its",
"constructor",
".",
"Acts",
"as",
"a",
"delegator",
"proxy",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/matchers/api.py#L73-L91
|
[
"def",
"init",
"(",
"name",
",",
"*",
"args",
")",
":",
"matcher",
"=",
"get",
"(",
"name",
")",
"if",
"not",
"matcher",
":",
"raise",
"ValueError",
"(",
"'Cannot find matcher: {}'",
".",
"format",
"(",
"name",
")",
")",
"return",
"matcher",
"(",
"*",
"args",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
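An illustrative instantiation by name, using a matcher name that appears later in this document:

# Look up and construct a matcher in one call.
m = init('MethodMatcher', 'GET')

# Unknown names raise ValueError.
try:
    init('NopeMatcher')
except ValueError as err:
    print(err)  # Cannot find matcher: NopeMatcher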
test
|
Response.header
|
Defines a new response header.
Alias to ``Response.header()``.
Arguments:
key (str): header name.
value (str): header value.
Returns:
self: ``pook.Response`` current instance.
|
pook/response.py
|
def header(self, key, value):
"""
Defines a new response header.
Alias to ``Response.header()``.
Arguments:
key (str): header name.
value (str): header value.
Returns:
self: ``pook.Response`` current instance.
"""
if type(key) is tuple:
key, value = str(key[0]), key[1]
headers = {key: value}
self._headers.extend(headers)
|
def header(self, key, value):
"""
Defines a new response header.
Alias to ``Response.header()``.
Arguments:
key (str): header name.
value (str): header value.
Returns:
self: ``pook.Response`` current instance.
"""
if type(key) is tuple:
key, value = str(key[0]), key[1]
headers = {key: value}
self._headers.extend(headers)
|
[
"Defines",
"a",
"new",
"response",
"header",
".",
"Alias",
"to",
"Response",
".",
"header",
"()",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/response.py#L50-L66
|
[
"def",
"header",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"type",
"(",
"key",
")",
"is",
"tuple",
":",
"key",
",",
"value",
"=",
"str",
"(",
"key",
"[",
"0",
"]",
")",
",",
"key",
"[",
"1",
"]",
"headers",
"=",
"{",
"key",
":",
"value",
"}",
"self",
".",
"_headers",
".",
"extend",
"(",
"headers",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Response.body
|
Defines response body data.
Arguments:
body (str|bytes): response body to use.
Returns:
self: ``pook.Response`` current instance.
|
pook/response.py
|
def body(self, body):
"""
Defines response body data.
Arguments:
body (str|bytes): response body to use.
Returns:
self: ``pook.Response`` current instance.
"""
if isinstance(body, bytes):
body = body.decode('utf-8')
self._body = body
|
def body(self, body):
"""
Defines response body data.
Arguments:
body (str|bytes): response body to use.
Returns:
self: ``pook.Response`` current instance.
"""
if isinstance(body, bytes):
body = body.decode('utf-8')
self._body = body
|
[
"Defines",
"response",
"body",
"data",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/response.py#L148-L161
|
[
"def",
"body",
"(",
"self",
",",
"body",
")",
":",
"if",
"isinstance",
"(",
"body",
",",
"bytes",
")",
":",
"body",
"=",
"body",
".",
"decode",
"(",
"'utf-8'",
")",
"self",
".",
"_body",
"=",
"body"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Response.json
|
Defines the mock response JSON body.
Arguments:
data (dict|list|str): JSON body data.
Returns:
self: ``pook.Response`` current instance.
|
pook/response.py
|
def json(self, data):
"""
Defines the mock response JSON body.
Arguments:
data (dict|list|str): JSON body data.
Returns:
self: ``pook.Response`` current instance.
"""
self._headers['Content-Type'] = 'application/json'
if not isinstance(data, str):
data = json.dumps(data, indent=4)
self._body = data
|
def json(self, data):
"""
Defines the mock response JSON body.
Arguments:
data (dict|list|str): JSON body data.
Returns:
self: ``pook.Response`` current instance.
"""
self._headers['Content-Type'] = 'application/json'
if not isinstance(data, str):
data = json.dumps(data, indent=4)
self._body = data
|
[
"Defines",
"the",
"mock",
"response",
"JSON",
"body",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/response.py#L164-L177
|
[
"def",
"json",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_headers",
"[",
"'Content-Type'",
"]",
"=",
"'application/json'",
"if",
"not",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"indent",
"=",
"4",
")",
"self",
".",
"_body",
"=",
"data"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
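A usage sketch through pook's chainable mock API; the URL and payload are illustrative.

import pook

# The JSON body is serialized and Content-Type is set to application/json.
(pook.get('http://server.com/api/users')
     .reply(200)
     .json({'users': ['ana', 'bob']}))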
test
|
HTTPHeaderDict.set
|
Sets a header field with the given value, removing
previous values.
Usage::
headers = HTTPHeaderDict(foo='bar')
headers.set('Foo', 'baz')
headers['foo']
> 'baz'
|
pook/headers.py
|
def set(self, key, val):
"""
Sets a header field with the given value, removing
previous values.
Usage::
headers = HTTPHeaderDict(foo='bar')
headers.set('Foo', 'baz')
headers['foo']
> 'baz'
"""
key_lower = key.lower()
new_vals = key, val
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
self._container[key_lower] = [vals[0], vals[1], val]
|
def set(self, key, val):
"""
Sets a header field with the given value, removing
previous values.
Usage::
headers = HTTPHeaderDict(foo='bar')
headers.set('Foo', 'baz')
headers['foo']
> 'baz'
"""
key_lower = key.lower()
new_vals = key, val
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
self._container[key_lower] = [vals[0], vals[1], val]
|
[
"Sets",
"a",
"header",
"field",
"with",
"the",
"given",
"value",
"removing",
"previous",
"values",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/headers.py#L141-L158
|
[
"def",
"set",
"(",
"self",
",",
"key",
",",
"val",
")",
":",
"key_lower",
"=",
"key",
".",
"lower",
"(",
")",
"new_vals",
"=",
"key",
",",
"val",
"# Keep the common case aka no item present as fast as possible",
"vals",
"=",
"self",
".",
"_container",
".",
"setdefault",
"(",
"key_lower",
",",
"new_vals",
")",
"if",
"new_vals",
"is",
"not",
"vals",
":",
"self",
".",
"_container",
"[",
"key_lower",
"]",
"=",
"[",
"vals",
"[",
"0",
"]",
",",
"vals",
"[",
"1",
"]",
",",
"val",
"]"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
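The docstring's usage, completed into a runnable sketch (assuming the class is imported from pook.headers):

from pook.headers import HTTPHeaderDict

headers = HTTPHeaderDict(foo='bar')
headers.set('Foo', 'baz')  # per the docstring, replaces the earlier value
print(headers['foo'])      # the docstring documents this lookup as 'baz'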
test
|
_append_funcs
|
Helper function to append functions into a given list.
Arguments:
target (list): receptor list to append functions.
items (iterable): iterable that yields elements to append.
|
pook/mock.py
|
def _append_funcs(target, items):
"""
Helper function to append functions into a given list.
Arguments:
target (list): receptor list to append functions.
items (iterable): iterable that yields elements to append.
"""
[target.append(item) for item in items
if isfunction(item) or ismethod(item)]
|
def _append_funcs(target, items):
"""
Helper function to append functions into a given list.
Arguments:
target (list): receptor list to append functions.
items (iterable): iterable that yields elements to append.
"""
[target.append(item) for item in items
if isfunction(item) or ismethod(item)]
|
[
"Helper",
"function",
"to",
"append",
"functions",
"into",
"a",
"given",
"list",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L16-L25
|
[
"def",
"_append_funcs",
"(",
"target",
",",
"items",
")",
":",
"[",
"target",
".",
"append",
"(",
"item",
")",
"for",
"item",
"in",
"items",
"if",
"isfunction",
"(",
"item",
")",
"or",
"ismethod",
"(",
"item",
")",
"]"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
_trigger_request
|
Triggers request mock definition methods dynamically based on input
keyword arguments passed to `pook.Mock` constructor.
This is used to provide a more Pythonic interface vs chainable API
approach.
|
pook/mock.py
|
def _trigger_request(instance, request):
"""
Triggers request mock definition methods dynamically based on input
keyword arguments passed to `pook.Mock` constructor.
This is used to provide a more Pythonic interface vs chainable API
approach.
"""
if not isinstance(request, Request):
raise TypeError('request must be instance of pook.Request')
# Register request matchers
for key in request.keys:
if hasattr(instance, key):
getattr(instance, key)(getattr(request, key))
|
def _trigger_request(instance, request):
"""
Triggers request mock definition methods dynamically based on input
keyword arguments passed to `pook.Mock` constructor.
This is used to provide a more Pythonic interface vs chainable API
approach.
"""
if not isinstance(request, Request):
raise TypeError('request must be instance of pook.Request')
# Register request matchers
for key in request.keys:
if hasattr(instance, key):
getattr(instance, key)(getattr(request, key))
|
[
"Triggers",
"request",
"mock",
"definition",
"methods",
"dynamically",
"based",
"on",
"input",
"keyword",
"arguments",
"passed",
"to",
"pook",
".",
"Mock",
"constructor",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L28-L42
|
[
"def",
"_trigger_request",
"(",
"instance",
",",
"request",
")",
":",
"if",
"not",
"isinstance",
"(",
"request",
",",
"Request",
")",
":",
"raise",
"TypeError",
"(",
"'request must be instance of pook.Request'",
")",
"# Register request matchers",
"for",
"key",
"in",
"request",
".",
"keys",
":",
"if",
"hasattr",
"(",
"instance",
",",
"key",
")",
":",
"getattr",
"(",
"instance",
",",
"key",
")",
"(",
"getattr",
"(",
"request",
",",
"key",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Mock.url
|
Defines the mock URL to match.
It can be a full URL with path and query params.
Protocol scheme is optional, defaults to ``http://``.
Arguments:
url (str): mock URL to match. E.g: ``server.com/api``.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def url(self, url):
"""
Defines the mock URL to match.
It can be a full URL with path and query params.
Protocol scheme is optional, defaults to ``http://``.
Arguments:
url (str): mock URL to match. E.g: ``server.com/api``.
Returns:
self: current Mock instance.
"""
self._request.url = url
self.add_matcher(matcher('URLMatcher', url))
|
def url(self, url):
"""
Defines the mock URL to match.
It can be a full URL with path and query params.
Protocol schema is optional, defaults to ``http://``.
Arguments:
url (str): mock URL to match. E.g: ``server.com/api``.
Returns:
self: current Mock instance.
"""
self._request.url = url
self.add_matcher(matcher('URLMatcher', url))
|
[
"Defines",
"the",
"mock",
"URL",
"to",
"match",
".",
"It",
"can",
"be",
"a",
"full",
"URL",
"with",
"path",
"and",
"query",
"params",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L138-L152
|
[
"def",
"url",
"(",
"self",
",",
"url",
")",
":",
"self",
".",
"_request",
".",
"url",
"=",
"url",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'URLMatcher'",
",",
"url",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Mock.method
|
Defines the HTTP method to match.
Use ``*`` to match any method.
Arguments:
method (str): method value to match. E.g: ``GET``.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def method(self, method):
"""
Defines the HTTP method to match.
Use ``*`` to match any method.
Arguments:
method (str): method value to match. E.g: ``GET``.
Returns:
self: current Mock instance.
"""
self._request.method = method
self.add_matcher(matcher('MethodMatcher', method))
|
def method(self, method):
"""
Defines the HTTP method to match.
Use ``*`` to match any method.
Arguments:
method (str): method value to match. E.g: ``GET``.
Returns:
self: current Mock instance.
"""
self._request.method = method
self.add_matcher(matcher('MethodMatcher', method))
|
[
"Defines",
"the",
"HTTP",
"method",
"to",
"match",
".",
"Use",
"*",
"to",
"match",
"any",
"method",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L155-L167
|
[
"def",
"method",
"(",
"self",
",",
"method",
")",
":",
"self",
".",
"_request",
".",
"method",
"=",
"method",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'MethodMatcher'",
",",
"method",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Mock.path
|
Defines a URL path to match.
Only call this method if the URL has no path already defined.
Arguments:
path (str): URL path value to match. E.g: ``/api/users``.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def path(self, path):
"""
Defines a URL path to match.
Only call this method if the URL has no path already defined.
Arguments:
path (str): URL path value to match. E.g: ``/api/users``.
Returns:
self: current Mock instance.
"""
url = furl(self._request.rawurl)
url.path = path
self._request.url = url.url
self.add_matcher(matcher('PathMatcher', path))
|
def path(self, path):
"""
Defines a URL path to match.
Only call this method if the URL has no path already defined.
Arguments:
path (str): URL path value to match. E.g: ``/api/users``.
Returns:
self: current Mock instance.
"""
url = furl(self._request.rawurl)
url.path = path
self._request.url = url.url
self.add_matcher(matcher('PathMatcher', path))
|
[
"Defines",
"a",
"URL",
"path",
"to",
"match",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L170-L185
|
[
"def",
"path",
"(",
"self",
",",
"path",
")",
":",
"url",
"=",
"furl",
"(",
"self",
".",
"_request",
".",
"rawurl",
")",
"url",
".",
"path",
"=",
"path",
"self",
".",
"_request",
".",
"url",
"=",
"url",
".",
"url",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'PathMatcher'",
",",
"path",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
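A chained usage sketch for the method above; the host and path are illustrative.

import pook

# Attach a path matcher to a mock whose URL had no path component yet.
(pook.get('http://server.com')
     .path('/api/users')
     .reply(200))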
test
|
Mock.header
|
Defines a request header to match.
Header keys are case insensitive.
Arguments:
name (str): header name to match. E.g: ``Content-Type``.
value (str): header value to match.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def header(self, name, value):
"""
Defines a request header to match.
Header keys are case insensitive.
Arguments:
name (str): header name to match. E.g: ``Content-Type``.
value (str): header value to match.
Returns:
self: current Mock instance.
"""
headers = {name: value}
self._request.headers = headers
self.add_matcher(matcher('HeadersMatcher', headers))
|
def header(self, name, value):
"""
Defines a request header to match.
Header keys are case insensitive.
Arguments:
name (str): header name to match. E.g: ``Content-Type``.
value (str): header value to match.
Returns:
self: current Mock instance.
"""
headers = {name: value}
self._request.headers = headers
self.add_matcher(matcher('HeadersMatcher', headers))
|
[
"Defines",
"a",
"request",
"header",
"to",
"match",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L188-L202
|
[
"def",
"header",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"headers",
"=",
"{",
"name",
":",
"value",
"}",
"self",
".",
"_request",
".",
"headers",
"=",
"headers",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'HeadersMatcher'",
",",
"headers",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Mock.headers
|
Defines a dictionary of headers to match.
Header keys are case insensitive.
Arguments:
headers (dict): headers to match.
**headers (dict): headers to match as variadic keyword arguments.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def headers(self, headers=None, **kw):
"""
Defines a dictionary of headers to match.
Header keys are case insensitive.
Arguments:
headers (dict): headers to match.
**headers (dict): headers to match as variadic keyword arguments.
Returns:
self: current Mock instance.
"""
headers = kw if kw else headers
self._request.headers = headers
self.add_matcher(matcher('HeadersMatcher', headers))
|
def headers(self, headers=None, **kw):
"""
Defines a dictionary of headers to match.
Header keys are case insensitive.
Arguments:
headers (dict): headers to match.
**headers (dict): headers to match as variadic keyword arguments.
Returns:
self: current Mock instance.
"""
headers = kw if kw else headers
self._request.headers = headers
self.add_matcher(matcher('HeadersMatcher', headers))
|
[
"Defines",
"a",
"dictionary",
"of",
"headers",
"to",
"match",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L205-L220
|
[
"def",
"headers",
"(",
"self",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"headers",
"=",
"kw",
"if",
"kw",
"else",
"headers",
"self",
".",
"_request",
".",
"headers",
"=",
"headers",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'HeadersMatcher'",
",",
"headers",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Mock.header_present
|
Defines a new header matcher expectation that must be present in the
outgoing request in order to be satisfied, no matter what value it
hosts.
Header keys are case insensitive.
Arguments:
*names (str): header or headers names to match.
Returns:
self: current Mock instance.
Example::
(pook.get('server.com/api')
.header_present('content-type'))
|
pook/mock.py
|
def header_present(self, *names):
"""
Defines a new header matcher expectation that must be present in the
outgoing request in order to be satisfied, no matter what value it
hosts.
Header keys are case insensitive.
Arguments:
*names (str): header or headers names to match.
Returns:
self: current Mock instance.
Example::
(pook.get('server.com/api')
.header_present('content-type'))
"""
for name in names:
headers = {name: re.compile('(.*)')}
self.add_matcher(matcher('HeadersMatcher', headers))
|
def header_present(self, *names):
"""
Defines a new header matcher expectation that must be present in the
outgoing request in order to be satisfied, no matter what value it
hosts.
Header keys are case insensitive.
Arguments:
*names (str): header or headers names to match.
Returns:
self: current Mock instance.
Example::
(pook.get('server.com/api')
.header_present('content-type'))
"""
for name in names:
headers = {name: re.compile('(.*)')}
self.add_matcher(matcher('HeadersMatcher', headers))
|
[
"Defines",
"a",
"new",
"header",
"matcher",
"expectation",
"that",
"must",
"be",
"present",
"in",
"the",
"outgoing",
"request",
"in",
"order",
"to",
"be",
"satisfied",
"no",
"matter",
"what",
"value",
"it",
"hosts",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L223-L244
|
[
"def",
"header_present",
"(",
"self",
",",
"*",
"names",
")",
":",
"for",
"name",
"in",
"names",
":",
"headers",
"=",
"{",
"name",
":",
"re",
".",
"compile",
"(",
"'(.*)'",
")",
"}",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'HeadersMatcher'",
",",
"headers",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
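The docstring example above, completed into a runnable sketch with an illustrative reply:

import pook

# Match requests that carry a content-type header, whatever its value.
(pook.get('server.com/api')
     .header_present('content-type')
     .reply(204))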
test
|
Mock.headers_present
|
Defines a list of headers that must be present in the
outgoing request in order to satisfy the matcher, no matter what value
the headers host.
Header keys are case insensitive.
Arguments:
headers (list|tuple): header keys to match.
Returns:
self: current Mock instance.
Example::
(pook.get('server.com/api')
.headers_present(['content-type', 'Authorization']))
|
pook/mock.py
|
def headers_present(self, headers):
"""
Defines a list of headers that must be present in the
outgoing request in order to satisfy the matcher, no matter what value
the headers host.
Header keys are case insensitive.
Arguments:
headers (list|tuple): header keys to match.
Returns:
self: current Mock instance.
Example::
(pook.get('server.com/api')
.headers_present(['content-type', 'Authorization']))
"""
headers = {name: re.compile('(.*)') for name in headers}
self.add_matcher(matcher('HeadersMatcher', headers))
|
def headers_present(self, headers):
"""
Defines a list of headers that must be present in the
outgoing request in order to satisfy the matcher, no matter what value
the headers host.
Header keys are case insensitive.
Arguments:
headers (list|tuple): header keys to match.
Returns:
self: current Mock instance.
Example::
(pook.get('server.com/api')
.headers_present(['content-type', 'Authorization']))
"""
headers = {name: re.compile('(.*)') for name in headers}
self.add_matcher(matcher('HeadersMatcher', headers))
|
[
"Defines",
"a",
"list",
"of",
"headers",
"that",
"must",
"be",
"present",
"in",
"the",
"outgoing",
"request",
"in",
"order",
"to",
"satisfy",
"the",
"matcher",
"no",
"matter",
"what",
"value",
"the",
"headers",
"hosts",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L247-L267
|
[
"def",
"headers_present",
"(",
"self",
",",
"headers",
")",
":",
"headers",
"=",
"{",
"name",
":",
"re",
".",
"compile",
"(",
"'(.*)'",
")",
"for",
"name",
"in",
"headers",
"}",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'HeadersMatcher'",
",",
"headers",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Mock.content
|
Defines the ``Content-Type`` outgoing header value to match.
You can pass one of the following type aliases instead of the full
MIME type representation:
- ``json`` = ``application/json``
- ``xml`` = ``application/xml``
- ``html`` = ``text/html``
- ``text`` = ``text/plain``
- ``urlencoded`` = ``application/x-www-form-urlencoded``
- ``form`` = ``application/x-www-form-urlencoded``
- ``form-data`` = ``application/x-www-form-urlencoded``
Arguments:
value (str): type alias or header value to match.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def content(self, value):
"""
Defines the ``Content-Type`` outgoing header value to match.
You can pass one of the following type aliases instead of the full
MIME type representation:
- ``json`` = ``application/json``
- ``xml`` = ``application/xml``
- ``html`` = ``text/html``
- ``text`` = ``text/plain``
- ``urlencoded`` = ``application/x-www-form-urlencoded``
- ``form`` = ``application/x-www-form-urlencoded``
- ``form-data`` = ``application/x-www-form-urlencoded``
Arguments:
value (str): type alias or header value to match.
Returns:
self: current Mock instance.
"""
header = {'Content-Type': TYPES.get(value, value)}
self._request.headers = header
self.add_matcher(matcher('HeadersMatcher', header))
|
def content(self, value):
"""
Defines the ``Content-Type`` outgoing header value to match.
You can pass one of the following type aliases instead of the full
MIME type representation:
- ``json`` = ``application/json``
- ``xml`` = ``application/xml``
- ``html`` = ``text/html``
- ``text`` = ``text/plain``
- ``urlencoded`` = ``application/x-www-form-urlencoded``
- ``form`` = ``application/x-www-form-urlencoded``
- ``form-data`` = ``application/x-www-form-urlencoded``
Arguments:
value (str): type alias or header value to match.
Returns:
self: current Mock instance.
"""
header = {'Content-Type': TYPES.get(value, value)}
self._request.headers = header
self.add_matcher(matcher('HeadersMatcher', header))
|
[
"Defines",
"the",
"Content",
"-",
"Type",
"outgoing",
"header",
"value",
"to",
"match",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L294-L317
|
[
"def",
"content",
"(",
"self",
",",
"value",
")",
":",
"header",
"=",
"{",
"'Content-Type'",
":",
"TYPES",
".",
"get",
"(",
"value",
",",
"value",
")",
"}",
"self",
".",
"_request",
".",
"headers",
"=",
"header",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'HeadersMatcher'",
",",
"header",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
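
A minimal usage sketch for ``Mock.content``: the ``json`` alias is expanded to
``application/json`` through the ``TYPES`` table and matched as a header.
Assumes the ``requests`` client (which pook intercepts) and a hypothetical
``server.com`` endpoint, mirroring the docstring examples above.

import pook
import requests

pook.activate()

# 'json' resolves to 'application/json' via TYPES before matching
pook.post('http://server.com/api').content('json').reply(201)

res = requests.post('http://server.com/api',
                    headers={'Content-Type': 'application/json'})
print(res.status_code)  # -> 201

pook.disable()
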
test
|
Mock.params
|
Defines a set of URL query params to match.
Arguments:
params (dict): set of params to match.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def params(self, params):
"""
Defines a set of URL query params to match.
Arguments:
params (dict): set of params to match.
Returns:
self: current Mock instance.
"""
url = furl(self._request.rawurl)
url = url.add(params)
self._request.url = url.url
self.add_matcher(matcher('QueryMatcher', params))
|
def params(self, params):
"""
Defines a set of URL query params to match.
Arguments:
params (dict): set of params to match.
Returns:
self: current Mock instance.
"""
url = furl(self._request.rawurl)
url = url.add(params)
self._request.url = url.url
self.add_matcher(matcher('QueryMatcher', params))
|
[
"Defines",
"a",
"set",
"of",
"URL",
"query",
"params",
"to",
"match",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L347-L360
|
[
"def",
"params",
"(",
"self",
",",
"params",
")",
":",
"url",
"=",
"furl",
"(",
"self",
".",
"_request",
".",
"rawurl",
")",
"url",
"=",
"url",
".",
"add",
"(",
"params",
")",
"self",
".",
"_request",
".",
"url",
"=",
"url",
".",
"url",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'QueryMatcher'",
",",
"params",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
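
A sketch of ``Mock.params``: the dict is appended to the mock URL via ``furl``
and also registered as a ``QueryMatcher``, so the outgoing query string must
carry the same pairs. Endpoint and client are assumptions as above.

import pook
import requests

pook.activate()

pook.get('http://server.com/api').params({'page': '1'}).reply(200)

# The query string below satisfies the QueryMatcher registered above
res = requests.get('http://server.com/api', params={'page': '1'})
print(res.status_code)  # -> 200

pook.disable()
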
test
|
Mock.body
|
Defines the body data to match.
``body`` argument can be a ``str``, ``binary`` or a regular expression.
Arguments:
body (str|binary|regex): body data to match.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def body(self, body):
"""
Defines the body data to match.
``body`` argument can be a ``str``, ``binary`` or a regular expression.
Arguments:
body (str|binary|regex): body data to match.
Returns:
self: current Mock instance.
"""
self._request.body = body
self.add_matcher(matcher('BodyMatcher', body))
|
def body(self, body):
"""
Defines the body data to match.
``body`` argument can be a ``str``, ``binary`` or a regular expression.
Arguments:
body (str|binary|regex): body data to match.
Returns:
self: current Mock instance.
"""
self._request.body = body
self.add_matcher(matcher('BodyMatcher', body))
|
[
"Defines",
"the",
"body",
"data",
"to",
"match",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L363-L376
|
[
"def",
"body",
"(",
"self",
",",
"body",
")",
":",
"self",
".",
"_request",
".",
"body",
"=",
"body",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'BodyMatcher'",
",",
"body",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
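
A sketch of ``Mock.body`` matching an exact string payload; a compiled regular
expression (e.g. via ``pook.regex``) could be passed instead.

import pook
import requests

pook.activate()

pook.post('http://server.com/api').body('hello pook').reply(200)

# The request body must equal the registered value for the mock to match
res = requests.post('http://server.com/api', data='hello pook')
print(res.status_code)  # -> 200

pook.disable()
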
test
|
Mock.json
|
Defines the JSON body to match.
``json`` argument can be a JSON string, a JSON serializable
Python structure, such as a ``dict`` or ``list`` or it can be
a regular expression used to match the body.
Arguments:
json (str|dict|list|regex): body JSON to match.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def json(self, json):
"""
Defines the JSON body to match.
``json`` argument can be a JSON string, a JSON serializable
Python structure, such as a ``dict`` or ``list`` or it can be
a regular expression used to match the body.
Arguments:
json (str|dict|list|regex): body JSON to match.
Returns:
self: current Mock instance.
"""
self._request.json = json
self.add_matcher(matcher('JSONMatcher', json))
|
def json(self, json):
"""
Defines the JSON body to match.
``json`` argument can be a JSON string, a JSON serializable
Python structure, such as a ``dict`` or ``list`` or it can be
a regular expression used to match the body.
Arguments:
json (str|dict|list|regex): body JSON to match.
Returns:
self: current Mock instance.
"""
self._request.json = json
self.add_matcher(matcher('JSONMatcher', json))
|
[
"Defines",
"the",
"JSON",
"body",
"to",
"match",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L379-L394
|
[
"def",
"json",
"(",
"self",
",",
"json",
")",
":",
"self",
".",
"_request",
".",
"json",
"=",
"json",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'JSONMatcher'",
",",
"json",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
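
A sketch of ``Mock.json`` with a serializable ``dict``; the ``JSONMatcher``
compares parsed structures, so key order in the outgoing body is irrelevant.

import pook
import requests

pook.activate()

pook.post('http://server.com/api').json({'id': 1, 'name': 'pook'}).reply(201)

# requests serializes the dict; the matcher compares it structurally
res = requests.post('http://server.com/api', json={'name': 'pook', 'id': 1})
print(res.status_code)  # -> 201

pook.disable()
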
test
|
Mock.xml
|
Defines an XML body value to match.
Arguments:
xml (str|regex): body XML to match.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def xml(self, xml):
"""
Defines an XML body value to match.
Arguments:
xml (str|regex): body XML to match.
Returns:
self: current Mock instance.
"""
self._request.xml = xml
self.add_matcher(matcher('XMLMatcher', xml))
|
def xml(self, xml):
"""
Defines an XML body value to match.
Arguments:
xml (str|regex): body XML to match.
Returns:
self: current Mock instance.
"""
self._request.xml = xml
self.add_matcher(matcher('XMLMatcher', xml))
|
[
"Defines",
"a",
"XML",
"body",
"value",
"to",
"match",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L410-L421
|
[
"def",
"xml",
"(",
"self",
",",
"xml",
")",
":",
"self",
".",
"_request",
".",
"xml",
"=",
"xml",
"self",
".",
"add_matcher",
"(",
"matcher",
"(",
"'XMLMatcher'",
",",
"xml",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
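
A sketch of ``Mock.xml`` matching a literal XML payload; as with ``body``, a
regular expression is also accepted.

import pook
import requests

pook.activate()

xml_body = '<user><id>1</id></user>'
pook.post('http://server.com/api').xml(xml_body).reply(200)

res = requests.post('http://server.com/api', data=xml_body)
print(res.status_code)  # -> 200

pook.disable()
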
test
|
Mock.file
|
Reads the body to match from a disk file.
Arguments:
path (str): relative or absolute path to file to read from.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def file(self, path):
"""
Reads the body to match from a disk file.
Arguments:
path (str): relative or absolute path to file to read from.
Returns:
self: current Mock instance.
"""
with open(path, 'r') as f:
self.body(str(f.read()))
|
def file(self, path):
"""
Reads the body to match from a disk file.
Arguments:
path (str): relative or absolute path to file to read from.
Returns:
self: current Mock instance.
"""
with open(path, 'r') as f:
self.body(str(f.read()))
|
[
"Reads",
"the",
"body",
"to",
"match",
"from",
"a",
"disk",
"file",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L424-L435
|
[
"def",
"file",
"(",
"self",
",",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"body",
"(",
"str",
"(",
"f",
".",
"read",
"(",
")",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
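
A sketch of ``Mock.file``: the file contents become the expected request body,
since ``file()`` simply delegates to ``body()``. A throwaway temp file keeps
the example self-contained; chaining after ``file()`` is avoided here because
the snippet above does not show an explicit return value.

import tempfile

import pook
import requests

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('file body')
    path = f.name

pook.activate()

mock = pook.post('http://server.com/api')
mock.file(path)
mock.reply(200)

res = requests.post('http://server.com/api', data='file body')
print(res.status_code)  # -> 200

pook.disable()
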
test
|
Mock.persist
|
Enables persistent mode for the current mock.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def persist(self, status=None):
"""
Enables persistent mode for the current mock.
Returns:
self: current Mock instance.
"""
self._persist = status if type(status) is bool else True
|
def persist(self, status=None):
"""
Enables persistent mode for the current mock.
Returns:
self: current Mock instance.
"""
self._persist = status if type(status) is bool else True
|
[
"Enables",
"persistent",
"mode",
"for",
"the",
"current",
"mock",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L496-L503
|
[
"def",
"persist",
"(",
"self",
",",
"status",
"=",
"None",
")",
":",
"self",
".",
"_persist",
"=",
"status",
"if",
"type",
"(",
"status",
")",
"is",
"bool",
"else",
"True"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
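
A sketch of ``Mock.persist``: a persistent mock skips the ``_times`` decrement
in ``Mock.match``, so the same expectation can be replayed indefinitely.

import pook
import requests

pook.activate()

mock = pook.get('http://server.com/api')
mock.persist()
mock.reply(200)

# Without persist(), a plain mock would expire after its first match
for _ in range(3):
    print(requests.get('http://server.com/api').status_code)  # 200 each time

pook.disable()
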
test
|
Mock.error
|
Defines a simulated exception error that will be raised.
Arguments:
error (str|Exception): error to raise.
Returns:
self: current Mock instance.
|
pook/mock.py
|
def error(self, error):
"""
Defines a simulated exception error that will be raised.
Arguments:
error (str|Exception): error to raise.
Returns:
self: current Mock instance.
"""
self._error = RuntimeError(error) if isinstance(error, str) else error
|
def error(self, error):
"""
Defines a simulated exception error that will be raised.
Arguments:
error (str|Exception): error to raise.
Returns:
self: current Mock instance.
"""
self._error = RuntimeError(error) if isinstance(error, str) else error
|
[
"Defines",
"a",
"simulated",
"exception",
"error",
"that",
"will",
"be",
"raised",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L562-L572
|
[
"def",
"error",
"(",
"self",
",",
"error",
")",
":",
"self",
".",
"_error",
"=",
"RuntimeError",
"(",
"error",
")",
"if",
"isinstance",
"(",
"error",
",",
"str",
")",
"else",
"error"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
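
A sketch of ``Mock.error``: a plain string is wrapped in ``RuntimeError`` and
the stored exception is raised from ``Mock.match`` once the request matches.

import pook
import requests

pook.activate()

mock = pook.get('http://server.com/api')
mock.error('simulated connection failure')

try:
    requests.get('http://server.com/api')
except Exception as err:
    print(repr(err))  # expected: RuntimeError('simulated connection failure')

pook.disable()
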
test
|
Mock.reply
|
Defines the mock response.
Arguments:
status (int, optional): response status code. Defaults to ``200``.
**kw (dict): optional keyword arguments passed to ``pook.Response``
constructor.
Returns:
pook.Response: mock response definition instance.
|
pook/mock.py
|
def reply(self, status=200, new_response=False, **kw):
"""
Defines the mock response.
Arguments:
status (int, optional): response status code. Defaults to ``200``.
**kw (dict): optional keyword arguments passed to ``pook.Response``
constructor.
Returns:
pook.Response: mock response definition instance.
"""
# Use or create a Response mock instance
res = Response(**kw) if new_response else self._response
# Define HTTP mandatory response status
res.status(status or res._status)
# Expose current mock instance in response for self-reference
res.mock = self
# Define mock response
self._response = res
# Return response
return res
|
def reply(self, status=200, new_response=False, **kw):
"""
Defines the mock response.
Arguments:
status (int, optional): response status code. Defaults to ``200``.
**kw (dict): optional keyword arguments passed to ``pook.Response``
constructor.
Returns:
pook.Response: mock response definition instance.
"""
# Use or create a Response mock instance
res = Response(**kw) if new_response else self._response
# Define HTTP mandatory response status
res.status(status or res._status)
# Expose current mock instance in response for self-reference
res.mock = self
# Define mock response
self._response = res
# Return response
return res
|
[
"Defines",
"the",
"mock",
"response",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L574-L595
|
[
"def",
"reply",
"(",
"self",
",",
"status",
"=",
"200",
",",
"new_response",
"=",
"False",
",",
"*",
"*",
"kw",
")",
":",
"# Use or create a Response mock instance",
"res",
"=",
"Response",
"(",
"*",
"*",
"kw",
")",
"if",
"new_response",
"else",
"self",
".",
"_response",
"# Define HTTP mandatory response status",
"res",
".",
"status",
"(",
"status",
"or",
"res",
".",
"_status",
")",
"# Expose current mock instance in response for self-reference",
"res",
".",
"mock",
"=",
"self",
"# Define mock response",
"self",
".",
"_response",
"=",
"res",
"# Return response",
"return",
"res"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
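
A sketch of ``Mock.reply``: the return value is the ``pook.Response`` builder
rather than the ``Mock`` itself, so response details such as a JSON body can
be chained after the status code.

import pook
import requests

pook.activate()

pook.get('http://server.com/api').reply(201).json({'id': 1})

res = requests.get('http://server.com/api')
print(res.status_code, res.json())  # -> 201 {'id': 1}

pook.disable()
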
test
|
Mock.match
|
Matches an outgoing HTTP request against the current mock matchers.
This method acts like a delegator to `pook.MatcherEngine`.
Arguments:
request (pook.Request): request instance to match.
Raises:
Exception: if the mock has an exception defined.
Returns:
tuple(bool, list[Exception]): ``True`` if the mock matches
the outgoing HTTP request, otherwise ``False``. Also returns
an optional list of error exceptions.
|
pook/mock.py
|
def match(self, request):
"""
Matches an outgoing HTTP request against the current mock matchers.
This method acts like a delegator to `pook.MatcherEngine`.
Arguments:
request (pook.Request): request instance to match.
Raises:
Exception: if the mock has an exception defined.
Returns:
tuple(bool, list[Exception]): ``True`` if the mock matches
the outgoing HTTP request, otherwise ``False``. Also returns
an optional list of error exceptions.
"""
# If mock already expired, fail it
if self._times <= 0:
raise PookExpiredMock('Mock expired')
# Trigger mock filters
for test in self.filters:
if not test(request, self):
return False, []
# Trigger mock mappers
for mapper in self.mappers:
request = mapper(request, self)
if not request:
raise ValueError('map function must return a request object')
# Match incoming request against registered mock matchers
matches, errors = self.matchers.match(request)
# If not matched, return False
if not matches:
return False, errors
# Register matched request for further inspection and reference
self._calls.append(request)
# Increase mock call counter
self._matches += 1
if not self._persist:
self._times -= 1
# Raise simulated error
if self._error:
raise self._error
# Trigger callback when matched
for callback in self.callbacks:
callback(request, self)
return True, []
|
def match(self, request):
"""
Matches an outgoing HTTP request against the current mock matchers.
This method acts like a delegator to `pook.MatcherEngine`.
Arguments:
request (pook.Request): request instance to match.
Raises:
Exception: if the mock has an exception defined.
Returns:
tuple(bool, list[Exception]): ``True`` if the mock matches
the outgoing HTTP request, otherwise ``False``. Also returns
an optional list of error exceptions.
"""
# If mock already expired, fail it
if self._times <= 0:
raise PookExpiredMock('Mock expired')
# Trigger mock filters
for test in self.filters:
if not test(request, self):
return False, []
# Trigger mock mappers
for mapper in self.mappers:
request = mapper(request, self)
if not request:
raise ValueError('map function must return a request object')
# Match incoming request against registered mock matchers
matches, errors = self.matchers.match(request)
# If not matched, return False
if not matches:
return False, errors
# Register matched request for further inspection and reference
self._calls.append(request)
# Increase mock call counter
self._matches += 1
if not self._persist:
self._times -= 1
# Raise simulated error
if self._error:
raise self._error
# Trigger callback when matched
for callback in self.callbacks:
callback(request, self)
return True, []
|
[
"Matches",
"an",
"outgoing",
"HTTP",
"request",
"against",
"the",
"current",
"mock",
"matchers",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L697-L752
|
[
"def",
"match",
"(",
"self",
",",
"request",
")",
":",
"# If mock already expired, fail it",
"if",
"self",
".",
"_times",
"<=",
"0",
":",
"raise",
"PookExpiredMock",
"(",
"'Mock expired'",
")",
"# Trigger mock filters",
"for",
"test",
"in",
"self",
".",
"filters",
":",
"if",
"not",
"test",
"(",
"request",
",",
"self",
")",
":",
"return",
"False",
",",
"[",
"]",
"# Trigger mock mappers",
"for",
"mapper",
"in",
"self",
".",
"mappers",
":",
"request",
"=",
"mapper",
"(",
"request",
",",
"self",
")",
"if",
"not",
"request",
":",
"raise",
"ValueError",
"(",
"'map function must return a request object'",
")",
"# Match incoming request against registered mock matchers",
"matches",
",",
"errors",
"=",
"self",
".",
"matchers",
".",
"match",
"(",
"request",
")",
"# If not matched, return False",
"if",
"not",
"matches",
":",
"return",
"False",
",",
"errors",
"# Register matched request for further inspecion and reference",
"self",
".",
"_calls",
".",
"append",
"(",
"request",
")",
"# Increase mock call counter",
"self",
".",
"_matches",
"+=",
"1",
"if",
"not",
"self",
".",
"_persist",
":",
"self",
".",
"_times",
"-=",
"1",
"# Raise simulated error",
"if",
"self",
".",
"_error",
":",
"raise",
"self",
".",
"_error",
"# Trigger callback when matched",
"for",
"callback",
"in",
"self",
".",
"callbacks",
":",
"callback",
"(",
"request",
",",
"self",
")",
"return",
"True",
",",
"[",
"]"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
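
A sketch of the expiration path enforced by ``Mock.match``: a non-persistent
mock decrements ``_times`` on every match (``times`` is assumed to default to
1 for a plain mock), and once it reaches zero the engine removes the mock on
the next ``PookExpiredMock``, which typically surfaces as a no-match error.

import pook
import requests

pook.activate()

pook.get('http://server.com/once').reply(204)

print(requests.get('http://server.com/once').status_code)  # -> 204

try:
    # The mock expired above, so nothing matches this second call
    requests.get('http://server.com/once')
except Exception as err:
    print(type(err).__name__)  # expected: PookNoMatches

pook.disable()
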
test
|
activate_async
|
Async version of activate decorator
Arguments:
fn (function): function to be wrapped by the decorator.
_engine (Engine): pook engine instance
Returns:
function: decorator wrapper function.
|
pook/activate_async.py
|
def activate_async(fn, _engine):
"""
Async version of activate decorator
Arguments:
fn (function): function to be wrapped by the decorator.
_engine (Engine): pook engine instance
Returns:
function: decorator wrapper function.
"""
@coroutine
@functools.wraps(fn)
def wrapper(*args, **kw):
_engine.activate()
try:
if iscoroutinefunction(fn):
yield from fn(*args, **kw) # noqa
else:
fn(*args, **kw)
finally:
_engine.disable()
return wrapper
|
def activate_async(fn, _engine):
"""
Async version of activate decorator
Arguments:
fn (function): function to be wrapped by the decorator.
_engine (Engine): pook engine instance
Returns:
function: decorator wrapper function.
"""
@coroutine
@functools.wraps(fn)
def wrapper(*args, **kw):
_engine.activate()
try:
if iscoroutinefunction(fn):
yield from fn(*args, **kw) # noqa
else:
fn(*args, **kw)
finally:
_engine.disable()
return wrapper
|
[
"Async",
"version",
"of",
"activate",
"decorator"
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/activate_async.py#L5-L28
|
[
"def",
"activate_async",
"(",
"fn",
",",
"_engine",
")",
":",
"@",
"coroutine",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"_engine",
".",
"activate",
"(",
")",
"try",
":",
"if",
"iscoroutinefunction",
"(",
"fn",
")",
":",
"yield",
"from",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"# noqa",
"else",
":",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"finally",
":",
"_engine",
".",
"disable",
"(",
")",
"return",
"wrapper"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
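
A sketch of how ``activate_async`` is reached: ``pook.activate``, used as a
decorator on a coroutine function, detects it via ``iscoroutinefunction`` and
delegates here so the engine is disabled even if the awaited body fails. The
``yield from``/``@coroutine`` style targets older Python/asyncio versions.

import asyncio

import pook

@pook.activate
async def fetch_mocked():
    # The engine is active inside the wrapped coroutine
    pook.get('http://server.com/foo').reply(204)
    # ... drive an asyncio-based HTTP client here ...

asyncio.get_event_loop().run_until_complete(fetch_mocked())
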
test
|
Engine.set_mock_engine
|
Sets a custom mock engine, replacing the built-in one.
This is particularly useful if you want to replace the built-in
HTTP traffic mock interceptor engine with your custom one.
For mock engine implementation details, see `pook.MockEngine`.
Arguments:
engine (pook.MockEngine): custom mock engine to use.
|
pook/engine.py
|
def set_mock_engine(self, engine):
"""
Sets a custom mock engine, replacing the built-in one.
This is particularly useful if you want to replace the built-in
HTTP traffic mock interceptor engine with your custom one.
For mock engine implementation details, see `pook.MockEngine`.
Arguments:
engine (pook.MockEngine): custom mock engine to use.
"""
if not engine:
raise TypeError('engine must be a valid object')
# Instantiate mock engine
mock_engine = engine(self)
# Validate minimum viable interface
methods = ('activate', 'disable')
if not all([hasattr(mock_engine, method) for method in methods]):
raise NotImplementedError('engine must implement the '
'required methods')
# Use the custom mock engine
self.mock_engine = mock_engine
# Enable mock engine, if needed
if self.active:
self.mock_engine.activate()
|
def set_mock_engine(self, engine):
"""
Sets a custom mock engine, replacing the built-in one.
This is particularly useful if you want to replace the built-in
HTTP traffic mock interceptor engine with your custom one.
For mock engine implementation details, see `pook.MockEngine`.
Arguments:
engine (pook.MockEngine): custom mock engine to use.
"""
if not engine:
raise TypeError('engine must be a valid object')
# Instantiate mock engine
mock_engine = engine(self)
# Validate minimum viable interface
methods = ('activate', 'disable')
if not all([hasattr(mock_engine, method) for method in methods]):
raise NotImplementedError('engine must implement the '
'required methods')
# Use the custom mock engine
self.mock_engine = mock_engine
# Enable mock engine, if needed
if self.active:
self.mock_engine.activate()
|
[
"Sets",
"a",
"custom",
"mock",
"engine",
"replacing",
"the",
"built",
"-",
"in",
"one",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L53-L82
|
[
"def",
"set_mock_engine",
"(",
"self",
",",
"engine",
")",
":",
"if",
"not",
"engine",
":",
"raise",
"TypeError",
"(",
"'engine must be a valid object'",
")",
"# Instantiate mock engine",
"mock_engine",
"=",
"engine",
"(",
"self",
")",
"# Validate minimum viable interface",
"methods",
"=",
"(",
"'activate'",
",",
"'disable'",
")",
"if",
"not",
"all",
"(",
"[",
"hasattr",
"(",
"mock_engine",
",",
"method",
")",
"for",
"method",
"in",
"methods",
"]",
")",
":",
"raise",
"NotImplementedError",
"(",
"'engine must implementent the '",
"'required methods'",
")",
"# Use the custom mock engine",
"self",
".",
"mock_engine",
"=",
"mock_engine",
"# Enable mock engine, if needed",
"if",
"self",
".",
"active",
":",
"self",
".",
"mock_engine",
".",
"activate",
"(",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
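
A sketch of the minimum interface ``set_mock_engine`` validates: the class is
instantiated with the owning ``Engine`` and must expose ``activate`` and
``disable``. ``Engine`` is constructed directly here purely for illustration.

from pook.engine import Engine

class NoopMockEngine(object):
    """Bare-bones mock engine satisfying the validated interface."""

    def __init__(self, engine):
        self.engine = engine  # owning pook Engine instance

    def activate(self):
        pass  # a real engine would patch HTTP clients here

    def disable(self):
        pass  # ... and restore them here

engine = Engine()
engine.set_mock_engine(NoopMockEngine)
print(type(engine.mock_engine).__name__)  # -> NoopMockEngine
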
test
|
Engine.enable_network
|
Enables real networking mode, optionally passing one or multiple
hostnames that would be used as a filter.
If at least one hostname matches the outgoing traffic, the
request will be executed via the real network.
Arguments:
*hostnames: optional list of host names to enable real network
against them. hostname value can be a regular expression.
|
pook/engine.py
|
def enable_network(self, *hostnames):
"""
Enables real networking mode, optionally passing one or multiple
hostnames that would be used as a filter.
If at least one hostname matches the outgoing traffic, the
request will be executed via the real network.
Arguments:
*hostnames: optional list of host names to enable real network
against them. hostname value can be a regular expression.
"""
def hostname_filter(hostname, req):
if isregex(hostname):
return hostname.match(req.url.hostname)
return req.url.hostname == hostname
for hostname in hostnames:
self.use_network_filter(partial(hostname_filter, hostname))
self.networking = True
|
def enable_network(self, *hostnames):
"""
Enables real networking mode, optionally passing one or multiple
hostnames that would be used as a filter.
If at least one hostname matches the outgoing traffic, the
request will be executed via the real network.
Arguments:
*hostnames: optional list of host names to enable real network
against them. hostname value can be a regular expression.
"""
def hostname_filter(hostname, req):
if isregex(hostname):
return hostname.match(req.url.hostname)
return req.url.hostname == hostname
for hostname in hostnames:
self.use_network_filter(partial(hostname_filter, hostname))
self.networking = True
|
[
"Enables",
"real",
"networking",
"mode",
"optionally",
"passing",
"one",
"or",
"multiple",
"hostnames",
"that",
"would",
"be",
"used",
"as",
"filter",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L84-L104
|
[
"def",
"enable_network",
"(",
"self",
",",
"*",
"hostnames",
")",
":",
"def",
"hostname_filter",
"(",
"hostname",
",",
"req",
")",
":",
"if",
"isregex",
"(",
"hostname",
")",
":",
"return",
"hostname",
".",
"match",
"(",
"req",
".",
"url",
".",
"hostname",
")",
"return",
"req",
".",
"url",
".",
"hostname",
"==",
"hostname",
"for",
"hostname",
"in",
"hostnames",
":",
"self",
".",
"use_network_filter",
"(",
"partial",
"(",
"hostname_filter",
",",
"hostname",
")",
")",
"self",
".",
"networking",
"=",
"True"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
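
A sketch of ``enable_network`` with a plain hostname; ``hostname_filter``
above also accepts a compiled regular expression. Note that
``should_use_network`` combines the registered filters with ``all()``, so
registering several hostnames requires every filter to pass, despite the
"at least one" wording in the docstring.

import pook

pook.activate()

# Real traffic to this host bypasses the mock engine; everything else
# must still match a registered mock
pook.enable_network('httpbin.org')

pook.disable()
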
test
|
Engine.mock
|
Creates and registers a new HTTP mock in the current engine.
Arguments:
url (str): request URL to mock.
activate (bool): force mock engine activation.
Defaults to ``False``.
**kw (mixed): variadic keyword arguments for ``Mock`` constructor.
Returns:
pook.Mock: new mock instance.
|
pook/engine.py
|
def mock(self, url=None, **kw):
"""
Creates and registers a new HTTP mock in the current engine.
Arguments:
url (str): request URL to mock.
activate (bool): force mock engine activation.
Defaults to ``False``.
**kw (mixed): variadic keyword arguments for ``Mock`` constructor.
Returns:
pook.Mock: new mock instance.
"""
# Activate mock engine, if explicitly requested
if kw.get('activate'):
kw.pop('activate')
self.activate()
# Create the new HTTP mock expectation
mock = Mock(url=url, **kw)
# Expose current engine instance via mock
mock._engine = self
# Register the mock in the current engine
self.add_mock(mock)
# Return it for consumer satisfaction
return mock
|
def mock(self, url=None, **kw):
"""
Creates and registers a new HTTP mock in the current engine.
Arguments:
url (str): request URL to mock.
activate (bool): force mock engine activation.
Defaults to ``False``.
**kw (mixed): variadic keyword arguments for ``Mock`` constructor.
Returns:
pook.Mock: new mock instance.
"""
# Activate mock engine, if explicitly requested
if kw.get('activate'):
kw.pop('activate')
self.activate()
# Create the new HTTP mock expectation
mock = Mock(url=url, **kw)
# Expose current engine instance via mock
mock._engine = self
# Register the mock in the current engine
self.add_mock(mock)
# Return it for consumer satisfaction
return mock
|
[
"Creates",
"and",
"registers",
"a",
"new",
"HTTP",
"mock",
"in",
"the",
"current",
"engine",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L129-L155
|
[
"def",
"mock",
"(",
"self",
",",
"url",
"=",
"None",
",",
"*",
"*",
"kw",
")",
":",
"# Activate mock engine, if explicitly requested",
"if",
"kw",
".",
"get",
"(",
"'activate'",
")",
":",
"kw",
".",
"pop",
"(",
"'activate'",
")",
"self",
".",
"activate",
"(",
")",
"# Create the new HTTP mock expectation",
"mock",
"=",
"Mock",
"(",
"url",
"=",
"url",
",",
"*",
"*",
"kw",
")",
"# Expose current engine instance via mock",
"mock",
".",
"_engine",
"=",
"self",
"# Register the mock in the current engine",
"self",
".",
"add_mock",
"(",
"mock",
")",
"# Return it for consumer satisfaction",
"return",
"mock"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
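
A sketch of the ``activate`` keyword handled by ``Engine.mock``: it is popped
from ``kw`` before the remaining arguments reach the ``Mock`` constructor,
and switches the engine on as a side effect.

from pook.engine import Engine

engine = Engine()

mock = engine.mock('http://server.com/foo', activate=True)
mock.reply(204)

print(engine.active)         # -> True
print(mock in engine.mocks)  # -> True, registered via add_mock

engine.disable()
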
test
|
Engine.remove_mock
|
Removes a specific mock instance by object reference.
Arguments:
mock (pook.Mock): mock instance to remove.
|
pook/engine.py
|
def remove_mock(self, mock):
"""
Removes a specific mock instance by object reference.
Arguments:
mock (pook.Mock): mock instance to remove.
"""
self.mocks = [m for m in self.mocks if m is not mock]
|
def remove_mock(self, mock):
"""
Removes a specific mock instance by object reference.
Arguments:
mock (pook.Mock): mock instance to remove.
"""
self.mocks = [m for m in self.mocks if m is not mock]
|
[
"Removes",
"a",
"specific",
"mock",
"instance",
"by",
"object",
"reference",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L166-L173
|
[
"def",
"remove_mock",
"(",
"self",
",",
"mock",
")",
":",
"self",
".",
"mocks",
"=",
"[",
"m",
"for",
"m",
"in",
"self",
".",
"mocks",
"if",
"m",
"is",
"not",
"mock",
"]"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Engine.activate
|
Activates the registered interceptors in the mocking engine.
This means any HTTP traffic captured by those interceptors will
trigger the HTTP mock matching engine in order to determine if a given
HTTP transaction should be mocked out or not.
|
pook/engine.py
|
def activate(self):
"""
Activates the registered interceptors in the mocking engine.
This means any HTTP traffic captured by those interceptors will
trigger the HTTP mock matching engine in order to determine if a given
HTTP transaction should be mocked out or not.
"""
if self.active:
return None
# Activate mock engine
self.mock_engine.activate()
# Enable engine state
self.active = True
|
def activate(self):
"""
Activates the registered interceptors in the mocking engine.
This means any HTTP traffic captured by those interceptors will
trigger the HTTP mock matching engine in order to determine if a given
HTTP transaction should be mocked out or not.
"""
if self.active:
return None
# Activate mock engine
self.mock_engine.activate()
# Enable engine state
self.active = True
|
[
"Activates",
"the",
"registered",
"interceptors",
"in",
"the",
"mocking",
"engine",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L232-L246
|
[
"def",
"activate",
"(",
"self",
")",
":",
"if",
"self",
".",
"active",
":",
"return",
"None",
"# Activate mock engine",
"self",
".",
"mock_engine",
".",
"activate",
"(",
")",
"# Enable engine state",
"self",
".",
"active",
"=",
"True"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Engine.disable
|
Disables interceptors and stops intercepting any outgoing HTTP traffic.
|
pook/engine.py
|
def disable(self):
"""
Disables interceptors and stops intercepting any outgoing HTTP traffic.
"""
if not self.active:
return None
# Disable current mock engine
self.mock_engine.disable()
# Disable engine state
self.active = False
|
def disable(self):
"""
Disables interceptors and stops intercepting any outgoing HTTP traffic.
"""
if not self.active:
return None
# Disable current mock engine
self.mock_engine.disable()
# Disable engine state
self.active = False
|
[
"Disables",
"interceptors",
"and",
"stops",
"intercepting",
"any",
"outgoing",
"HTTP",
"traffic",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L248-L258
|
[
"def",
"disable",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"active",
":",
"return",
"None",
"# Disable current mock engine",
"self",
".",
"mock_engine",
".",
"disable",
"(",
")",
"# Disable engine state",
"self",
".",
"active",
"=",
"False"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Engine.should_use_network
|
Verifies if real networking mode should be used for the given
request, passing it to the registered network filters.
Arguments:
request (pook.Request): outgoing HTTP request to test.
Returns:
bool
|
pook/engine.py
|
def should_use_network(self, request):
"""
Verifies if real networking mode should be used for the given
request, passing it to the registered network filters.
Arguments:
request (pook.Request): outgoing HTTP request to test.
Returns:
bool
"""
return (self.networking and
all((fn(request) for fn in self.network_filters)))
|
def should_use_network(self, request):
"""
Verifies if real networking mode should be used for the given
request, passing it to the registered network filters.
Arguments:
request (pook.Request): outgoing HTTP request to test.
Returns:
bool
"""
return (self.networking and
all((fn(request) for fn in self.network_filters)))
|
[
"Verifies",
"if",
"real",
"networking",
"mode",
"should",
"be",
"used",
"for",
"the",
"given",
"request",
"passing",
"it",
"to",
"the",
"registered",
"network",
"filters",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L370-L382
|
[
"def",
"should_use_network",
"(",
"self",
",",
"request",
")",
":",
"return",
"(",
"self",
".",
"networking",
"and",
"all",
"(",
"(",
"fn",
"(",
"request",
")",
"for",
"fn",
"in",
"self",
".",
"network_filters",
")",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Engine.match
|
Matches a given Request instance contract against the registered mocks.
If a mock passes all the matchers, its response will be returned.
Arguments:
request (pook.Request): Request contract to match.
Raises:
pook.PookNoMatches: if networking is disabled and no mock matches
with the given request contract.
Returns:
pook.Response: the mock response to be used by the interceptor.
|
pook/engine.py
|
def match(self, request):
"""
Matches a given Request instance contract against the registered mocks.
If a mock passes all the matchers, its response will be returned.
Arguments:
request (pook.Request): Request contract to match.
Raises:
pook.PookNoMatches: if networking is disabled and no mock matches
with the given request contract.
Returns:
pook.Response: the mock response to be used by the interceptor.
"""
# Trigger engine-level request filters
for test in self.filters:
if not test(request, self):
return False
# Trigger engine-level request mappers
for mapper in self.mappers:
request = mapper(request, self)
if not request:
raise ValueError('map function must return a request object')
# Store list of mock matching errors for further debugging
match_errors = []
# Try to match the request against registered mock definitions
for mock in self.mocks[:]:
try:
# Return the first matched HTTP request mock
matches, errors = mock.match(request.copy())
if len(errors):
match_errors += errors
if matches:
return mock
except PookExpiredMock:
# Remove the mock if already expired
self.mocks.remove(mock)
# Validate that we have a mock
if not self.should_use_network(request):
msg = 'pook error!\n\n'
msg += (
'=> Cannot match any mock for the '
'following request:\n{}'.format(request)
)
# Compose unmatch error details, if debug mode is enabled
if self.debug:
err = '\n\n'.join([str(err) for err in match_errors])
if err:
msg += '\n\n=> Detailed matching errors:\n{}\n'.format(err)
# Raise no matches exception
raise PookNoMatches(msg)
# Register unmatched request
self.unmatched_reqs.append(request)
|
def match(self, request):
"""
Matches a given Request instance contract against the registered mocks.
If a mock passes all the matchers, its response will be returned.
Arguments:
request (pook.Request): Request contract to match.
Raises:
pook.PookNoMatches: if networking is disabled and no mock matches
with the given request contract.
Returns:
pook.Response: the mock response to be used by the interceptor.
"""
# Trigger engine-level request filters
for test in self.filters:
if not test(request, self):
return False
# Trigger engine-level request mappers
for mapper in self.mappers:
request = mapper(request, self)
if not request:
raise ValueError('map function must return a request object')
# Store list of mock matching errors for further debugging
match_errors = []
# Try to match the request against registered mock definitions
for mock in self.mocks[:]:
try:
# Return the first matched HTTP request mock
matches, errors = mock.match(request.copy())
if len(errors):
match_errors += errors
if matches:
return mock
except PookExpiredMock:
# Remove the mock if already expired
self.mocks.remove(mock)
# Validate that we have a mock
if not self.should_use_network(request):
msg = 'pook error!\n\n'
msg += (
'=> Cannot match any mock for the '
'following request:\n{}'.format(request)
)
# Compose unmatch error details, if debug mode is enabled
if self.debug:
err = '\n\n'.join([str(err) for err in match_errors])
if err:
msg += '\n\n=> Detailed matching errors:\n{}\n'.format(err)
# Raise no matches exception
raise PookNoMatches(msg)
# Register unmatched request
self.unmatched_reqs.append(request)
|
[
"Matches",
"a",
"given",
"Request",
"instance",
"contract",
"against",
"the",
"registered",
"mocks",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/engine.py#L384-L446
|
[
"def",
"match",
"(",
"self",
",",
"request",
")",
":",
"# Trigger engine-level request filters",
"for",
"test",
"in",
"self",
".",
"filters",
":",
"if",
"not",
"test",
"(",
"request",
",",
"self",
")",
":",
"return",
"False",
"# Trigger engine-level request mappers",
"for",
"mapper",
"in",
"self",
".",
"mappers",
":",
"request",
"=",
"mapper",
"(",
"request",
",",
"self",
")",
"if",
"not",
"request",
":",
"raise",
"ValueError",
"(",
"'map function must return a request object'",
")",
"# Store list of mock matching errors for further debugging",
"match_errors",
"=",
"[",
"]",
"# Try to match the request against registered mock definitions",
"for",
"mock",
"in",
"self",
".",
"mocks",
"[",
":",
"]",
":",
"try",
":",
"# Return the first matched HTTP request mock",
"matches",
",",
"errors",
"=",
"mock",
".",
"match",
"(",
"request",
".",
"copy",
"(",
")",
")",
"if",
"len",
"(",
"errors",
")",
":",
"match_errors",
"+=",
"errors",
"if",
"matches",
":",
"return",
"mock",
"except",
"PookExpiredMock",
":",
"# Remove the mock if already expired",
"self",
".",
"mocks",
".",
"remove",
"(",
"mock",
")",
"# Validate that we have a mock",
"if",
"not",
"self",
".",
"should_use_network",
"(",
"request",
")",
":",
"msg",
"=",
"'pook error!\\n\\n'",
"msg",
"+=",
"(",
"'=> Cannot match any mock for the '",
"'following request:\\n{}'",
".",
"format",
"(",
"request",
")",
")",
"# Compose unmatch error details, if debug mode is enabled",
"if",
"self",
".",
"debug",
":",
"err",
"=",
"'\\n\\n'",
".",
"join",
"(",
"[",
"str",
"(",
"err",
")",
"for",
"err",
"in",
"match_errors",
"]",
")",
"if",
"err",
":",
"msg",
"+=",
"'\\n\\n=> Detailed matching errors:\\n{}\\n'",
".",
"format",
"(",
"err",
")",
"# Raise no matches exception",
"raise",
"PookNoMatches",
"(",
"msg",
")",
"# Register unmatched request",
"self",
".",
"unmatched_reqs",
".",
"append",
"(",
"request",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
Request.copy
|
Copies the current Request object instance for side-effects purposes.
Returns:
pook.Request: copy of the current Request instance.
|
pook/request.py
|
def copy(self):
"""
Copies the current Request object instance for side-effects purposes.
Returns:
pook.Request: copy of the current Request instance.
"""
req = type(self)()
req.__dict__ = self.__dict__.copy()
req._headers = self.headers.copy()
return req
|
def copy(self):
"""
Copies the current Request object instance for side-effects purposes.
Returns:
pook.Request: copy of the current Request instance.
"""
req = type(self)()
req.__dict__ = self.__dict__.copy()
req._headers = self.headers.copy()
return req
|
[
"Copies",
"the",
"current",
"Request",
"object",
"instance",
"for",
"side",
"-",
"effects",
"purposes",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/request.py#L141-L151
|
[
"def",
"copy",
"(",
"self",
")",
":",
"req",
"=",
"type",
"(",
"self",
")",
"(",
")",
"req",
".",
"__dict__",
"=",
"self",
".",
"__dict__",
".",
"copy",
"(",
")",
"req",
".",
"_headers",
"=",
"self",
".",
"headers",
".",
"copy",
"(",
")",
"return",
"req"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
activate
|
Enables the HTTP traffic interceptors.
This function can be used as a decorator.
Arguments:
fn (function|coroutinefunction): Optional function argument
if used as a decorator.
Returns:
function: decorator wrapper function, only if called as decorator,
otherwise ``None``.
Example::
# Standard use case
pook.activate()
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
pook.disable()
# Decorator use case
@pook.activate
def test_request():
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
|
pook/api.py
|
def activate(fn=None):
"""
Enables the HTTP traffic interceptors.
This function can be used as a decorator.
Arguments:
fn (function|coroutinefunction): Optional function argument
if used as a decorator.
Returns:
function: decorator wrapper function, only if called as decorator,
otherwise ``None``.
Example::
# Standard use case
pook.activate()
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
pook.disable()
# Decorator use case
@pook.activate
def test_request():
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
"""
# If not used as decorator, activate the engine and exit
if not isfunction(fn):
_engine.activate()
return None
# If used as decorator for an async coroutine, wrap it
if iscoroutinefunction is not None and iscoroutinefunction(fn):
return activate_async(fn, _engine)
@functools.wraps(fn)
def wrapper(*args, **kw):
_engine.activate()
try:
fn(*args, **kw)
finally:
_engine.disable()
return wrapper
|
def activate(fn=None):
"""
Enables the HTTP traffic interceptors.
This function can be used as a decorator.
Arguments:
fn (function|coroutinefunction): Optional function argument
if used as a decorator.
Returns:
function: decorator wrapper function, only if called as decorator,
otherwise ``None``.
Example::
# Standard use case
pook.activate()
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
pook.disable()
# Decorator use case
@pook.activate
def test_request():
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
"""
# If not used as decorator, activate the engine and exit
if not isfunction(fn):
_engine.activate()
return None
# If used as decorator for an async coroutine, wrap it
if iscoroutinefunction is not None and iscoroutinefunction(fn):
return activate_async(fn, _engine)
@functools.wraps(fn)
def wrapper(*args, **kw):
_engine.activate()
try:
fn(*args, **kw)
finally:
_engine.disable()
return wrapper
|
[
"Enables",
"the",
"HTTP",
"traffic",
"interceptors",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/api.py#L73-L122
|
[
"def",
"activate",
"(",
"fn",
"=",
"None",
")",
":",
"# If not used as decorator, activate the engine and exit",
"if",
"not",
"isfunction",
"(",
"fn",
")",
":",
"_engine",
".",
"activate",
"(",
")",
"return",
"None",
"# If used as decorator for an async coroutine, wrap it",
"if",
"iscoroutinefunction",
"is",
"not",
"None",
"and",
"iscoroutinefunction",
"(",
"fn",
")",
":",
"return",
"activate_async",
"(",
"fn",
",",
"_engine",
")",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"_engine",
".",
"activate",
"(",
")",
"try",
":",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"finally",
":",
"_engine",
".",
"disable",
"(",
")",
"return",
"wrapper"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
use
|
Creates a new isolated mock engine to be used via context manager.
Example::
with pook.use() as engine:
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
|
pook/api.py
|
def use(network=False):
"""
Creates a new isolated mock engine to be used via context manager.
Example::
with pook.use() as engine:
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
"""
global _engine
# Create temporary engine
__engine = _engine
activated = __engine.active
if activated:
__engine.disable()
_engine = Engine(network=network)
_engine.activate()
# Yield engine to be used by the context manager
yield _engine
# Restore engine state
_engine.disable()
if network:
_engine.disable_network()
# Restore previous engine
_engine = __engine
if activated:
_engine.activate()
|
def use(network=False):
"""
Creates a new isolated mock engine to be used via context manager.
Example::
with pook.use() as engine:
pook.mock('server.com/foo').reply(404)
res = requests.get('server.com/foo')
assert res.status_code == 404
"""
global _engine
# Create temporary engine
__engine = _engine
activated = __engine.active
if activated:
__engine.disable()
_engine = Engine(network=network)
_engine.activate()
# Yield engine to be used by the context manager
yield _engine
# Restore engine state
_engine.disable()
if network:
_engine.disable_network()
# Restore previous engine
_engine = __engine
if activated:
_engine.activate()
|
[
"Creates",
"a",
"new",
"isolated",
"mock",
"engine",
"to",
"be",
"used",
"via",
"context",
"manager",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/api.py#L185-L219
|
[
"def",
"use",
"(",
"network",
"=",
"False",
")",
":",
"global",
"_engine",
"# Create temporal engine",
"__engine",
"=",
"_engine",
"activated",
"=",
"__engine",
".",
"active",
"if",
"activated",
":",
"__engine",
".",
"disable",
"(",
")",
"_engine",
"=",
"Engine",
"(",
"network",
"=",
"network",
")",
"_engine",
".",
"activate",
"(",
")",
"# Yield enfine to be used by the context manager",
"yield",
"_engine",
"# Restore engine state",
"_engine",
".",
"disable",
"(",
")",
"if",
"network",
":",
"_engine",
".",
"disable_network",
"(",
")",
"# Restore previous engine",
"_engine",
"=",
"__engine",
"if",
"activated",
":",
"_engine",
".",
"activate",
"(",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
regex
|
Convenient shortcut to ``re.compile()`` for fast, easy to use
regular expression compilation without an extra import statement.
Arguments:
expression (str): regular expression value.
flags (int): optional regular expression flags.
Defaults to ``re.IGNORECASE``
Returns:
pattern: compiled regular expression object.
Raises:
Exception: in case of regular expression compilation error
Example::
(pook
.get('api.com/foo')
.header('Content-Type', pook.regex('[a-z]{1,4}')))
|
pook/api.py
|
def regex(expression, flags=re.IGNORECASE):
"""
Convenient shortcut to ``re.compile()`` for fast, easy to use
regular expression compilation without an extra import statement.
Arguments:
expression (str): regular expression value.
flags (int): optional regular expression flags.
Defaults to ``re.IGNORECASE``
Returns:
pattern: compiled regular expression object.
Raises:
Exception: in case of regular expression compilation error
Example::
(pook
.get('api.com/foo')
.header('Content-Type', pook.regex('[a-z]{1,4}')))
"""
return re.compile(expression, flags=flags)
|
def regex(expression, flags=re.IGNORECASE):
"""
Convenient shortcut to ``re.compile()`` for fast, easy to use
regular expression compilation without an extra import statement.
Arguments:
expression (str): regular expression value.
flags (int): optional regular expression flags.
Defaults to ``re.IGNORECASE``
Returns:
pattern: compiled regular expression object.
Raises:
Exception: in case of regular expression compilation error
Example::
(pook
.get('api.com/foo')
.header('Content-Type', pook.regex('[a-z]{1,4}')))
"""
return re.compile(expression, flags=flags)
|
[
"Convenient",
"shortcut",
"to",
"re",
".",
"compile",
"()",
"for",
"fast",
"easy",
"to",
"use",
"regular",
"expression",
"compilation",
"without",
"an",
"extra",
"import",
"statement",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/api.py#L510-L532
|
[
"def",
"regex",
"(",
"expression",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
":",
"return",
"re",
".",
"compile",
"(",
"expression",
",",
"flags",
"=",
"flags",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
MockEngine.add_interceptor
|
Adds one or multiple HTTP traffic interceptors to the current
mocking engine.
Interceptors are typically HTTP client specific wrapper classes that
implement the pook interceptor interface.
Arguments:
interceptors (pook.interceptors.BaseInterceptor)
|
pook/mock_engine.py
|
def add_interceptor(self, *interceptors):
"""
Adds one or multiple HTTP traffic interceptors to the current
mocking engine.
Interceptors are typically HTTP client specific wrapper classes that
implement the pook interceptor interface.
Arguments:
interceptors (pook.interceptors.BaseInterceptor)
"""
for interceptor in interceptors:
self.interceptors.append(interceptor(self.engine))
|
def add_interceptor(self, *interceptors):
"""
Adds one or multiple HTTP traffic interceptors to the current
mocking engine.
Interceptors are typically HTTP client specific wrapper classes that
implement the pook interceptor interface.
Arguments:
interceptors (pook.interceptors.BaseInterceptor)
"""
for interceptor in interceptors:
self.interceptors.append(interceptor(self.engine))
|
[
"Adds",
"one",
"or",
"multiple",
"HTTP",
"traffic",
"interceptors",
"to",
"the",
"current",
"mocking",
"engine",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock_engine.py#L49-L61
|
[
"def",
"add_interceptor",
"(",
"self",
",",
"*",
"interceptors",
")",
":",
"for",
"interceptor",
"in",
"interceptors",
":",
"self",
".",
"interceptors",
".",
"append",
"(",
"interceptor",
"(",
"self",
".",
"engine",
")",
")"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
MockEngine.remove_interceptor
|
Removes a specific interceptor by name.
Arguments:
name (str): interceptor name to disable.
Returns:
bool: `True` if the interceptor was disabled, otherwise `False`.
|
pook/mock_engine.py
|
def remove_interceptor(self, name):
"""
Removes a specific interceptor by name.
Arguments:
name (str): interceptor name to disable.
Returns:
bool: `True` if the interceptor was disabled, otherwise `False`.
"""
for index, interceptor in enumerate(self.interceptors):
matches = (
type(interceptor).__name__ == name or
getattr(interceptor, 'name') == name
)
if matches:
self.interceptors.pop(index)
return True
return False
|
def remove_interceptor(self, name):
"""
Removes a specific interceptor by name.
Arguments:
name (str): interceptor name to disable.
Returns:
bool: `True` if the interceptor was disabled, otherwise `False`.
"""
for index, interceptor in enumerate(self.interceptors):
matches = (
type(interceptor).__name__ == name or
getattr(interceptor, 'name') == name
)
if matches:
self.interceptors.pop(index)
return True
return False
|
[
"Removes",
"a",
"specific",
"interceptor",
"by",
"name",
"."
] |
h2non/pook
|
python
|
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock_engine.py#L71-L89
|
[
"def",
"remove_interceptor",
"(",
"self",
",",
"name",
")",
":",
"for",
"index",
",",
"interceptor",
"in",
"enumerate",
"(",
"self",
".",
"interceptors",
")",
":",
"matches",
"=",
"(",
"type",
"(",
"interceptor",
")",
".",
"__name__",
"==",
"name",
"or",
"getattr",
"(",
"interceptor",
",",
"'name'",
")",
"==",
"name",
")",
"if",
"matches",
":",
"self",
".",
"interceptors",
".",
"pop",
"(",
"index",
")",
"return",
"True",
"return",
"False"
] |
e64094e41e4d89d98d2d29af7608ef27dc50cf19
|
test
|
get_setting
|
Get key from connection or default to settings.
|
pgcrypto/mixins.py
|
def get_setting(connection, key):
"""Get key from connection or default to settings."""
if key in connection.settings_dict:
return connection.settings_dict[key]
else:
return getattr(settings, key)
|
def get_setting(connection, key):
"""Get key from connection or default to settings."""
if key in connection.settings_dict:
return connection.settings_dict[key]
else:
return getattr(settings, key)
|
[
"Get",
"key",
"from",
"connection",
"or",
"default",
"to",
"settings",
"."
] |
incuna/django-pgcrypto-fields
|
python
|
https://github.com/incuna/django-pgcrypto-fields/blob/406fddf0cbe9091ba71b97206d0f4719c0450ac1/pgcrypto/mixins.py#L13-L18
|
[
"def",
"get_setting",
"(",
"connection",
",",
"key",
")",
":",
"if",
"key",
"in",
"connection",
".",
"settings_dict",
":",
"return",
"connection",
".",
"settings_dict",
"[",
"key",
"]",
"else",
":",
"return",
"getattr",
"(",
"settings",
",",
"key",
")"
] |
406fddf0cbe9091ba71b97206d0f4719c0450ac1
|
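
A sketch of the lookup order in ``get_setting``: a key present in the
connection's ``settings_dict`` wins, otherwise the global Django settings are
consulted. The fake connection classes are illustrative; ``PUBLIC_PGP_KEY``
is the real setting name used elsewhere in this module. Assumes the package
imports cleanly once settings are configured.

from django.conf import settings

settings.configure(PUBLIC_PGP_KEY='global-public-key')

from pgcrypto.mixins import get_setting

class FakeConnection(object):
    settings_dict = {'PUBLIC_PGP_KEY': 'per-database-key'}

class BareConnection(object):
    settings_dict = {}

print(get_setting(FakeConnection(), 'PUBLIC_PGP_KEY'))  # per-database-key
print(get_setting(BareConnection(), 'PUBLIC_PGP_KEY'))  # global-public-key
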
test
|
DecryptedCol.as_sql
|
Build SQL with decryption and casting.
|
pgcrypto/mixins.py
|
def as_sql(self, compiler, connection):
"""Build SQL with decryption and casting."""
sql, params = super(DecryptedCol, self).as_sql(compiler, connection)
sql = self.target.get_decrypt_sql(connection) % (sql, self.target.get_cast_sql())
return sql, params
|
def as_sql(self, compiler, connection):
"""Build SQL with decryption and casting."""
sql, params = super(DecryptedCol, self).as_sql(compiler, connection)
sql = self.target.get_decrypt_sql(connection) % (sql, self.target.get_cast_sql())
return sql, params
|
[
"Build",
"SQL",
"with",
"decryption",
"and",
"casting",
"."
] |
incuna/django-pgcrypto-fields
|
python
|
https://github.com/incuna/django-pgcrypto-fields/blob/406fddf0cbe9091ba71b97206d0f4719c0450ac1/pgcrypto/mixins.py#L30-L34
|
[
"def",
"as_sql",
"(",
"self",
",",
"compiler",
",",
"connection",
")",
":",
"sql",
",",
"params",
"=",
"super",
"(",
"DecryptedCol",
",",
"self",
")",
".",
"as_sql",
"(",
"compiler",
",",
"connection",
")",
"sql",
"=",
"self",
".",
"target",
".",
"get_decrypt_sql",
"(",
"connection",
")",
"%",
"(",
"sql",
",",
"self",
".",
"target",
".",
"get_cast_sql",
"(",
")",
")",
"return",
"sql",
",",
"params"
] |
406fddf0cbe9091ba71b97206d0f4719c0450ac1
|
test
|
HashMixin.pre_save
|
Save the original_value.
|
pgcrypto/mixins.py
|
def pre_save(self, model_instance, add):
"""Save the original_value."""
if self.original:
original_value = getattr(model_instance, self.original)
setattr(model_instance, self.attname, original_value)
return super(HashMixin, self).pre_save(model_instance, add)
|
def pre_save(self, model_instance, add):
"""Save the original_value."""
if self.original:
original_value = getattr(model_instance, self.original)
setattr(model_instance, self.attname, original_value)
return super(HashMixin, self).pre_save(model_instance, add)
|
[
"Save",
"the",
"original_value",
"."
] |
incuna/django-pgcrypto-fields
|
python
|
https://github.com/incuna/django-pgcrypto-fields/blob/406fddf0cbe9091ba71b97206d0f4719c0450ac1/pgcrypto/mixins.py#L50-L56
|
[
"def",
"pre_save",
"(",
"self",
",",
"model_instance",
",",
"add",
")",
":",
"if",
"self",
".",
"original",
":",
"original_value",
"=",
"getattr",
"(",
"model_instance",
",",
"self",
".",
"original",
")",
"setattr",
"(",
"model_instance",
",",
"self",
".",
"attname",
",",
"original_value",
")",
"return",
"super",
"(",
"HashMixin",
",",
"self",
")",
".",
"pre_save",
"(",
"model_instance",
",",
"add",
")"
] |
406fddf0cbe9091ba71b97206d0f4719c0450ac1
|
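
A sketch of the ``original`` hand-off performed by ``pre_save``: when the
hashed column names a source attribute, its value is copied onto the hash
field just before save, so the hashing placeholder (see ``get_placeholder``
below) is applied to the plaintext. Assumes a ``HashMixin``-based field such
as ``TextHMACField`` from this package, inside a configured Django project;
model and app names are hypothetical.

from django.db import models

from pgcrypto import fields

class Credential(models.Model):
    # Plaintext source column (illustrative)
    value = models.TextField()
    # pre_save() copies `value` into this field, which the SQL
    # placeholder then hashes on INSERT/UPDATE
    value_hashed = fields.TextHMACField(original='value')

    class Meta:
        app_label = 'demo'  # hypothetical app label
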
test
|
HashMixin.get_placeholder
|
Tell postgres to encrypt this field with a hashing function.
The `value` string is checked to determine if we need to hash or keep
the current value.
`compiler` and `connection` are ignored here as we don't need custom operators.
|
pgcrypto/mixins.py
|
def get_placeholder(self, value=None, compiler=None, connection=None):
"""
Tell postgres to encrypt this field with a hashing function.
The `value` string is checked to determine if we need to hash or keep
the current value.
`compiler` and `connection` are ignored here as we don't need custom operators.
"""
if value is None or value.startswith('\\x'):
return '%s'
return self.get_encrypt_sql(connection)
|
def get_placeholder(self, value=None, compiler=None, connection=None):
"""
Tell postgres to encrypt this field with a hashing function.
The `value` string is checked to determine if we need to hash or keep
the current value.
`compiler` and `connection` are ignored here as we don't need custom operators.
"""
if value is None or value.startswith('\\x'):
return '%s'
return self.get_encrypt_sql(connection)
|
[
"Tell",
"postgres",
"to",
"encrypt",
"this",
"field",
"with",
"a",
"hashing",
"function",
"."
] |
incuna/django-pgcrypto-fields
|
python
|
https://github.com/incuna/django-pgcrypto-fields/blob/406fddf0cbe9091ba71b97206d0f4719c0450ac1/pgcrypto/mixins.py#L58-L70
|
[
"def",
"get_placeholder",
"(",
"self",
",",
"value",
"=",
"None",
",",
"compiler",
"=",
"None",
",",
"connection",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
"or",
"value",
".",
"startswith",
"(",
"'\\\\x'",
")",
":",
"return",
"'%s'",
"return",
"self",
".",
"get_encrypt_sql",
"(",
"connection",
")"
] |
406fddf0cbe9091ba71b97206d0f4719c0450ac1
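A minimal sketch of the placeholder decision, assuming a stand-in digest template in place of the field's real hashing SQL:

def get_placeholder(value=None, encrypt_sql="digest(%s, 'sha512')"):
    # Values already stored as postgres bytea literals ('\x...') pass
    # through unchanged; anything else is wrapped for hashing.
    if value is None or value.startswith('\\x'):
        return '%s'
    return encrypt_sql

assert get_placeholder(None) == '%s'
assert get_placeholder('\\xdeadbeef') == '%s'
assert get_placeholder('secret') == "digest(%s, 'sha512')"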
|
test
|
PGPMixin.get_col
|
Get the decryption for col.
|
pgcrypto/mixins.py
|
def get_col(self, alias, output_field=None):
"""Get the decryption for col."""
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
return DecryptedCol(
alias,
self,
output_field
)
else:
return self.cached_col
|
def get_col(self, alias, output_field=None):
"""Get the decryption for col."""
if output_field is None:
output_field = self
if alias != self.model._meta.db_table or output_field != self:
return DecryptedCol(
alias,
self,
output_field
)
else:
return self.cached_col
|
[
"Get",
"the",
"decryption",
"for",
"col",
"."
] |
incuna/django-pgcrypto-fields
|
python
|
https://github.com/incuna/django-pgcrypto-fields/blob/406fddf0cbe9091ba71b97206d0f4719c0450ac1/pgcrypto/mixins.py#L106-L117
|
[
"def",
"get_col",
"(",
"self",
",",
"alias",
",",
"output_field",
"=",
"None",
")",
":",
"if",
"output_field",
"is",
"None",
":",
"output_field",
"=",
"self",
"if",
"alias",
"!=",
"self",
".",
"model",
".",
"_meta",
".",
"db_table",
"or",
"output_field",
"!=",
"self",
":",
"return",
"DecryptedCol",
"(",
"alias",
",",
"self",
",",
"output_field",
")",
"else",
":",
"return",
"self",
".",
"cached_col"
] |
406fddf0cbe9091ba71b97206d0f4719c0450ac1
|
test
|
PGPPublicKeyFieldMixin.get_placeholder
|
Tell postgres to encrypt this field using PGP.
|
pgcrypto/mixins.py
|
def get_placeholder(self, value=None, compiler=None, connection=None):
"""Tell postgres to encrypt this field using PGP."""
return self.encrypt_sql.format(get_setting(connection, 'PUBLIC_PGP_KEY'))
|
def get_placeholder(self, value=None, compiler=None, connection=None):
"""Tell postgres to encrypt this field using PGP."""
return self.encrypt_sql.format(get_setting(connection, 'PUBLIC_PGP_KEY'))
|
[
"Tell",
"postgres",
"to",
"encrypt",
"this",
"field",
"using",
"PGP",
"."
] |
incuna/django-pgcrypto-fields
|
python
|
https://github.com/incuna/django-pgcrypto-fields/blob/406fddf0cbe9091ba71b97206d0f4719c0450ac1/pgcrypto/mixins.py#L134-L136
|
[
"def",
"get_placeholder",
"(",
"self",
",",
"value",
"=",
"None",
",",
"compiler",
"=",
"None",
",",
"connection",
"=",
"None",
")",
":",
"return",
"self",
".",
"encrypt_sql",
".",
"format",
"(",
"get_setting",
"(",
"connection",
",",
"'PUBLIC_PGP_KEY'",
")",
")"
] |
406fddf0cbe9091ba71b97206d0f4719c0450ac1
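A toy illustration of the key templating, with a stand-in template and key (the real encrypt_sql string and the PUBLIC_PGP_KEY setting are not shown in this excerpt):

encrypt_sql = "pgp_pub_encrypt(%s, dearmor('{}'))"  # stand-in template
public_key = '-----BEGIN PGP PUBLIC KEY BLOCK-----...'
print(encrypt_sql.format(public_key))
# pgp_pub_encrypt(%s, dearmor('-----BEGIN PGP PUBLIC KEY BLOCK-----...'))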
|
test
|
hunt_repeated_yaml_keys
|
Parses yaml and returns a dict of repeated keys and
the lines on which they occur
|
lib/ansiblereview/vars.py
|
def hunt_repeated_yaml_keys(data):
"""Parses yaml and returns a list of repeated variables and
the line on which they occur
"""
loader = yaml.Loader(data)
def compose_node(parent, index):
# the line number where the previous token has ended (plus empty lines)
line = loader.line
node = Composer.compose_node(loader, parent, index)
node.__line__ = line + 1
return node
def construct_mapping(node, deep=False):
mapping = dict()
errors = dict()
for key_node, value_node in node.value:
key = key_node.value
if key in mapping:
if key in errors:
errors[key].append(key_node.__line__)
else:
errors[key] = [mapping[key], key_node.__line__]
mapping[key] = key_node.__line__
return errors
loader.compose_node = compose_node
loader.construct_mapping = construct_mapping
data = loader.get_single_data()
return data
|
def hunt_repeated_yaml_keys(data):
"""Parses yaml and returns a list of repeated variables and
the line on which they occur
"""
loader = yaml.Loader(data)
def compose_node(parent, index):
# the line number where the previous token has ended (plus empty lines)
line = loader.line
node = Composer.compose_node(loader, parent, index)
node.__line__ = line + 1
return node
def construct_mapping(node, deep=False):
mapping = dict()
errors = dict()
for key_node, value_node in node.value:
key = key_node.value
if key in mapping:
if key in errors:
errors[key].append(key_node.__line__)
else:
errors[key] = [mapping[key], key_node.__line__]
mapping[key] = key_node.__line__
return errors
loader.compose_node = compose_node
loader.construct_mapping = construct_mapping
data = loader.get_single_data()
return data
|
[
"Parses",
"yaml",
"and",
"returns",
"a",
"list",
"of",
"repeated",
"variables",
"and",
"the",
"line",
"on",
"which",
"they",
"occur"
] |
willthames/ansible-review
|
python
|
https://github.com/willthames/ansible-review/blob/c55c8f1d1c009f48c289160a28188ff2f3152486/lib/ansiblereview/vars.py#L7-L38
|
[
"def",
"hunt_repeated_yaml_keys",
"(",
"data",
")",
":",
"loader",
"=",
"yaml",
".",
"Loader",
"(",
"data",
")",
"def",
"compose_node",
"(",
"parent",
",",
"index",
")",
":",
"# the line number where the previous token has ended (plus empty lines)",
"line",
"=",
"loader",
".",
"line",
"node",
"=",
"Composer",
".",
"compose_node",
"(",
"loader",
",",
"parent",
",",
"index",
")",
"node",
".",
"__line__",
"=",
"line",
"+",
"1",
"return",
"node",
"def",
"construct_mapping",
"(",
"node",
",",
"deep",
"=",
"False",
")",
":",
"mapping",
"=",
"dict",
"(",
")",
"errors",
"=",
"dict",
"(",
")",
"for",
"key_node",
",",
"value_node",
"in",
"node",
".",
"value",
":",
"key",
"=",
"key_node",
".",
"value",
"if",
"key",
"in",
"mapping",
":",
"if",
"key",
"in",
"errors",
":",
"errors",
"[",
"key",
"]",
".",
"append",
"(",
"key_node",
".",
"__line__",
")",
"else",
":",
"errors",
"[",
"key",
"]",
"=",
"[",
"mapping",
"[",
"key",
"]",
",",
"key_node",
".",
"__line__",
"]",
"mapping",
"[",
"key",
"]",
"=",
"key_node",
".",
"__line__",
"return",
"errors",
"loader",
".",
"compose_node",
"=",
"compose_node",
"loader",
".",
"construct_mapping",
"=",
"construct_mapping",
"data",
"=",
"loader",
".",
"get_single_data",
"(",
")",
"return",
"data"
] |
c55c8f1d1c009f48c289160a28188ff2f3152486
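A loader-free sketch of the same duplicate-key idea, restricted (as a simplifying assumption) to plain top-level "key: value" lines:

def find_repeated_keys(text):
    # Record the line of every top-level key, then keep only the keys
    # that were seen more than once.
    seen = {}
    for lineno, line in enumerate(text.splitlines(), start=1):
        if ':' in line and not line.startswith((' ', '#')):
            key = line.split(':', 1)[0].strip()
            seen.setdefault(key, []).append(lineno)
    return {k: lines for k, lines in seen.items() if len(lines) > 1}

print(find_repeated_keys("a: 1\nb: 2\na: 3\n"))  # {'a': [1, 3]}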
|
test
|
base_regression
|
this function calculates the regression coefficients for a
given vector containing the averages of tip and branch
quantities.
Parameters
----------
Q : numpy.array
vector with the weighted averages and second moments of the tip and branch values
slope : None, optional
if given, the slope is fixed and only the intercept is fitted
Returns
-------
dict
regression parameters: 'slope', 'intercept', 'chisq', plus 'hessian' and 'cov' when the slope was fitted
|
treetime/treeregression.py
|
def base_regression(Q, slope=None):
"""
this function calculates the regression coefficients for a
given vector containing the averages of tip and branch
quantities.
Parameters
----------
Q : numpy.array
vector with the weighted averages and second moments of the tip and branch values
slope : None, optional
if given, the slope is fixed and only the intercept is fitted
Returns
-------
dict
regression parameters: 'slope', 'intercept', 'chisq', plus 'hessian' and 'cov' when the slope was fitted
"""
if slope is None:
slope = (Q[dtavgii] - Q[tavgii]*Q[davgii]/Q[sii]) \
/(Q[tsqii] - Q[tavgii]**2/Q[sii])
only_intercept=False
else:
only_intercept=True
intercept = (Q[davgii] - Q[tavgii]*slope)/Q[sii]
if only_intercept:
return {'slope':slope, 'intercept':intercept,
'chisq': 0.5*(Q[dsqii]/Q[sii] - Q[davgii]**2/Q[sii]**2)}
chisq = 0.5*(Q[dsqii] - Q[davgii]**2/Q[sii]
- (Q[dtavgii] - Q[davgii]*Q[tavgii]/Q[sii])**2/(Q[tsqii]
- Q[tavgii]**2/Q[sii]))
estimator_hessian = np.array([[Q[tsqii], Q[tavgii]], [Q[tavgii], Q[sii]]])
return {'slope':slope, 'intercept':intercept,
'chisq':chisq, 'hessian':estimator_hessian,
'cov':np.linalg.inv(estimator_hessian)}
|
def base_regression(Q, slope=None):
"""
this function calculates the regression coefficients for a
given vector containing the averages of tip and branch
quantities.
Parameters
----------
Q : numpy.array
vector with the weighted averages and second moments of the tip and branch values
slope : None, optional
if given, the slope is fixed and only the intercept is fitted
Returns
-------
dict
regression parameters: 'slope', 'intercept', 'chisq', plus 'hessian' and 'cov' when the slope was fitted
"""
if slope is None:
slope = (Q[dtavgii] - Q[tavgii]*Q[davgii]/Q[sii]) \
/(Q[tsqii] - Q[tavgii]**2/Q[sii])
only_intercept=False
else:
only_intercept=True
intercept = (Q[davgii] - Q[tavgii]*slope)/Q[sii]
if only_intercept:
return {'slope':slope, 'intercept':intercept,
'chisq': 0.5*(Q[dsqii]/Q[sii] - Q[davgii]**2/Q[sii]**2)}
chisq = 0.5*(Q[dsqii] - Q[davgii]**2/Q[sii]
- (Q[dtavgii] - Q[davgii]*Q[tavgii]/Q[sii])**2/(Q[tsqii]
- Q[tavgii]**2/Q[sii]))
estimator_hessian = np.array([[Q[tsqii], Q[tavgii]], [Q[tavgii], Q[sii]]])
return {'slope':slope, 'intercept':intercept,
'chisq':chisq, 'hessian':estimator_hessian,
'cov':np.linalg.inv(estimator_hessian)}
|
[
"this",
"function",
"calculates",
"the",
"regression",
"coefficients",
"for",
"a",
"given",
"vector",
"containing",
"the",
"averages",
"of",
"tip",
"and",
"branch",
"quantities",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L6-L45
|
[
"def",
"base_regression",
"(",
"Q",
",",
"slope",
"=",
"None",
")",
":",
"if",
"slope",
"is",
"None",
":",
"slope",
"=",
"(",
"Q",
"[",
"dtavgii",
"]",
"-",
"Q",
"[",
"tavgii",
"]",
"*",
"Q",
"[",
"davgii",
"]",
"/",
"Q",
"[",
"sii",
"]",
")",
"/",
"(",
"Q",
"[",
"tsqii",
"]",
"-",
"Q",
"[",
"tavgii",
"]",
"**",
"2",
"/",
"Q",
"[",
"sii",
"]",
")",
"only_intercept",
"=",
"False",
"else",
":",
"only_intercept",
"=",
"True",
"intercept",
"=",
"(",
"Q",
"[",
"davgii",
"]",
"-",
"Q",
"[",
"tavgii",
"]",
"*",
"slope",
")",
"/",
"Q",
"[",
"sii",
"]",
"if",
"only_intercept",
":",
"return",
"{",
"'slope'",
":",
"slope",
",",
"'intercept'",
":",
"intercept",
",",
"'chisq'",
":",
"0.5",
"*",
"(",
"Q",
"[",
"dsqii",
"]",
"/",
"Q",
"[",
"sii",
"]",
"-",
"Q",
"[",
"davgii",
"]",
"**",
"2",
"/",
"Q",
"[",
"sii",
"]",
"**",
"2",
")",
"}",
"chisq",
"=",
"0.5",
"*",
"(",
"Q",
"[",
"dsqii",
"]",
"-",
"Q",
"[",
"davgii",
"]",
"**",
"2",
"/",
"Q",
"[",
"sii",
"]",
"-",
"(",
"Q",
"[",
"dtavgii",
"]",
"-",
"Q",
"[",
"davgii",
"]",
"*",
"Q",
"[",
"tavgii",
"]",
"/",
"Q",
"[",
"sii",
"]",
")",
"**",
"2",
"/",
"(",
"Q",
"[",
"tsqii",
"]",
"-",
"Q",
"[",
"tavgii",
"]",
"**",
"2",
"/",
"Q",
"[",
"sii",
"]",
")",
")",
"estimator_hessian",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"Q",
"[",
"tsqii",
"]",
",",
"Q",
"[",
"tavgii",
"]",
"]",
",",
"[",
"Q",
"[",
"tavgii",
"]",
",",
"Q",
"[",
"sii",
"]",
"]",
"]",
")",
"return",
"{",
"'slope'",
":",
"slope",
",",
"'intercept'",
":",
"intercept",
",",
"'chisq'",
":",
"chisq",
",",
"'hessian'",
":",
"estimator_hessian",
",",
"'cov'",
":",
"np",
".",
"linalg",
".",
"inv",
"(",
"estimator_hessian",
")",
"}"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
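A standalone numeric sketch of the same weighted fit, assuming Q holds (sum w*t, sum w*d, sum w*t**2, sum w*t*d, sum w*d**2, sum w); the index constants (tavgii and friends) are defined outside this excerpt:

import numpy as np

t = np.array([2000.0, 2005.0, 2010.0, 2015.0])   # tip dates
d = np.array([0.010, 0.020, 0.031, 0.039])       # root-to-tip distances
w = np.ones_like(t)                              # unit weights

tavg, davg, s = (w*t).sum(), (w*d).sum(), w.sum()
tsq, dtavg = (w*t*t).sum(), (w*t*d).sum()

# Same algebra as the slope/intercept expressions above.
slope = (dtavg - tavg*davg/s) / (tsq - tavg**2/s)
intercept = (davg - tavg*slope) / s
print(slope, intercept)  # agrees with an ordinary least-squares line here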
|
test
|
TreeRegression.Cov
|
calculate the covariance matrix of the tips assuming variance
has accumulated along branches of the tree according to
the provided branch variance function.
Returns
-------
M : (np.array)
covariance matrix with tips arranged in standard traversal order.
|
treetime/treeregression.py
|
def Cov(self):
"""
calculate the covariance matrix of the tips assuming variance
has accumulated along branches of the tree according to
the provided branch variance function.
Returns
-------
M : (np.array)
covariance matrix with tips arranged in standard traversal order.
"""
# accumulate the covariance matrix by adding 'squares'
M = np.zeros((self.N, self.N))
for n in self.tree.find_clades():
if n == self.tree.root:
continue
M[np.meshgrid(n._ii, n._ii)] += self.branch_variance(n)
return M
|
def Cov(self):
"""
calculate the covariance matrix of the tips assuming variance
has accumulated along branches of the tree according to
the provided branch variance function.
Returns
-------
M : (np.array)
covariance matrix with tips arranged in standard traversal order.
"""
# accumulate the covariance matrix by adding 'squares'
M = np.zeros((self.N, self.N))
for n in self.tree.find_clades():
if n == self.tree.root:
continue
M[np.meshgrid(n._ii, n._ii)] += self.branch_variance(n)
return M
|
[
"calculate",
"the",
"covariance",
"matrix",
"of",
"the",
"tips",
"assuming",
"variance",
"has",
"accumulated",
"along",
"branches",
"of",
"the",
"tree",
"accoriding",
"to",
"the",
"the",
"provided",
"Returns",
"-------"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L113-L130
|
[
"def",
"Cov",
"(",
"self",
")",
":",
"# accumulate the covariance matrix by adding 'squares'",
"M",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"N",
",",
"self",
".",
"N",
")",
")",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"find_clades",
"(",
")",
":",
"if",
"n",
"==",
"self",
".",
"tree",
".",
"root",
":",
"continue",
"M",
"[",
"np",
".",
"meshgrid",
"(",
"n",
".",
"_ii",
",",
"n",
".",
"_ii",
")",
"]",
"+=",
"self",
".",
"branch_variance",
"(",
"n",
")",
"return",
"M"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
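A hand-rolled check on a hypothetical 3-tip tree ((A,B),C): tips A and B share the internal branch, so its variance appears in their off-diagonal entries (np.ix_ is used here for the block update):

import numpy as np

var_internal, var_A, var_B, var_C = 0.5, 1.0, 1.0, 2.0
M = np.zeros((3, 3))
M[np.ix_([0, 1], [0, 1])] += var_internal        # shared branch above (A,B)
for i, v in enumerate([var_A, var_B, var_C]):
    M[i, i] += v                                 # each tip's own branch
print(M)
# [[1.5 0.5 0. ]
#  [0.5 1.5 0. ]
#  [0.  0.  2. ]]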
|
test
|
TreeRegression.CovInv
|
Inverse of the covariance matrix
Returns
-------
H : (np.array)
inverse of the covariance matrix.
|
treetime/treeregression.py
|
def CovInv(self):
"""
Inverse of the covariance matrix
Returns
-------
H : (np.array)
inverse of the covariance matrix.
"""
self.recurse(full_matrix=True)
return self.tree.root.cinv
|
def CovInv(self):
"""
Inverse of the covariance matrix
Returns
-------
H : (np.array)
inverse of the covariance matrix.
"""
self.recurse(full_matrix=True)
return self.tree.root.cinv
|
[
"Inverse",
"of",
"the",
"covariance",
"matrix"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L133-L144
|
[
"def",
"CovInv",
"(",
"self",
")",
":",
"self",
".",
"recurse",
"(",
"full_matrix",
"=",
"True",
")",
"return",
"self",
".",
"tree",
".",
"root",
".",
"cinv"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
TreeRegression.recurse
|
recursion to calculate inverse covariance matrix
Parameters
----------
full_matrix : bool, optional
if True, the entire inverse matrix is calculated. Otherwise, only the weighting vector.
|
treetime/treeregression.py
|
def recurse(self, full_matrix=False):
"""
recursion to calculate inverse covariance matrix
Parameters
----------
full_matrix : bool, optional
if True, the entire inverse matrix is calculated. Otherwise, only the weighting vector.
"""
for n in self.tree.get_nonterminals(order='postorder'):
n_leaves = len(n._ii)
if full_matrix: M = np.zeros((n_leaves, n_leaves), dtype=float)
r = np.zeros(n_leaves, dtype=float)
c_count = 0
for c in n:
ssq = self.branch_variance(c)
nc = len(c._ii)
if c.is_terminal():
if full_matrix:
M[c_count, c_count] = 1.0/ssq
r[c_count] = 1.0/ssq
else:
if full_matrix:
M[c_count:c_count+nc, c_count:c_count+nc] = c.cinv - ssq*np.outer(c.r,c.r)/(1+ssq*c.s)
r[c_count:c_count+nc] = c.r/(1+ssq*c.s)
c_count += nc
if full_matrix: n.cinv = M
n.r = r #M.sum(axis=1)
n.s = n.r.sum()
|
def recurse(self, full_matrix=False):
"""
recursion to calculate inverse covariance matrix
Parameters
----------
full_matrix : bool, optional
if True, the entire inverse matrix is calculated. Otherwise, only the weighting vector.
"""
for n in self.tree.get_nonterminals(order='postorder'):
n_leaves = len(n._ii)
if full_matrix: M = np.zeros((n_leaves, n_leaves), dtype=float)
r = np.zeros(n_leaves, dtype=float)
c_count = 0
for c in n:
ssq = self.branch_variance(c)
nc = len(c._ii)
if c.is_terminal():
if full_matrix:
M[c_count, c_count] = 1.0/ssq
r[c_count] = 1.0/ssq
else:
if full_matrix:
M[c_count:c_count+nc, c_count:c_count+nc] = c.cinv - ssq*np.outer(c.r,c.r)/(1+ssq*c.s)
r[c_count:c_count+nc] = c.r/(1+ssq*c.s)
c_count += nc
if full_matrix: n.cinv = M
n.r = r #M.sum(axis=1)
n.s = n.r.sum()
|
[
"recursion",
"to",
"calculate",
"inverse",
"covariance",
"matrix"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L147-L176
|
[
"def",
"recurse",
"(",
"self",
",",
"full_matrix",
"=",
"False",
")",
":",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"get_nonterminals",
"(",
"order",
"=",
"'postorder'",
")",
":",
"n_leaves",
"=",
"len",
"(",
"n",
".",
"_ii",
")",
"if",
"full_matrix",
":",
"M",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_leaves",
",",
"n_leaves",
")",
",",
"dtype",
"=",
"float",
")",
"r",
"=",
"np",
".",
"zeros",
"(",
"n_leaves",
",",
"dtype",
"=",
"float",
")",
"c_count",
"=",
"0",
"for",
"c",
"in",
"n",
":",
"ssq",
"=",
"self",
".",
"branch_variance",
"(",
"c",
")",
"nc",
"=",
"len",
"(",
"c",
".",
"_ii",
")",
"if",
"c",
".",
"is_terminal",
"(",
")",
":",
"if",
"full_matrix",
":",
"M",
"[",
"c_count",
",",
"c_count",
"]",
"=",
"1.0",
"/",
"ssq",
"r",
"[",
"c_count",
"]",
"=",
"1.0",
"/",
"ssq",
"else",
":",
"if",
"full_matrix",
":",
"M",
"[",
"c_count",
":",
"c_count",
"+",
"nc",
",",
"c_count",
":",
"c_count",
"+",
"nc",
"]",
"=",
"c",
".",
"cinv",
"-",
"ssq",
"*",
"np",
".",
"outer",
"(",
"c",
".",
"r",
",",
"c",
".",
"r",
")",
"/",
"(",
"1",
"+",
"ssq",
"*",
"c",
".",
"s",
")",
"r",
"[",
"c_count",
":",
"c_count",
"+",
"nc",
"]",
"=",
"c",
".",
"r",
"/",
"(",
"1",
"+",
"ssq",
"*",
"c",
".",
"s",
")",
"c_count",
"+=",
"nc",
"if",
"full_matrix",
":",
"n",
".",
"cinv",
"=",
"M",
"n",
".",
"r",
"=",
"r",
"#M.sum(axis=1)",
"n",
".",
"s",
"=",
"n",
".",
"r",
".",
"sum",
"(",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
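The clade update c.cinv - ssq*np.outer(c.r, c.r)/(1 + ssq*c.s) is a Sherman-Morrison rank-one correction: adding variance v on the branch above a clade adds v to every pairwise covariance inside it. A standalone numeric check against direct inversion:

import numpy as np

rng = np.random.default_rng(0)
B = rng.normal(size=(3, 3))
A = B @ B.T + 3*np.eye(3)          # random SPD covariance for one clade
v = 0.7                            # variance of the branch above the clade
ones = np.ones(3)

Ainv = np.linalg.inv(A)
r = Ainv @ ones                    # the clade's weighting vector
update = Ainv - v*np.outer(r, r)/(1 + v*r.sum())
assert np.allclose(update, np.linalg.inv(A + v*np.outer(ones, ones)))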
|
test
|
TreeRegression._calculate_averages
|
calculate the weighted sums of the tip and branch values and
their second moments.
|
treetime/treeregression.py
|
def _calculate_averages(self):
"""
calculate the weighted sums of the tip and branch values and
their second moments.
"""
for n in self.tree.get_nonterminals(order='postorder'):
Q = np.zeros(6, dtype=float)
for c in n:
tv = self.tip_value(c)
bv = self.branch_value(c)
var = self.branch_variance(c)
Q += self.propagate_averages(c, tv, bv, var)
n.Q=Q
for n in self.tree.find_clades(order='preorder'):
O = np.zeros(6, dtype=float)
if n==self.tree.root:
n.Qtot = n.Q
continue
for c in n.up:
if c==n:
continue
tv = self.tip_value(c)
bv = self.branch_value(c)
var = self.branch_variance(c)
O += self.propagate_averages(c, tv, bv, var)
if n.up!=self.tree.root:
c = n.up
tv = self.tip_value(c)
bv = self.branch_value(c)
var = self.branch_variance(c)
O += self.propagate_averages(c, tv, bv, var, outgroup=True)
n.O = O
if not n.is_terminal():
tv = self.tip_value(n)
bv = self.branch_value(n)
var = self.branch_variance(n)
n.Qtot = n.Q + self.propagate_averages(n, tv, bv, var, outgroup=True)
|
def _calculate_averages(self):
"""
calculate the weighted sums of the tip and branch values and
their second moments.
"""
for n in self.tree.get_nonterminals(order='postorder'):
Q = np.zeros(6, dtype=float)
for c in n:
tv = self.tip_value(c)
bv = self.branch_value(c)
var = self.branch_variance(c)
Q += self.propagate_averages(c, tv, bv, var)
n.Q=Q
for n in self.tree.find_clades(order='preorder'):
O = np.zeros(6, dtype=float)
if n==self.tree.root:
n.Qtot = n.Q
continue
for c in n.up:
if c==n:
continue
tv = self.tip_value(c)
bv = self.branch_value(c)
var = self.branch_variance(c)
O += self.propagate_averages(c, tv, bv, var)
if n.up!=self.tree.root:
c = n.up
tv = self.tip_value(c)
bv = self.branch_value(c)
var = self.branch_variance(c)
O += self.propagate_averages(c, tv, bv, var, outgroup=True)
n.O = O
if not n.is_terminal():
tv = self.tip_value(n)
bv = self.branch_value(n)
var = self.branch_variance(n)
n.Qtot = n.Q + self.propagate_averages(n, tv, bv, var, outgroup=True)
|
[
"calculate",
"the",
"weighted",
"sums",
"of",
"the",
"tip",
"and",
"branch",
"values",
"and",
"their",
"second",
"moments",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L179-L220
|
[
"def",
"_calculate_averages",
"(",
"self",
")",
":",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"get_nonterminals",
"(",
"order",
"=",
"'postorder'",
")",
":",
"Q",
"=",
"np",
".",
"zeros",
"(",
"6",
",",
"dtype",
"=",
"float",
")",
"for",
"c",
"in",
"n",
":",
"tv",
"=",
"self",
".",
"tip_value",
"(",
"c",
")",
"bv",
"=",
"self",
".",
"branch_value",
"(",
"c",
")",
"var",
"=",
"self",
".",
"branch_variance",
"(",
"c",
")",
"Q",
"+=",
"self",
".",
"propagate_averages",
"(",
"c",
",",
"tv",
",",
"bv",
",",
"var",
")",
"n",
".",
"Q",
"=",
"Q",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"find_clades",
"(",
"order",
"=",
"'preorder'",
")",
":",
"O",
"=",
"np",
".",
"zeros",
"(",
"6",
",",
"dtype",
"=",
"float",
")",
"if",
"n",
"==",
"self",
".",
"tree",
".",
"root",
":",
"n",
".",
"Qtot",
"=",
"n",
".",
"Q",
"continue",
"for",
"c",
"in",
"n",
".",
"up",
":",
"if",
"c",
"==",
"n",
":",
"continue",
"tv",
"=",
"self",
".",
"tip_value",
"(",
"c",
")",
"bv",
"=",
"self",
".",
"branch_value",
"(",
"c",
")",
"var",
"=",
"self",
".",
"branch_variance",
"(",
"c",
")",
"O",
"+=",
"self",
".",
"propagate_averages",
"(",
"c",
",",
"tv",
",",
"bv",
",",
"var",
")",
"if",
"n",
".",
"up",
"!=",
"self",
".",
"tree",
".",
"root",
":",
"c",
"=",
"n",
".",
"up",
"tv",
"=",
"self",
".",
"tip_value",
"(",
"c",
")",
"bv",
"=",
"self",
".",
"branch_value",
"(",
"c",
")",
"var",
"=",
"self",
".",
"branch_variance",
"(",
"c",
")",
"O",
"+=",
"self",
".",
"propagate_averages",
"(",
"c",
",",
"tv",
",",
"bv",
",",
"var",
",",
"outgroup",
"=",
"True",
")",
"n",
".",
"O",
"=",
"O",
"if",
"not",
"n",
".",
"is_terminal",
"(",
")",
":",
"tv",
"=",
"self",
".",
"tip_value",
"(",
"n",
")",
"bv",
"=",
"self",
".",
"branch_value",
"(",
"n",
")",
"var",
"=",
"self",
".",
"branch_variance",
"(",
"n",
")",
"n",
".",
"Qtot",
"=",
"n",
".",
"Q",
"+",
"self",
".",
"propagate_averages",
"(",
"n",
",",
"tv",
",",
"bv",
",",
"var",
",",
"outgroup",
"=",
"True",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
TreeRegression.propagate_averages
|
This function implements the propagation of the means,
variance, and covariances along a branch. It operates
both towards the root and tips.
Parameters
----------
n : (node)
the branch connecting this node to its parent is used
for propagation
tv : (float)
tip value. Only required if the node is terminal
bv : (float)
branch value. The increment of the tree-associated quantity
var : (float)
the variance increment along the branch
Returns
-------
Q : (np.array)
a vector of length 6 containing the updated quantities
|
treetime/treeregression.py
|
def propagate_averages(self, n, tv, bv, var, outgroup=False):
"""
This function implements the propagation of the means,
variance, and covariances along a branch. It operates
both towards the root and tips.
Parameters
----------
n : (node)
the branch connecting this node to its parent is used
for propagation
tv : (float)
tip value. Only required if the node is terminal
bv : (float)
branch value. The increment of the tree-associated quantity
var : (float)
the variance increment along the branch
Returns
-------
Q : (np.array)
a vector of length 6 containing the updated quantities
"""
if n.is_terminal() and outgroup==False:
if tv is None or np.isinf(tv) or np.isnan(tv):
res = np.array([0, 0, 0, 0, 0, 0])
elif var==0:
res = np.array([np.inf, np.inf, np.inf, np.inf, np.inf, np.inf])
else:
res = np.array([
tv/var,
bv/var,
tv**2/var,
bv*tv/var,
bv**2/var,
1.0/var], dtype=float)
else:
tmpQ = n.O if outgroup else n.Q
denom = 1.0/(1+var*tmpQ[sii])
res = np.array([
tmpQ[tavgii]*denom,
(tmpQ[davgii] + bv*tmpQ[sii])*denom,
tmpQ[tsqii] - var*tmpQ[tavgii]**2*denom,
tmpQ[dtavgii] + tmpQ[tavgii]*bv - var*tmpQ[tavgii]*(tmpQ[davgii] + bv*tmpQ[sii])*denom,
tmpQ[dsqii] + 2*bv*tmpQ[davgii] + bv**2*tmpQ[sii] - var*(tmpQ[davgii]**2 + 2*bv*tmpQ[davgii]*tmpQ[sii] + bv**2*tmpQ[sii]**2)*denom,
tmpQ[sii]*denom]
)
return res
|
def propagate_averages(self, n, tv, bv, var, outgroup=False):
"""
This function implements the propagation of the means,
variance, and covariances along a branch. It operates
both towards the root and tips.
Parameters
----------
n : (node)
the branch connecting this node to its parent is used
for propagation
tv : (float)
tip value. Only required if the node is terminal
bv : (float)
branch value. The increment of the tree-associated quantity
var : (float)
the variance increment along the branch
Returns
-------
Q : (np.array)
a vector of length 6 containing the updated quantities
"""
if n.is_terminal() and outgroup==False:
if tv is None or np.isinf(tv) or np.isnan(tv):
res = np.array([0, 0, 0, 0, 0, 0])
elif var==0:
res = np.array([np.inf, np.inf, np.inf, np.inf, np.inf, np.inf])
else:
res = np.array([
tv/var,
bv/var,
tv**2/var,
bv*tv/var,
bv**2/var,
1.0/var], dtype=float)
else:
tmpQ = n.O if outgroup else n.Q
denom = 1.0/(1+var*tmpQ[sii])
res = np.array([
tmpQ[tavgii]*denom,
(tmpQ[davgii] + bv*tmpQ[sii])*denom,
tmpQ[tsqii] - var*tmpQ[tavgii]**2*denom,
tmpQ[dtavgii] + tmpQ[tavgii]*bv - var*tmpQ[tavgii]*(tmpQ[davgii] + bv*tmpQ[sii])*denom,
tmpQ[dsqii] + 2*bv*tmpQ[davgii] + bv**2*tmpQ[sii] - var*(tmpQ[davgii]**2 + 2*bv*tmpQ[davgii]*tmpQ[sii] + bv**2*tmpQ[sii]**2)*denom,
tmpQ[sii]*denom]
)
return res
|
[
"This",
"function",
"implements",
"the",
"propagation",
"of",
"the",
"means",
"variance",
"and",
"covariances",
"along",
"a",
"branch",
".",
"It",
"operates",
"both",
"towards",
"the",
"root",
"and",
"tips",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L223-L272
|
[
"def",
"propagate_averages",
"(",
"self",
",",
"n",
",",
"tv",
",",
"bv",
",",
"var",
",",
"outgroup",
"=",
"False",
")",
":",
"if",
"n",
".",
"is_terminal",
"(",
")",
"and",
"outgroup",
"==",
"False",
":",
"if",
"tv",
"is",
"None",
"or",
"np",
".",
"isinf",
"(",
"tv",
")",
"or",
"np",
".",
"isnan",
"(",
"tv",
")",
":",
"res",
"=",
"np",
".",
"array",
"(",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
")",
"elif",
"var",
"==",
"0",
":",
"res",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"inf",
",",
"np",
".",
"inf",
",",
"np",
".",
"inf",
",",
"np",
".",
"inf",
",",
"np",
".",
"inf",
",",
"np",
".",
"inf",
"]",
")",
"else",
":",
"res",
"=",
"np",
".",
"array",
"(",
"[",
"tv",
"/",
"var",
",",
"bv",
"/",
"var",
",",
"tv",
"**",
"2",
"/",
"var",
",",
"bv",
"*",
"tv",
"/",
"var",
",",
"bv",
"**",
"2",
"/",
"var",
",",
"1.0",
"/",
"var",
"]",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"tmpQ",
"=",
"n",
".",
"O",
"if",
"outgroup",
"else",
"n",
".",
"Q",
"denom",
"=",
"1.0",
"/",
"(",
"1",
"+",
"var",
"*",
"tmpQ",
"[",
"sii",
"]",
")",
"res",
"=",
"np",
".",
"array",
"(",
"[",
"tmpQ",
"[",
"tavgii",
"]",
"*",
"denom",
",",
"(",
"tmpQ",
"[",
"davgii",
"]",
"+",
"bv",
"*",
"tmpQ",
"[",
"sii",
"]",
")",
"*",
"denom",
",",
"tmpQ",
"[",
"tsqii",
"]",
"-",
"var",
"*",
"tmpQ",
"[",
"tavgii",
"]",
"**",
"2",
"*",
"denom",
",",
"tmpQ",
"[",
"dtavgii",
"]",
"+",
"tmpQ",
"[",
"tavgii",
"]",
"*",
"bv",
"-",
"var",
"*",
"tmpQ",
"[",
"tavgii",
"]",
"*",
"(",
"tmpQ",
"[",
"davgii",
"]",
"+",
"bv",
"*",
"tmpQ",
"[",
"sii",
"]",
")",
"*",
"denom",
",",
"tmpQ",
"[",
"dsqii",
"]",
"+",
"2",
"*",
"bv",
"*",
"tmpQ",
"[",
"davgii",
"]",
"+",
"bv",
"**",
"2",
"*",
"tmpQ",
"[",
"sii",
"]",
"-",
"var",
"*",
"(",
"tmpQ",
"[",
"davgii",
"]",
"**",
"2",
"+",
"2",
"*",
"bv",
"*",
"tmpQ",
"[",
"davgii",
"]",
"*",
"tmpQ",
"[",
"sii",
"]",
"+",
"bv",
"**",
"2",
"*",
"tmpQ",
"[",
"sii",
"]",
"**",
"2",
")",
"*",
"denom",
",",
"tmpQ",
"[",
"sii",
"]",
"*",
"denom",
"]",
")",
"return",
"res"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
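For a terminal node the result reduces to inverse-variance weighted moments of the tip value tv and branch value bv; a quick standalone check with illustrative numbers:

import numpy as np

tv, bv, var = 2010.0, 0.02, 0.5
q = np.array([tv, bv, tv**2, bv*tv, bv**2, 1.0]) / var
print(q)  # matches the terminal-node branch of propagate_averages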
|
test
|
TreeRegression.explained_variance
|
calculate standard explained variance
Returns
-------
float
r-value of the root-to-tip distance and time.
independent of regression model, but dependent on root choice
|
treetime/treeregression.py
|
def explained_variance(self):
"""calculate standard explained variance
Returns
-------
float
r-value of the root-to-tip distance and time.
independent of regression model, but dependent on root choice
"""
self.tree.root._v=0
for n in self.tree.get_nonterminals(order='preorder'):
for c in n:
c._v = n._v + self.branch_value(c)
raw = np.array([(self.tip_value(n), n._v) for n in self.tree.get_terminals()
if self.tip_value(n) is not None])
return np.corrcoef(raw.T)[0,1]
|
def explained_variance(self):
"""calculate standard explained variance
Returns
-------
float
r-value of the root-to-tip distance and time.
independent of regression model, but dependent on root choice
"""
self.tree.root._v=0
for n in self.tree.get_nonterminals(order='preorder'):
for c in n:
c._v = n._v + self.branch_value(c)
raw = np.array([(self.tip_value(n), n._v) for n in self.tree.get_terminals()
if self.tip_value(n) is not None])
return np.corrcoef(raw.T)[0,1]
|
[
"calculate",
"standard",
"explained",
"variance"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L274-L289
|
[
"def",
"explained_variance",
"(",
"self",
")",
":",
"self",
".",
"tree",
".",
"root",
".",
"_v",
"=",
"0",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"get_nonterminals",
"(",
"order",
"=",
"'preorder'",
")",
":",
"for",
"c",
"in",
"n",
":",
"c",
".",
"_v",
"=",
"n",
".",
"_v",
"+",
"self",
".",
"branch_value",
"(",
"c",
")",
"raw",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"self",
".",
"tip_value",
"(",
"n",
")",
",",
"n",
".",
"_v",
")",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"get_terminals",
"(",
")",
"if",
"self",
".",
"tip_value",
"(",
"n",
")",
"is",
"not",
"None",
"]",
")",
"return",
"np",
".",
"corrcoef",
"(",
"raw",
".",
"T",
")",
"[",
"0",
",",
"1",
"]"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
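The r-value computed here is just a Pearson correlation between tip dates and root-to-tip distances; a standalone illustration with made-up data:

import numpy as np

dates = np.array([2000.0, 2004.0, 2008.0, 2012.0])
rtt = np.array([0.011, 0.019, 0.032, 0.041])
print(np.corrcoef(dates, rtt)[0, 1])  # close to 1 for clock-like data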
|
test
|
TreeRegression.regression
|
regress tip values against branch values
Parameters
----------
slope : None, optional
if given, the slope isn't optimized
Returns
-------
dict
regression parameters
|
treetime/treeregression.py
|
def regression(self, slope=None):
"""regress tip values against branch values
Parameters
----------
slope : None, optional
if given, the slope isn't optimized
Returns
-------
dict
regression parameters
"""
self._calculate_averages()
clock_model = base_regression(self.tree.root.Q, slope)
clock_model['r_val'] = self.explained_variance()
return clock_model
|
def regression(self, slope=None):
"""regress tip values against branch values
Parameters
----------
slope : None, optional
if given, the slope isn't optimized
Returns
-------
dict
regression parameters
"""
self._calculate_averages()
clock_model = base_regression(self.tree.root.Q, slope)
clock_model['r_val'] = self.explained_variance()
return clock_model
|
[
"regress",
"tip",
"values",
"against",
"branch",
"values"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L292-L310
|
[
"def",
"regression",
"(",
"self",
",",
"slope",
"=",
"None",
")",
":",
"self",
".",
"_calculate_averages",
"(",
")",
"clock_model",
"=",
"base_regression",
"(",
"self",
".",
"tree",
".",
"root",
".",
"Q",
",",
"slope",
")",
"clock_model",
"[",
"'r_val'",
"]",
"=",
"self",
".",
"explained_variance",
"(",
")",
"return",
"clock_model"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
TreeRegression.find_best_root
|
determine the position on the tree that minimizes the bilinear
product of the inverse covariance and the data vectors.
Returns
-------
best_root : (dict)
dictionary with the node, the fraction `x` at which the branch
is to be split, and the regression parameters
|
treetime/treeregression.py
|
def find_best_root(self, force_positive=True, slope=None):
"""
determine the position on the tree that minimizes the bilinear
product of the inverse covariance and the data vectors.
Returns
-------
best_root : (dict)
dictionary with the node, the fraction `x` at which the branch
is to be split, and the regression parameters
"""
self._calculate_averages()
best_root = {"chisq": np.inf}
for n in self.tree.find_clades():
if n==self.tree.root:
continue
tv = self.tip_value(n)
bv = self.branch_value(n)
var = self.branch_variance(n)
x, chisq = self._optimal_root_along_branch(n, tv, bv, var, slope=slope)
if (chisq<best_root["chisq"]):
tmpQ = self.propagate_averages(n, tv, bv*x, var*x) \
+ self.propagate_averages(n, tv, bv*(1-x), var*(1-x), outgroup=True)
reg = base_regression(tmpQ, slope=slope)
if reg["slope"]>=0 or (force_positive==False):
best_root = {"node":n, "split":x}
best_root.update(reg)
if 'node' not in best_root:
print("TreeRegression.find_best_root: No valid root found!", force_positive)
return None
if 'hessian' in best_root:
# calculate differentials with respect to x
deriv = []
n = best_root["node"]
tv = self.tip_value(n)
bv = self.branch_value(n)
var = self.branch_variance(n)
for dx in [-0.001, 0.001]:
y = min(1.0, max(0.0, best_root["split"]+dx))
tmpQ = self.propagate_averages(n, tv, bv*y, var*y) \
+ self.propagate_averages(n, tv, bv*(1-y), var*(1-y), outgroup=True)
reg = base_regression(tmpQ, slope=slope)
deriv.append([y,reg['chisq'], tmpQ[tavgii], tmpQ[davgii]])
estimator_hessian = np.zeros((3,3))
estimator_hessian[:2,:2] = best_root['hessian']
estimator_hessian[2,2] = (deriv[0][1] + deriv[1][1] - 2.0*best_root['chisq'])/(deriv[0][0] - deriv[1][0])**2
# estimator_hessian[2,0] = (deriv[0][2] - deriv[1][2])/(deriv[0][0] - deriv[1][0])
# estimator_hessian[2,1] = (deriv[0][3] - deriv[1][3])/(deriv[0][0] - deriv[1][0])
estimator_hessian[0,2] = estimator_hessian[2,0]
estimator_hessian[1,2] = estimator_hessian[2,1]
best_root['hessian'] = estimator_hessian
best_root['cov'] = np.linalg.inv(estimator_hessian)
return best_root
|
def find_best_root(self, force_positive=True, slope=None):
"""
determine the position on the tree that minimizes the bilinear
product of the inverse covariance and the data vectors.
Returns
-------
best_root : (dict)
dictionary with the node, the fraction `x` at which the branch
is to be split, and the regression parameters
"""
self._calculate_averages()
best_root = {"chisq": np.inf}
for n in self.tree.find_clades():
if n==self.tree.root:
continue
tv = self.tip_value(n)
bv = self.branch_value(n)
var = self.branch_variance(n)
x, chisq = self._optimal_root_along_branch(n, tv, bv, var, slope=slope)
if (chisq<best_root["chisq"]):
tmpQ = self.propagate_averages(n, tv, bv*x, var*x) \
+ self.propagate_averages(n, tv, bv*(1-x), var*(1-x), outgroup=True)
reg = base_regression(tmpQ, slope=slope)
if reg["slope"]>=0 or (force_positive==False):
best_root = {"node":n, "split":x}
best_root.update(reg)
if 'node' not in best_root:
print("TreeRegression.find_best_root: No valid root found!", force_positive)
return None
if 'hessian' in best_root:
# calculate differentials with respect to x
deriv = []
n = best_root["node"]
tv = self.tip_value(n)
bv = self.branch_value(n)
var = self.branch_variance(n)
for dx in [-0.001, 0.001]:
y = min(1.0, max(0.0, best_root["split"]+dx))
tmpQ = self.propagate_averages(n, tv, bv*y, var*y) \
+ self.propagate_averages(n, tv, bv*(1-y), var*(1-y), outgroup=True)
reg = base_regression(tmpQ, slope=slope)
deriv.append([y,reg['chisq'], tmpQ[tavgii], tmpQ[davgii]])
estimator_hessian = np.zeros((3,3))
estimator_hessian[:2,:2] = best_root['hessian']
estimator_hessian[2,2] = (deriv[0][1] + deriv[1][1] - 2.0*best_root['chisq'])/(deriv[0][0] - deriv[1][0])**2
# estimator_hessian[2,0] = (deriv[0][2] - deriv[1][2])/(deriv[0][0] - deriv[1][0])
# estimator_hessian[2,1] = (deriv[0][3] - deriv[1][3])/(deriv[0][0] - deriv[1][0])
estimator_hessian[0,2] = estimator_hessian[2,0]
estimator_hessian[1,2] = estimator_hessian[2,1]
best_root['hessian'] = estimator_hessian
best_root['cov'] = np.linalg.inv(estimator_hessian)
return best_root
|
[
"determine",
"the",
"position",
"on",
"the",
"tree",
"that",
"minimizes",
"the",
"bilinear",
"product",
"of",
"the",
"inverse",
"covariance",
"and",
"the",
"data",
"vectors",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L314-L372
|
[
"def",
"find_best_root",
"(",
"self",
",",
"force_positive",
"=",
"True",
",",
"slope",
"=",
"None",
")",
":",
"self",
".",
"_calculate_averages",
"(",
")",
"best_root",
"=",
"{",
"\"chisq\"",
":",
"np",
".",
"inf",
"}",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"find_clades",
"(",
")",
":",
"if",
"n",
"==",
"self",
".",
"tree",
".",
"root",
":",
"continue",
"tv",
"=",
"self",
".",
"tip_value",
"(",
"n",
")",
"bv",
"=",
"self",
".",
"branch_value",
"(",
"n",
")",
"var",
"=",
"self",
".",
"branch_variance",
"(",
"n",
")",
"x",
",",
"chisq",
"=",
"self",
".",
"_optimal_root_along_branch",
"(",
"n",
",",
"tv",
",",
"bv",
",",
"var",
",",
"slope",
"=",
"slope",
")",
"if",
"(",
"chisq",
"<",
"best_root",
"[",
"\"chisq\"",
"]",
")",
":",
"tmpQ",
"=",
"self",
".",
"propagate_averages",
"(",
"n",
",",
"tv",
",",
"bv",
"*",
"x",
",",
"var",
"*",
"x",
")",
"+",
"self",
".",
"propagate_averages",
"(",
"n",
",",
"tv",
",",
"bv",
"*",
"(",
"1",
"-",
"x",
")",
",",
"var",
"*",
"(",
"1",
"-",
"x",
")",
",",
"outgroup",
"=",
"True",
")",
"reg",
"=",
"base_regression",
"(",
"tmpQ",
",",
"slope",
"=",
"slope",
")",
"if",
"reg",
"[",
"\"slope\"",
"]",
">=",
"0",
"or",
"(",
"force_positive",
"==",
"False",
")",
":",
"best_root",
"=",
"{",
"\"node\"",
":",
"n",
",",
"\"split\"",
":",
"x",
"}",
"best_root",
".",
"update",
"(",
"reg",
")",
"if",
"'node'",
"not",
"in",
"best_root",
":",
"print",
"(",
"\"TreeRegression.find_best_root: No valid root found!\"",
",",
"force_positive",
")",
"return",
"None",
"if",
"'hessian'",
"in",
"best_root",
":",
"# calculate differentials with respect to x",
"deriv",
"=",
"[",
"]",
"n",
"=",
"best_root",
"[",
"\"node\"",
"]",
"tv",
"=",
"self",
".",
"tip_value",
"(",
"n",
")",
"bv",
"=",
"self",
".",
"branch_value",
"(",
"n",
")",
"var",
"=",
"self",
".",
"branch_variance",
"(",
"n",
")",
"for",
"dx",
"in",
"[",
"-",
"0.001",
",",
"0.001",
"]",
":",
"y",
"=",
"min",
"(",
"1.0",
",",
"max",
"(",
"0.0",
",",
"best_root",
"[",
"\"split\"",
"]",
"+",
"dx",
")",
")",
"tmpQ",
"=",
"self",
".",
"propagate_averages",
"(",
"n",
",",
"tv",
",",
"bv",
"*",
"y",
",",
"var",
"*",
"y",
")",
"+",
"self",
".",
"propagate_averages",
"(",
"n",
",",
"tv",
",",
"bv",
"*",
"(",
"1",
"-",
"y",
")",
",",
"var",
"*",
"(",
"1",
"-",
"y",
")",
",",
"outgroup",
"=",
"True",
")",
"reg",
"=",
"base_regression",
"(",
"tmpQ",
",",
"slope",
"=",
"slope",
")",
"deriv",
".",
"append",
"(",
"[",
"y",
",",
"reg",
"[",
"'chisq'",
"]",
",",
"tmpQ",
"[",
"tavgii",
"]",
",",
"tmpQ",
"[",
"davgii",
"]",
"]",
")",
"estimator_hessian",
"=",
"np",
".",
"zeros",
"(",
"(",
"3",
",",
"3",
")",
")",
"estimator_hessian",
"[",
":",
"2",
",",
":",
"2",
"]",
"=",
"best_root",
"[",
"'hessian'",
"]",
"estimator_hessian",
"[",
"2",
",",
"2",
"]",
"=",
"(",
"deriv",
"[",
"0",
"]",
"[",
"1",
"]",
"+",
"deriv",
"[",
"1",
"]",
"[",
"1",
"]",
"-",
"2.0",
"*",
"best_root",
"[",
"'chisq'",
"]",
")",
"/",
"(",
"deriv",
"[",
"0",
"]",
"[",
"0",
"]",
"-",
"deriv",
"[",
"1",
"]",
"[",
"0",
"]",
")",
"**",
"2",
"# estimator_hessian[2,0] = (deriv[0][2] - deriv[1][2])/(deriv[0][0] - deriv[1][0])",
"# estimator_hessian[2,1] = (deriv[0][3] - deriv[1][3])/(deriv[0][0] - deriv[1][0])",
"estimator_hessian",
"[",
"0",
",",
"2",
"]",
"=",
"estimator_hessian",
"[",
"2",
",",
"0",
"]",
"estimator_hessian",
"[",
"1",
",",
"2",
"]",
"=",
"estimator_hessian",
"[",
"2",
",",
"1",
"]",
"best_root",
"[",
"'hessian'",
"]",
"=",
"estimator_hessian",
"best_root",
"[",
"'cov'",
"]",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"estimator_hessian",
")",
"return",
"best_root"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
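The extra hessian entry for the split coordinate is estimated from chi-squared values evaluated just left and right of the optimum. The textbook central-difference version of that curvature estimate, checked on a function with a known second derivative:

f = lambda x: 3.0*x**2 + x
x0, h = 0.4, 1e-3
print((f(x0 - h) + f(x0 + h) - 2.0*f(x0)) / h**2)  # ~6.0 == f''(x0)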
|
test
|
TreeRegression.optimal_reroot
|
determine the best root and reroot the tree to this value.
Note that this can change the parent-child relations of the tree,
and values associated with branches rather than nodes
(e.g. confidence) might need to be re-evaluated afterwards
Parameters
----------
force_positive : bool, optional
if True, the search for a root will only consider positive rate estimates
slope : float, optional
if given, it will find the optimal root given a fixed rate. If slope==0, this
corresponds to minimal root-to-tip variance rooting (min_dev)
Returns
-------
dict
regression parameters
|
treetime/treeregression.py
|
def optimal_reroot(self, force_positive=True, slope=None):
"""
determine the best root and reroot the tree to this value.
Note that this can change the parent-child relations of the tree,
and values associated with branches rather than nodes
(e.g. confidence) might need to be re-evaluated afterwards
Parameters
----------
force_positive : bool, optional
if True, the search for a root will only consider positive rate estimates
slope : float, optional
if given, it will find the optimal root given a fixed rate. If slope==0, this
corresponds to minimal root-to-tip variance rooting (min_dev)
Returns
-------
dict
regression parameters
"""
best_root = self.find_best_root(force_positive=force_positive, slope=slope)
best_node = best_root["node"]
x = best_root["split"]
if x<1e-5:
new_node = best_node
elif x>1.0-1e-5:
new_node = best_node.up
else:
# create new node in the branch and root the tree to it
new_node = Phylo.BaseTree.Clade()
# insert the new node in the middle of the branch
# by simply re-wiring the links on both sides of the branch
# and fix the branch lengths
new_node.branch_length = best_node.branch_length*(1-x)
new_node.up = best_node.up
new_node.clades = [best_node]
new_node.up.clades = [k if k!=best_node else new_node
for k in best_node.up.clades]
best_node.branch_length *= x
best_node.up = new_node
new_node.rtt_regression = best_root
self.tree.root_with_outgroup(new_node)
self.tree.ladderize()
for n in self.tree.get_nonterminals(order='postorder'):
for c in n:
c.up=n
return best_root
|
def optimal_reroot(self, force_positive=True, slope=None):
"""
determine the best root and reroot the tree to this value.
Note that this can change the parent-child relations of the tree,
and values associated with branches rather than nodes
(e.g. confidence) might need to be re-evaluated afterwards
Parameters
----------
force_positive : bool, optional
if True, the search for a root will only consider positive rate estimates
slope : float, optional
if given, it will find the optimal root given a fixed rate. If slope==0, this
corresponds to minimal root-to-tip variance rooting (min_dev)
Returns
-------
dict
regression parameters
"""
best_root = self.find_best_root(force_positive=force_positive, slope=slope)
best_node = best_root["node"]
x = best_root["split"]
if x<1e-5:
new_node = best_node
elif x>1.0-1e-5:
new_node = best_node.up
else:
# create new node in the branch and root the tree to it
new_node = Phylo.BaseTree.Clade()
# insert the new node in the middle of the branch
# by simply re-wiring the links on both sides of the branch
# and fix the branch lengths
new_node.branch_length = best_node.branch_length*(1-x)
new_node.up = best_node.up
new_node.clades = [best_node]
new_node.up.clades = [k if k!=best_node else new_node
for k in best_node.up.clades]
best_node.branch_length *= x
best_node.up = new_node
new_node.rtt_regression = best_root
self.tree.root_with_outgroup(new_node)
self.tree.ladderize()
for n in self.tree.get_nonterminals(order='postorder'):
for c in n:
c.up=n
return best_root
|
[
"determine",
"the",
"best",
"root",
"and",
"reroot",
"the",
"tree",
"to",
"this",
"value",
".",
"Note",
"that",
"this",
"can",
"change",
"the",
"parent",
"child",
"relations",
"of",
"the",
"tree",
"and",
"values",
"associated",
"with",
"branches",
"rather",
"than",
"nodes",
"(",
"e",
".",
"g",
".",
"confidence",
")",
"might",
"need",
"to",
"be",
"re",
"-",
"evaluated",
"afterwards"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L402-L454
|
[
"def",
"optimal_reroot",
"(",
"self",
",",
"force_positive",
"=",
"True",
",",
"slope",
"=",
"None",
")",
":",
"best_root",
"=",
"self",
".",
"find_best_root",
"(",
"force_positive",
"=",
"force_positive",
",",
"slope",
"=",
"slope",
")",
"best_node",
"=",
"best_root",
"[",
"\"node\"",
"]",
"x",
"=",
"best_root",
"[",
"\"split\"",
"]",
"if",
"x",
"<",
"1e-5",
":",
"new_node",
"=",
"best_node",
"elif",
"x",
">",
"1.0",
"-",
"1e-5",
":",
"new_node",
"=",
"best_node",
".",
"up",
"else",
":",
"# create new node in the branch and root the tree to it",
"new_node",
"=",
"Phylo",
".",
"BaseTree",
".",
"Clade",
"(",
")",
"# insert the new node in the middle of the branch",
"# by simple re-wiring the links on the both sides of the branch",
"# and fix the branch lengths",
"new_node",
".",
"branch_length",
"=",
"best_node",
".",
"branch_length",
"*",
"(",
"1",
"-",
"x",
")",
"new_node",
".",
"up",
"=",
"best_node",
".",
"up",
"new_node",
".",
"clades",
"=",
"[",
"best_node",
"]",
"new_node",
".",
"up",
".",
"clades",
"=",
"[",
"k",
"if",
"k",
"!=",
"best_node",
"else",
"new_node",
"for",
"k",
"in",
"best_node",
".",
"up",
".",
"clades",
"]",
"best_node",
".",
"branch_length",
"*=",
"x",
"best_node",
".",
"up",
"=",
"new_node",
"new_node",
".",
"rtt_regression",
"=",
"best_root",
"self",
".",
"tree",
".",
"root_with_outgroup",
"(",
"new_node",
")",
"self",
".",
"tree",
".",
"ladderize",
"(",
")",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"get_nonterminals",
"(",
"order",
"=",
"'postorder'",
")",
":",
"for",
"c",
"in",
"n",
":",
"c",
".",
"up",
"=",
"n",
"return",
"best_root"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
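A standalone sketch of the branch-split rewiring, using a hypothetical Node class in place of the Bio.Phylo clades used above:

class Node:
    def __init__(self, branch_length=0.0):
        self.branch_length, self.up, self.clades = branch_length, None, []

parent, child = Node(), Node(branch_length=1.0)
child.up, parent.clades = parent, [child]

# Split the branch at fraction x, exactly as in the else-branch above.
x = 0.3
mid = Node(branch_length=child.branch_length * (1 - x))
mid.up, mid.clades = parent, [child]
parent.clades = [mid if k is child else k for k in parent.clades]
child.branch_length *= x
child.up = mid
assert abs(mid.branch_length + child.branch_length - 1.0) < 1e-12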
|
test
|
TreeRegression.clock_plot
|
Plot root-to-tip distance vs time as a basic time-tree diagnostic
Parameters
----------
add_internal : bool, optional
add internal nodes. this will only work if the tree has been dated already
ax : None, optional
a matplotlib axis to plot into. if none is provided, a new figure is opened
regression : None, optional
a dict containing parameters of a root-to-tip vs time regression as
returned by the function base_regression
confidence : bool, optional
add confidence area to the regression line
n_sigma : int, optional
number of standard deviations for the confidence area.
fs : int, optional
fontsize
|
treetime/treeregression.py
|
def clock_plot(self, add_internal=False, ax=None, regression=None,
confidence=True, n_sigma = 2, fs=14):
"""Plot root-to-tip distance vs time as a basic time-tree diagnostic
Parameters
----------
add_internal : bool, optional
add internal nodes. this will only work if the tree has been dated already
ax : None, optional
a matplotlib axis to plot into. if none is provided, a new figure is opened
regression : None, optional
a dict containing parameters of a root-to-tip vs time regression as
returned by the function base_regression
confidence : bool, optional
add confidence area to the regression line
n_sigma : int, optional
number of standard deviations for the confidence area.
fs : int, optional
fontsize
"""
import matplotlib.pyplot as plt
if ax is None:
plt.figure()
ax=plt.subplot(111)
self.tree.root._v=0
for n in self.tree.get_nonterminals(order='preorder'):
for c in n:
c._v = n._v + self.branch_value(c)
tips = self.tree.get_terminals()
internal = self.tree.get_nonterminals()
# get values of terminals
xi = np.array([self.tip_value(n) for n in tips])
yi = np.array([n._v for n in tips])
ind = np.array([n.bad_branch if hasattr(n, 'bad_branch') else False for n in tips])
if add_internal:
xi_int = np.array([n.numdate for n in internal])
yi_int = np.array([n._v for n in internal])
ind_int = np.array([n.bad_branch if hasattr(n, 'bad_branch') else False for n in internal])
if regression:
# plot regression line
t_mrca = -regression['intercept']/regression['slope']
if add_internal:
time_span = np.max(xi_int[~ind_int]) - np.min(xi_int[~ind_int])
x_vals = np.array([max(np.min(xi_int[~ind_int]), t_mrca) - 0.1*time_span, np.max(xi[~ind])+0.05*time_span])
else:
time_span = np.max(xi[~ind]) - np.min(xi[~ind])
x_vals = np.array([max(np.min(xi[~ind]), t_mrca) - 0.1*time_span, np.max(xi[~ind]+0.05*time_span)])
# plot confidence interval
if confidence and 'cov' in regression:
x_vals = np.linspace(x_vals[0], x_vals[1], 100)
y_vals = regression['slope']*x_vals + regression['intercept']
dev = n_sigma*np.array([np.sqrt(regression['cov'][:2,:2].dot(np.array([x, 1])).dot(np.array([x,1]))) for x in x_vals])
dev_slope = n_sigma*np.sqrt(regression['cov'][0,0])
ax.fill_between(x_vals, y_vals-dev, y_vals+dev, alpha=0.2)
dp = np.array([regression['intercept']/regression['slope']**2,-1./regression['slope']])
dev_rtt = n_sigma*np.sqrt(regression['cov'][:2,:2].dot(dp).dot(dp))
else:
dev_rtt = None
dev_slope = None
ax.plot(x_vals, regression['slope']*x_vals + regression['intercept'],
label = r"$y=\alpha + \beta t$"+"\n"+
r"$\beta=$%1.2e"%(regression["slope"])
+ ("+/- %1.e"%dev_slope if dev_slope else "") +
"\nroot date: %1.1f"%(-regression['intercept']/regression['slope']) +
("+/- %1.2f"%dev_rtt if dev_rtt else ""))
ax.scatter(xi[~ind], yi[~ind], label=("tips" if add_internal else None))
if ind.sum():
try:
# note: this is treetime specific
tmp_x = np.array([np.mean(n.raw_date_constraint) if n.raw_date_constraint else None
for n in self.tree.get_terminals()])
ax.scatter(tmp_x[ind], yi[ind], label="ignored tips", c='r')
except:
pass
if add_internal:
ax.scatter(xi_int[~ind_int], yi_int[~ind_int], label="internal nodes")
ax.set_ylabel('root-to-tip distance', fontsize=fs)
ax.set_xlabel('date', fontsize=fs)
ax.ticklabel_format(useOffset=False)
ax.tick_params(labelsize=fs*0.8)
ax.set_ylim([0, 1.1*np.max(yi)])
plt.tight_layout()
plt.legend(fontsize=fs*0.8)
|
def clock_plot(self, add_internal=False, ax=None, regression=None,
confidence=True, n_sigma = 2, fs=14):
"""Plot root-to-tip distance vs time as a basic time-tree diagnostic
Parameters
----------
add_internal : bool, optional
add internal nodes. this will only work if the tree has been dated already
ax : None, optional
a matplotlib axis to plot into. if none is provided, a new figure is opened
regression : None, optional
a dict containing parameters of a root-to-tip vs time regression as
returned by the function base_regression
confidence : bool, optional
add confidence area to the regression line
n_sigma : int, optional
number of standard deviations for the confidence area.
fs : int, optional
fontsize
"""
import matplotlib.pyplot as plt
if ax is None:
plt.figure()
ax=plt.subplot(111)
self.tree.root._v=0
for n in self.tree.get_nonterminals(order='preorder'):
for c in n:
c._v = n._v + self.branch_value(c)
tips = self.tree.get_terminals()
internal = self.tree.get_nonterminals()
# get values of terminals
xi = np.array([self.tip_value(n) for n in tips])
yi = np.array([n._v for n in tips])
ind = np.array([n.bad_branch if hasattr(n, 'bad_branch') else False for n in tips])
if add_internal:
xi_int = np.array([n.numdate for n in internal])
yi_int = np.array([n._v for n in internal])
ind_int = np.array([n.bad_branch if hasattr(n, 'bad_branch') else False for n in internal])
if regression:
# plot regression line
t_mrca = -regression['intercept']/regression['slope']
if add_internal:
time_span = np.max(xi_int[~ind_int]) - np.min(xi_int[~ind_int])
x_vals = np.array([max(np.min(xi_int[~ind_int]), t_mrca) - 0.1*time_span, np.max(xi[~ind])+0.05*time_span])
else:
time_span = np.max(xi[~ind]) - np.min(xi[~ind])
x_vals = np.array([max(np.min(xi[~ind]), t_mrca) - 0.1*time_span, np.max(xi[~ind]+0.05*time_span)])
# plot confidence interval
if confidence and 'cov' in regression:
x_vals = np.linspace(x_vals[0], x_vals[1], 100)
y_vals = regression['slope']*x_vals + regression['intercept']
dev = n_sigma*np.array([np.sqrt(regression['cov'][:2,:2].dot(np.array([x, 1])).dot(np.array([x,1]))) for x in x_vals])
dev_slope = n_sigma*np.sqrt(regression['cov'][0,0])
ax.fill_between(x_vals, y_vals-dev, y_vals+dev, alpha=0.2)
dp = np.array([regression['intercept']/regression['slope']**2,-1./regression['slope']])
dev_rtt = n_sigma*np.sqrt(regression['cov'][:2,:2].dot(dp).dot(dp))
else:
dev_rtt = None
dev_slope = None
ax.plot(x_vals, regression['slope']*x_vals + regression['intercept'],
label = r"$y=\alpha + \beta t$"+"\n"+
r"$\beta=$%1.2e"%(regression["slope"])
+ ("+/- %1.e"%dev_slope if dev_slope else "") +
"\nroot date: %1.1f"%(-regression['intercept']/regression['slope']) +
("+/- %1.2f"%dev_rtt if dev_rtt else ""))
ax.scatter(xi[~ind], yi[~ind], label=("tips" if add_internal else None))
if ind.sum():
try:
# note: this is treetime specific
tmp_x = np.array([np.mean(n.raw_date_constraint) if n.raw_date_constraint else None
for n in self.tree.get_terminals()])
ax.scatter(tmp_x[ind], yi[ind], label="ignored tips", c='r')
except:
pass
if add_internal:
ax.scatter(xi_int[~ind_int], yi_int[~ind_int], label="internal nodes")
ax.set_ylabel('root-to-tip distance', fontsize=fs)
ax.set_xlabel('date', fontsize=fs)
ax.ticklabel_format(useOffset=False)
ax.tick_params(labelsize=fs*0.8)
ax.set_ylim([0, 1.1*np.max(yi)])
plt.tight_layout()
plt.legend(fontsize=fs*0.8)
|
[
"Plot",
"root",
"-",
"to",
"-",
"tip",
"distance",
"vs",
"time",
"as",
"a",
"basic",
"time",
"-",
"tree",
"diagnostic"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/treeregression.py#L457-L550
|
[
"def",
"clock_plot",
"(",
"self",
",",
"add_internal",
"=",
"False",
",",
"ax",
"=",
"None",
",",
"regression",
"=",
"None",
",",
"confidence",
"=",
"True",
",",
"n_sigma",
"=",
"2",
",",
"fs",
"=",
"14",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"ax",
"is",
"None",
":",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"plt",
".",
"subplot",
"(",
"111",
")",
"self",
".",
"tree",
".",
"root",
".",
"_v",
"=",
"0",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"get_nonterminals",
"(",
"order",
"=",
"'preorder'",
")",
":",
"for",
"c",
"in",
"n",
":",
"c",
".",
"_v",
"=",
"n",
".",
"_v",
"+",
"self",
".",
"branch_value",
"(",
"c",
")",
"tips",
"=",
"self",
".",
"tree",
".",
"get_terminals",
"(",
")",
"internal",
"=",
"self",
".",
"tree",
".",
"get_nonterminals",
"(",
")",
"# get values of terminals",
"xi",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"tip_value",
"(",
"n",
")",
"for",
"n",
"in",
"tips",
"]",
")",
"yi",
"=",
"np",
".",
"array",
"(",
"[",
"n",
".",
"_v",
"for",
"n",
"in",
"tips",
"]",
")",
"ind",
"=",
"np",
".",
"array",
"(",
"[",
"n",
".",
"bad_branch",
"if",
"hasattr",
"(",
"n",
",",
"'bad_branch'",
")",
"else",
"False",
"for",
"n",
"in",
"tips",
"]",
")",
"if",
"add_internal",
":",
"xi_int",
"=",
"np",
".",
"array",
"(",
"[",
"n",
".",
"numdate",
"for",
"n",
"in",
"internal",
"]",
")",
"yi_int",
"=",
"np",
".",
"array",
"(",
"[",
"n",
".",
"_v",
"for",
"n",
"in",
"internal",
"]",
")",
"ind_int",
"=",
"np",
".",
"array",
"(",
"[",
"n",
".",
"bad_branch",
"if",
"hasattr",
"(",
"n",
",",
"'bad_branch'",
")",
"else",
"False",
"for",
"n",
"in",
"internal",
"]",
")",
"if",
"regression",
":",
"# plot regression line",
"t_mrca",
"=",
"-",
"regression",
"[",
"'intercept'",
"]",
"/",
"regression",
"[",
"'slope'",
"]",
"if",
"add_internal",
":",
"time_span",
"=",
"np",
".",
"max",
"(",
"xi_int",
"[",
"~",
"ind_int",
"]",
")",
"-",
"np",
".",
"min",
"(",
"xi_int",
"[",
"~",
"ind_int",
"]",
")",
"x_vals",
"=",
"np",
".",
"array",
"(",
"[",
"max",
"(",
"np",
".",
"min",
"(",
"xi_int",
"[",
"~",
"ind_int",
"]",
")",
",",
"t_mrca",
")",
"-",
"0.1",
"*",
"time_span",
",",
"np",
".",
"max",
"(",
"xi",
"[",
"~",
"ind",
"]",
")",
"+",
"0.05",
"*",
"time_span",
"]",
")",
"else",
":",
"time_span",
"=",
"np",
".",
"max",
"(",
"xi",
"[",
"~",
"ind",
"]",
")",
"-",
"np",
".",
"min",
"(",
"xi",
"[",
"~",
"ind",
"]",
")",
"x_vals",
"=",
"np",
".",
"array",
"(",
"[",
"max",
"(",
"np",
".",
"min",
"(",
"xi",
"[",
"~",
"ind",
"]",
")",
",",
"t_mrca",
")",
"-",
"0.1",
"*",
"time_span",
",",
"np",
".",
"max",
"(",
"xi",
"[",
"~",
"ind",
"]",
"+",
"0.05",
"*",
"time_span",
")",
"]",
")",
"# plot confidence interval",
"if",
"confidence",
"and",
"'cov'",
"in",
"regression",
":",
"x_vals",
"=",
"np",
".",
"linspace",
"(",
"x_vals",
"[",
"0",
"]",
",",
"x_vals",
"[",
"1",
"]",
",",
"100",
")",
"y_vals",
"=",
"regression",
"[",
"'slope'",
"]",
"*",
"x_vals",
"+",
"regression",
"[",
"'intercept'",
"]",
"dev",
"=",
"n_sigma",
"*",
"np",
".",
"array",
"(",
"[",
"np",
".",
"sqrt",
"(",
"regression",
"[",
"'cov'",
"]",
"[",
":",
"2",
",",
":",
"2",
"]",
".",
"dot",
"(",
"np",
".",
"array",
"(",
"[",
"x",
",",
"1",
"]",
")",
")",
".",
"dot",
"(",
"np",
".",
"array",
"(",
"[",
"x",
",",
"1",
"]",
")",
")",
")",
"for",
"x",
"in",
"x_vals",
"]",
")",
"dev_slope",
"=",
"n_sigma",
"*",
"np",
".",
"sqrt",
"(",
"regression",
"[",
"'cov'",
"]",
"[",
"0",
",",
"0",
"]",
")",
"ax",
".",
"fill_between",
"(",
"x_vals",
",",
"y_vals",
"-",
"dev",
",",
"y_vals",
"+",
"dev",
",",
"alpha",
"=",
"0.2",
")",
"dp",
"=",
"np",
".",
"array",
"(",
"[",
"regression",
"[",
"'intercept'",
"]",
"/",
"regression",
"[",
"'slope'",
"]",
"**",
"2",
",",
"-",
"1.",
"/",
"regression",
"[",
"'slope'",
"]",
"]",
")",
"dev_rtt",
"=",
"n_sigma",
"*",
"np",
".",
"sqrt",
"(",
"regression",
"[",
"'cov'",
"]",
"[",
":",
"2",
",",
":",
"2",
"]",
".",
"dot",
"(",
"dp",
")",
".",
"dot",
"(",
"dp",
")",
")",
"else",
":",
"dev_rtt",
"=",
"None",
"dev_slope",
"=",
"None",
"ax",
".",
"plot",
"(",
"x_vals",
",",
"regression",
"[",
"'slope'",
"]",
"*",
"x_vals",
"+",
"regression",
"[",
"'intercept'",
"]",
",",
"label",
"=",
"r\"$y=\\alpha + \\beta t$\"",
"+",
"\"\\n\"",
"+",
"r\"$\\beta=$%1.2e\"",
"%",
"(",
"regression",
"[",
"\"slope\"",
"]",
")",
"+",
"(",
"\"+/- %1.e\"",
"%",
"dev_slope",
"if",
"dev_slope",
"else",
"\"\"",
")",
"+",
"\"\\nroot date: %1.1f\"",
"%",
"(",
"-",
"regression",
"[",
"'intercept'",
"]",
"/",
"regression",
"[",
"'slope'",
"]",
")",
"+",
"(",
"\"+/- %1.2f\"",
"%",
"dev_rtt",
"if",
"dev_rtt",
"else",
"\"\"",
")",
")",
"ax",
".",
"scatter",
"(",
"xi",
"[",
"~",
"ind",
"]",
",",
"yi",
"[",
"~",
"ind",
"]",
",",
"label",
"=",
"(",
"\"tips\"",
"if",
"add_internal",
"else",
"None",
")",
")",
"if",
"ind",
".",
"sum",
"(",
")",
":",
"try",
":",
"# note: this is treetime specific",
"tmp_x",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"mean",
"(",
"n",
".",
"raw_date_constraint",
")",
"if",
"n",
".",
"raw_date_constraint",
"else",
"None",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"get_terminals",
"(",
")",
"]",
")",
"ax",
".",
"scatter",
"(",
"tmp_x",
"[",
"ind",
"]",
",",
"yi",
"[",
"ind",
"]",
",",
"label",
"=",
"\"ignored tips\"",
",",
"c",
"=",
"'r'",
")",
"except",
":",
"pass",
"if",
"add_internal",
":",
"ax",
".",
"scatter",
"(",
"xi_int",
"[",
"~",
"ind_int",
"]",
",",
"yi_int",
"[",
"~",
"ind_int",
"]",
",",
"label",
"=",
"\"internal nodes\"",
")",
"ax",
".",
"set_ylabel",
"(",
"'root-to-tip distance'",
",",
"fontsize",
"=",
"fs",
")",
"ax",
".",
"set_xlabel",
"(",
"'date'",
",",
"fontsize",
"=",
"fs",
")",
"ax",
".",
"ticklabel_format",
"(",
"useOffset",
"=",
"False",
")",
"ax",
".",
"tick_params",
"(",
"labelsize",
"=",
"fs",
"*",
"0.8",
")",
"ax",
".",
"set_ylim",
"(",
"[",
"0",
",",
"1.1",
"*",
"np",
".",
"max",
"(",
"yi",
")",
"]",
")",
"plt",
".",
"tight_layout",
"(",
")",
"plt",
".",
"legend",
"(",
"fontsize",
"=",
"fs",
"*",
"0.8",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
JC69
|
Jukes-Cantor 1969 model. This model assumes equal concentrations
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969). Evolution of Protein Molecules.
New York: Academic Press. pp. 21–132
Parameters
-----------
mu : float
substitution rate
alphabet : str
specify alphabet to use.
Available alphabets are:
'nuc' - nucleotides only, gaps ignored
'nuc_gap' - nucleotide alphabet with gaps, gaps can be ignored optionally
|
treetime/nuc_models.py
|
def JC69 (mu=1.0, alphabet="nuc", **kwargs):
"""
Jukes-Cantor 1969 model. This model assumes equal concentrations
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969). Evolution of Protein Molecules.
New York: Academic Press. pp. 21–132
Parameters
-----------
mu : float
substitution rate
alphabet : str
specify alphabet to use.
Available alphabets are:
'nuc' - nucleotides only, gaps ignored
'nuc_gap' - nucleotide alphabet with gaps, gaps can be ignored optionally
"""
num_chars = len(alphabets[alphabet])
W, pi = np.ones((num_chars,num_chars)), np.ones(num_chars)
gtr = GTR(alphabet=alphabet)
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
|
def JC69 (mu=1.0, alphabet="nuc", **kwargs):
"""
Jukes-Cantor 1969 model. This model assumes equal concentrations
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969). Evolution of Protein Molecules.
New York: Academic Press. pp. 21–132
Parameters
-----------
mu : float
substitution rate
alphabet : str
specify alphabet to use.
Available alphabets are:
'nuc' - nucleotides only, gaps ignored
'nuc_gap' - nucleotide alphabet with gaps, gaps can be ignored optionally
"""
num_chars = len(alphabets[alphabet])
W, pi = np.ones((num_chars,num_chars)), np.ones(num_chars)
gtr = GTR(alphabet=alphabet)
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
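# --- Editorial usage sketch; not part of the original treetime source. ---
# The inputs JC69 assembles, reproduced standalone with numpy only;
# the commented call assumes the in-module JC69/GTR context above.
import numpy as np
num_chars = 4                          # e.g. a gap-free nucleotide alphabet
W = np.ones((num_chars, num_chars))    # equal exchange rates between all states
pi = np.ones(num_chars) / num_chars    # equal equilibrium frequencies
# jc_model = JC69(mu=0.8)              # wraps the same structure via assign_rates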
|
[
"Jukes",
"-",
"Cantor",
"1969",
"model",
".",
"This",
"model",
"assumes",
"equal",
"concentrations",
"of",
"the",
"nucleotides",
"and",
"equal",
"transition",
"rates",
"between",
"nucleotide",
"states",
".",
"For",
"more",
"info",
"see",
":",
"Jukes",
"and",
"Cantor",
"(",
"1969",
")",
".",
"Evolution",
"of",
"Protein",
"Molecules",
".",
"New",
"York",
":",
"Academic",
"Press",
".",
"pp",
".",
"21–132"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/nuc_models.py#L8-L34
|
[
"def",
"JC69",
"(",
"mu",
"=",
"1.0",
",",
"alphabet",
"=",
"\"nuc\"",
",",
"*",
"*",
"kwargs",
")",
":",
"num_chars",
"=",
"len",
"(",
"alphabets",
"[",
"alphabet",
"]",
")",
"W",
",",
"pi",
"=",
"np",
".",
"ones",
"(",
"(",
"num_chars",
",",
"num_chars",
")",
")",
",",
"np",
".",
"ones",
"(",
"num_chars",
")",
"gtr",
"=",
"GTR",
"(",
"alphabet",
"=",
"alphabet",
")",
"gtr",
".",
"assign_rates",
"(",
"mu",
"=",
"mu",
",",
"pi",
"=",
"pi",
",",
"W",
"=",
"W",
")",
"return",
"gtr"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
K80
|
Kimura 1980 model. Assumes equal concentrations across nucleotides, but
allows different rates between transitions and transversions. The ratio
of the transversion/transition rates is given by the kappa parameter.
For more info, see
Kimura (1980), J. Mol. Evol. 16 (2): 111–120. doi:10.1007/BF01731581.
Current implementation of the model does not account for the gaps.
Parameters
-----------
mu : float
Overall substitution rate
kappa : float
Ratio of transversion/transition rates
|
treetime/nuc_models.py
|
def K80(mu=1., kappa=0.1, **kwargs):
"""
Kimura 1980 model. Assumes equal concentrations across nucleotides, but
allows different rates between transitions and transversions. The ratio
of the transversion/transition rates is given by the kappa parameter.
For more info, see
Kimura (1980), J. Mol. Evol. 16 (2): 111–120. doi:10.1007/BF01731581.
Current implementation of the model does not account for the gaps.
Parameters
-----------
mu : float
Overall substitution rate
kappa : float
Ratio of transversion/transition rates
"""
num_chars = len(alphabets['nuc_nogap'])
pi = np.ones(len(alphabets['nuc_nogap']), dtype=float)/len(alphabets['nuc_nogap'])
W = _create_transversion_transition_W(kappa)
gtr = GTR(alphabet=alphabets['nuc_nogap'])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
|
def K80(mu=1., kappa=0.1, **kwargs):
"""
Kimura 1980 model. Assumes equal concentrations across nucleotides, but
allows different rates between transitions and transversions. The ratio
of the transversion/transition rates is given by the kappa parameter.
For more info, see
Kimura (1980), J. Mol. Evol. 16 (2): 111–120. doi:10.1007/BF01731581.
Current implementation of the model does not account for the gaps.
Parameters
-----------
mu : float
Overall substitution rate
kappa : float
Ratio of transversion/transition rates
"""
num_chars = len(alphabets['nuc_nogap'])
pi = np.ones(len(alphabets['nuc_nogap']), dtype=float)/len(alphabets['nuc_nogap'])
W = _create_transversion_transition_W(kappa)
gtr = GTR(alphabet=alphabets['nuc_nogap'])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
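# --- Editorial usage sketch; not part of the original treetime source. ---
# K80's rate structure, standalone: the A<->G and C<->T entries are scaled
# by kappa, all other exchange rates stay 1, and pi is uniform.
import numpy as np
kappa = 0.1
W = np.ones((4, 4))                    # alphabet order [A, C, G, T]
W[0, 2] = W[2, 0] = W[1, 3] = W[3, 1] = kappa
pi = np.ones(4) / 4.0
# k80_model = K80(mu=1.0, kappa=kappa) # in-module call building the same GTR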
|
[
"Kimura",
"1980",
"model",
".",
"Assumes",
"equal",
"concentrations",
"across",
"nucleotides",
"but",
"allows",
"different",
"rates",
"between",
"transitions",
"and",
"transversions",
".",
"The",
"ratio",
"of",
"the",
"transversion",
"/",
"transition",
"rates",
"is",
"given",
"by",
"kappa",
"parameter",
".",
"For",
"more",
"info",
"see",
"Kimura",
"(",
"1980",
")",
"J",
".",
"Mol",
".",
"Evol",
".",
"16",
"(",
"2",
")",
":",
"111–120",
".",
"doi",
":",
"10",
".",
"1007",
"/",
"BF01731581",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/nuc_models.py#L36-L61
|
[
"def",
"K80",
"(",
"mu",
"=",
"1.",
",",
"kappa",
"=",
"0.1",
",",
"*",
"*",
"kwargs",
")",
":",
"num_chars",
"=",
"len",
"(",
"alphabets",
"[",
"'nuc_nogap'",
"]",
")",
"pi",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"alphabets",
"[",
"'nuc_nogap'",
"]",
")",
",",
"dtype",
"=",
"float",
")",
"/",
"len",
"(",
"alphabets",
"[",
"'nuc_nogap'",
"]",
")",
"W",
"=",
"_create_transversion_transition_W",
"(",
"kappa",
")",
"gtr",
"=",
"GTR",
"(",
"alphabet",
"=",
"alphabets",
"[",
"'nuc_nogap'",
"]",
")",
"gtr",
".",
"assign_rates",
"(",
"mu",
"=",
"mu",
",",
"pi",
"=",
"pi",
",",
"W",
"=",
"W",
")",
"return",
"gtr"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
F81
|
Felsenstein 1981 model. Assumes non-equal concentrations across nucleotides,
but the transition rate between all states is assumed to be equal. See
Felsenstein (1981), J. Mol. Evol. 17 (6): 368–376. doi:10.1007/BF01734359
for details.
Current implementation of the model does not account for the gaps (treatment of
gaps as characters is possible if specifying alphabet='nuc_gap').
Parameters
-----------
mu : float
Substitution rate
pi : numpy.array
Nucleotide concentrations
alphabet : str
Alphabet to use. Possible values are: ['nuc', 'nuc_gap']. Default is 'nuc', which discounts all gaps.
The 'nuc_gap' alphabet enables treatment of gaps as characters.
|
treetime/nuc_models.py
|
def F81(mu=1.0, pi=None, alphabet="nuc", **kwargs):
"""
Felsenstein 1981 model. Assumes non-equal concentrations across nucleotides,
but the transition rate between all states is assumed to be equal. See
Felsenstein (1981), J. Mol. Evol. 17 (6): 368–376. doi:10.1007/BF01734359
for details.
Current implementation of the model does not account for the gaps (treatment of
gaps as characters is possible if specifying alphabet='nuc_gap').
Parameters
-----------
mu : float
Substitution rate
pi : numpy.array
Nucleotide concentrations
alphabet : str
Alphabet to use. Possible values are: ['nuc', 'nuc_gap']. Default is 'nuc', which discounts all gaps.
The 'nuc_gap' alphabet enables treatment of gaps as characters.
"""
if pi is None:
pi=0.25*np.ones(4, dtype=float)
num_chars = len(alphabets[alphabet])
pi = np.array(pi, dtype=float)
if num_chars != len(pi) :
pi = np.ones((num_chars, ), dtype=float)
print ("GTR: Warning!The number of the characters in the alphabet does not match the "
"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.")
W = np.ones((num_chars,num_chars))
pi /= (1.0 * np.sum(pi))
gtr = GTR(alphabet=alphabets[alphabet])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
|
def F81(mu=1.0, pi=None, alphabet="nuc", **kwargs):
"""
Felsenstein 1981 model. Assumes non-equal concentrations across nucleotides,
but the transition rate between all states is assumed to be equal. See
Felsenstein (1981), J. Mol. Evol. 17 (6): 368–376. doi:10.1007/BF01734359
for details.
Current implementation of the model does not account for the gaps (treatment of
gaps as characters is possible if specifying alphabet='nuc_gap').
Parameters
-----------
mu : float
Substitution rate
pi : numpy.array
Nucleotide concentrations
alphabet : str
Alphabet to use. Possible values are: ['nuc', 'nuc_gap']. Default is 'nuc', which discounts all gaps.
The 'nuc_gap' alphabet enables treatment of gaps as characters.
"""
if pi is None:
pi=0.25*np.ones(4, dtype=float)
num_chars = len(alphabets[alphabet])
pi = np.array(pi, dtype=float)
if num_chars != len(pi) :
pi = np.ones((num_chars, ), dtype=float)
print ("GTR: Warning!The number of the characters in the alphabet does not match the "
"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.")
W = np.ones((num_chars,num_chars))
pi /= (1.0 * np.sum(pi))
gtr = GTR(alphabet=alphabets[alphabet])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
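# --- Editorial usage sketch; not part of the original treetime source. ---
# F81 keeps all exchange rates equal but lets equilibrium frequencies differ;
# pi is normalized before being handed to assign_rates.
import numpy as np
pi = np.array([0.35, 0.15, 0.15, 0.35])   # illustrative nucleotide frequencies
pi /= pi.sum()
W = np.ones((4, 4))
# f81_model = F81(mu=1.0, pi=pi, alphabet='nuc')  # in-module call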
|
[
"Felsenstein",
"1981",
"model",
".",
"Assumes",
"non",
"-",
"equal",
"concentrations",
"across",
"nucleotides",
"but",
"the",
"transition",
"rate",
"between",
"all",
"states",
"is",
"assumed",
"to",
"be",
"equal",
".",
"See",
"Felsenstein",
"(",
"1981",
")",
"J",
".",
"Mol",
".",
"Evol",
".",
"17",
"(",
"6",
")",
":",
"368–376",
".",
"doi",
":",
"10",
".",
"1007",
"/",
"BF01734359",
"for",
"details",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/nuc_models.py#L63-L103
|
[
"def",
"F81",
"(",
"mu",
"=",
"1.0",
",",
"pi",
"=",
"None",
",",
"alphabet",
"=",
"\"nuc\"",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pi",
"is",
"None",
":",
"pi",
"=",
"0.25",
"*",
"np",
".",
"ones",
"(",
"4",
",",
"dtype",
"=",
"float",
")",
"num_chars",
"=",
"len",
"(",
"alphabets",
"[",
"alphabet",
"]",
")",
"pi",
"=",
"np",
".",
"array",
"(",
"pi",
",",
"dtype",
"=",
"float",
")",
"if",
"num_chars",
"!=",
"len",
"(",
"pi",
")",
":",
"pi",
"=",
"np",
".",
"ones",
"(",
"(",
"num_chars",
",",
")",
",",
"dtype",
"=",
"float",
")",
"print",
"(",
"\"GTR: Warning!The number of the characters in the alphabet does not match the \"",
"\"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.\"",
")",
"W",
"=",
"np",
".",
"ones",
"(",
"(",
"num_chars",
",",
"num_chars",
")",
")",
"pi",
"/=",
"(",
"1.0",
"*",
"np",
".",
"sum",
"(",
"pi",
")",
")",
"gtr",
"=",
"GTR",
"(",
"alphabet",
"=",
"alphabets",
"[",
"alphabet",
"]",
")",
"gtr",
".",
"assign_rates",
"(",
"mu",
"=",
"mu",
",",
"pi",
"=",
"pi",
",",
"W",
"=",
"W",
")",
"return",
"gtr"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
HKY85
|
Hasegawa, Kishino and Yano 1985 model. Allows different concentrations of the
nucleotides (as in F81) + distinguishes between transition/transversion substitutions
(similar to K80). Link:
Hasegawa, Kishino, Yano (1985), J. Mol. Evol. 22 (2): 160–174. doi:10.1007/BF02101694
Current implementation of the model does not account for the gaps
Parameters
-----------
mu : float
Substitution rate
pi : numpy.array
Nucleotide concentrations
kappa : float
Ratio of transversion/transition substitution rates
|
treetime/nuc_models.py
|
def HKY85(mu=1.0, pi=None, kappa=0.1, **kwargs):
"""
Hasegawa, Kishino and Yano 1985 model. Allows different concentrations of the
nucleotides (as in F81) + distinguishes between transition/transversion substitutions
(similar to K80). Link:
Hasegawa, Kishino, Yano (1985), J. Mol. Evol. 22 (2): 160–174. doi:10.1007/BF02101694
Current implementation of the model does not account for the gaps
Parameters
-----------
mu : float
Substitution rate
pi : numpy.array
Nucleotide concentrations
kappa : float
Ratio of transversion/transition substitution rates
"""
if pi is None:
pi=0.25*np.ones(4, dtype=float)
num_chars = len(alphabets['nuc_nogap'])
if num_chars != pi.shape[0] :
pi = np.ones((num_chars, ), dtype=float)
print ("GTR: Warning!The number of the characters in the alphabet does not match the "
"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.")
W = _create_transversion_transition_W(kappa)
pi /= pi.sum()
gtr = GTR(alphabet=alphabets['nuc_nogap'])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
|
def HKY85(mu=1.0, pi=None, kappa=0.1, **kwargs):
"""
Hasegawa, Kishino and Yano 1985 model. Allows different concentrations of the
nucleotides (as in F81) + distinguishes between transition/transversion substitutions
(similar to K80). Link:
Hasegawa, Kishino, Yano (1985), J. Mol. Evol. 22 (2): 160–174. doi:10.1007/BF02101694
Current implementation of the model does not account for the gaps
Parameters
-----------
mu : float
Substitution rate
pi : numpy.array
Nucleotide concentrations
kappa : float
Ratio of transversion/transition substitution rates
"""
if pi is None:
pi=0.25*np.ones(4, dtype=float)
num_chars = len(alphabets['nuc_nogap'])
if num_chars != pi.shape[0] :
pi = np.ones((num_chars, ), dtype=float)
print ("GTR: Warning!The number of the characters in the alphabet does not match the "
"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.")
W = _create_transversion_transition_W(kappa)
pi /= pi.sum()
gtr = GTR(alphabet=alphabets['nuc_nogap'])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
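# --- Editorial usage sketch; not part of the original treetime source. ---
# HKY85 combines F81-style frequencies with K80-style transition weighting.
import numpy as np
kappa = 0.1
pi = np.array([0.35, 0.15, 0.15, 0.35])
pi /= pi.sum()
W = np.ones((4, 4))
W[0, 2] = W[2, 0] = W[1, 3] = W[3, 1] = kappa   # A<->G and C<->T entries
# hky_model = HKY85(mu=1.0, pi=pi, kappa=kappa)  # in-module call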
|
[
"Hasegawa",
"Kishino",
"and",
"Yano",
"1985",
"model",
".",
"Allows",
"different",
"concentrations",
"of",
"the",
"nucleotides",
"(",
"as",
"in",
"F81",
")",
"+",
"distinguishes",
"between",
"transition",
"/",
"transversionsubstitutions",
"(",
"similar",
"to",
"K80",
")",
".",
"Link",
":",
"Hasegawa",
"Kishino",
"Yano",
"(",
"1985",
")",
"J",
".",
"Mol",
".",
"Evol",
".",
"22",
"(",
"2",
")",
":",
"160–174",
".",
"doi",
":",
"10",
".",
"1007",
"/",
"BF02101694"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/nuc_models.py#L105-L141
|
[
"def",
"HKY85",
"(",
"mu",
"=",
"1.0",
",",
"pi",
"=",
"None",
",",
"kappa",
"=",
"0.1",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pi",
"is",
"None",
":",
"pi",
"=",
"0.25",
"*",
"np",
".",
"ones",
"(",
"4",
",",
"dtype",
"=",
"float",
")",
"num_chars",
"=",
"len",
"(",
"alphabets",
"[",
"'nuc_nogap'",
"]",
")",
"if",
"num_chars",
"!=",
"pi",
".",
"shape",
"[",
"0",
"]",
":",
"pi",
"=",
"np",
".",
"ones",
"(",
"(",
"num_chars",
",",
")",
",",
"dtype",
"=",
"float",
")",
"print",
"(",
"\"GTR: Warning!The number of the characters in the alphabet does not match the \"",
"\"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.\"",
")",
"W",
"=",
"_create_transversion_transition_W",
"(",
"kappa",
")",
"pi",
"/=",
"pi",
".",
"sum",
"(",
")",
"gtr",
"=",
"GTR",
"(",
"alphabet",
"=",
"alphabets",
"[",
"'nuc_nogap'",
"]",
")",
"gtr",
".",
"assign_rates",
"(",
"mu",
"=",
"mu",
",",
"pi",
"=",
"pi",
",",
"W",
"=",
"W",
")",
"return",
"gtr"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
T92
|
Tamura 1992 model. Extending Kimura (1980) model for the case where a
G+C-content bias exists. Link:
Tamura K (1992), Mol. Biol. Evol. 9 (4): 678–687. DOI: 10.1093/oxfordjournals.molbev.a040752
Current implementation of the model does not account for the gaps
Parameters
-----------
mu : float
substitution rate
pi_GC : float
relative GC content
kappa : float
relative transversion/transition rate
|
treetime/nuc_models.py
|
def T92(mu=1.0, pi_GC=0.5, kappa=0.1, **kwargs):
"""
Tamura 1992 model. Extending Kimura (1980) model for the case where a
G+C-content bias exists. Link:
Tamura K (1992), Mol. Biol. Evol. 9 (4): 678–687. DOI: 10.1093/oxfordjournals.molbev.a040752
Current implementation of the model does not account for the gaps
Parameters
-----------
mu : float
substitution rate
pi_GC : float
relative GC content
kappa : float
relative transversion/transition rate
"""
W = _create_transversion_transition_W(kappa)
# A C G T
if pi_GC >= 1.:
raise ValueError("The relative GC content specified is larger than 1.0!")
pi = np.array([(1.-pi_GC)*0.5, pi_GC*0.5, pi_GC*0.5, (1.-pi_GC)*0.5])
gtr = GTR(alphabet=alphabets['nuc_nogap'])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
|
def T92(mu=1.0, pi_GC=0.5, kappa=0.1, **kwargs):
"""
Tamura 1992 model. Extending Kimura (1980) model for the case where a
G+C-content bias exists. Link:
Tamura K (1992), Mol. Biol. Evol. 9 (4): 678–687. DOI: 10.1093/oxfordjournals.molbev.a040752
Current implementation of the model does not account for the gaps
Parameters
-----------
mu : float
substitution rate
pi_GC : float
relative GC content
kappa : float
relative transversion/transition rate
"""
W = _create_transversion_transition_W(kappa)
# A C G T
if pi_GC >= 1.:
raise ValueError("The relative GC content specified is larger than 1.0!")
pi = np.array([(1.-pi_GC)*0.5, pi_GC*0.5, pi_GC*0.5, (1.-pi_GC)*0.5])
gtr = GTR(alphabet=alphabets['nuc_nogap'])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
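# --- Editorial usage sketch; not part of the original treetime source. ---
# T92's equilibrium frequencies follow from the G+C content alone
# (alphabet order [A, C, G, T]): C and G each get pi_GC/2, A and T the rest.
import numpy as np
pi_GC = 0.6
pi = np.array([(1. - pi_GC) * 0.5, pi_GC * 0.5, pi_GC * 0.5, (1. - pi_GC) * 0.5])
assert np.isclose(pi.sum(), 1.0)
# t92_model = T92(mu=1.0, pi_GC=pi_GC, kappa=0.1)  # in-module call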
|
[
"Tamura",
"1992",
"model",
".",
"Extending",
"Kimura",
"(",
"1980",
")",
"model",
"for",
"the",
"case",
"where",
"a",
"G",
"+",
"C",
"-",
"content",
"bias",
"exists",
".",
"Link",
":",
"Tamura",
"K",
"(",
"1992",
")",
"Mol",
".",
"Biol",
".",
"Evol",
".",
"9",
"(",
"4",
")",
":",
"678–687",
".",
"DOI",
":",
"10",
".",
"1093",
"/",
"oxfordjournals",
".",
"molbev",
".",
"a040752"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/nuc_models.py#L143-L172
|
[
"def",
"T92",
"(",
"mu",
"=",
"1.0",
",",
"pi_GC",
"=",
"0.5",
",",
"kappa",
"=",
"0.1",
",",
"*",
"*",
"kwargs",
")",
":",
"W",
"=",
"_create_transversion_transition_W",
"(",
"kappa",
")",
"# A C G T",
"if",
"pi_CG",
">=",
"1.",
":",
"raise",
"ValueError",
"(",
"\"The relative CG content specified is larger than 1.0!\"",
")",
"pi",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"1.",
"-",
"pi_CG",
")",
"*",
"0.5",
",",
"pi_CG",
"*",
"0.5",
",",
"pi_CG",
"*",
"0.5",
",",
"(",
"1",
"-",
"pi_CG",
")",
"*",
"0.5",
"]",
")",
"gtr",
"=",
"GTR",
"(",
"alphabet",
"=",
"alphabets",
"[",
"'nuc_nogap'",
"]",
")",
"gtr",
".",
"assign_rates",
"(",
"mu",
"=",
"mu",
",",
"pi",
"=",
"pi",
",",
"W",
"=",
"W",
")",
"return",
"gtr"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
TN93
|
Tamura and Nei 1993. The model distinguishes between the two different types of
transition: (A <-> G) is allowed to have a different rate to (C<->T).
Transversions have the same rate. The frequencies of the nucleotides are allowed
to be different. Link:
Tamura, Nei (1993), MolBiol Evol. 10 (3): 512–526. DOI:10.1093/oxfordjournals.molbev.a040023
Parameters
-----------
mu : float
Substitution rate
kappa1 : float
relative A<-->C, A<-->T, T<-->G and G<-->C rates
kappa2 : float
relative C<-->T rate
Note
----
Rate of A<-->G substitution is set to one. All other rates (kappa1, kappa2)
are specified relative to this rate
|
treetime/nuc_models.py
|
def TN93(mu=1.0, kappa1=1., kappa2=1., pi=None, **kwargs):
"""
Tamura and Nei 1993. The model distinguishes between the two different types of
transition: (A <-> G) is allowed to have a different rate to (C<->T).
Transversions have the same rate. The frequencies of the nucleotides are allowed
to be different. Link:
Tamura, Nei (1993), MolBiol Evol. 10 (3): 512–526. DOI:10.1093/oxfordjournals.molbev.a040023
Parameters
-----------
mu : float
Substitution rate
kappa1 : float
relative A<-->C, A<-->T, T<-->G and G<-->C rates
kappa2 : float
relative C<-->T rate
Note
----
Rate of A<-->G substitution is set to one. All other rates (kappa1, kappa2)
are specified relative to this rate
"""
if pi is None:
pi=0.25*np.ones(4, dtype=float)
W = np.array([
[1, kappa1, 1, kappa1],
[kappa1, 1, kappa1, kappa2],
[1, kappa1, 1, kappa1],
[kappa1, kappa2, kappa1, 1]], dtype=float)
pi /=pi.sum()
num_chars = len(alphabets['nuc_nogap'])
if num_chars != pi.shape[0] :
pi = np.ones((num_chars, ), dtype=float)
print ("GTR: Warning!The number of the characters in the alphabet does not match the "
"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.")
gtr = GTR(alphabet=alphabets['nuc'])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
|
def TN93(mu=1.0, kappa1=1., kappa2=1., pi=None, **kwargs):
"""
Tamura and Nei 1993. The model distinguishes between the two different types of
transition: (A <-> G) is allowed to have a different rate to (C<->T).
Transversions have the same rate. The frequencies of the nucleotides are allowed
to be different. Link:
Tamura, Nei (1993), MolBiol Evol. 10 (3): 512–526. DOI:10.1093/oxfordjournals.molbev.a040023
Parameters
-----------
mu : float
Substitution rate
kappa1 : float
relative A<-->C, A<-->T, T<-->G and G<-->C rates
kappa2 : float
relative C<-->T rate
Note
----
Rate of A<-->G substitution is set to one. All other rates (kappa1, kappa2)
are specified relative to this rate
"""
if pi is None:
pi=0.25*np.ones(4, dtype=float)
W = np.array([
[1, kappa1, 1, kappa1],
[kappa1, 1, kappa1, kappa2],
[1, kappa1, 1, kappa1],
[kappa1, kappa2, kappa1, 1]], dtype=float)
pi /=pi.sum()
num_chars = len(alphabets['nuc_nogap'])
if num_chars != pi.shape[0] :
pi = np.ones((num_chars, ), dtype=float)
print ("GTR: Warning!The number of the characters in the alphabet does not match the "
"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.")
gtr = GTR(alphabet=alphabets['nuc'])
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr
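# --- Editorial usage sketch; not part of the original treetime source. ---
# TN93's exchange matrix, standalone (order [A, C, G, T]): A<->G is the
# reference rate 1, C<->T gets kappa2, every transversion gets kappa1.
import numpy as np
kappa1, kappa2 = 2.0, 4.0   # illustrative values
W = np.array([[1,      kappa1, 1,      kappa1],
              [kappa1, 1,      kappa1, kappa2],
              [1,      kappa1, 1,      kappa1],
              [kappa1, kappa2, kappa1, 1]], dtype=float)
# tn93_model = TN93(mu=1.0, kappa1=kappa1, kappa2=kappa2)  # in-module call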
|
[
"Tamura",
"and",
"Nei",
"1993",
".",
"The",
"model",
"distinguishes",
"between",
"the",
"two",
"different",
"types",
"of",
"transition",
":",
"(",
"A",
"<",
"-",
">",
"G",
")",
"is",
"allowed",
"to",
"have",
"a",
"different",
"rate",
"to",
"(",
"C<",
"-",
">",
"T",
")",
".",
"Transversions",
"have",
"the",
"same",
"rate",
".",
"The",
"frequencies",
"of",
"the",
"nucleotides",
"are",
"allowed",
"to",
"be",
"different",
".",
"Link",
":",
"Tamura",
"Nei",
"(",
"1993",
")",
"MolBiol",
"Evol",
".",
"10",
"(",
"3",
")",
":",
"512–526",
".",
"DOI",
":",
"10",
".",
"1093",
"/",
"oxfordjournals",
".",
"molbev",
".",
"a040023"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/nuc_models.py#L174-L220
|
[
"def",
"TN93",
"(",
"mu",
"=",
"1.0",
",",
"kappa1",
"=",
"1.",
",",
"kappa2",
"=",
"1.",
",",
"pi",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pi",
"is",
"None",
":",
"pi",
"=",
"0.25",
"*",
"np",
".",
"ones",
"(",
"4",
",",
"dtype",
"=",
"float",
")",
"W",
"=",
"np",
".",
"ones",
"(",
"(",
"4",
",",
"4",
")",
")",
"W",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"kappa1",
",",
"1",
",",
"kappa1",
"]",
",",
"[",
"kappa1",
",",
"1",
",",
"kappa1",
",",
"kappa2",
"]",
",",
"[",
"1",
",",
"kappa1",
",",
"1",
",",
"kappa1",
"]",
",",
"[",
"kappa1",
",",
"kappa2",
",",
"kappa1",
",",
"1",
"]",
"]",
",",
"dtype",
"=",
"float",
")",
"pi",
"/=",
"pi",
".",
"sum",
"(",
")",
"num_chars",
"=",
"len",
"(",
"alphabets",
"[",
"'nuc_nogap'",
"]",
")",
"if",
"num_chars",
"!=",
"pi",
".",
"shape",
"[",
"0",
"]",
":",
"pi",
"=",
"np",
".",
"ones",
"(",
"(",
"num_chars",
",",
")",
",",
"dtype",
"=",
"float",
")",
"print",
"(",
"\"GTR: Warning!The number of the characters in the alphabet does not match the \"",
"\"shape of the vector of equilibrium frequencies Pi -- assuming equal frequencies for all states.\"",
")",
"gtr",
"=",
"GTR",
"(",
"alphabet",
"=",
"alphabets",
"[",
"'nuc'",
"]",
")",
"gtr",
".",
"assign_rates",
"(",
"mu",
"=",
"mu",
",",
"pi",
"=",
"pi",
",",
"W",
"=",
"W",
")",
"return",
"gtr"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
_create_transversion_transition_W
|
Alphabet = [A, C, G, T]
|
treetime/nuc_models.py
|
def _create_transversion_transition_W(kappa):
"""
Alphabet = [A, C, G, T]
"""
W = np.ones((4,4))
W[0, 2]=W[1, 3]=W[2, 0]=W[3,1]=kappa
return W
|
def _create_transversion_transition_W(kappa):
"""
Alphabet = [A, C, G, T]
"""
W = np.ones((4,4))
W[0, 2]=W[1, 3]=W[2, 0]=W[3,1]=kappa
return W
|
[
"Alphabet",
"=",
"[",
"A",
"C",
"G",
"T",
"]"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/nuc_models.py#L222-L228
|
[
"def",
"_create_transversion_transition_W",
"(",
"kappa",
")",
":",
"W",
"=",
"np",
".",
"ones",
"(",
"(",
"4",
",",
"4",
")",
")",
"W",
"[",
"0",
",",
"2",
"]",
"=",
"W",
"[",
"1",
",",
"3",
"]",
"=",
"W",
"[",
"2",
",",
"0",
"]",
"=",
"W",
"[",
"3",
",",
"1",
"]",
"=",
"kappa",
"return",
"W"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.set_Tc
|
initialize the merger model with a coalescent time
Args:
- Tc: a float or an iterable; if iterable, a second argument T of the same shape is required
- T: an array-like of the same shape as Tc that specifies the time pivots corresponding to Tc
Returns:
- None
|
treetime/merger_models.py
|
def set_Tc(self, Tc, T=None):
'''
initialize the merger model with a coalescent time
Args:
- Tc: a float or an iterable; if iterable, a second argument T of the same shape is required
- T: an array-like of the same shape as Tc that specifies the time pivots corresponding to Tc
Returns:
- None
'''
if isinstance(Tc, Iterable):
if len(Tc)==len(T):
x = np.concatenate(([-ttconf.BIG_NUMBER], T, [ttconf.BIG_NUMBER]))
y = np.concatenate(([Tc[0]], Tc, [Tc[-1]]))
self.Tc = interp1d(x,y)
else:
self.logger("need Tc values and Timepoints of equal length",2,warn=True)
self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER], [1e-5, 1e-5])
else:
self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER],
[Tc+ttconf.TINY_NUMBER, Tc+ttconf.TINY_NUMBER])
self.calc_integral_merger_rate()
|
def set_Tc(self, Tc, T=None):
'''
initialize the merger model with a coalescent time
Args:
- Tc: a float or an iterable; if iterable, a second argument T of the same shape is required
- T: an array-like of the same shape as Tc that specifies the time pivots corresponding to Tc
Returns:
- None
'''
if isinstance(Tc, Iterable):
if len(Tc)==len(T):
x = np.concatenate(([-ttconf.BIG_NUMBER], T, [ttconf.BIG_NUMBER]))
y = np.concatenate(([Tc[0]], Tc, [Tc[-1]]))
self.Tc = interp1d(x,y)
else:
self.logger("need Tc values and Timepoints of equal length",2,warn=True)
self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER], [1e-5, 1e-5])
else:
self.Tc = interp1d([-ttconf.BIG_NUMBER, ttconf.BIG_NUMBER],
[Tc+ttconf.TINY_NUMBER, Tc+ttconf.TINY_NUMBER])
self.calc_integral_merger_rate()
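# --- Editorial sketch; not part of the original treetime source. ---
# The padding trick used above, standalone: sentinel pivots at +/-BIG_NUMBER
# keep the interpolator defined on the whole real line (constant tails).
import numpy as np
from scipy.interpolate import interp1d
BIG_NUMBER = 1e10                    # stand-in; ttconf.BIG_NUMBER's value assumed
T = np.array([0.0, 1.0, 2.0])        # time pivots
Tc = np.array([0.10, 0.20, 0.15])    # coalescent time scale at each pivot
x = np.concatenate(([-BIG_NUMBER], T, [BIG_NUMBER]))
y = np.concatenate(([Tc[0]], Tc, [Tc[-1]]))
Tc_interp = interp1d(x, y)
print(float(Tc_interp(-5.0)), float(Tc_interp(1.5)))   # 0.1 0.175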
|
[
"initialize",
"the",
"merger",
"model",
"with",
"a",
"coalescent",
"time"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L29-L50
|
[
"def",
"set_Tc",
"(",
"self",
",",
"Tc",
",",
"T",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"Tc",
",",
"Iterable",
")",
":",
"if",
"len",
"(",
"Tc",
")",
"==",
"len",
"(",
"T",
")",
":",
"x",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"-",
"ttconf",
".",
"BIG_NUMBER",
"]",
",",
"T",
",",
"[",
"ttconf",
".",
"BIG_NUMBER",
"]",
")",
")",
"y",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"Tc",
"[",
"0",
"]",
"]",
",",
"Tc",
",",
"[",
"Tc",
"[",
"-",
"1",
"]",
"]",
")",
")",
"self",
".",
"Tc",
"=",
"interp1d",
"(",
"x",
",",
"y",
")",
"else",
":",
"self",
".",
"logger",
"(",
"\"need Tc values and Timepoints of equal length\"",
",",
"2",
",",
"warn",
"=",
"True",
")",
"self",
".",
"Tc",
"=",
"interp1d",
"(",
"[",
"-",
"ttconf",
".",
"BIG_NUMBER",
",",
"ttconf",
".",
"BIG_NUMBER",
"]",
",",
"[",
"1e-5",
",",
"1e-5",
"]",
")",
"else",
":",
"self",
".",
"Tc",
"=",
"interp1d",
"(",
"[",
"-",
"ttconf",
".",
"BIG_NUMBER",
",",
"ttconf",
".",
"BIG_NUMBER",
"]",
",",
"[",
"Tc",
"+",
"ttconf",
".",
"TINY_NUMBER",
",",
"Tc",
"+",
"ttconf",
".",
"TINY_NUMBER",
"]",
")",
"self",
".",
"calc_integral_merger_rate",
"(",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.calc_branch_count
|
calculates an interpolation object that maps time to the number of
concurrent branches in the tree. The result is stored in self.nbranches
|
treetime/merger_models.py
|
def calc_branch_count(self):
'''
calculates an interpolation object that maps time to the number of
concurrent branches in the tree. The result is stored in self.nbranches
'''
# make a list of (time, merger or loss event) tuples, ordered root (oldest) first
self.tree_events = np.array(sorted([(n.time_before_present, len(n.clades)-1)
for n in self.tree.find_clades() if not n.bad_branch],
key=lambda x:-x[0]))
# collapse multiple events at one time point into sum of changes
from collections import defaultdict
dn_branch = defaultdict(int)
for (t, dn) in self.tree_events:
dn_branch[t]+=dn
unique_mergers = np.array(sorted(dn_branch.items(), key = lambda x:-x[0]))
# calculate the branch count at each point summing the delta branch counts
nbranches = [[ttconf.BIG_NUMBER, 1], [unique_mergers[0,0]+ttconf.TINY_NUMBER, 1]]
for ti, (t, dn) in enumerate(unique_mergers[:-1]):
new_n = nbranches[-1][1]+dn
next_t = unique_mergers[ti+1,0]+ttconf.TINY_NUMBER
nbranches.append([t, new_n])
nbranches.append([next_t, new_n])
new_n += unique_mergers[-1,1]
nbranches.append([next_t, new_n])
nbranches.append([-ttconf.BIG_NUMBER, new_n])
nbranches=np.array(nbranches)
self.nbranches = interp1d(nbranches[:,0], nbranches[:,1], kind='linear')
|
def calc_branch_count(self):
'''
calculates an interpolation object that maps time to the number of
concurrent branches in the tree. The result is stored in self.nbranches
'''
# make a list of (time, merger or loss event) tuples, ordered root (oldest) first
self.tree_events = np.array(sorted([(n.time_before_present, len(n.clades)-1)
for n in self.tree.find_clades() if not n.bad_branch],
key=lambda x:-x[0]))
# collapse multiple events at one time point into sum of changes
from collections import defaultdict
dn_branch = defaultdict(int)
for (t, dn) in self.tree_events:
dn_branch[t]+=dn
unique_mergers = np.array(sorted(dn_branch.items(), key = lambda x:-x[0]))
# calculate the branch count at each point summing the delta branch counts
nbranches = [[ttconf.BIG_NUMBER, 1], [unique_mergers[0,0]+ttconf.TINY_NUMBER, 1]]
for ti, (t, dn) in enumerate(unique_mergers[:-1]):
new_n = nbranches[-1][1]+dn
next_t = unique_mergers[ti+1,0]+ttconf.TINY_NUMBER
nbranches.append([t, new_n])
nbranches.append([next_t, new_n])
new_n += unique_mergers[-1,1]
nbranches.append([next_t, new_n])
nbranches.append([-ttconf.BIG_NUMBER, new_n])
nbranches=np.array(nbranches)
self.nbranches = interp1d(nbranches[:,0], nbranches[:,1], kind='linear')
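# --- Editorial sketch; not part of the original treetime source. ---
# Collapsing simultaneous events into net branch-count changes, as in the
# loop above; events are (time_before_present, n_children - 1) tuples.
import numpy as np
from collections import defaultdict
tree_events = [(3.0, 1), (3.0, 1), (1.5, 2)]   # illustrative values
dn_branch = defaultdict(int)
for t, dn in tree_events:
    dn_branch[t] += dn
unique_mergers = np.array(sorted(dn_branch.items(), key=lambda x: -x[0]))
print(unique_mergers)   # [[3.  2.] [1.5 2.]] -- the two events at t=3.0 merged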
|
[
"calculates",
"an",
"interpolation",
"object",
"that",
"maps",
"time",
"to",
"the",
"number",
"of",
"concurrent",
"branches",
"in",
"the",
"tree",
".",
"The",
"result",
"is",
"stored",
"in",
"self",
".",
"nbranches"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L53-L84
|
[
"def",
"calc_branch_count",
"(",
"self",
")",
":",
"# make a list of (time, merger or loss event) by root first iteration",
"self",
".",
"tree_events",
"=",
"np",
".",
"array",
"(",
"sorted",
"(",
"[",
"(",
"n",
".",
"time_before_present",
",",
"len",
"(",
"n",
".",
"clades",
")",
"-",
"1",
")",
"for",
"n",
"in",
"self",
".",
"tree",
".",
"find_clades",
"(",
")",
"if",
"not",
"n",
".",
"bad_branch",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"-",
"x",
"[",
"0",
"]",
")",
")",
"# collapse multiple events at one time point into sum of changes",
"from",
"collections",
"import",
"defaultdict",
"dn_branch",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"(",
"t",
",",
"dn",
")",
"in",
"self",
".",
"tree_events",
":",
"dn_branch",
"[",
"t",
"]",
"+=",
"dn",
"unique_mergers",
"=",
"np",
".",
"array",
"(",
"sorted",
"(",
"dn_branch",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"-",
"x",
"[",
"0",
"]",
")",
")",
"# calculate the branch count at each point summing the delta branch counts",
"nbranches",
"=",
"[",
"[",
"ttconf",
".",
"BIG_NUMBER",
",",
"1",
"]",
",",
"[",
"unique_mergers",
"[",
"0",
",",
"0",
"]",
"+",
"ttconf",
".",
"TINY_NUMBER",
",",
"1",
"]",
"]",
"for",
"ti",
",",
"(",
"t",
",",
"dn",
")",
"in",
"enumerate",
"(",
"unique_mergers",
"[",
":",
"-",
"1",
"]",
")",
":",
"new_n",
"=",
"nbranches",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"+",
"dn",
"next_t",
"=",
"unique_mergers",
"[",
"ti",
"+",
"1",
",",
"0",
"]",
"+",
"ttconf",
".",
"TINY_NUMBER",
"nbranches",
".",
"append",
"(",
"[",
"t",
",",
"new_n",
"]",
")",
"nbranches",
".",
"append",
"(",
"[",
"next_t",
",",
"new_n",
"]",
")",
"new_n",
"+=",
"unique_mergers",
"[",
"-",
"1",
",",
"1",
"]",
"nbranches",
".",
"append",
"(",
"[",
"next_t",
",",
"new_n",
"]",
")",
"nbranches",
".",
"append",
"(",
"[",
"-",
"ttconf",
".",
"BIG_NUMBER",
",",
"new_n",
"]",
")",
"nbranches",
"=",
"np",
".",
"array",
"(",
"nbranches",
")",
"self",
".",
"nbranches",
"=",
"interp1d",
"(",
"nbranches",
"[",
":",
",",
"0",
"]",
",",
"nbranches",
"[",
":",
",",
"1",
"]",
",",
"kind",
"=",
"'linear'",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.calc_integral_merger_rate
|
calculates the integral int_0^t (k(t')-1)/(2 Tc(t')) dt' and stores it as
self.integral_merger_rate. The differences of this quantity evaluated at
different time points are the cost of a branch.
|
treetime/merger_models.py
|
def calc_integral_merger_rate(self):
'''
calculates the integral int_0^t (k(t')-1)/(2 Tc(t')) dt' and stores it as
self.integral_merger_rate. The differences of this quantity evaluated at
different time points are the cost of a branch.
'''
# integrate the piecewise constant branch count function.
tvals = np.unique(self.nbranches.x[1:-1])
rate = self.branch_merger_rate(tvals)
avg_rate = 0.5*(rate[1:] + rate[:-1])
cost = np.concatenate(([0],np.cumsum(np.diff(tvals)*avg_rate)))
# make interpolation objects for the branch count and its integral
# the latter is scaled by 0.5/Tc
# need to add extra point at very large time before present to
# prevent 'out of interpolation range' errors
self.integral_merger_rate = interp1d(np.concatenate(([-ttconf.BIG_NUMBER], tvals,[ttconf.BIG_NUMBER])),
np.concatenate(([cost[0]], cost,[cost[-1]])), kind='linear')
|
def calc_integral_merger_rate(self):
'''
calculates the integral int_0^t (k(t')-1)/(2 Tc(t')) dt' and stores it as
self.integral_merger_rate. The differences of this quantity evaluated at
different time points are the cost of a branch.
'''
# integrate the piecewise constant branch count function.
tvals = np.unique(self.nbranches.x[1:-1])
rate = self.branch_merger_rate(tvals)
avg_rate = 0.5*(rate[1:] + rate[:-1])
cost = np.concatenate(([0],np.cumsum(np.diff(tvals)*avg_rate)))
# make interpolation objects for the branch count and its integral
# the latter is scaled by 0.5/Tc
# need to add extra point at very large time before present to
# prevent 'out of interpolation range' errors
self.integral_merger_rate = interp1d(np.concatenate(([-ttconf.BIG_NUMBER], tvals,[ttconf.BIG_NUMBER])),
np.concatenate(([cost[0]], cost,[cost[-1]])), kind='linear')
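# --- Editorial sketch; not part of the original treetime source. ---
# The cumulative trapezoid rule used above, standalone: average adjacent
# rates, weight by the interval length, and prepend a zero.
import numpy as np
tvals = np.array([0.0, 1.0, 2.0, 4.0])
rate = np.array([0.5, 0.5, 1.0, 1.0])            # merger rate at each pivot
avg_rate = 0.5 * (rate[1:] + rate[:-1])
cost = np.concatenate(([0], np.cumsum(np.diff(tvals) * avg_rate)))
print(cost)   # [0.    0.5   1.25  3.25]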
|
[
"calculates",
"the",
"integral",
"int_0^t",
"(",
"k",
"(",
"t",
")",
"-",
"1",
")",
"/",
"2Tc",
"(",
"t",
")",
"dt",
"and",
"stores",
"it",
"as",
"self",
".",
"integral_merger_rate",
".",
"This",
"differences",
"of",
"this",
"quantity",
"evaluated",
"at",
"different",
"times",
"points",
"are",
"the",
"cost",
"of",
"a",
"branch",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L87-L103
|
[
"def",
"calc_integral_merger_rate",
"(",
"self",
")",
":",
"# integrate the piecewise constant branch count function.",
"tvals",
"=",
"np",
".",
"unique",
"(",
"self",
".",
"nbranches",
".",
"x",
"[",
"1",
":",
"-",
"1",
"]",
")",
"rate",
"=",
"self",
".",
"branch_merger_rate",
"(",
"tvals",
")",
"avg_rate",
"=",
"0.5",
"*",
"(",
"rate",
"[",
"1",
":",
"]",
"+",
"rate",
"[",
":",
"-",
"1",
"]",
")",
"cost",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"0",
"]",
",",
"np",
".",
"cumsum",
"(",
"np",
".",
"diff",
"(",
"tvals",
")",
"*",
"avg_rate",
")",
")",
")",
"# make interpolation objects for the branch count and its integral",
"# the latter is scaled by 0.5/Tc",
"# need to add extra point at very large time before present to",
"# prevent 'out of interpolation range' errors",
"self",
".",
"integral_merger_rate",
"=",
"interp1d",
"(",
"np",
".",
"concatenate",
"(",
"(",
"[",
"-",
"ttconf",
".",
"BIG_NUMBER",
"]",
",",
"tvals",
",",
"[",
"ttconf",
".",
"BIG_NUMBER",
"]",
")",
")",
",",
"np",
".",
"concatenate",
"(",
"(",
"[",
"cost",
"[",
"0",
"]",
"]",
",",
"cost",
",",
"[",
"cost",
"[",
"-",
"1",
"]",
"]",
")",
")",
",",
"kind",
"=",
"'linear'",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.cost
|
returns the cost associated with a branch starting at t_node
t_node is the time before present; the branch extends back in time
Args:
- t_node: time of the node
- branch_length: branch length, determines when this branch merges with sister
- multiplicity: 2 if merger is binary, higher if this is a polytomy
|
treetime/merger_models.py
|
def cost(self, t_node, branch_length, multiplicity=2.0):
'''
returns the cost associated with a branch starting at t_node
t_node is the time before present; the branch extends back in time
Args:
- t_node: time of the node
- branch_length: branch length, determines when this branch merges with sister
- multiplicity: 2 if merger is binary, higher if this is a polytomy
'''
merger_time = t_node+branch_length
return self.integral_merger_rate(merger_time) - self.integral_merger_rate(t_node)\
- np.log(self.total_merger_rate(merger_time))*(multiplicity-1.0)/multiplicity
|
def cost(self, t_node, branch_length, multiplicity=2.0):
'''
returns the cost associated with a branch starting at t_node
t_node is the time before present; the branch extends back in time
Args:
- t_node: time of the node
- branch_length: branch length, determines when this branch merges with sister
- multiplicity: 2 if merger is binary, higher if this is a polytomy
'''
merger_time = t_node+branch_length
return self.integral_merger_rate(merger_time) - self.integral_merger_rate(t_node)\
- np.log(self.total_merger_rate(merger_time))*(multiplicity-1.0)/multiplicity
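# --- Editorial sketch; not part of the original treetime source. ---
# The same cost evaluated with constant-rate stand-ins for the interpolators
# (purely illustrative values, not treetime defaults).
import numpy as np
integral_merger_rate = lambda t: 2.5 * t   # linear integral => constant rate 2.5
total_merger_rate = lambda t: 5.0
def branch_cost(t_node, branch_length, multiplicity=2.0):
    merger_time = t_node + branch_length
    return (integral_merger_rate(merger_time) - integral_merger_rate(t_node)
            - np.log(total_merger_rate(merger_time)) * (multiplicity - 1.0) / multiplicity)
print(round(branch_cost(0.0, 0.4), 3))   # 2.5*0.4 - log(5)/2 ~= 0.195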
|
[
"returns",
"the",
"cost",
"associated",
"with",
"a",
"branch",
"starting",
"at",
"t_node",
"t_node",
"is",
"time",
"before",
"present",
"the",
"branch",
"goes",
"back",
"in",
"time"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L121-L133
|
[
"def",
"cost",
"(",
"self",
",",
"t_node",
",",
"branch_length",
",",
"multiplicity",
"=",
"2.0",
")",
":",
"merger_time",
"=",
"t_node",
"+",
"branch_length",
"return",
"self",
".",
"integral_merger_rate",
"(",
"merger_time",
")",
"-",
"self",
".",
"integral_merger_rate",
"(",
"t_node",
")",
"-",
"np",
".",
"log",
"(",
"self",
".",
"total_merger_rate",
"(",
"merger_time",
")",
")",
"*",
"(",
"multiplicity",
"-",
"1.0",
")",
"/",
"multiplicity"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.attach_to_tree
|
attaches the merger cost to each branch length interpolator in the tree.
|
treetime/merger_models.py
|
def attach_to_tree(self):
'''
attaches the merger cost to each branch length interpolator in the tree.
'''
for clade in self.tree.find_clades():
if clade.up is not None:
clade.branch_length_interpolator.merger_cost = self.cost
|
def attach_to_tree(self):
'''
attaches the merger cost to each branch length interpolator in the tree.
'''
for clade in self.tree.find_clades():
if clade.up is not None:
clade.branch_length_interpolator.merger_cost = self.cost
|
[
"attaches",
"the",
"the",
"merger",
"cost",
"to",
"each",
"branch",
"length",
"interpolator",
"in",
"the",
"tree",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L136-L142
|
[
"def",
"attach_to_tree",
"(",
"self",
")",
":",
"for",
"clade",
"in",
"self",
".",
"tree",
".",
"find_clades",
"(",
")",
":",
"if",
"clade",
".",
"up",
"is",
"not",
"None",
":",
"clade",
".",
"branch_length_interpolator",
".",
"merger_cost",
"=",
"self",
".",
"cost"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.optimize_Tc
|
determines the coalescent time scale that optimizes the coalescent likelihood of the tree
|
treetime/merger_models.py
|
def optimize_Tc(self):
'''
determines the coalescent time scale that optimizes the coalescent likelihood of the tree
'''
from scipy.optimize import minimize_scalar
initial_Tc = self.Tc
def cost(Tc):
self.set_Tc(Tc)
return -self.total_LH()
sol = minimize_scalar(cost, bounds=[ttconf.TINY_NUMBER,10.0])
if "success" in sol and sol["success"]:
self.set_Tc(sol['x'])
else:
self.logger("merger_models:optimize_Tc: optimization of coalescent time scale failed: " + str(sol), 0, warn=True)
self.set_Tc(initial_Tc.y, T=initial_Tc.x)
|
def optimize_Tc(self):
'''
determines the coalescent time scale that optimizes the coalescent likelihood of the tree
'''
from scipy.optimize import minimize_scalar
initial_Tc = self.Tc
def cost(Tc):
self.set_Tc(Tc)
return -self.total_LH()
sol = minimize_scalar(cost, bounds=[ttconf.TINY_NUMBER,10.0])
if "success" in sol and sol["success"]:
self.set_Tc(sol['x'])
else:
self.logger("merger_models:optimize_Tc: optimization of coalescent time scale failed: " + str(sol), 0, warn=True)
self.set_Tc(initial_Tc.y, T=initial_Tc.x)
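# --- Editorial sketch; not part of the original treetime source. ---
# The bounded scalar minimization pattern used above, on a toy objective.
# Note: older scipy versions ignore `bounds` unless method='bounded' is
# passed explicitly, so this sketch pins the method.
from scipy.optimize import minimize_scalar
sol = minimize_scalar(lambda x: (x - 2.0) ** 2, bounds=(1e-8, 10.0), method='bounded')
print(sol.success, round(sol.x, 3))   # True 2.0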
|
[
"determines",
"the",
"coalescent",
"time",
"scale",
"that",
"optimizes",
"the",
"coalescent",
"likelihood",
"of",
"the",
"tree"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L153-L168
|
[
"def",
"optimize_Tc",
"(",
"self",
")",
":",
"from",
"scipy",
".",
"optimize",
"import",
"minimize_scalar",
"initial_Tc",
"=",
"self",
".",
"Tc",
"def",
"cost",
"(",
"Tc",
")",
":",
"self",
".",
"set_Tc",
"(",
"Tc",
")",
"return",
"-",
"self",
".",
"total_LH",
"(",
")",
"sol",
"=",
"minimize_scalar",
"(",
"cost",
",",
"bounds",
"=",
"[",
"ttconf",
".",
"TINY_NUMBER",
",",
"10.0",
"]",
")",
"if",
"\"success\"",
"in",
"sol",
"and",
"sol",
"[",
"\"success\"",
"]",
":",
"self",
".",
"set_Tc",
"(",
"sol",
"[",
"'x'",
"]",
")",
"else",
":",
"self",
".",
"logger",
"(",
"\"merger_models:optimze_Tc: optimization of coalescent time scale failed: \"",
"+",
"str",
"(",
"sol",
")",
",",
"0",
",",
"warn",
"=",
"True",
")",
"self",
".",
"set_Tc",
"(",
"initial_Tc",
".",
"y",
",",
"T",
"=",
"initial_Tc",
".",
"x",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.optimize_skyline
|
optimize the trajectory of the merger rate 1./T_c to maximize the
coalescent likelihood.
parameters:
n_points -- number of pivots of the Tc interpolation object
stiffness -- penalty for rapid changes in log(Tc)
method -- optimization method passed to scipy.optimize.minimize
tol -- optimization tolerance
regularization -- cost of moving logTc outside of the range [-100,0];
the merger rate is measured in branch length units, so
plausible rates should never fall outside this window
|
treetime/merger_models.py
|
def optimize_skyline(self, n_points=20, stiffness=2.0, method = 'SLSQP',
tol=0.03, regularization=10.0, **kwargs):
'''
optimize the trajectory of the merger rate 1./T_c to maximize the
coalescent likelihood.
parameters:
n_points -- number of pivots of the Tc interpolation object
stiffness -- penalty for rapid changes in log(Tc)
method -- optimization method passed to scipy.optimize.minimize
tol -- optimization tolerance
regularization -- cost of moving logTc outside of the range [-100,0];
the merger rate is measured in branch length units, so
plausible rates should never fall outside this window
'''
self.logger("Coalescent:optimize_skyline:... current LH: %f"%self.total_LH(),2)
from scipy.optimize import minimize
initial_Tc = self.Tc
tvals = np.linspace(self.tree_events[0,0], self.tree_events[-1,0], n_points)
def cost(logTc):
# cap log Tc to avoid under or overflow and nan in logs
self.set_Tc(np.exp(np.maximum(-200,np.minimum(100,logTc))), tvals)
neglogLH = -self.total_LH() + stiffness*np.sum(np.diff(logTc)**2) \
+ np.sum((logTc>0)*logTc*regularization)\
- np.sum((logTc<-100)*logTc*regularization)
return neglogLH
sol = minimize(cost, np.ones_like(tvals)*np.log(self.Tc.y.mean()), method=method, tol=tol)
if "success" in sol and sol["success"]:
dlogTc = 0.1
opt_logTc = sol['x']
dcost = []
for ii in range(len(opt_logTc)):
tmp = opt_logTc.copy()
tmp[ii]+=dlogTc
cost_plus = cost(tmp)
tmp[ii]-=2*dlogTc
cost_minus = cost(tmp)
dcost.append([cost_minus, cost_plus])
dcost = np.array(dcost)
optimal_cost = cost(opt_logTc)
self.confidence = -dlogTc/(2*optimal_cost - dcost[:,0] - dcost[:,1])
self.logger("Coalescent:optimize_skyline:...done. new LH: %f"%self.total_LH(),2)
else:
self.set_Tc(initial_Tc.y, T=initial_Tc.x)
self.logger("Coalescent:optimize_skyline:...failed:"+str(sol),0, warn=True)
|
def optimize_skyline(self, n_points=20, stiffness=2.0, method = 'SLSQP',
tol=0.03, regularization=10.0, **kwargs):
'''
optimize the trajectory of the merger rate 1./T_c to maximize the
coalescent likelihood.
parameters:
n_points -- number of pivots of the Tc interpolation object
stiffness -- penalty for rapid changes in log(Tc)
method -- optimization method passed to scipy.optimize.minimize
tol -- optimization tolerance
regularization -- cost of moving logTc outside of the range [-100,0];
the merger rate is measured in branch length units, so
plausible rates should never fall outside this window
'''
self.logger("Coalescent:optimize_skyline:... current LH: %f"%self.total_LH(),2)
from scipy.optimize import minimize
initial_Tc = self.Tc
tvals = np.linspace(self.tree_events[0,0], self.tree_events[-1,0], n_points)
def cost(logTc):
# cap log Tc to avoid under or overflow and nan in logs
self.set_Tc(np.exp(np.maximum(-200,np.minimum(100,logTc))), tvals)
neglogLH = -self.total_LH() + stiffness*np.sum(np.diff(logTc)**2) \
+ np.sum((logTc>0)*logTc*regularization)\
- np.sum((logTc<-100)*logTc*regularization)
return neglogLH
sol = minimize(cost, np.ones_like(tvals)*np.log(self.Tc.y.mean()), method=method, tol=tol)
if "success" in sol and sol["success"]:
dlogTc = 0.1
opt_logTc = sol['x']
dcost = []
for ii in range(len(opt_logTc)):
tmp = opt_logTc.copy()
tmp[ii]+=dlogTc
cost_plus = cost(tmp)
tmp[ii]-=2*dlogTc
cost_minus = cost(tmp)
dcost.append([cost_minus, cost_plus])
dcost = np.array(dcost)
optimal_cost = cost(opt_logTc)
self.confidence = -dlogTc/(2*optimal_cost - dcost[:,0] - dcost[:,1])
self.logger("Coalescent:optimize_skyline:...done. new LH: %f"%self.total_LH(),2)
else:
self.set_Tc(initial_Tc.y, T=initial_Tc.x)
self.logger("Coalescent:optimize_skyline:...failed:"+str(sol),0, warn=True)
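# --- Editorial sketch; not part of the original treetime source. ---
# The smoothness-plus-range penalty applied to log(Tc) above, standalone.
import numpy as np
stiffness, regularization = 2.0, 10.0
logTc = np.array([-1.0, -1.2, -0.8, 0.5])
penalty = (stiffness * np.sum(np.diff(logTc) ** 2)             # discourage rapid changes
           + np.sum((logTc > 0) * logTc * regularization)      # push logTc below 0
           - np.sum((logTc < -100) * logTc * regularization))  # and above -100
print(round(penalty, 2))   # 2*1.89 + 5.0 = 8.78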
|
[
"optimize",
"the",
"trajectory",
"of",
"the",
"merger",
"rate",
"1",
".",
"/",
"T_c",
"to",
"maximize",
"the",
"coalescent",
"likelihood",
".",
"parameters",
":",
"n_points",
"--",
"number",
"of",
"pivots",
"of",
"the",
"Tc",
"interpolation",
"object",
"stiffness",
"--",
"penalty",
"for",
"rapid",
"changes",
"in",
"log",
"(",
"Tc",
")",
"methods",
"--",
"method",
"used",
"to",
"optimize",
"tol",
"--",
"optimization",
"tolerance",
"regularization",
"--",
"cost",
"of",
"moving",
"logTc",
"outsize",
"of",
"the",
"range",
"[",
"-",
"100",
"0",
"]",
"merger",
"rate",
"is",
"measured",
"in",
"branch",
"length",
"units",
"no",
"plausible",
"rates",
"should",
"never",
"be",
"outside",
"this",
"window"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L171-L216
|
[
"def",
"optimize_skyline",
"(",
"self",
",",
"n_points",
"=",
"20",
",",
"stiffness",
"=",
"2.0",
",",
"method",
"=",
"'SLSQP'",
",",
"tol",
"=",
"0.03",
",",
"regularization",
"=",
"10.0",
",",
"*",
"*",
"kwarks",
")",
":",
"self",
".",
"logger",
"(",
"\"Coalescent:optimize_skyline:... current LH: %f\"",
"%",
"self",
".",
"total_LH",
"(",
")",
",",
"2",
")",
"from",
"scipy",
".",
"optimize",
"import",
"minimize",
"initial_Tc",
"=",
"self",
".",
"Tc",
"tvals",
"=",
"np",
".",
"linspace",
"(",
"self",
".",
"tree_events",
"[",
"0",
",",
"0",
"]",
",",
"self",
".",
"tree_events",
"[",
"-",
"1",
",",
"0",
"]",
",",
"n_points",
")",
"def",
"cost",
"(",
"logTc",
")",
":",
"# cap log Tc to avoid under or overflow and nan in logs",
"self",
".",
"set_Tc",
"(",
"np",
".",
"exp",
"(",
"np",
".",
"maximum",
"(",
"-",
"200",
",",
"np",
".",
"minimum",
"(",
"100",
",",
"logTc",
")",
")",
")",
",",
"tvals",
")",
"neglogLH",
"=",
"-",
"self",
".",
"total_LH",
"(",
")",
"+",
"stiffness",
"*",
"np",
".",
"sum",
"(",
"np",
".",
"diff",
"(",
"logTc",
")",
"**",
"2",
")",
"+",
"np",
".",
"sum",
"(",
"(",
"logTc",
">",
"0",
")",
"*",
"logTc",
"*",
"regularization",
")",
"-",
"np",
".",
"sum",
"(",
"(",
"logTc",
"<",
"-",
"100",
")",
"*",
"logTc",
"*",
"regularization",
")",
"return",
"neglogLH",
"sol",
"=",
"minimize",
"(",
"cost",
",",
"np",
".",
"ones_like",
"(",
"tvals",
")",
"*",
"np",
".",
"log",
"(",
"self",
".",
"Tc",
".",
"y",
".",
"mean",
"(",
")",
")",
",",
"method",
"=",
"method",
",",
"tol",
"=",
"tol",
")",
"if",
"\"success\"",
"in",
"sol",
"and",
"sol",
"[",
"\"success\"",
"]",
":",
"dlogTc",
"=",
"0.1",
"opt_logTc",
"=",
"sol",
"[",
"'x'",
"]",
"dcost",
"=",
"[",
"]",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"opt_logTc",
")",
")",
":",
"tmp",
"=",
"opt_logTc",
".",
"copy",
"(",
")",
"tmp",
"[",
"ii",
"]",
"+=",
"dlogTc",
"cost_plus",
"=",
"cost",
"(",
"tmp",
")",
"tmp",
"[",
"ii",
"]",
"-=",
"2",
"*",
"dlogTc",
"cost_minus",
"=",
"cost",
"(",
"tmp",
")",
"dcost",
".",
"append",
"(",
"[",
"cost_minus",
",",
"cost_plus",
"]",
")",
"dcost",
"=",
"np",
".",
"array",
"(",
"dcost",
")",
"optimal_cost",
"=",
"cost",
"(",
"opt_logTc",
")",
"self",
".",
"confidence",
"=",
"-",
"dlogTc",
"/",
"(",
"2",
"*",
"optimal_cost",
"-",
"dcost",
"[",
":",
",",
"0",
"]",
"-",
"dcost",
"[",
":",
",",
"1",
"]",
")",
"self",
".",
"logger",
"(",
"\"Coalescent:optimize_skyline:...done. new LH: %f\"",
"%",
"self",
".",
"total_LH",
"(",
")",
",",
"2",
")",
"else",
":",
"self",
".",
"set_Tc",
"(",
"initial_Tc",
".",
"y",
",",
"T",
"=",
"initial_Tc",
".",
"x",
")",
"self",
".",
"logger",
"(",
"\"Coalescent:optimize_skyline:...failed:\"",
"+",
"str",
"(",
"sol",
")",
",",
"0",
",",
"warn",
"=",
"True",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.skyline_empirical
|
returns the skyline, i.e., an estimate of the inverse rate of coalescence.
Here, the skyline is estimated from a sliding window average of the observed
mergers, i.e., without reference to the coalescence likelihood.
parameters:
gen -- number of generations per year.
|
treetime/merger_models.py
|
def skyline_empirical(self, gen=1.0, n_points = 20):
'''
returns the skyline, i.e., an estimate of the inverse rate of coalescence.
Here, the skyline is estimated from a sliding window average of the observed
mergers, i.e., without reference to the coalescence likelihood.
parameters:
gen -- number of generations per year.
'''
mergers = self.tree_events[:,1]>0
merger_tvals = self.tree_events[mergers,0]
nlineages = self.nbranches(merger_tvals-ttconf.TINY_NUMBER)
expected_merger_density = nlineages*(nlineages-1)*0.5
nmergers = len(mergers)
et = merger_tvals
ev = 1.0/expected_merger_density
# reduce the window size if there are few events in the tree
if 2*n_points>len(expected_merger_density):
n_points = len(ev)//4
# smoothes with a sliding window over data points
avg = np.sum(ev)/np.abs(et[0]-et[-1])
dt = et[0]-et[-1]
mid_points = np.concatenate(([et[0]-0.5*(et[1]-et[0])],
0.5*(et[1:] + et[:-1]),
[et[-1]+0.5*(et[-1]-et[-2])]))
# this smoothes the ratio of expected and observed merger rate
self.Tc_inv = interp1d(mid_points[n_points:-n_points],
[np.sum(ev[(et>=l)&(et<u)])/(u-l+dt/nmergers)
for u,l in zip(mid_points[:-2*n_points],mid_points[2*n_points:])])
return interp1d(self.date2dist.to_numdate(self.Tc_inv.x), gen/self.date2dist.clock_rate/self.Tc_inv.y)
|
def skyline_empirical(self, gen=1.0, n_points = 20):
'''
returns the skyline, i.e., an estimate of the inverse rate of coalescence.
Here, the skyline is estimated from a sliding window average of the observed
mergers, i.e., without reference to the coalescence likelihood.
parameters:
gen -- number of generations per year.
'''
mergers = self.tree_events[:,1]>0
merger_tvals = self.tree_events[mergers,0]
nlineages = self.nbranches(merger_tvals-ttconf.TINY_NUMBER)
expected_merger_density = nlineages*(nlineages-1)*0.5
nmergers = len(mergers)
et = merger_tvals
ev = 1.0/expected_merger_density
# reduce the window size if there are few events in the tree
if 2*n_points>len(expected_merger_density):
n_points = len(ev)//4
# smoothes with a sliding window over data points
avg = np.sum(ev)/np.abs(et[0]-et[-1])
dt = et[0]-et[-1]
mid_points = np.concatenate(([et[0]-0.5*(et[1]-et[0])],
0.5*(et[1:] + et[:-1]),
[et[-1]+0.5*(et[-1]-et[-2])]))
# this smoothes the ratio of expected and observed merger rate
self.Tc_inv = interp1d(mid_points[n_points:-n_points],
[np.sum(ev[(et>=l)&(et<u)])/(u-l+dt/nmergers)
for u,l in zip(mid_points[:-2*n_points],mid_points[2*n_points:])])
return interp1d(self.date2dist.to_numdate(self.Tc_inv.x), gen/self.date2dist.clock_rate/self.Tc_inv.y)
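# --- Editorial sketch; not part of the original treetime source. ---
# The midpoint grid built above for the sliding-window average: half-steps
# beyond both ends, midpoints of adjacent merger times in between.
import numpy as np
et = np.array([4.0, 3.0, 1.0, 0.5])   # merger times, oldest first
mid_points = np.concatenate(([et[0] - 0.5 * (et[1] - et[0])],
                             0.5 * (et[1:] + et[:-1]),
                             [et[-1] + 0.5 * (et[-1] - et[-2])]))
print(mid_points)   # [4.5  3.5  2.   0.75 0.25]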
|
[
"returns",
"the",
"skyline",
"i",
".",
"e",
".",
"an",
"estimate",
"of",
"the",
"inverse",
"rate",
"of",
"coalesence",
".",
"Here",
"the",
"skyline",
"is",
"estimated",
"from",
"a",
"sliding",
"window",
"average",
"of",
"the",
"observed",
"mergers",
"i",
".",
"e",
".",
"without",
"reference",
"to",
"the",
"coalescence",
"likelihood",
".",
"parameters",
":",
"gen",
"--",
"number",
"of",
"generations",
"per",
"year",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L219-L252
|
[
"def",
"skyline_empirical",
"(",
"self",
",",
"gen",
"=",
"1.0",
",",
"n_points",
"=",
"20",
")",
":",
"mergers",
"=",
"self",
".",
"tree_events",
"[",
":",
",",
"1",
"]",
">",
"0",
"merger_tvals",
"=",
"self",
".",
"tree_events",
"[",
"mergers",
",",
"0",
"]",
"nlineages",
"=",
"self",
".",
"nbranches",
"(",
"merger_tvals",
"-",
"ttconf",
".",
"TINY_NUMBER",
")",
"expected_merger_density",
"=",
"nlineages",
"*",
"(",
"nlineages",
"-",
"1",
")",
"*",
"0.5",
"nmergers",
"=",
"len",
"(",
"mergers",
")",
"et",
"=",
"merger_tvals",
"ev",
"=",
"1.0",
"/",
"expected_merger_density",
"# reduce the window size if there are few events in the tree",
"if",
"2",
"*",
"n_points",
">",
"len",
"(",
"expected_merger_density",
")",
":",
"n_points",
"=",
"len",
"(",
"ev",
")",
"//",
"4",
"# smoothes with a sliding window over data points",
"avg",
"=",
"np",
".",
"sum",
"(",
"ev",
")",
"/",
"np",
".",
"abs",
"(",
"et",
"[",
"0",
"]",
"-",
"et",
"[",
"-",
"1",
"]",
")",
"dt",
"=",
"et",
"[",
"0",
"]",
"-",
"et",
"[",
"-",
"1",
"]",
"mid_points",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"et",
"[",
"0",
"]",
"-",
"0.5",
"*",
"(",
"et",
"[",
"1",
"]",
"-",
"et",
"[",
"0",
"]",
")",
"]",
",",
"0.5",
"*",
"(",
"et",
"[",
"1",
":",
"]",
"+",
"et",
"[",
":",
"-",
"1",
"]",
")",
",",
"[",
"et",
"[",
"-",
"1",
"]",
"+",
"0.5",
"*",
"(",
"et",
"[",
"-",
"1",
"]",
"-",
"et",
"[",
"-",
"2",
"]",
")",
"]",
")",
")",
"# this smoothes the ratio of expected and observed merger rate",
"self",
".",
"Tc_inv",
"=",
"interp1d",
"(",
"mid_points",
"[",
"n_points",
":",
"-",
"n_points",
"]",
",",
"[",
"np",
".",
"sum",
"(",
"ev",
"[",
"(",
"et",
">=",
"l",
")",
"&",
"(",
"et",
"<",
"u",
")",
"]",
")",
"/",
"(",
"u",
"-",
"l",
"+",
"dt",
"/",
"nmergers",
")",
"for",
"u",
",",
"l",
"in",
"zip",
"(",
"mid_points",
"[",
":",
"-",
"2",
"*",
"n_points",
"]",
",",
"mid_points",
"[",
"2",
"*",
"n_points",
":",
"]",
")",
"]",
")",
"return",
"interp1d",
"(",
"self",
".",
"date2dist",
".",
"to_numdate",
"(",
"self",
".",
"Tc_inv",
".",
"x",
")",
",",
"gen",
"/",
"self",
".",
"date2dist",
".",
"clock_rate",
"/",
"self",
".",
"Tc_inv",
".",
"y",
")"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
Coalescent.skyline_inferred
|
return the skyline, i.e., an estimate of the inverse rate of coalescence.
This function merely returns the merger rate self.Tc that was set or
estimated by other means. If it was determined using self.optimize_skyline,
the returned skyline will maximize the coalescent likelihood.
parameters:
gen -- number of generations per year. Unit of time is branch length,
hence this needs to be the inverse substitution rate per generation
confidence -- False, or number of standard deviations of confidence intervals
|
treetime/merger_models.py
|
def skyline_inferred(self, gen=1.0, confidence=False):
'''
        return the skyline, i.e., an estimate of the inverse rate of coalescence.
This function merely returns the merger rate self.Tc that was set or
estimated by other means. If it was determined using self.optimize_skyline,
the returned skyline will maximize the coalescent likelihood.
parameters:
gen -- number of generations per year. Unit of time is branch length,
hence this needs to be the inverse substitution rate per generation
confidence -- False, or number of standard deviations of confidence intervals
'''
if len(self.Tc.x)<=2:
print("no skyline has been inferred, returning constant population size")
return gen/self.date2dist.clock_rate*self.Tc.y[-1]
skyline = interp1d(self.date2dist.to_numdate(self.Tc.x[1:-1]), gen/self.date2dist.clock_rate*self.Tc.y[1:-1])
if confidence and hasattr(self, 'confidence'):
conf = [skyline.y*np.exp(-confidence*self.confidence), skyline.y*np.exp(confidence*self.confidence)]
return skyline, conf
else:
return skyline
|
def skyline_inferred(self, gen=1.0, confidence=False):
'''
        return the skyline, i.e., an estimate of the inverse rate of coalescence.
This function merely returns the merger rate self.Tc that was set or
estimated by other means. If it was determined using self.optimize_skyline,
the returned skyline will maximize the coalescent likelihood.
parameters:
gen -- number of generations per year. Unit of time is branch length,
hence this needs to be the inverse substitution rate per generation
confidence -- False, or number of standard deviations of confidence intervals
'''
if len(self.Tc.x)<=2:
print("no skyline has been inferred, returning constant population size")
return gen/self.date2dist.clock_rate*self.Tc.y[-1]
skyline = interp1d(self.date2dist.to_numdate(self.Tc.x[1:-1]), gen/self.date2dist.clock_rate*self.Tc.y[1:-1])
if confidence and hasattr(self, 'confidence'):
conf = [skyline.y*np.exp(-confidence*self.confidence), skyline.y*np.exp(confidence*self.confidence)]
return skyline, conf
else:
return skyline
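A hedged usage sketch for the confidence path; `coal` is again a hypothetical, already-fitted Coalescent on which optimize_skyline (or another setter of Tc) has run. Note that the early-return branch yields a plain number rather than an interpolator:

result = coal.skyline_inferred(gen=50.0, confidence=2.0)
if isinstance(result, tuple):
    # a skyline was inferred and self.confidence exists: unpack the
    # interpolator together with its two-sigma multiplicative band
    skyline, (lower, upper) = result
else:
    # either a bare interp1d (no confidence attribute) or a scalar
    # constant population size (fewer than three skyline grid points)
    skyline = result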
|
[
"return",
"the",
"skyline",
"i",
".",
"e",
".",
"an",
"estimate",
"of",
"the",
"inverse",
"rate",
"of",
"coalesence",
".",
"This",
"function",
"merely",
"returns",
"the",
"merger",
"rate",
"self",
".",
"Tc",
"that",
"was",
"set",
"or",
"estimated",
"by",
"other",
"means",
".",
"If",
"it",
"was",
"determined",
"using",
"self",
".",
"optimize_skyline",
"the",
"returned",
"skyline",
"will",
"maximize",
"the",
"coalescent",
"likelihood",
".",
"parameters",
":",
"gen",
"--",
"number",
"of",
"generations",
"per",
"year",
".",
"Unit",
"of",
"time",
"is",
"branch",
"length",
"hence",
"this",
"needs",
"to",
"be",
"the",
"inverse",
"substitution",
"rate",
"per",
"generation",
"confidence",
"--",
"False",
"or",
"number",
"of",
"standard",
"deviations",
"of",
"confidence",
"intervals"
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/merger_models.py#L255-L275
|
[
"def",
"skyline_inferred",
"(",
"self",
",",
"gen",
"=",
"1.0",
",",
"confidence",
"=",
"False",
")",
":",
"if",
"len",
"(",
"self",
".",
"Tc",
".",
"x",
")",
"<=",
"2",
":",
"print",
"(",
"\"no skyline has been inferred, returning constant population size\"",
")",
"return",
"gen",
"/",
"self",
".",
"date2dist",
".",
"clock_rate",
"*",
"self",
".",
"Tc",
".",
"y",
"[",
"-",
"1",
"]",
"skyline",
"=",
"interp1d",
"(",
"self",
".",
"date2dist",
".",
"to_numdate",
"(",
"self",
".",
"Tc",
".",
"x",
"[",
"1",
":",
"-",
"1",
"]",
")",
",",
"gen",
"/",
"self",
".",
"date2dist",
".",
"clock_rate",
"*",
"self",
".",
"Tc",
".",
"y",
"[",
"1",
":",
"-",
"1",
"]",
")",
"if",
"confidence",
"and",
"hasattr",
"(",
"self",
",",
"'confidence'",
")",
":",
"conf",
"=",
"[",
"skyline",
".",
"y",
"*",
"np",
".",
"exp",
"(",
"-",
"confidence",
"*",
"self",
".",
"confidence",
")",
",",
"skyline",
".",
"y",
"*",
"np",
".",
"exp",
"(",
"confidence",
"*",
"self",
".",
"confidence",
")",
"]",
"return",
"skyline",
",",
"conf",
"else",
":",
"return",
"skyline"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|
test
|
seq2array
|
Take the raw sequence, substitute the "overhanging" gaps with 'N' (missequenced),
and convert the sequence to the numpy array of chars.
Parameters
----------
seq : Biopython.SeqRecord, str, iterable
Sequence as an object of SeqRecord, string or iterable
fill_overhangs : bool
If True, substitute the "overhanging" gaps with ambiguous character symbol
ambiguous_character : char
Specify the character for ambiguous state ('N' default for nucleotide)
Returns
-------
sequence : np.array
Sequence as 1D numpy array of chars
|
treetime/seq_utils.py
|
def seq2array(seq, fill_overhangs=True, ambiguous_character='N'):
"""
Take the raw sequence, substitute the "overhanging" gaps with 'N' (missequenced),
and convert the sequence to the numpy array of chars.
Parameters
----------
seq : Biopython.SeqRecord, str, iterable
Sequence as an object of SeqRecord, string or iterable
fill_overhangs : bool
If True, substitute the "overhanging" gaps with ambiguous character symbol
ambiguous_character : char
Specify the character for ambiguous state ('N' default for nucleotide)
Returns
-------
sequence : np.array
Sequence as 1D numpy array of chars
"""
try:
sequence = ''.join(seq)
except TypeError:
sequence = seq
sequence = np.array(list(sequence))
# substitute overhanging unsequenced tails
if fill_overhangs:
sequence [:np.where(sequence != '-')[0][0]] = ambiguous_character
sequence [np.where(sequence != '-')[0][-1]+1:] = ambiguous_character
return sequence
|
def seq2array(seq, fill_overhangs=True, ambiguous_character='N'):
"""
Take the raw sequence, substitute the "overhanging" gaps with 'N' (missequenced),
and convert the sequence to the numpy array of chars.
Parameters
----------
seq : Biopython.SeqRecord, str, iterable
Sequence as an object of SeqRecord, string or iterable
fill_overhangs : bool
If True, substitute the "overhanging" gaps with ambiguous character symbol
ambiguous_character : char
Specify the character for ambiguous state ('N' default for nucleotide)
Returns
-------
sequence : np.array
Sequence as 1D numpy array of chars
"""
try:
sequence = ''.join(seq)
except TypeError:
sequence = seq
sequence = np.array(list(sequence))
# substitute overhanging unsequenced tails
if fill_overhangs:
sequence [:np.where(sequence != '-')[0][0]] = ambiguous_character
sequence [np.where(sequence != '-')[0][-1]+1:] = ambiguous_character
return sequence
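A small self-contained example of the overhang handling (the input string is made up): terminal gaps become the ambiguous character, while the internal gap survives as a genuine deletion.

# numpy is assumed to be imported as np, as in the module above
arr = seq2array('--ACGT-ACG--', fill_overhangs=True)
print(''.join(arr))  # -> NNACGT-ACGNN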
|
[
"Take",
"the",
"raw",
"sequence",
"substitute",
"the",
"overhanging",
"gaps",
"with",
"N",
"(",
"missequenced",
")",
"and",
"convert",
"the",
"sequence",
"to",
"the",
"numpy",
"array",
"of",
"chars",
"."
] |
neherlab/treetime
|
python
|
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/seq_utils.py#L118-L150
|
[
"def",
"seq2array",
"(",
"seq",
",",
"fill_overhangs",
"=",
"True",
",",
"ambiguous_character",
"=",
"'N'",
")",
":",
"try",
":",
"sequence",
"=",
"''",
".",
"join",
"(",
"seq",
")",
"except",
"TypeError",
":",
"sequence",
"=",
"seq",
"sequence",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"sequence",
")",
")",
"# substitute overhanging unsequenced tails",
"if",
"fill_overhangs",
":",
"sequence",
"[",
":",
"np",
".",
"where",
"(",
"sequence",
"!=",
"'-'",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
"=",
"ambiguous_character",
"sequence",
"[",
"np",
".",
"where",
"(",
"sequence",
"!=",
"'-'",
")",
"[",
"0",
"]",
"[",
"-",
"1",
"]",
"+",
"1",
":",
"]",
"=",
"ambiguous_character",
"return",
"sequence"
] |
f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0
|