| partition (stringclasses, 3 values) | func_name (stringlengths 1-134) | docstring (stringlengths 1-46.9k) | path (stringlengths 4-223) | original_string (stringlengths 75-104k) | code (stringlengths 75-104k) | docstring_tokens (listlengths 1-1.97k) | repo (stringlengths 7-55) | language (stringclasses, 1 value) | url (stringlengths 87-315) | code_tokens (listlengths 19-28.4k) | sha (stringlengths 40-40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
valid
|
low_mem_sq
|
np.dot(m, m.T) with low mem usage, by doing it in small steps
|
peri/opt/optimize.py
|
def low_mem_sq(m, step=100000):
"""np.dot(m, m.T) with low mem usage, by doing it in small steps"""
if not m.flags.c_contiguous:
raise ValueError('m must be C ordered for this to work with less mem.')
# -- can make this even faster with pre-allocating arrays, but not worth it
# right now
# mmt = np.zeros([m.shape[0], m.shape[0]]) #6us
# mt_tmp = np.zeros([step, m.shape[0]])
# for a in range(0, m.shape[1], step):
# mx = min(a+step, m.shape[1])
# mt_tmp[:mx-a,:] = m.T[a:mx]
# # np.dot(m_tmp, m.T, out=mmt[a:mx])
# # np.dot(m, m[a:mx].T, out=mmt[:, a:mx])
# np.dot(m[:,a:mx], mt_tmp[:mx], out=mmt)
# return mmt
mmt = np.zeros([m.shape[0], m.shape[0]]) #6us
# m_tmp = np.zeros([step, m.shape[1]])
for a in range(0, m.shape[0], step):
mx = min(a+step, m.shape[1])
# m_tmp[:] = m[a:mx]
# np.dot(m_tmp, m.T, out=mmt[a:mx])
mmt[:, a:mx] = np.dot(m, m[a:mx].T)
return mmt
|
def low_mem_sq(m, step=100000):
"""np.dot(m, m.T) with low mem usage, by doing it in small steps"""
if not m.flags.c_contiguous:
raise ValueError('m must be C ordered for this to work with less mem.')
# -- can make this even faster with pre-allocating arrays, but not worth it
# right now
# mmt = np.zeros([m.shape[0], m.shape[0]]) #6us
# mt_tmp = np.zeros([step, m.shape[0]])
# for a in range(0, m.shape[1], step):
# mx = min(a+step, m.shape[1])
# mt_tmp[:mx-a,:] = m.T[a:mx]
# # np.dot(m_tmp, m.T, out=mmt[a:mx])
# # np.dot(m, m[a:mx].T, out=mmt[:, a:mx])
# np.dot(m[:,a:mx], mt_tmp[:mx], out=mmt)
# return mmt
mmt = np.zeros([m.shape[0], m.shape[0]]) #6us
# m_tmp = np.zeros([step, m.shape[1]])
for a in range(0, m.shape[0], step):
mx = min(a+step, m.shape[1])
# m_tmp[:] = m[a:mx]
# np.dot(m_tmp, m.T, out=mmt[a:mx])
mmt[:, a:mx] = np.dot(m, m[a:mx].T)
return mmt
|
[
"np",
".",
"dot",
"(",
"m",
"m",
".",
"T",
")",
"with",
"low",
"mem",
"usage",
"by",
"doing",
"it",
"in",
"small",
"steps"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L212-L234
|
[
"def",
"low_mem_sq",
"(",
"m",
",",
"step",
"=",
"100000",
")",
":",
"if",
"not",
"m",
".",
"flags",
".",
"c_contiguous",
":",
"raise",
"ValueError",
"(",
"'m must be C ordered for this to work with less mem.'",
")",
"# -- can make this even faster with pre-allocating arrays, but not worth it",
"# right now",
"# mmt = np.zeros([m.shape[0], m.shape[0]]) #6us",
"# mt_tmp = np.zeros([step, m.shape[0]])",
"# for a in range(0, m.shape[1], step):",
"# mx = min(a+step, m.shape[1])",
"# mt_tmp[:mx-a,:] = m.T[a:mx]",
"# # np.dot(m_tmp, m.T, out=mmt[a:mx])",
"# # np.dot(m, m[a:mx].T, out=mmt[:, a:mx])",
"# np.dot(m[:,a:mx], mt_tmp[:mx], out=mmt)",
"# return mmt",
"mmt",
"=",
"np",
".",
"zeros",
"(",
"[",
"m",
".",
"shape",
"[",
"0",
"]",
",",
"m",
".",
"shape",
"[",
"0",
"]",
"]",
")",
"#6us",
"# m_tmp = np.zeros([step, m.shape[1]])",
"for",
"a",
"in",
"range",
"(",
"0",
",",
"m",
".",
"shape",
"[",
"0",
"]",
",",
"step",
")",
":",
"mx",
"=",
"min",
"(",
"a",
"+",
"step",
",",
"m",
".",
"shape",
"[",
"1",
"]",
")",
"# m_tmp[:] = m[a:mx]",
"# np.dot(m_tmp, m.T, out=mmt[a:mx])",
"mmt",
"[",
":",
",",
"a",
":",
"mx",
"]",
"=",
"np",
".",
"dot",
"(",
"m",
",",
"m",
"[",
"a",
":",
"mx",
"]",
".",
"T",
")",
"return",
"mmt"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
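A quick way to see what the `low_mem_sq` entry above does is to compare its blocked computation against a direct `np.dot` on a small array. The sketch below reuses the function body from the row above (comments stripped); the array shape is illustrative, and note that the blocking as written assumes `m` has at least as many columns as rows, which is the Jacobian use case in this file.

```python
import numpy as np

def low_mem_sq(m, step=100000):
    """np.dot(m, m.T) with low mem usage, by doing it in small steps"""
    if not m.flags.c_contiguous:
        raise ValueError('m must be C ordered for this to work with less mem.')
    mmt = np.zeros([m.shape[0], m.shape[0]])
    for a in range(0, m.shape[0], step):
        mx = min(a + step, m.shape[1])
        mmt[:, a:mx] = np.dot(m, m[a:mx].T)
    return mmt

rng = np.random.default_rng(0)
m = rng.standard_normal((5, 200))            # wide, C-contiguous matrix
assert np.allclose(low_mem_sq(m, step=2), np.dot(m, m.T))
```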
valid
|
find_particles_in_tile
|
Finds the particles in a tile, as numpy.ndarray of ints.
Parameters
----------
positions : `numpy.ndarray`
[N,3] array of the particle positions to check in the tile
tile : :class:`peri.util.Tile` instance
Tile of the region inside which to check for particles.
Returns
-------
numpy.ndarray, int
The indices of the particles in the tile.
|
peri/opt/optimize.py
|
def find_particles_in_tile(positions, tile):
"""
Finds the particles in a tile, as numpy.ndarray of ints.
Parameters
----------
positions : `numpy.ndarray`
[N,3] array of the particle positions to check in the tile
tile : :class:`peri.util.Tile` instance
Tile of the region inside which to check for particles.
Returns
-------
numpy.ndarray, int
The indices of the particles in the tile.
"""
bools = tile.contains(positions)
return np.arange(bools.size)[bools]
|
def find_particles_in_tile(positions, tile):
"""
Finds the particles in a tile, as numpy.ndarray of ints.
Parameters
----------
positions : `numpy.ndarray`
[N,3] array of the particle positions to check in the tile
tile : :class:`peri.util.Tile` instance
Tile of the region inside which to check for particles.
Returns
-------
numpy.ndarray, int
The indices of the particles in the tile.
"""
bools = tile.contains(positions)
return np.arange(bools.size)[bools]
|
[
"Finds",
"the",
"particles",
"in",
"a",
"tile",
"as",
"numpy",
".",
"ndarray",
"of",
"ints",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L239-L256
|
[
"def",
"find_particles_in_tile",
"(",
"positions",
",",
"tile",
")",
":",
"bools",
"=",
"tile",
".",
"contains",
"(",
"positions",
")",
"return",
"np",
".",
"arange",
"(",
"bools",
".",
"size",
")",
"[",
"bools",
"]"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
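The `find_particles_in_tile` entry relies on `peri.util.Tile.contains` for the membership test. As a rough standalone illustration of the same indexing pattern, here is the equivalent with a plain axis-aligned bounds check in NumPy; the box corners are made up, and `Tile.contains` is only assumed to behave like this kind of test, which may differ in detail from peri's implementation.

```python
import numpy as np

rng = np.random.default_rng(1)
positions = rng.uniform(0, 100, size=(20, 3))   # [N, 3] particle positions
lower = np.array([10., 10., 10.])
upper = np.array([50., 50., 50.])

# stand-in for tile.contains(positions): True where a row lies inside the box
bools = np.all((positions >= lower) & (positions < upper), axis=1)

# same trick as the function above: boolean mask -> integer indices
indices = np.arange(bools.size)[bools]
print(indices)
```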
valid
|
separate_particles_into_groups
|
Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region.
|
peri/opt/optimize.py
|
def separate_particles_into_groups(s, region_size=40, bounds=None,
doshift=False):
"""
Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region.
"""
imtile = s.oshape.translate(-s.pad)
bounding_tile = (imtile if bounds is None else Tile(bounds[0], bounds[1]))
rs = (np.ones(bounding_tile.dim, dtype='int')*region_size if
np.size(region_size) == 1 else np.array(region_size))
n_translate = np.ceil(bounding_tile.shape.astype('float')/rs).astype('int')
particle_groups = []
tile = Tile(left=bounding_tile.l, right=bounding_tile.l + rs)
if doshift == 'rand':
doshift = np.random.choice([True, False])
if doshift:
shift = rs // 2
n_translate += 1
else:
shift = 0
deltas = np.meshgrid(*[np.arange(i) for i in n_translate])
positions = s.obj_get_positions()
if bounds is None:
# FIXME this (deliberately) masks a problem where optimization
# places particles outside the image. However, it ensures that
# all particles are in at least one group when `bounds is None`,
# which is the use case within opt. The 1e-3 is to ensure that
# they are inside the box and not on the edge.
positions = np.clip(positions, imtile.l+1e-3, imtile.r-1e-3)
groups = list(map(lambda *args: find_particles_in_tile(positions,
tile.translate( np.array(args) * rs - shift)), *[d.ravel()
for d in deltas]))
for i in range(len(groups)-1, -1, -1):
if groups[i].size == 0:
groups.pop(i)
assert _check_groups(s, groups)
return groups
|
def separate_particles_into_groups(s, region_size=40, bounds=None,
doshift=False):
"""
Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region.
"""
imtile = s.oshape.translate(-s.pad)
bounding_tile = (imtile if bounds is None else Tile(bounds[0], bounds[1]))
rs = (np.ones(bounding_tile.dim, dtype='int')*region_size if
np.size(region_size) == 1 else np.array(region_size))
n_translate = np.ceil(bounding_tile.shape.astype('float')/rs).astype('int')
particle_groups = []
tile = Tile(left=bounding_tile.l, right=bounding_tile.l + rs)
if doshift == 'rand':
doshift = np.random.choice([True, False])
if doshift:
shift = rs // 2
n_translate += 1
else:
shift = 0
deltas = np.meshgrid(*[np.arange(i) for i in n_translate])
positions = s.obj_get_positions()
if bounds is None:
# FIXME this (deliberately) masks a problem where optimization
# places particles outside the image. However, it ensures that
# all particles are in at least one group when `bounds is None`,
# which is the use case within opt. The 1e-3 is to ensure that
# they are inside the box and not on the edge.
positions = np.clip(positions, imtile.l+1e-3, imtile.r-1e-3)
groups = list(map(lambda *args: find_particles_in_tile(positions,
tile.translate( np.array(args) * rs - shift)), *[d.ravel()
for d in deltas]))
for i in range(len(groups)-1, -1, -1):
if groups[i].size == 0:
groups.pop(i)
assert _check_groups(s, groups)
return groups
|
[
"Separates",
"particles",
"into",
"convenient",
"groups",
"for",
"optimization",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L258-L328
|
[
"def",
"separate_particles_into_groups",
"(",
"s",
",",
"region_size",
"=",
"40",
",",
"bounds",
"=",
"None",
",",
"doshift",
"=",
"False",
")",
":",
"imtile",
"=",
"s",
".",
"oshape",
".",
"translate",
"(",
"-",
"s",
".",
"pad",
")",
"bounding_tile",
"=",
"(",
"imtile",
"if",
"bounds",
"is",
"None",
"else",
"Tile",
"(",
"bounds",
"[",
"0",
"]",
",",
"bounds",
"[",
"1",
"]",
")",
")",
"rs",
"=",
"(",
"np",
".",
"ones",
"(",
"bounding_tile",
".",
"dim",
",",
"dtype",
"=",
"'int'",
")",
"*",
"region_size",
"if",
"np",
".",
"size",
"(",
"region_size",
")",
"==",
"1",
"else",
"np",
".",
"array",
"(",
"region_size",
")",
")",
"n_translate",
"=",
"np",
".",
"ceil",
"(",
"bounding_tile",
".",
"shape",
".",
"astype",
"(",
"'float'",
")",
"/",
"rs",
")",
".",
"astype",
"(",
"'int'",
")",
"particle_groups",
"=",
"[",
"]",
"tile",
"=",
"Tile",
"(",
"left",
"=",
"bounding_tile",
".",
"l",
",",
"right",
"=",
"bounding_tile",
".",
"l",
"+",
"rs",
")",
"if",
"doshift",
"==",
"'rand'",
":",
"doshift",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"[",
"True",
",",
"False",
"]",
")",
"if",
"doshift",
":",
"shift",
"=",
"rs",
"//",
"2",
"n_translate",
"+=",
"1",
"else",
":",
"shift",
"=",
"0",
"deltas",
"=",
"np",
".",
"meshgrid",
"(",
"*",
"[",
"np",
".",
"arange",
"(",
"i",
")",
"for",
"i",
"in",
"n_translate",
"]",
")",
"positions",
"=",
"s",
".",
"obj_get_positions",
"(",
")",
"if",
"bounds",
"is",
"None",
":",
"# FIXME this (deliberately) masks a problem where optimization",
"# places particles outside the image. However, it ensures that",
"# all particles are in at least one group when `bounds is None`,",
"# which is the use case within opt. The 1e-3 is to ensure that",
"# they are inside the box and not on the edge.",
"positions",
"=",
"np",
".",
"clip",
"(",
"positions",
",",
"imtile",
".",
"l",
"+",
"1e-3",
",",
"imtile",
".",
"r",
"-",
"1e-3",
")",
"groups",
"=",
"list",
"(",
"map",
"(",
"lambda",
"*",
"args",
":",
"find_particles_in_tile",
"(",
"positions",
",",
"tile",
".",
"translate",
"(",
"np",
".",
"array",
"(",
"args",
")",
"*",
"rs",
"-",
"shift",
")",
")",
",",
"*",
"[",
"d",
".",
"ravel",
"(",
")",
"for",
"d",
"in",
"deltas",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"groups",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"groups",
"[",
"i",
"]",
".",
"size",
"==",
"0",
":",
"groups",
".",
"pop",
"(",
"i",
")",
"assert",
"_check_groups",
"(",
"s",
",",
"groups",
")",
"return",
"groups"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
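`separate_particles_into_groups` tiles the image into boxes of side `region_size` and collects the particle indices in each box. A minimal NumPy-only sketch of that partitioning idea, with no Tile objects and no half-box shifting, purely for illustration:

```python
import numpy as np

rng = np.random.default_rng(2)
positions = rng.uniform(0, 120, size=(50, 3))
region_size = 40

# bin each particle into the cubic box of side `region_size` that contains it
box_index = np.floor(positions / region_size).astype(int)

groups = {}
for i, box in enumerate(map(tuple, box_index)):
    groups.setdefault(box, []).append(i)
groups = [np.array(idx) for idx in groups.values()]   # only non-empty boxes

# every particle ends up in exactly one group, as _check_groups verifies
assert sum(g.size for g in groups) == positions.shape[0]
```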
valid
|
_check_groups
|
Ensures that all particles are included in exactly 1 group
|
peri/opt/optimize.py
|
def _check_groups(s, groups):
"""Ensures that all particles are included in exactly 1 group"""
ans = []
for g in groups:
ans.extend(g)
if np.unique(ans).size != np.size(ans):
return False
elif np.unique(ans).size != s.obj_get_positions().shape[0]:
return False
else:
return (np.arange(s.obj_get_radii().size) == np.sort(ans)).all()
|
def _check_groups(s, groups):
"""Ensures that all particles are included in exactly 1 group"""
ans = []
for g in groups:
ans.extend(g)
if np.unique(ans).size != np.size(ans):
return False
elif np.unique(ans).size != s.obj_get_positions().shape[0]:
return False
else:
return (np.arange(s.obj_get_radii().size) == np.sort(ans)).all()
|
[
"Ensures",
"that",
"all",
"particles",
"are",
"included",
"in",
"exactly",
"1",
"group"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L330-L340
|
[
"def",
"_check_groups",
"(",
"s",
",",
"groups",
")",
":",
"ans",
"=",
"[",
"]",
"for",
"g",
"in",
"groups",
":",
"ans",
".",
"extend",
"(",
"g",
")",
"if",
"np",
".",
"unique",
"(",
"ans",
")",
".",
"size",
"!=",
"np",
".",
"size",
"(",
"ans",
")",
":",
"return",
"False",
"elif",
"np",
".",
"unique",
"(",
"ans",
")",
".",
"size",
"!=",
"s",
".",
"obj_get_positions",
"(",
")",
".",
"shape",
"[",
"0",
"]",
":",
"return",
"False",
"else",
":",
"return",
"(",
"np",
".",
"arange",
"(",
"s",
".",
"obj_get_radii",
"(",
")",
".",
"size",
")",
"==",
"np",
".",
"sort",
"(",
"ans",
")",
")",
".",
"all",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
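The check performed by `_check_groups` can be exercised on its own with toy groups. Below is a simplified standalone version that takes the particle count directly instead of a state object; the three assertions show the exact-partition, duplicated-index, and missing-index cases.

```python
import numpy as np

def check_groups(n_particles, groups):
    """Simplified standalone version of the test above: every index in
    range(n_particles) must appear in exactly one group."""
    ans = np.concatenate([np.asarray(g) for g in groups])
    if np.unique(ans).size != ans.size:        # a particle appears twice
        return False
    if ans.size != n_particles:                # a particle is missing
        return False
    return bool((np.sort(ans) == np.arange(n_particles)).all())

assert check_groups(5, [[0, 1], [2, 3, 4]])         # exact partition
assert not check_groups(5, [[0, 1], [1, 2, 3, 4]])  # index 1 duplicated
assert not check_groups(5, [[0, 1], [2, 3]])        # index 4 missing
```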
valid
|
calc_particle_group_region_size
|
Finds the biggest region size for LM particle optimization with a
given memory constraint.
Input Parameters
----------------
s : :class:`peri.states.ImageState`
The state with the particles
region_size : Int or 3-element list-like of ints, optional.
The initial guess for the region size. Default is 40
max_mem : Numeric, optional
The maximum memory for the optimizer to take. Default is 1e9
Other Parameters
----------------
bounds: 2-element list-like of 3-element lists.
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
Returns
-------
region_size : numpy.ndarray of ints of the region size.
|
peri/opt/optimize.py
|
def calc_particle_group_region_size(s, region_size=40, max_mem=1e9, **kwargs):
"""
Finds the biggest region size for LM particle optimization with a
given memory constraint.
Input Parameters
----------------
s : :class:`peri.states.ImageState`
The state with the particles
region_size : Int or 3-element list-like of ints, optional.
The initial guess for the region size. Default is 40
max_mem : Numeric, optional
The maximum memory for the optimizer to take. Default is 1e9
Other Parameters
----------------
bounds: 2-element list-like of 3-element lists.
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
Returns
-------
region_size : numpy.ndarray of ints of the region size.
"""
region_size = np.array(region_size).astype('int')
def calc_mem_usage(region_size):
rs = np.array(region_size)
particle_groups = separate_particles_into_groups(s, region_size=
rs.tolist(), **kwargs)
# The actual mem usage is the max of the memory usage of all the
# particle groups. However this is too slow. So instead we use the
# max of the memory of the biggest 5 particle groups:
numpart = [np.size(g) for g in particle_groups]
biggroups = [particle_groups[i] for i in np.argsort(numpart)[-5:]]
def get_tile_jsize(group):
nms = s.param_particle(group)
tile = s.get_update_io_tiles(nms, s.get_values(nms))[2]
return tile.shape.prod() * len(nms)
mems = [8*get_tile_jsize(g) for g in biggroups] # 8 for bytes/float64
return np.max(mems)
im_shape = s.oshape.shape
if calc_mem_usage(region_size) > max_mem:
while ((calc_mem_usage(region_size) > max_mem) and
np.any(region_size > 2)):
region_size = np.clip(region_size-1, 2, im_shape)
else:
while ((calc_mem_usage(region_size) < max_mem) and
np.any(region_size < im_shape)):
region_size = np.clip(region_size+1, 2, im_shape)
region_size -= 1 #need to be < memory, so we undo 1 iteration
return region_size
|
def calc_particle_group_region_size(s, region_size=40, max_mem=1e9, **kwargs):
"""
Finds the biggest region size for LM particle optimization with a
given memory constraint.
Input Parameters
----------------
s : :class:`peri.states.ImageState`
The state with the particles
region_size : Int or 3-element list-like of ints, optional.
The initial guess for the region size. Default is 40
max_mem : Numeric, optional
The maximum memory for the optimizer to take. Default is 1e9
Other Parameters
----------------
bounds: 2-element list-like of 3-element lists.
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
Returns
-------
region_size : numpy.ndarray of ints of the region size.
"""
region_size = np.array(region_size).astype('int')
def calc_mem_usage(region_size):
rs = np.array(region_size)
particle_groups = separate_particles_into_groups(s, region_size=
rs.tolist(), **kwargs)
# The actual mem usage is the max of the memory usage of all the
# particle groups. However this is too slow. So instead we use the
# max of the memory of the biggest 5 particle groups:
numpart = [np.size(g) for g in particle_groups]
biggroups = [particle_groups[i] for i in np.argsort(numpart)[-5:]]
def get_tile_jsize(group):
nms = s.param_particle(group)
tile = s.get_update_io_tiles(nms, s.get_values(nms))[2]
return tile.shape.prod() * len(nms)
mems = [8*get_tile_jsize(g) for g in biggroups] # 8 for bytes/float64
return np.max(mems)
im_shape = s.oshape.shape
if calc_mem_usage(region_size) > max_mem:
while ((calc_mem_usage(region_size) > max_mem) and
np.any(region_size > 2)):
region_size = np.clip(region_size-1, 2, im_shape)
else:
while ((calc_mem_usage(region_size) < max_mem) and
np.any(region_size < im_shape)):
region_size = np.clip(region_size+1, 2, im_shape)
region_size -= 1 #need to be < memory, so we undo 1 iteration
return region_size
|
[
"Finds",
"the",
"biggest",
"region",
"size",
"for",
"LM",
"particle",
"optimization",
"with",
"a",
"given",
"memory",
"constraint",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L342-L398
|
[
"def",
"calc_particle_group_region_size",
"(",
"s",
",",
"region_size",
"=",
"40",
",",
"max_mem",
"=",
"1e9",
",",
"*",
"*",
"kwargs",
")",
":",
"region_size",
"=",
"np",
".",
"array",
"(",
"region_size",
")",
".",
"astype",
"(",
"'int'",
")",
"def",
"calc_mem_usage",
"(",
"region_size",
")",
":",
"rs",
"=",
"np",
".",
"array",
"(",
"region_size",
")",
"particle_groups",
"=",
"separate_particles_into_groups",
"(",
"s",
",",
"region_size",
"=",
"rs",
".",
"tolist",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
"# The actual mem usage is the max of the memory usage of all the",
"# particle groups. However this is too slow. So instead we use the",
"# max of the memory of the biggest 5 particle groups:",
"numpart",
"=",
"[",
"np",
".",
"size",
"(",
"g",
")",
"for",
"g",
"in",
"particle_groups",
"]",
"biggroups",
"=",
"[",
"particle_groups",
"[",
"i",
"]",
"for",
"i",
"in",
"np",
".",
"argsort",
"(",
"numpart",
")",
"[",
"-",
"5",
":",
"]",
"]",
"def",
"get_tile_jsize",
"(",
"group",
")",
":",
"nms",
"=",
"s",
".",
"param_particle",
"(",
"group",
")",
"tile",
"=",
"s",
".",
"get_update_io_tiles",
"(",
"nms",
",",
"s",
".",
"get_values",
"(",
"nms",
")",
")",
"[",
"2",
"]",
"return",
"tile",
".",
"shape",
".",
"prod",
"(",
")",
"*",
"len",
"(",
"nms",
")",
"mems",
"=",
"[",
"8",
"*",
"get_tile_jsize",
"(",
"g",
")",
"for",
"g",
"in",
"biggroups",
"]",
"# 8 for bytes/float64",
"return",
"np",
".",
"max",
"(",
"mems",
")",
"im_shape",
"=",
"s",
".",
"oshape",
".",
"shape",
"if",
"calc_mem_usage",
"(",
"region_size",
")",
">",
"max_mem",
":",
"while",
"(",
"(",
"calc_mem_usage",
"(",
"region_size",
")",
">",
"max_mem",
")",
"and",
"np",
".",
"any",
"(",
"region_size",
">",
"2",
")",
")",
":",
"region_size",
"=",
"np",
".",
"clip",
"(",
"region_size",
"-",
"1",
",",
"2",
",",
"im_shape",
")",
"else",
":",
"while",
"(",
"(",
"calc_mem_usage",
"(",
"region_size",
")",
"<",
"max_mem",
")",
"and",
"np",
".",
"any",
"(",
"region_size",
"<",
"im_shape",
")",
")",
":",
"region_size",
"=",
"np",
".",
"clip",
"(",
"region_size",
"+",
"1",
",",
"2",
",",
"im_shape",
")",
"region_size",
"-=",
"1",
"#need to be < memory, so we undo 1 iteration",
"return",
"region_size"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
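`calc_particle_group_region_size` grows or shrinks the region size until the estimated Jacobian memory crosses `max_mem`, then backs off one step. The search pattern itself can be seen in isolation with a made-up memory model; the cost function and image shape below are hypothetical, whereas the real estimate comes from the state's update tiles.

```python
import numpy as np

def mock_mem_usage(region_size, bytes_per_voxel=8 * 200):
    # hypothetical cost: volume of one region times a constant per voxel
    return float(np.prod(region_size)) * bytes_per_voxel

max_mem = 1e9
im_shape = np.array([64, 256, 256])
region_size = np.array([40, 40, 40])

if mock_mem_usage(region_size) > max_mem:
    while mock_mem_usage(region_size) > max_mem and np.any(region_size > 2):
        region_size = np.clip(region_size - 1, 2, im_shape)
else:
    while (mock_mem_usage(region_size) < max_mem and
            np.any(region_size < im_shape)):
        region_size = np.clip(region_size + 1, 2, im_shape)
    region_size -= 1  # the last step overshot max_mem, so undo one iteration

print(region_size, mock_mem_usage(region_size) < max_mem)
```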
valid
|
get_residuals_update_tile
|
Translates a tile in the padded image to the unpadded image.
Given a state and a tile that corresponds to the padded image, returns
a tile that corresponds to the the corresponding pixels of the difference
image
Parameters
----------
st : :class:`peri.states.State`
The state
padded_tile : :class:`peri.util.Tile`
The tile in the padded image.
Returns
-------
:class:`peri.util.Tile`
The tile corresponding to padded_tile in the unpadded image.
|
peri/opt/optimize.py
|
def get_residuals_update_tile(st, padded_tile):
"""
Translates a tile in the padded image to the unpadded image.
Given a state and a tile that corresponds to the padded image, returns
a tile that corresponds to the the corresponding pixels of the difference
image
Parameters
----------
st : :class:`peri.states.State`
The state
padded_tile : :class:`peri.util.Tile`
The tile in the padded image.
Returns
-------
:class:`peri.util.Tile`
The tile corresponding to padded_tile in the unpadded image.
"""
inner_tile = st.ishape.intersection([st.ishape, padded_tile])
return inner_tile.translate(-st.pad)
|
def get_residuals_update_tile(st, padded_tile):
"""
Translates a tile in the padded image to the unpadded image.
Given a state and a tile that corresponds to the padded image, returns
a tile that corresponds to the the corresponding pixels of the difference
image
Parameters
----------
st : :class:`peri.states.State`
The state
padded_tile : :class:`peri.util.Tile`
The tile in the padded image.
Returns
-------
:class:`peri.util.Tile`
The tile corresponding to padded_tile in the unpadded image.
"""
inner_tile = st.ishape.intersection([st.ishape, padded_tile])
return inner_tile.translate(-st.pad)
|
[
"Translates",
"a",
"tile",
"in",
"the",
"padded",
"image",
"to",
"the",
"unpadded",
"image",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L400-L421
|
[
"def",
"get_residuals_update_tile",
"(",
"st",
",",
"padded_tile",
")",
":",
"inner_tile",
"=",
"st",
".",
"ishape",
".",
"intersection",
"(",
"[",
"st",
".",
"ishape",
",",
"padded_tile",
"]",
")",
"return",
"inner_tile",
".",
"translate",
"(",
"-",
"st",
".",
"pad",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
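`get_residuals_update_tile` clips a tile to the padded image and shifts it into unpadded coordinates. Assuming the Tile intersection behaves like axis-aligned interval intersection and `translate(-pad)` subtracts the pad from both corners (a reading of the code above, not a statement about peri internals), the arithmetic is just:

```python
import numpy as np

pad = np.array([10, 10, 10])
ishape_l, ishape_r = np.array([0, 0, 0]), np.array([84, 276, 276])  # padded image tile (hypothetical)
tile_l, tile_r = np.array([5, 30, 40]), np.array([60, 120, 130])    # padded_tile (hypothetical)

inner_l = np.maximum(ishape_l, tile_l)      # intersection with the padded image
inner_r = np.minimum(ishape_r, tile_r)
unpadded_l = inner_l - pad                  # shift into unpadded coordinates
unpadded_r = inner_r - pad
print(unpadded_l, unpadded_r)
```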
valid
|
find_best_step
|
Returns the index of the lowest of the passed values. Catches nans etc.
|
peri/opt/optimize.py
|
def find_best_step(err_vals):
"""
Returns the index of the lowest of the passed values. Catches nans etc.
"""
if np.all(np.isnan(err_vals)):
raise ValueError('All err_vals are nans!')
return np.nanargmin(err_vals)
|
def find_best_step(err_vals):
"""
Returns the index of the lowest of the passed values. Catches nans etc.
"""
if np.all(np.isnan(err_vals)):
raise ValueError('All err_vals are nans!')
return np.nanargmin(err_vals)
|
[
"Returns",
"the",
"index",
"of",
"the",
"lowest",
"of",
"the",
"passed",
"values",
".",
"Catches",
"nans",
"etc",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L427-L433
|
[
"def",
"find_best_step",
"(",
"err_vals",
")",
":",
"if",
"np",
".",
"all",
"(",
"np",
".",
"isnan",
"(",
"err_vals",
")",
")",
":",
"raise",
"ValueError",
"(",
"'All err_vals are nans!'",
")",
"return",
"np",
".",
"nanargmin",
"(",
"err_vals",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
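`find_best_step` is a thin guard around `np.nanargmin`. A short illustration of why the guard is there:

```python
import numpy as np

err_vals = np.array([np.nan, 0.7, 0.3, np.nan, 0.5])
assert np.nanargmin(err_vals) == 2      # NaN entries are skipped

all_nan = np.array([np.nan, np.nan])
if np.all(np.isnan(all_nan)):           # same check as the function above
    print('All err_vals are nans!')     # avoids np.nanargmin raising on all-NaN input
```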
valid
|
do_levmarq
|
Runs Levenberg-Marquardt optimization on a state.
Convenience wrapper for LMGlobals. Same keyword args, but the defaults
have been set to useful values for optimizing globals.
See LMGlobals and LMEngine for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
LMGlobals : Optimizer object; the workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
|
peri/opt/optimize.py
|
def do_levmarq(s, param_names, damping=0.1, decrease_damp_factor=10.,
run_length=6, eig_update=True, collect_stats=False, rz_order=0,
run_type=2, **kwargs):
"""
Runs Levenberg-Marquardt optimization on a state.
Convenience wrapper for LMGlobals. Same keyword args, but the defaults
have been set to useful values for optimizing globals.
See LMGlobals and LMEngine for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
LMGlobals : Optimizer object; the workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
"""
if rz_order > 0:
aug = AugmentedState(s, param_names, rz_order=rz_order)
lm = LMAugmentedState(aug, damping=damping, run_length=run_length,
decrease_damp_factor=decrease_damp_factor, eig_update=
eig_update, **kwargs)
else:
lm = LMGlobals(s, param_names, damping=damping, run_length=run_length,
decrease_damp_factor=decrease_damp_factor, eig_update=
eig_update, **kwargs)
if run_type == 2:
lm.do_run_2()
elif run_type == 1:
lm.do_run_1()
else:
raise ValueError('run_type=1,2 only')
if collect_stats:
return lm.get_termination_stats()
|
def do_levmarq(s, param_names, damping=0.1, decrease_damp_factor=10.,
run_length=6, eig_update=True, collect_stats=False, rz_order=0,
run_type=2, **kwargs):
"""
Runs Levenberg-Marquardt optimization on a state.
Convenience wrapper for LMGlobals. Same keyword args, but the defaults
have been set to useful values for optimizing globals.
See LMGlobals and LMEngine for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
LMGlobals : Optimizer object; the workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
"""
if rz_order > 0:
aug = AugmentedState(s, param_names, rz_order=rz_order)
lm = LMAugmentedState(aug, damping=damping, run_length=run_length,
decrease_damp_factor=decrease_damp_factor, eig_update=
eig_update, **kwargs)
else:
lm = LMGlobals(s, param_names, damping=damping, run_length=run_length,
decrease_damp_factor=decrease_damp_factor, eig_update=
eig_update, **kwargs)
if run_type == 2:
lm.do_run_2()
elif run_type == 1:
lm.do_run_1()
else:
raise ValueError('run_type=1,2 only')
if collect_stats:
return lm.get_termination_stats()
|
[
"Runs",
"Levenberg",
"-",
"Marquardt",
"optimization",
"on",
"a",
"state",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2312-L2350
|
[
"def",
"do_levmarq",
"(",
"s",
",",
"param_names",
",",
"damping",
"=",
"0.1",
",",
"decrease_damp_factor",
"=",
"10.",
",",
"run_length",
"=",
"6",
",",
"eig_update",
"=",
"True",
",",
"collect_stats",
"=",
"False",
",",
"rz_order",
"=",
"0",
",",
"run_type",
"=",
"2",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"rz_order",
">",
"0",
":",
"aug",
"=",
"AugmentedState",
"(",
"s",
",",
"param_names",
",",
"rz_order",
"=",
"rz_order",
")",
"lm",
"=",
"LMAugmentedState",
"(",
"aug",
",",
"damping",
"=",
"damping",
",",
"run_length",
"=",
"run_length",
",",
"decrease_damp_factor",
"=",
"decrease_damp_factor",
",",
"eig_update",
"=",
"eig_update",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"lm",
"=",
"LMGlobals",
"(",
"s",
",",
"param_names",
",",
"damping",
"=",
"damping",
",",
"run_length",
"=",
"run_length",
",",
"decrease_damp_factor",
"=",
"decrease_damp_factor",
",",
"eig_update",
"=",
"eig_update",
",",
"*",
"*",
"kwargs",
")",
"if",
"run_type",
"==",
"2",
":",
"lm",
".",
"do_run_2",
"(",
")",
"elif",
"run_type",
"==",
"1",
":",
"lm",
".",
"do_run_1",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'run_type=1,2 only'",
")",
"if",
"collect_stats",
":",
"return",
"lm",
".",
"get_termination_stats",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
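Calling `do_levmarq` needs an existing image state; the helper below is only a hypothetical usage sketch (the state construction is not shown, and the parameter names must be drawn from `st.params`), with keyword values taken from the signature above.

```python
from peri.opt import optimize as opt

def refine_globals(st, param_names, rz_order=0):
    """Hypothetical helper: one globals-only refinement pass on an existing
    peri.states.ImageState `st`. `param_names` must be entries of st.params."""
    return opt.do_levmarq(st, param_names, damping=0.1, run_length=6,
                          eig_update=True, rz_order=rz_order, run_type=2,
                          collect_stats=True)
```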
valid
|
do_levmarq_particles
|
Levenberg-Marquardt optimization on a set of particles.
Convenience wrapper for LMParticles. Same keyword args, but the
defaults have been set to useful values for optimizing particles.
See LMParticles and LMEngine for documentation.
See Also
--------
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticles : Optimizer object; the workhorse of do_levmarq_particles.
LMEngine : Engine superclass for all the optimizers.
|
peri/opt/optimize.py
|
def do_levmarq_particles(s, particles, damping=1.0, decrease_damp_factor=10.,
run_length=4, collect_stats=False, max_iter=2, **kwargs):
"""
Levenberg-Marquardt optimization on a set of particles.
Convenience wrapper for LMParticles. Same keyword args, but the
defaults have been set to useful values for optimizing particles.
See LMParticles and LMEngine for documentation.
See Also
--------
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticles : Optimizer object; the workhorse of do_levmarq_particles.
LMEngine : Engine superclass for all the optimizers.
"""
lp = LMParticles(s, particles, damping=damping, run_length=run_length,
decrease_damp_factor=decrease_damp_factor, max_iter=max_iter,
**kwargs)
lp.do_run_2()
if collect_stats:
return lp.get_termination_stats()
|
def do_levmarq_particles(s, particles, damping=1.0, decrease_damp_factor=10.,
run_length=4, collect_stats=False, max_iter=2, **kwargs):
"""
Levenberg-Marquardt optimization on a set of particles.
Convenience wrapper for LMParticles. Same keyword args, but the
defaults have been set to useful values for optimizing particles.
See LMParticles and LMEngine for documentation.
See Also
--------
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticles : Optimizer object; the workhorse of do_levmarq_particles.
LMEngine : Engine superclass for all the optimizers.
"""
lp = LMParticles(s, particles, damping=damping, run_length=run_length,
decrease_damp_factor=decrease_damp_factor, max_iter=max_iter,
**kwargs)
lp.do_run_2()
if collect_stats:
return lp.get_termination_stats()
|
[
"Levenberg",
"-",
"Marquardt",
"optimization",
"on",
"a",
"set",
"of",
"particles",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2352-L2378
|
[
"def",
"do_levmarq_particles",
"(",
"s",
",",
"particles",
",",
"damping",
"=",
"1.0",
",",
"decrease_damp_factor",
"=",
"10.",
",",
"run_length",
"=",
"4",
",",
"collect_stats",
"=",
"False",
",",
"max_iter",
"=",
"2",
",",
"*",
"*",
"kwargs",
")",
":",
"lp",
"=",
"LMParticles",
"(",
"s",
",",
"particles",
",",
"damping",
"=",
"damping",
",",
"run_length",
"=",
"run_length",
",",
"decrease_damp_factor",
"=",
"decrease_damp_factor",
",",
"max_iter",
"=",
"max_iter",
",",
"*",
"*",
"kwargs",
")",
"lp",
".",
"do_run_2",
"(",
")",
"if",
"collect_stats",
":",
"return",
"lp",
".",
"get_termination_stats",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
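A common pattern is to pick the particles with `find_particles_in_tile` and hand them to `do_levmarq_particles`. This is a hypothetical sketch assuming an existing ImageState `st` and a `peri.util.Tile` built from two corner points, as in the entries above.

```python
from peri.util import Tile
from peri.opt import optimize as opt

def refine_particles_in_box(st, left, right):
    """Hypothetical helper: refine only the particles whose positions fall
    inside the tile spanning `left` to `right` of an existing ImageState."""
    particles = opt.find_particles_in_tile(st.obj_get_positions(),
                                           Tile(left, right))
    return opt.do_levmarq_particles(st, particles, damping=1.0, run_length=4,
                                    max_iter=2, collect_stats=True)
```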
valid
|
do_levmarq_all_particle_groups
|
Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticleGroupCollection : The workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
|
peri/opt/optimize.py
|
def do_levmarq_all_particle_groups(s, region_size=40, max_iter=2, damping=1.0,
decrease_damp_factor=10., run_length=4, collect_stats=False, **kwargs):
"""
Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticleGroupCollection : The workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
"""
lp = LMParticleGroupCollection(s, region_size=region_size, damping=damping,
run_length=run_length, decrease_damp_factor=decrease_damp_factor,
get_cos=collect_stats, max_iter=max_iter, **kwargs)
lp.do_run_2()
if collect_stats:
return lp.stats
|
def do_levmarq_all_particle_groups(s, region_size=40, max_iter=2, damping=1.0,
decrease_damp_factor=10., run_length=4, collect_stats=False, **kwargs):
"""
Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticleGroupCollection : The workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
"""
lp = LMParticleGroupCollection(s, region_size=region_size, damping=damping,
run_length=run_length, decrease_damp_factor=decrease_damp_factor,
get_cos=collect_stats, max_iter=max_iter, **kwargs)
lp.do_run_2()
if collect_stats:
return lp.stats
|
[
"Levenberg",
"-",
"Marquardt",
"optimization",
"for",
"every",
"particle",
"in",
"the",
"state",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2380-L2406
|
[
"def",
"do_levmarq_all_particle_groups",
"(",
"s",
",",
"region_size",
"=",
"40",
",",
"max_iter",
"=",
"2",
",",
"damping",
"=",
"1.0",
",",
"decrease_damp_factor",
"=",
"10.",
",",
"run_length",
"=",
"4",
",",
"collect_stats",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"lp",
"=",
"LMParticleGroupCollection",
"(",
"s",
",",
"region_size",
"=",
"region_size",
",",
"damping",
"=",
"damping",
",",
"run_length",
"=",
"run_length",
",",
"decrease_damp_factor",
"=",
"decrease_damp_factor",
",",
"get_cos",
"=",
"collect_stats",
",",
"max_iter",
"=",
"max_iter",
",",
"*",
"*",
"kwargs",
")",
"lp",
".",
"do_run_2",
"(",
")",
"if",
"collect_stats",
":",
"return",
"lp",
".",
"stats"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
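`do_levmarq_all_particle_groups` pairs naturally with `calc_particle_group_region_size`, so the per-group Jacobians stay under a memory budget. A hypothetical usage sketch, again assuming an already-built ImageState `st`:

```python
from peri.opt import optimize as opt

def refine_all_particles(st, max_mem=1e9):
    """Hypothetical helper: choose a region size that fits the memory budget,
    then run one particle-group sweep over an existing ImageState `st`."""
    rs = opt.calc_particle_group_region_size(st, region_size=40,
                                             max_mem=max_mem)
    return opt.do_levmarq_all_particle_groups(st, region_size=rs, max_iter=2,
                                              collect_stats=True)
```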
valid
|
do_levmarq_n_directions
|
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
|
peri/opt/optimize.py
|
def do_levmarq_n_directions(s, directions, max_iter=2, run_length=2,
damping=1e-3, collect_stats=False, marquardt_damping=True, **kwargs):
"""
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
"""
# normal = direction / np.sqrt(np.dot(direction, direction))
normals = np.array([d/np.sqrt(np.dot(d,d)) for d in directions])
if np.isnan(normals).any():
raise ValueError('`directions` must not be 0s or contain nan')
obj = OptState(s, normals)
lo = LMOptObj(obj, max_iter=max_iter, run_length=run_length, damping=
damping, marquardt_damping=marquardt_damping, **kwargs)
lo.do_run_1()
if collect_stats:
return lo.get_termination_stats()
|
def do_levmarq_n_directions(s, directions, max_iter=2, run_length=2,
damping=1e-3, collect_stats=False, marquardt_damping=True, **kwargs):
"""
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
"""
# normal = direction / np.sqrt(np.dot(direction, direction))
normals = np.array([d/np.sqrt(np.dot(d,d)) for d in directions])
if np.isnan(normals).any():
raise ValueError('`directions` must not be 0s or contain nan')
obj = OptState(s, normals)
lo = LMOptObj(obj, max_iter=max_iter, run_length=run_length, damping=
damping, marquardt_damping=marquardt_damping, **kwargs)
lo.do_run_1()
if collect_stats:
return lo.get_termination_stats()
|
[
"Optimization",
"of",
"a",
"state",
"along",
"a",
"specific",
"set",
"of",
"directions",
"in",
"parameter",
"space",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2408-L2435
|
[
"def",
"do_levmarq_n_directions",
"(",
"s",
",",
"directions",
",",
"max_iter",
"=",
"2",
",",
"run_length",
"=",
"2",
",",
"damping",
"=",
"1e-3",
",",
"collect_stats",
"=",
"False",
",",
"marquardt_damping",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# normal = direction / np.sqrt(np.dot(direction, direction))",
"normals",
"=",
"np",
".",
"array",
"(",
"[",
"d",
"/",
"np",
".",
"sqrt",
"(",
"np",
".",
"dot",
"(",
"d",
",",
"d",
")",
")",
"for",
"d",
"in",
"directions",
"]",
")",
"if",
"np",
".",
"isnan",
"(",
"normals",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'`directions` must not be 0s or contain nan'",
")",
"obj",
"=",
"OptState",
"(",
"s",
",",
"normals",
")",
"lo",
"=",
"LMOptObj",
"(",
"obj",
",",
"max_iter",
"=",
"max_iter",
",",
"run_length",
"=",
"run_length",
",",
"damping",
"=",
"damping",
",",
"marquardt_damping",
"=",
"marquardt_damping",
",",
"*",
"*",
"kwargs",
")",
"lo",
".",
"do_run_1",
"(",
")",
"if",
"collect_stats",
":",
"return",
"lo",
".",
"get_termination_stats",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
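`burn()` (the next entry) uses `do_levmarq_n_directions` for its optional line minimization, feeding it the last few parameter-step directions. A hypothetical sketch of that pattern, assuming an existing ImageState `st` and a list of previously recorded parameter deltas:

```python
import numpy as np
from peri.opt import optimize as opt

def line_min_over_recent_steps(st, delta_vals):
    """Hypothetical helper: optimize `st` along the subspace spanned by the
    last three parameter steps, mirroring the line-minimization in burn()."""
    directions = np.asarray(delta_vals[-3:])   # [n, d] array of step directions
    return opt.do_levmarq_n_directions(st, directions, max_iter=2,
                                       collect_stats=True)
```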
valid
|
burn
|
Optimizes all the parameters of a state.
Burns a state through calling LMParticleGroupCollection and LMGlobals/
LMAugmentedState.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to optimize
n_loop : Int, optional
The number of times to loop over in the optimizer. Default is 6.
collect_stats : Bool, optional
Whether or not to collect information on the optimizer's
performance. Default is False.
desc : string, optional
Description to append to the states.save() call every loop.
Set to None to avoid saving. Default is '', which selects
one of 'burning', 'polishing', 'doing_positions'
rz_order: Int, optional
Set to an int > 0 to optimize with an augmented state (R(z) as
a global parameter) vs. with the normal global parameters;
rz_order is the order of the polynomial approximate for R(z).
Default is 0 (no augmented state).
fractol : Float, optional
Fractional change in error at which to terminate. Default 1e-4
errtol : Float, optional
Absolute change in error at which to terminate. Default 1e-2
mode : {'burn', 'do-particles', or 'polish'}, optional
What mode to optimize with.
* 'burn' : Your state is far from the minimum.
* 'do-particles' : Positions far from minimum, globals well-fit.
* 'polish' : The state is close to the minimum.
'burn' is the default. Only `polish` will get to the global
minimum.
max_mem : Numeric, optional
The maximum amount of memory allowed for the optimizers' J's,
for both particles & globals. Default is 1e9, i.e. 1GB per
optimizer.
do_line_min : Bool or 'default', optional
Set to True to do an additional, third optimization per loop
which optimizes along the subspace spanned by the last 3 steps
of the burn()'s trajectory. In principle this should signifi-
cantly speed up the convergence; in practice it sometimes does,
sometimes doesn't. Default is 'default', which picks by mode:
* 'burn' : False
* 'do-particles' : False
* 'polish' : True
dowarn : Bool, optional
Whether to log a warning if termination results from finishing
loops rather than from convergence. Default is True.
Returns
-------
dictionary
Dictionary of convergence information. Contains whether the
optimization has converged (key ``'converged'``), the values of the
state after each loop (key ``'all_loop_values'``).
The values of the state's parameters after each part of the
loop: globals, particles, linemin. If ``collect_stats`` is set,
then also contains lists of termination dicts from globals,
particles, and line minimization (keys ``'global_stats'``,
``'particle_stats'``, and ``'line_stats``').
Notes
-----
Proceeds by alternating between one Levenberg-Marquardt step
optimizing the globals, one optimizing the particles, and repeating
until termination.
In addition, if `do_line_min` is True, at the end of each loop
step an additional optimization is tried along the subspaced spanned
by the steps taken during the last 3 loops. Ideally, this changes the
convergence from linear to quadratic, but it doesn't always do much.
Each of the 3 options proceed by optimizing as follows:
* burn : lm.do_run_2(), lp.do_run_2(). No psf, 2 loops on lm.
* do-particles : lp.do_run_2(), scales for ilm, bkg's
* polish : lm.do_run_2(), lp.do_run_2(). Everything, 1 loop each.
where lm is a globals LMGlobals instance, and lp a
LMParticleGroupCollection instance.
|
peri/opt/optimize.py
|
def burn(s, n_loop=6, collect_stats=False, desc='', rz_order=0, fractol=1e-4,
errtol=1e-2, mode='burn', max_mem=1e9, include_rad=True,
do_line_min='default', partial_log=False, dowarn=True):
"""
Optimizes all the parameters of a state.
Burns a state through calling LMParticleGroupCollection and LMGlobals/
LMAugmentedState.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to optimize
n_loop : Int, optional
The number of times to loop over in the optimizer. Default is 6.
collect_stats : Bool, optional
Whether or not to collect information on the optimizer's
performance. Default is False.
desc : string, optional
Description to append to the states.save() call every loop.
Set to None to avoid saving. Default is '', which selects
one of 'burning', 'polishing', 'doing_positions'
rz_order: Int, optional
Set to an int > 0 to optimize with an augmented state (R(z) as
a global parameter) vs. with the normal global parameters;
rz_order is the order of the polynomial approximate for R(z).
Default is 0 (no augmented state).
fractol : Float, optional
Fractional change in error at which to terminate. Default 1e-4
errtol : Float, optional
Absolute change in error at which to terminate. Default 1e-2
mode : {'burn', 'do-particles', or 'polish'}, optional
What mode to optimize with.
* 'burn' : Your state is far from the minimum.
* 'do-particles' : Positions far from minimum, globals well-fit.
* 'polish' : The state is close to the minimum.
'burn' is the default. Only `polish` will get to the global
minimum.
max_mem : Numeric, optional
The maximum amount of memory allowed for the optimizers' J's,
for both particles & globals. Default is 1e9, i.e. 1GB per
optimizer.
do_line_min : Bool or 'default', optional
Set to True to do an additional, third optimization per loop
which optimizes along the subspace spanned by the last 3 steps
of the burn()'s trajectory. In principle this should signifi-
cantly speed up the convergence; in practice it sometimes does,
sometimes doesn't. Default is 'default', which picks by mode:
* 'burn' : False
* 'do-particles' : False
* 'polish' : True
dowarn : Bool, optional
Whether to log a warning if termination results from finishing
loops rather than from convergence. Default is True.
Returns
-------
dictionary
Dictionary of convergence information. Contains whether the
optimization has converged (key ``'converged'``), the values of the
state after each loop (key ``'all_loop_values'``).
The values of the state's parameters after each part of the
loop: globals, particles, linemin. If ``collect_stats`` is set,
then also contains lists of termination dicts from globals,
particles, and line minimization (keys ``'global_stats'``,
``'particle_stats'``, and ``'line_stats``').
Notes
-----
Proceeds by alternating between one Levenberg-Marquardt step
optimizing the globals, one optimizing the particles, and repeating
until termination.
In addition, if `do_line_min` is True, at the end of each loop
step an additional optimization is tried along the subspaced spanned
by the steps taken during the last 3 loops. Ideally, this changes the
convergence from linear to quadratic, but it doesn't always do much.
Each of the 3 options proceed by optimizing as follows:
* burn : lm.do_run_2(), lp.do_run_2(). No psf, 2 loops on lm.
* do-particles : lp.do_run_2(), scales for ilm, bkg's
* polish : lm.do_run_2(), lp.do_run_2(). Everything, 1 loop each.
where lm is a globals LMGlobals instance, and lp a
LMParticleGroupCollection instance.
"""
# It would be nice if some of these magic #'s (region size,
# num_eig_dirs, etc) were calculated in a good way. FIXME
mode = mode.lower()
if mode not in {'burn', 'do-particles', 'polish'}:
raise ValueError('mode must be one of burn, do-particles, polish')
#1. Setting Defaults
if desc is '':
desc = mode + 'ing' if mode != 'do-particles' else 'doing-particles'
eig_update = (mode != 'do-particles')
glbl_run_length = 3 if mode == 'do-particles' else 6
glbl_mx_itr = 2 if mode == 'burn' else 1
use_accel = (mode == 'burn')
rz_order = int(rz_order)
if do_line_min == 'default':
# do_line_min = (mode == 'polish')
# temporary fix until we solve the particles-leaving-image issue:
do_line_min = False
if mode == 'do-particles':
# FIXME explicit params
# We pick some parameters for an overall illumination scale:
glbl_nms = ['ilm-scale', 'ilm-xy-0-0', 'bkg-xy-0-0', 'offset']
# And now, since we have explicit parameters, we check that they
# are actually in the state:
glbl_nms = [nm for nm in glbl_nms if nm in s.params]
else:
if mode == 'polish':
remove_params = None
else:
# FIXME explicit params
remove_params = s.get('psf').params
if ('zscale' not in remove_params) and ('zscale' in s.params):
remove_params.append('zscale')
glbl_nms = name_globals(s, remove_params=remove_params)
all_lp_stats = []
all_lm_stats = []
all_line_stats = []
all_loop_values = []
_delta_vals = [] # storing the directions we've moved along for line min
#2. Optimize
CLOG.info('Start of loop %d:\t%f' % (0, s.error))
for a in range(n_loop):
start_err = s.error
start_params = np.copy(s.state[s.params])
#2a. Globals
# glbl_dmp = 0.3 if a == 0 else 3e-2
####FIXME we damp degenerate but convenient spaces in the ilm, bkg
####manually, but we should do it more betterer.
BAD_DAMP = 1e7
BAD_LIST = [['ilm-scale', BAD_DAMP], ['ilm-off', BAD_DAMP], ['ilm-z-0',
BAD_DAMP], ['bkg-z-0', BAD_DAMP]]
####
glbl_dmp = vectorize_damping(glbl_nms + ['rz']*rz_order, damping=1.0,
increase_list=[['psf-', 3e1]] + BAD_LIST)
if a != 0 or mode != 'do-particles':
if partial_log:
log.set_level('debug')
gstats = do_levmarq(s, glbl_nms, max_iter=glbl_mx_itr, run_length=
glbl_run_length, eig_update=eig_update, num_eig_dirs=10,
eig_update_frequency=3, rz_order=rz_order, damping=
glbl_dmp, decrease_damp_factor=10., use_accel=use_accel,
collect_stats=collect_stats, fractol=0.1*fractol,
max_mem=max_mem)
if partial_log:
log.set_level('info')
all_lm_stats.append(gstats)
if desc is not None:
states.save(s, desc=desc)
CLOG.info('Globals, loop {}:\t{}'.format(a, s.error))
all_loop_values.append(s.values)
#2b. Particles
prtl_dmp = 1.0 if a==0 else 1e-2
#For now, I'm calculating the region size. This might be a bad idea
#because 1 bad particle can spoil the whole group.
pstats = do_levmarq_all_particle_groups(s, region_size=40, max_iter=1,
do_calc_size=True, run_length=4, eig_update=False,
damping=prtl_dmp, fractol=0.1*fractol, collect_stats=
collect_stats, max_mem=max_mem, include_rad=include_rad)
all_lp_stats.append(pstats)
if desc is not None:
states.save(s, desc=desc)
CLOG.info('Particles, loop {}:\t{}'.format(a, s.error))
gc.collect()
all_loop_values.append(s.values)
#2c. Line min?
end_params = np.copy(s.state[s.params])
_delta_vals.append(start_params - end_params)
if do_line_min:
all_line_stats.append(do_levmarq_n_directions(s, _delta_vals[-3:],
collect_stats=collect_stats))
if desc is not None:
states.save(s, desc=desc)
CLOG.info('Line min., loop {}:\t{}'.format(a, s.error))
all_loop_values.append(s.values)
#2d. terminate?
new_err = s.error
derr = start_err - new_err
dobreak = (derr/new_err < fractol) or (derr < errtol)
if dobreak:
break
if dowarn and (not dobreak):
CLOG.warn('burn() did not converge; consider re-running')
d = {'converged':dobreak, 'all_loop_values':all_loop_values}
if collect_stats:
d.update({'global_stats':all_lm_stats, 'particle_stats':all_lp_stats,
'line_stats':all_line_stats})
return d
|
def burn(s, n_loop=6, collect_stats=False, desc='', rz_order=0, fractol=1e-4,
errtol=1e-2, mode='burn', max_mem=1e9, include_rad=True,
do_line_min='default', partial_log=False, dowarn=True):
"""
Optimizes all the parameters of a state.
Burns a state through calling LMParticleGroupCollection and LMGlobals/
LMAugmentedState.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to optimize
n_loop : Int, optional
The number of times to loop over in the optimizer. Default is 6.
collect_stats : Bool, optional
Whether or not to collect information on the optimizer's
performance. Default is False.
desc : string, optional
Description to append to the states.save() call every loop.
Set to None to avoid saving. Default is '', which selects
one of 'burning', 'polishing', 'doing_positions'
rz_order: Int, optional
Set to an int > 0 to optimize with an augmented state (R(z) as
a global parameter) vs. with the normal global parameters;
rz_order is the order of the polynomial approximate for R(z).
Default is 0 (no augmented state).
fractol : Float, optional
Fractional change in error at which to terminate. Default 1e-4
errtol : Float, optional
Absolute change in error at which to terminate. Default 1e-2
mode : {'burn', 'do-particles', or 'polish'}, optional
What mode to optimize with.
* 'burn' : Your state is far from the minimum.
* 'do-particles' : Positions far from minimum, globals well-fit.
* 'polish' : The state is close to the minimum.
'burn' is the default. Only `polish` will get to the global
minimum.
max_mem : Numeric, optional
The maximum amount of memory allowed for the optimizers' J's,
for both particles & globals. Default is 1e9, i.e. 1GB per
optimizer.
do_line_min : Bool or 'default', optional
Set to True to do an additional, third optimization per loop
which optimizes along the subspace spanned by the last 3 steps
of the burn()'s trajectory. In principle this should signifi-
cantly speed up the convergence; in practice it sometimes does,
sometimes doesn't. Default is 'default', which picks by mode:
* 'burn' : False
* 'do-particles' : False
* 'polish' : True
dowarn : Bool, optional
Whether to log a warning if termination results from finishing
loops rather than from convergence. Default is True.
Returns
-------
dictionary
        Dictionary of convergence information. Contains whether the
        optimization has converged (key ``'converged'``) and the values of
        the state's parameters after each part of each loop (globals,
        particles, line minimization), under key ``'all_loop_values'``.
        If ``collect_stats`` is set,
then also contains lists of termination dicts from globals,
particles, and line minimization (keys ``'global_stats'``,
        ``'particle_stats'``, and ``'line_stats'``).
Notes
-----
Proceeds by alternating between one Levenberg-Marquardt step
optimizing the globals, one optimizing the particles, and repeating
until termination.
In addition, if `do_line_min` is True, at the end of each loop
    step an additional optimization is tried along the subspace spanned
by the steps taken during the last 3 loops. Ideally, this changes the
convergence from linear to quadratic, but it doesn't always do much.
Each of the 3 options proceed by optimizing as follows:
* burn : lm.do_run_2(), lp.do_run_2(). No psf, 2 loops on lm.
* do-particles : lp.do_run_2(), scales for ilm, bkg's
* polish : lm.do_run_2(), lp.do_run_2(). Everything, 1 loop each.
where lm is a globals LMGlobals instance, and lp a
LMParticleGroupCollection instance.
"""
# It would be nice if some of these magic #'s (region size,
# num_eig_dirs, etc) were calculated in a good way. FIXME
mode = mode.lower()
if mode not in {'burn', 'do-particles', 'polish'}:
raise ValueError('mode must be one of burn, do-particles, polish')
#1. Setting Defaults
if desc is '':
desc = mode + 'ing' if mode != 'do-particles' else 'doing-particles'
eig_update = (mode != 'do-particles')
glbl_run_length = 3 if mode == 'do-particles' else 6
glbl_mx_itr = 2 if mode == 'burn' else 1
use_accel = (mode == 'burn')
rz_order = int(rz_order)
if do_line_min == 'default':
# do_line_min = (mode == 'polish')
# temporary fix until we solve the particles-leaving-image issue:
do_line_min = False
if mode == 'do-particles':
# FIXME explicit params
# We pick some parameters for an overall illumination scale:
glbl_nms = ['ilm-scale', 'ilm-xy-0-0', 'bkg-xy-0-0', 'offset']
# And now, since we have explicit parameters, we check that they
# are actually in the state:
glbl_nms = [nm for nm in glbl_nms if nm in s.params]
else:
if mode == 'polish':
remove_params = None
else:
# FIXME explicit params
remove_params = s.get('psf').params
if ('zscale' not in remove_params) and ('zscale' in s.params):
remove_params.append('zscale')
glbl_nms = name_globals(s, remove_params=remove_params)
all_lp_stats = []
all_lm_stats = []
all_line_stats = []
all_loop_values = []
_delta_vals = [] # storing the directions we've moved along for line min
#2. Optimize
CLOG.info('Start of loop %d:\t%f' % (0, s.error))
for a in range(n_loop):
start_err = s.error
start_params = np.copy(s.state[s.params])
#2a. Globals
# glbl_dmp = 0.3 if a == 0 else 3e-2
####FIXME we damp degenerate but convenient spaces in the ilm, bkg
####manually, but we should do it more betterer.
BAD_DAMP = 1e7
BAD_LIST = [['ilm-scale', BAD_DAMP], ['ilm-off', BAD_DAMP], ['ilm-z-0',
BAD_DAMP], ['bkg-z-0', BAD_DAMP]]
####
glbl_dmp = vectorize_damping(glbl_nms + ['rz']*rz_order, damping=1.0,
increase_list=[['psf-', 3e1]] + BAD_LIST)
if a != 0 or mode != 'do-particles':
if partial_log:
log.set_level('debug')
gstats = do_levmarq(s, glbl_nms, max_iter=glbl_mx_itr, run_length=
glbl_run_length, eig_update=eig_update, num_eig_dirs=10,
eig_update_frequency=3, rz_order=rz_order, damping=
glbl_dmp, decrease_damp_factor=10., use_accel=use_accel,
collect_stats=collect_stats, fractol=0.1*fractol,
max_mem=max_mem)
if partial_log:
log.set_level('info')
all_lm_stats.append(gstats)
if desc is not None:
states.save(s, desc=desc)
CLOG.info('Globals, loop {}:\t{}'.format(a, s.error))
all_loop_values.append(s.values)
#2b. Particles
prtl_dmp = 1.0 if a==0 else 1e-2
#For now, I'm calculating the region size. This might be a bad idea
#because 1 bad particle can spoil the whole group.
pstats = do_levmarq_all_particle_groups(s, region_size=40, max_iter=1,
do_calc_size=True, run_length=4, eig_update=False,
damping=prtl_dmp, fractol=0.1*fractol, collect_stats=
collect_stats, max_mem=max_mem, include_rad=include_rad)
all_lp_stats.append(pstats)
if desc is not None:
states.save(s, desc=desc)
CLOG.info('Particles, loop {}:\t{}'.format(a, s.error))
gc.collect()
all_loop_values.append(s.values)
#2c. Line min?
end_params = np.copy(s.state[s.params])
_delta_vals.append(start_params - end_params)
if do_line_min:
all_line_stats.append(do_levmarq_n_directions(s, _delta_vals[-3:],
collect_stats=collect_stats))
if desc is not None:
states.save(s, desc=desc)
CLOG.info('Line min., loop {}:\t{}'.format(a, s.error))
all_loop_values.append(s.values)
#2d. terminate?
new_err = s.error
derr = start_err - new_err
dobreak = (derr/new_err < fractol) or (derr < errtol)
if dobreak:
break
if dowarn and (not dobreak):
CLOG.warn('burn() did not converge; consider re-running')
d = {'converged':dobreak, 'all_loop_values':all_loop_values}
if collect_stats:
d.update({'global_stats':all_lm_stats, 'particle_stats':all_lp_stats,
'line_stats':all_line_stats})
return d
|
[
"Optimizes",
"all",
"the",
"parameters",
"of",
"a",
"state",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2437-L2637
|
[
"def",
"burn",
"(",
"s",
",",
"n_loop",
"=",
"6",
",",
"collect_stats",
"=",
"False",
",",
"desc",
"=",
"''",
",",
"rz_order",
"=",
"0",
",",
"fractol",
"=",
"1e-4",
",",
"errtol",
"=",
"1e-2",
",",
"mode",
"=",
"'burn'",
",",
"max_mem",
"=",
"1e9",
",",
"include_rad",
"=",
"True",
",",
"do_line_min",
"=",
"'default'",
",",
"partial_log",
"=",
"False",
",",
"dowarn",
"=",
"True",
")",
":",
"# It would be nice if some of these magic #'s (region size,",
"# num_eig_dirs, etc) were calculated in a good way. FIXME",
"mode",
"=",
"mode",
".",
"lower",
"(",
")",
"if",
"mode",
"not",
"in",
"{",
"'burn'",
",",
"'do-particles'",
",",
"'polish'",
"}",
":",
"raise",
"ValueError",
"(",
"'mode must be one of burn, do-particles, polish'",
")",
"#1. Setting Defaults",
"if",
"desc",
"is",
"''",
":",
"desc",
"=",
"mode",
"+",
"'ing'",
"if",
"mode",
"!=",
"'do-particles'",
"else",
"'doing-particles'",
"eig_update",
"=",
"(",
"mode",
"!=",
"'do-particles'",
")",
"glbl_run_length",
"=",
"3",
"if",
"mode",
"==",
"'do-particles'",
"else",
"6",
"glbl_mx_itr",
"=",
"2",
"if",
"mode",
"==",
"'burn'",
"else",
"1",
"use_accel",
"=",
"(",
"mode",
"==",
"'burn'",
")",
"rz_order",
"=",
"int",
"(",
"rz_order",
")",
"if",
"do_line_min",
"==",
"'default'",
":",
"# do_line_min = (mode == 'polish')",
"# temporary fix until we solve the particles-leaving-image issue:",
"do_line_min",
"=",
"False",
"if",
"mode",
"==",
"'do-particles'",
":",
"# FIXME explicit params",
"# We pick some parameters for an overall illumination scale:",
"glbl_nms",
"=",
"[",
"'ilm-scale'",
",",
"'ilm-xy-0-0'",
",",
"'bkg-xy-0-0'",
",",
"'offset'",
"]",
"# And now, since we have explicit parameters, we check that they",
"# are actually in the state:",
"glbl_nms",
"=",
"[",
"nm",
"for",
"nm",
"in",
"glbl_nms",
"if",
"nm",
"in",
"s",
".",
"params",
"]",
"else",
":",
"if",
"mode",
"==",
"'polish'",
":",
"remove_params",
"=",
"None",
"else",
":",
"# FIXME explicit params",
"remove_params",
"=",
"s",
".",
"get",
"(",
"'psf'",
")",
".",
"params",
"if",
"(",
"'zscale'",
"not",
"in",
"remove_params",
")",
"and",
"(",
"'zscale'",
"in",
"s",
".",
"params",
")",
":",
"remove_params",
".",
"append",
"(",
"'zscale'",
")",
"glbl_nms",
"=",
"name_globals",
"(",
"s",
",",
"remove_params",
"=",
"remove_params",
")",
"all_lp_stats",
"=",
"[",
"]",
"all_lm_stats",
"=",
"[",
"]",
"all_line_stats",
"=",
"[",
"]",
"all_loop_values",
"=",
"[",
"]",
"_delta_vals",
"=",
"[",
"]",
"# storing the directions we've moved along for line min",
"#2. Optimize",
"CLOG",
".",
"info",
"(",
"'Start of loop %d:\\t%f'",
"%",
"(",
"0",
",",
"s",
".",
"error",
")",
")",
"for",
"a",
"in",
"range",
"(",
"n_loop",
")",
":",
"start_err",
"=",
"s",
".",
"error",
"start_params",
"=",
"np",
".",
"copy",
"(",
"s",
".",
"state",
"[",
"s",
".",
"params",
"]",
")",
"#2a. Globals",
"# glbl_dmp = 0.3 if a == 0 else 3e-2",
"####FIXME we damp degenerate but convenient spaces in the ilm, bkg",
"####manually, but we should do it more betterer.",
"BAD_DAMP",
"=",
"1e7",
"BAD_LIST",
"=",
"[",
"[",
"'ilm-scale'",
",",
"BAD_DAMP",
"]",
",",
"[",
"'ilm-off'",
",",
"BAD_DAMP",
"]",
",",
"[",
"'ilm-z-0'",
",",
"BAD_DAMP",
"]",
",",
"[",
"'bkg-z-0'",
",",
"BAD_DAMP",
"]",
"]",
"####",
"glbl_dmp",
"=",
"vectorize_damping",
"(",
"glbl_nms",
"+",
"[",
"'rz'",
"]",
"*",
"rz_order",
",",
"damping",
"=",
"1.0",
",",
"increase_list",
"=",
"[",
"[",
"'psf-'",
",",
"3e1",
"]",
"]",
"+",
"BAD_LIST",
")",
"if",
"a",
"!=",
"0",
"or",
"mode",
"!=",
"'do-particles'",
":",
"if",
"partial_log",
":",
"log",
".",
"set_level",
"(",
"'debug'",
")",
"gstats",
"=",
"do_levmarq",
"(",
"s",
",",
"glbl_nms",
",",
"max_iter",
"=",
"glbl_mx_itr",
",",
"run_length",
"=",
"glbl_run_length",
",",
"eig_update",
"=",
"eig_update",
",",
"num_eig_dirs",
"=",
"10",
",",
"eig_update_frequency",
"=",
"3",
",",
"rz_order",
"=",
"rz_order",
",",
"damping",
"=",
"glbl_dmp",
",",
"decrease_damp_factor",
"=",
"10.",
",",
"use_accel",
"=",
"use_accel",
",",
"collect_stats",
"=",
"collect_stats",
",",
"fractol",
"=",
"0.1",
"*",
"fractol",
",",
"max_mem",
"=",
"max_mem",
")",
"if",
"partial_log",
":",
"log",
".",
"set_level",
"(",
"'info'",
")",
"all_lm_stats",
".",
"append",
"(",
"gstats",
")",
"if",
"desc",
"is",
"not",
"None",
":",
"states",
".",
"save",
"(",
"s",
",",
"desc",
"=",
"desc",
")",
"CLOG",
".",
"info",
"(",
"'Globals, loop {}:\\t{}'",
".",
"format",
"(",
"a",
",",
"s",
".",
"error",
")",
")",
"all_loop_values",
".",
"append",
"(",
"s",
".",
"values",
")",
"#2b. Particles",
"prtl_dmp",
"=",
"1.0",
"if",
"a",
"==",
"0",
"else",
"1e-2",
"#For now, I'm calculating the region size. This might be a bad idea",
"#because 1 bad particle can spoil the whole group.",
"pstats",
"=",
"do_levmarq_all_particle_groups",
"(",
"s",
",",
"region_size",
"=",
"40",
",",
"max_iter",
"=",
"1",
",",
"do_calc_size",
"=",
"True",
",",
"run_length",
"=",
"4",
",",
"eig_update",
"=",
"False",
",",
"damping",
"=",
"prtl_dmp",
",",
"fractol",
"=",
"0.1",
"*",
"fractol",
",",
"collect_stats",
"=",
"collect_stats",
",",
"max_mem",
"=",
"max_mem",
",",
"include_rad",
"=",
"include_rad",
")",
"all_lp_stats",
".",
"append",
"(",
"pstats",
")",
"if",
"desc",
"is",
"not",
"None",
":",
"states",
".",
"save",
"(",
"s",
",",
"desc",
"=",
"desc",
")",
"CLOG",
".",
"info",
"(",
"'Particles, loop {}:\\t{}'",
".",
"format",
"(",
"a",
",",
"s",
".",
"error",
")",
")",
"gc",
".",
"collect",
"(",
")",
"all_loop_values",
".",
"append",
"(",
"s",
".",
"values",
")",
"#2c. Line min?",
"end_params",
"=",
"np",
".",
"copy",
"(",
"s",
".",
"state",
"[",
"s",
".",
"params",
"]",
")",
"_delta_vals",
".",
"append",
"(",
"start_params",
"-",
"end_params",
")",
"if",
"do_line_min",
":",
"all_line_stats",
".",
"append",
"(",
"do_levmarq_n_directions",
"(",
"s",
",",
"_delta_vals",
"[",
"-",
"3",
":",
"]",
",",
"collect_stats",
"=",
"collect_stats",
")",
")",
"if",
"desc",
"is",
"not",
"None",
":",
"states",
".",
"save",
"(",
"s",
",",
"desc",
"=",
"desc",
")",
"CLOG",
".",
"info",
"(",
"'Line min., loop {}:\\t{}'",
".",
"format",
"(",
"a",
",",
"s",
".",
"error",
")",
")",
"all_loop_values",
".",
"append",
"(",
"s",
".",
"values",
")",
"#2d. terminate?",
"new_err",
"=",
"s",
".",
"error",
"derr",
"=",
"start_err",
"-",
"new_err",
"dobreak",
"=",
"(",
"derr",
"/",
"new_err",
"<",
"fractol",
")",
"or",
"(",
"derr",
"<",
"errtol",
")",
"if",
"dobreak",
":",
"break",
"if",
"dowarn",
"and",
"(",
"not",
"dobreak",
")",
":",
"CLOG",
".",
"warn",
"(",
"'burn() did not converge; consider re-running'",
")",
"d",
"=",
"{",
"'converged'",
":",
"dobreak",
",",
"'all_loop_values'",
":",
"all_loop_values",
"}",
"if",
"collect_stats",
":",
"d",
".",
"update",
"(",
"{",
"'global_stats'",
":",
"all_lm_stats",
",",
"'particle_stats'",
":",
"all_lp_stats",
",",
"'line_stats'",
":",
"all_line_stats",
"}",
")",
"return",
"d"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
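A minimal usage sketch for the burn() record above, assuming an already-featured peri.states.ImageState named st (its construction is not shown here); only calls documented in this file are used, and desc=None skips the intermediate states.save() checkpoints.

import peri.opt.optimize as opt

def optimize_state(st, far_from_minimum=True):
    # `st` is assumed to be an existing, featured peri.states.ImageState.
    if far_from_minimum:
        # 'burn' mode for a state far from the minimum; repeat once if needed.
        info = opt.burn(st, n_loop=6, mode='burn', desc=None)
        if not info['converged']:
            info = opt.burn(st, n_loop=6, mode='burn', desc=None)
    # Per the docstring above, only 'polish' reaches the global minimum.
    return opt.burn(st, n_loop=4, mode='polish', desc=None, collect_stats=True)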
finish
|
Crawls slowly to the minimum-cost state.
Blocks the global parameters into small enough sections such that each
can be optimized separately while including all the pixels (i.e. no
decimation). Optimizes the globals, then the psf separately if desired,
then particles, then a line minimization along the step direction to
speed up convergence.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to optimize
desc : string, optional
Description to append to the states.save() call every loop.
Set to `None` to avoid saving. Default is `'finish'`.
n_loop : Int, optional
The number of times to loop over in the optimizer. Default is 4.
max_mem : Numeric, optional
The maximum amount of memory allowed for the optimizers' J's,
for both particles & globals. Default is 1e9.
separate_psf : Bool, optional
If True, does the psf optimization separately from the rest of
the globals, since the psf has a more tortuous fit landscape.
Default is True.
fractol : Float, optional
        Fractional change in error at which to terminate. Default 1e-7
    errtol : Float, optional
        Absolute change in error at which to terminate. Default 1e-3
dowarn : Bool, optional
Whether to log a warning if termination results from finishing
loops rather than from convergence. Default is True.
Returns
-------
dictionary
Information about the optimization. Has two keys: ``'converged'``,
        a Bool for whether optimization stopped due to convergence
(True) or due to max number of iterations (False), and
``'loop_values'``, a [n_loop+1, N] ``numpy.ndarray`` of the
state's values, at the start of optimization and at the end of
each loop, before the line minimization.
|
peri/opt/optimize.py
|
def finish(s, desc='finish', n_loop=4, max_mem=1e9, separate_psf=True,
fractol=1e-7, errtol=1e-3, dowarn=True):
"""
Crawls slowly to the minimum-cost state.
Blocks the global parameters into small enough sections such that each
can be optimized separately while including all the pixels (i.e. no
decimation). Optimizes the globals, then the psf separately if desired,
then particles, then a line minimization along the step direction to
speed up convergence.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to optimize
desc : string, optional
Description to append to the states.save() call every loop.
Set to `None` to avoid saving. Default is `'finish'`.
n_loop : Int, optional
The number of times to loop over in the optimizer. Default is 4.
max_mem : Numeric, optional
The maximum amount of memory allowed for the optimizers' J's,
for both particles & globals. Default is 1e9.
separate_psf : Bool, optional
If True, does the psf optimization separately from the rest of
the globals, since the psf has a more tortuous fit landscape.
Default is True.
fractol : Float, optional
        Fractional change in error at which to terminate. Default 1e-7
    errtol : Float, optional
        Absolute change in error at which to terminate. Default 1e-3
dowarn : Bool, optional
Whether to log a warning if termination results from finishing
loops rather than from convergence. Default is True.
Returns
-------
dictionary
Information about the optimization. Has two keys: ``'converged'``,
        a Bool for whether optimization stopped due to convergence
(True) or due to max number of iterations (False), and
``'loop_values'``, a [n_loop+1, N] ``numpy.ndarray`` of the
state's values, at the start of optimization and at the end of
each loop, before the line minimization.
"""
values = [np.copy(s.state[s.params])]
remove_params = s.get('psf').params if separate_psf else None
# FIXME explicit params
global_params = name_globals(s, remove_params=remove_params)
#FIXME this could be done much better, since much of the globals such
#as the ilm are local. Could be done with sparse matrices and/or taking
#nearby globals in a group and using the update tile only as the slicer,
#rather than the full residuals.
gs = np.floor(max_mem / s.residuals.nbytes).astype('int')
groups = [global_params[a:a+gs] for a in range(0, len(global_params), gs)]
CLOG.info('Start ``finish``:\t{}'.format(s.error))
for a in range(n_loop):
start_err = s.error
#1. Min globals:
for g in groups:
do_levmarq(s, g, damping=0.1, decrease_damp_factor=20.,
max_iter=1, max_mem=max_mem, eig_update=False)
if separate_psf:
do_levmarq(s, remove_params, max_mem=max_mem, max_iter=4,
eig_update=False)
CLOG.info('Globals, loop {}:\t{}'.format(a, s.error))
if desc is not None:
states.save(s, desc=desc)
#2. Min particles
do_levmarq_all_particle_groups(s, max_iter=1, max_mem=max_mem)
CLOG.info('Particles, loop {}:\t{}'.format(a, s.error))
if desc is not None:
states.save(s, desc=desc)
#3. Append vals, line min:
values.append(np.copy(s.state[s.params]))
# dv = (np.array(values[1:]) - np.array(values[0]))[-3:]
# do_levmarq_n_directions(s, dv, damping=1e-2, max_iter=2, errtol=3e-4)
# CLOG.info('Line min., loop {}:\t{}'.format(a, s.error))
# if desc is not None:
# states.save(s, desc=desc)
#4. terminate?
new_err = s.error
derr = start_err - new_err
dobreak = (derr/new_err < fractol) or (derr < errtol)
if dobreak:
break
if dowarn and (not dobreak):
CLOG.warn('finish() did not converge; consider re-running')
return {'converged':dobreak, 'loop_values':np.array(values)}
|
def finish(s, desc='finish', n_loop=4, max_mem=1e9, separate_psf=True,
fractol=1e-7, errtol=1e-3, dowarn=True):
"""
Crawls slowly to the minimum-cost state.
Blocks the global parameters into small enough sections such that each
can be optimized separately while including all the pixels (i.e. no
decimation). Optimizes the globals, then the psf separately if desired,
then particles, then a line minimization along the step direction to
speed up convergence.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to optimize
desc : string, optional
Description to append to the states.save() call every loop.
Set to `None` to avoid saving. Default is `'finish'`.
n_loop : Int, optional
The number of times to loop over in the optimizer. Default is 4.
max_mem : Numeric, optional
The maximum amount of memory allowed for the optimizers' J's,
for both particles & globals. Default is 1e9.
separate_psf : Bool, optional
If True, does the psf optimization separately from the rest of
the globals, since the psf has a more tortuous fit landscape.
Default is True.
fractol : Float, optional
        Fractional change in error at which to terminate. Default 1e-7
    errtol : Float, optional
        Absolute change in error at which to terminate. Default 1e-3
dowarn : Bool, optional
Whether to log a warning if termination results from finishing
loops rather than from convergence. Default is True.
Returns
-------
dictionary
Information about the optimization. Has two keys: ``'converged'``,
        a Bool for whether optimization stopped due to convergence
(True) or due to max number of iterations (False), and
``'loop_values'``, a [n_loop+1, N] ``numpy.ndarray`` of the
state's values, at the start of optimization and at the end of
each loop, before the line minimization.
"""
values = [np.copy(s.state[s.params])]
remove_params = s.get('psf').params if separate_psf else None
# FIXME explicit params
global_params = name_globals(s, remove_params=remove_params)
#FIXME this could be done much better, since much of the globals such
#as the ilm are local. Could be done with sparse matrices and/or taking
#nearby globals in a group and using the update tile only as the slicer,
#rather than the full residuals.
gs = np.floor(max_mem / s.residuals.nbytes).astype('int')
groups = [global_params[a:a+gs] for a in range(0, len(global_params), gs)]
CLOG.info('Start ``finish``:\t{}'.format(s.error))
for a in range(n_loop):
start_err = s.error
#1. Min globals:
for g in groups:
do_levmarq(s, g, damping=0.1, decrease_damp_factor=20.,
max_iter=1, max_mem=max_mem, eig_update=False)
if separate_psf:
do_levmarq(s, remove_params, max_mem=max_mem, max_iter=4,
eig_update=False)
CLOG.info('Globals, loop {}:\t{}'.format(a, s.error))
if desc is not None:
states.save(s, desc=desc)
#2. Min particles
do_levmarq_all_particle_groups(s, max_iter=1, max_mem=max_mem)
CLOG.info('Particles, loop {}:\t{}'.format(a, s.error))
if desc is not None:
states.save(s, desc=desc)
#3. Append vals, line min:
values.append(np.copy(s.state[s.params]))
# dv = (np.array(values[1:]) - np.array(values[0]))[-3:]
# do_levmarq_n_directions(s, dv, damping=1e-2, max_iter=2, errtol=3e-4)
# CLOG.info('Line min., loop {}:\t{}'.format(a, s.error))
# if desc is not None:
# states.save(s, desc=desc)
#4. terminate?
new_err = s.error
derr = start_err - new_err
dobreak = (derr/new_err < fractol) or (derr < errtol)
if dobreak:
break
if dowarn and (not dobreak):
CLOG.warn('finish() did not converge; consider re-running')
return {'converged':dobreak, 'loop_values':np.array(values)}
|
[
"Crawls",
"slowly",
"to",
"the",
"minimum",
"-",
"cost",
"state",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2639-L2728
|
[
"def",
"finish",
"(",
"s",
",",
"desc",
"=",
"'finish'",
",",
"n_loop",
"=",
"4",
",",
"max_mem",
"=",
"1e9",
",",
"separate_psf",
"=",
"True",
",",
"fractol",
"=",
"1e-7",
",",
"errtol",
"=",
"1e-3",
",",
"dowarn",
"=",
"True",
")",
":",
"values",
"=",
"[",
"np",
".",
"copy",
"(",
"s",
".",
"state",
"[",
"s",
".",
"params",
"]",
")",
"]",
"remove_params",
"=",
"s",
".",
"get",
"(",
"'psf'",
")",
".",
"params",
"if",
"separate_psf",
"else",
"None",
"# FIXME explicit params",
"global_params",
"=",
"name_globals",
"(",
"s",
",",
"remove_params",
"=",
"remove_params",
")",
"#FIXME this could be done much better, since much of the globals such",
"#as the ilm are local. Could be done with sparse matrices and/or taking",
"#nearby globals in a group and using the update tile only as the slicer,",
"#rather than the full residuals.",
"gs",
"=",
"np",
".",
"floor",
"(",
"max_mem",
"/",
"s",
".",
"residuals",
".",
"nbytes",
")",
".",
"astype",
"(",
"'int'",
")",
"groups",
"=",
"[",
"global_params",
"[",
"a",
":",
"a",
"+",
"gs",
"]",
"for",
"a",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"global_params",
")",
",",
"gs",
")",
"]",
"CLOG",
".",
"info",
"(",
"'Start ``finish``:\\t{}'",
".",
"format",
"(",
"s",
".",
"error",
")",
")",
"for",
"a",
"in",
"range",
"(",
"n_loop",
")",
":",
"start_err",
"=",
"s",
".",
"error",
"#1. Min globals:",
"for",
"g",
"in",
"groups",
":",
"do_levmarq",
"(",
"s",
",",
"g",
",",
"damping",
"=",
"0.1",
",",
"decrease_damp_factor",
"=",
"20.",
",",
"max_iter",
"=",
"1",
",",
"max_mem",
"=",
"max_mem",
",",
"eig_update",
"=",
"False",
")",
"if",
"separate_psf",
":",
"do_levmarq",
"(",
"s",
",",
"remove_params",
",",
"max_mem",
"=",
"max_mem",
",",
"max_iter",
"=",
"4",
",",
"eig_update",
"=",
"False",
")",
"CLOG",
".",
"info",
"(",
"'Globals, loop {}:\\t{}'",
".",
"format",
"(",
"a",
",",
"s",
".",
"error",
")",
")",
"if",
"desc",
"is",
"not",
"None",
":",
"states",
".",
"save",
"(",
"s",
",",
"desc",
"=",
"desc",
")",
"#2. Min particles",
"do_levmarq_all_particle_groups",
"(",
"s",
",",
"max_iter",
"=",
"1",
",",
"max_mem",
"=",
"max_mem",
")",
"CLOG",
".",
"info",
"(",
"'Particles, loop {}:\\t{}'",
".",
"format",
"(",
"a",
",",
"s",
".",
"error",
")",
")",
"if",
"desc",
"is",
"not",
"None",
":",
"states",
".",
"save",
"(",
"s",
",",
"desc",
"=",
"desc",
")",
"#3. Append vals, line min:",
"values",
".",
"append",
"(",
"np",
".",
"copy",
"(",
"s",
".",
"state",
"[",
"s",
".",
"params",
"]",
")",
")",
"# dv = (np.array(values[1:]) - np.array(values[0]))[-3:]",
"# do_levmarq_n_directions(s, dv, damping=1e-2, max_iter=2, errtol=3e-4)",
"# CLOG.info('Line min., loop {}:\\t{}'.format(a, s.error))",
"# if desc is not None:",
"# states.save(s, desc=desc)",
"#4. terminate?",
"new_err",
"=",
"s",
".",
"error",
"derr",
"=",
"start_err",
"-",
"new_err",
"dobreak",
"=",
"(",
"derr",
"/",
"new_err",
"<",
"fractol",
")",
"or",
"(",
"derr",
"<",
"errtol",
")",
"if",
"dobreak",
":",
"break",
"if",
"dowarn",
"and",
"(",
"not",
"dobreak",
")",
":",
"CLOG",
".",
"warn",
"(",
"'finish() did not converge; consider re-running'",
")",
"return",
"{",
"'converged'",
":",
"dobreak",
",",
"'loop_values'",
":",
"np",
".",
"array",
"(",
"values",
")",
"}"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
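The loop-termination test shared by burn() and finish() above is small enough to state on its own. The helper below is an illustration only, not part of the module, showing how the fractional and absolute tolerances interact.

def should_stop(start_err, new_err, fractol, errtol):
    # Same test as in burn()/finish(): stop when either the fractional or
    # the absolute decrease in error falls below its tolerance.
    derr = start_err - new_err
    return (derr / new_err < fractol) or (derr < errtol)

# With finish()'s defaults (fractol=1e-7, errtol=1e-3), an error drop from
# 100.0 to 99.9995 triggers the absolute tolerance even though the
# fractional change (about 5e-6) is still above fractol:
assert should_stop(100.0, 99.9995, fractol=1e-7, errtol=1e-3)
assert not should_stop(100.0, 99.0, fractol=1e-7, errtol=1e-3)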
fit_comp
|
Fits a new component to an old component
Calls do_levmarq to match the .get() fields of the two objects. The
parameters of new_comp are modified in place.
Parameters
----------
new_comp : :class:`peri.comps.comp`
The new object, whose parameters to update to fit the field of
`old_comp`. Must have a .get() attribute which returns an ndarray
old_comp : peri.comp
The old ilm to match to.
Other Parameters
----------------
Any keyword arguments to be passed to the optimizer LMGlobals
through do_levmarq.
See Also
--------
do_levmarq : Levenberg-Marquardt minimization using a random subset
of the image pixels.
|
peri/opt/optimize.py
|
def fit_comp(new_comp, old_comp, **kwargs):
"""
Fits a new component to an old component
Calls do_levmarq to match the .get() fields of the two objects. The
parameters of new_comp are modified in place.
Parameters
----------
new_comp : :class:`peri.comps.comp`
The new object, whose parameters to update to fit the field of
`old_comp`. Must have a .get() attribute which returns an ndarray
old_comp : peri.comp
The old ilm to match to.
Other Parameters
----------------
Any keyword arguments to be passed to the optimizer LMGlobals
through do_levmarq.
See Also
--------
do_levmarq : Levenberg-Marquardt minimization using a random subset
of the image pixels.
"""
#resetting the category to ilm:
new_cat = new_comp.category
new_comp.category = 'ilm'
fake_s = states.ImageState(Image(old_comp.get().copy()), [new_comp], pad=0,
mdl=mdl.SmoothFieldModel())
do_levmarq(fake_s, new_comp.params, **kwargs)
new_comp.category = new_cat
|
def fit_comp(new_comp, old_comp, **kwargs):
"""
Fits a new component to an old component
Calls do_levmarq to match the .get() fields of the two objects. The
parameters of new_comp are modified in place.
Parameters
----------
new_comp : :class:`peri.comps.comp`
The new object, whose parameters to update to fit the field of
`old_comp`. Must have a .get() attribute which returns an ndarray
old_comp : peri.comp
The old ilm to match to.
Other Parameters
----------------
Any keyword arguments to be passed to the optimizer LMGlobals
through do_levmarq.
See Also
--------
do_levmarq : Levenberg-Marquardt minimization using a random subset
of the image pixels.
"""
#resetting the category to ilm:
new_cat = new_comp.category
new_comp.category = 'ilm'
fake_s = states.ImageState(Image(old_comp.get().copy()), [new_comp], pad=0,
mdl=mdl.SmoothFieldModel())
do_levmarq(fake_s, new_comp.params, **kwargs)
new_comp.category = new_cat
|
[
"Fits",
"a",
"new",
"component",
"to",
"an",
"old",
"component"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2730-L2761
|
[
"def",
"fit_comp",
"(",
"new_comp",
",",
"old_comp",
",",
"*",
"*",
"kwargs",
")",
":",
"#resetting the category to ilm:",
"new_cat",
"=",
"new_comp",
".",
"category",
"new_comp",
".",
"category",
"=",
"'ilm'",
"fake_s",
"=",
"states",
".",
"ImageState",
"(",
"Image",
"(",
"old_comp",
".",
"get",
"(",
")",
".",
"copy",
"(",
")",
")",
",",
"[",
"new_comp",
"]",
",",
"pad",
"=",
"0",
",",
"mdl",
"=",
"mdl",
".",
"SmoothFieldModel",
"(",
")",
")",
"do_levmarq",
"(",
"fake_s",
",",
"new_comp",
".",
"params",
",",
"*",
"*",
"kwargs",
")",
"new_comp",
".",
"category",
"=",
"new_cat"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
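A hedged usage sketch for fit_comp() above: fitting a freshly built illumination component to the one in an existing state. The st.get('ilm') call mirrors the s.get('psf') call seen elsewhere in this file; constructing new_ilm and swapping it back into the state are outside the scope of this sketch.

import peri.opt.optimize as opt

def match_new_ilm(st, new_ilm, **kwargs):
    # Fit the new ilm component to the field of the one currently in the
    # state; kwargs (e.g. max_iter) pass straight through to do_levmarq.
    old_ilm = st.get('ilm')
    opt.fit_comp(new_ilm, old_ilm, **kwargs)
    return new_ilm  # swap into the state separately, using the state's own API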
LMEngine.reset
|
Keeps all user supplied options the same, but resets counters etc.
|
peri/opt/optimize.py
|
def reset(self, new_damping=None):
"""
Keeps all user supplied options the same, but resets counters etc.
"""
self._num_iter = 0
self._inner_run_counter = 0
self._J_update_counter = self.update_J_frequency
self._fresh_JTJ = False
self._has_run = False
if new_damping is not None:
self.damping = np.array(new_damping).astype('float')
self._set_err_paramvals()
|
def reset(self, new_damping=None):
"""
Keeps all user supplied options the same, but resets counters etc.
"""
self._num_iter = 0
self._inner_run_counter = 0
self._J_update_counter = self.update_J_frequency
self._fresh_JTJ = False
self._has_run = False
if new_damping is not None:
self.damping = np.array(new_damping).astype('float')
self._set_err_paramvals()
|
[
"Keeps",
"all",
"user",
"supplied",
"options",
"the",
"same",
"but",
"resets",
"counters",
"etc",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L690-L701
|
[
"def",
"reset",
"(",
"self",
",",
"new_damping",
"=",
"None",
")",
":",
"self",
".",
"_num_iter",
"=",
"0",
"self",
".",
"_inner_run_counter",
"=",
"0",
"self",
".",
"_J_update_counter",
"=",
"self",
".",
"update_J_frequency",
"self",
".",
"_fresh_JTJ",
"=",
"False",
"self",
".",
"_has_run",
"=",
"False",
"if",
"new_damping",
"is",
"not",
"None",
":",
"self",
".",
"damping",
"=",
"np",
".",
"array",
"(",
"new_damping",
")",
".",
"astype",
"(",
"'float'",
")",
"self",
".",
"_set_err_paramvals",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
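reset() makes a single engine object reusable across several runs. A short sketch, assuming lm is any concrete LMEngine instance (for example one of the LMGlobals objects used by the module-level functions above):

def anneal(lm, dampings=(1.0, 1e-1, 1e-2)):
    # Re-run the same engine at successively smaller damping values.
    for damping in dampings:
        lm.reset(new_damping=damping)  # zero iteration counters, keep user options
        lm.do_run_2()                  # continue from the current parameter values
    return lm.error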
LMEngine.do_run_1
|
LM run, evaluating 1 step at a time.
Broyden or eigendirection updates replace full-J updates until
a full-J update occurs. Does not run with the calculated J (no
internal run).
|
peri/opt/optimize.py
|
def do_run_1(self):
"""
LM run, evaluating 1 step at a time.
Broyden or eigendirection updates replace full-J updates until
a full-J update occurs. Does not run with the calculated J (no
internal run).
"""
while not self.check_terminate():
self._has_run = True
self._run1()
self._num_iter += 1; self._inner_run_counter += 1
|
def do_run_1(self):
"""
LM run, evaluating 1 step at a time.
Broyden or eigendirection updates replace full-J updates until
a full-J update occurs. Does not run with the calculated J (no
internal run).
"""
while not self.check_terminate():
self._has_run = True
self._run1()
self._num_iter += 1; self._inner_run_counter += 1
|
[
"LM",
"run",
"evaluating",
"1",
"step",
"at",
"a",
"time",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L722-L733
|
[
"def",
"do_run_1",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"check_terminate",
"(",
")",
":",
"self",
".",
"_has_run",
"=",
"True",
"self",
".",
"_run1",
"(",
")",
"self",
".",
"_num_iter",
"+=",
"1",
"self",
".",
"_inner_run_counter",
"+=",
"1"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
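The scheme do_run_1() describes (take one damped step, keep it if the error drops, otherwise raise the damping and retry) is easiest to see outside the engine. The toy below is a self-contained illustration on a linear least-squares problem, not peri code; peri's damping and sign conventions may differ.

import numpy as np

def toy_lm_run_1(J, data, p0, damping=1.0, max_iter=20, dfac=10.0):
    # Model: data ~ J.T.dot(p), with J stored as (n_params, n_data) as in
    # this module. One accepted (or abandoned) step per outer iteration.
    p = p0.copy()
    err = np.sum((data - J.T.dot(p))**2)
    for _ in range(max_iter):
        r = data - J.T.dot(p)          # residuals at the current parameters
        grad = J.dot(r)
        JTJ = J.dot(J.T)
        for _try in range(10):
            step = np.linalg.solve(JTJ + damping*np.eye(p.size), grad)
            new_err = np.sum((data - J.T.dot(p + step))**2)
            if new_err < err:          # good step: accept it, relax the damping
                p, err, damping = p + step, new_err, damping/dfac
                break
            damping *= dfac            # bad step: increase damping and retry
    return p, err

rng = np.random.RandomState(0)
J = rng.randn(3, 50)
true_p = np.array([1.0, -2.0, 0.5])
data = J.T.dot(true_p) + 0.01*rng.randn(50)
p_fit, _ = toy_lm_run_1(J, data, p0=np.zeros(3))
assert np.allclose(p_fit, true_p, atol=0.1)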
LMEngine._run1
|
workhorse for do_run_1
|
peri/opt/optimize.py
|
def _run1(self):
"""workhorse for do_run_1"""
if self.check_update_J():
self.update_J()
else:
if self.check_Broyden_J():
self.update_Broyden_J()
if self.check_update_eig_J():
self.update_eig_J()
#1. Assuming that J starts updated:
delta_vals = self.find_LM_updates(self.calc_grad())
#2. Increase damping until we get a good step:
er1 = self.update_function(self.param_vals + delta_vals)
good_step = (find_best_step([self.error, er1]) == 1)
if not good_step:
er0 = self.update_function(self.param_vals)
if np.abs(er0 -self.error)/er0 > 1e-7:
raise RuntimeError('Function updates are not exact.')
CLOG.debug('Bad step, increasing damping')
CLOG.debug('\t\t%f\t%f' % (self.error, er1))
grad = self.calc_grad()
for _try in range(self._max_inner_loop):
self.increase_damping()
delta_vals = self.find_LM_updates(grad)
er1 = self.update_function(self.param_vals + delta_vals)
good_step = (find_best_step([self.error, er1]) == 1)
if good_step:
break
else:
er0 = self.update_function(self.param_vals)
CLOG.warn('Stuck!')
if np.abs(er0 -self.error)/er0 > 1e-7:
raise RuntimeError('Function updates are not exact.')
#state is updated, now params:
if good_step:
self._last_error = self.error
self.error = er1
CLOG.debug('Good step\t%f\t%f' % (self._last_error, self.error))
self.update_param_vals(delta_vals, incremental=True)
self.decrease_damping()
|
def _run1(self):
"""workhorse for do_run_1"""
if self.check_update_J():
self.update_J()
else:
if self.check_Broyden_J():
self.update_Broyden_J()
if self.check_update_eig_J():
self.update_eig_J()
#1. Assuming that J starts updated:
delta_vals = self.find_LM_updates(self.calc_grad())
#2. Increase damping until we get a good step:
er1 = self.update_function(self.param_vals + delta_vals)
good_step = (find_best_step([self.error, er1]) == 1)
if not good_step:
er0 = self.update_function(self.param_vals)
if np.abs(er0 -self.error)/er0 > 1e-7:
raise RuntimeError('Function updates are not exact.')
CLOG.debug('Bad step, increasing damping')
CLOG.debug('\t\t%f\t%f' % (self.error, er1))
grad = self.calc_grad()
for _try in range(self._max_inner_loop):
self.increase_damping()
delta_vals = self.find_LM_updates(grad)
er1 = self.update_function(self.param_vals + delta_vals)
good_step = (find_best_step([self.error, er1]) == 1)
if good_step:
break
else:
er0 = self.update_function(self.param_vals)
CLOG.warn('Stuck!')
if np.abs(er0 -self.error)/er0 > 1e-7:
raise RuntimeError('Function updates are not exact.')
#state is updated, now params:
if good_step:
self._last_error = self.error
self.error = er1
CLOG.debug('Good step\t%f\t%f' % (self._last_error, self.error))
self.update_param_vals(delta_vals, incremental=True)
self.decrease_damping()
|
[
"workhorse",
"for",
"do_run_1"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L735-L777
|
[
"def",
"_run1",
"(",
"self",
")",
":",
"if",
"self",
".",
"check_update_J",
"(",
")",
":",
"self",
".",
"update_J",
"(",
")",
"else",
":",
"if",
"self",
".",
"check_Broyden_J",
"(",
")",
":",
"self",
".",
"update_Broyden_J",
"(",
")",
"if",
"self",
".",
"check_update_eig_J",
"(",
")",
":",
"self",
".",
"update_eig_J",
"(",
")",
"#1. Assuming that J starts updated:",
"delta_vals",
"=",
"self",
".",
"find_LM_updates",
"(",
"self",
".",
"calc_grad",
"(",
")",
")",
"#2. Increase damping until we get a good step:",
"er1",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_vals",
")",
"good_step",
"=",
"(",
"find_best_step",
"(",
"[",
"self",
".",
"error",
",",
"er1",
"]",
")",
"==",
"1",
")",
"if",
"not",
"good_step",
":",
"er0",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")",
"if",
"np",
".",
"abs",
"(",
"er0",
"-",
"self",
".",
"error",
")",
"/",
"er0",
">",
"1e-7",
":",
"raise",
"RuntimeError",
"(",
"'Function updates are not exact.'",
")",
"CLOG",
".",
"debug",
"(",
"'Bad step, increasing damping'",
")",
"CLOG",
".",
"debug",
"(",
"'\\t\\t%f\\t%f'",
"%",
"(",
"self",
".",
"error",
",",
"er1",
")",
")",
"grad",
"=",
"self",
".",
"calc_grad",
"(",
")",
"for",
"_try",
"in",
"range",
"(",
"self",
".",
"_max_inner_loop",
")",
":",
"self",
".",
"increase_damping",
"(",
")",
"delta_vals",
"=",
"self",
".",
"find_LM_updates",
"(",
"grad",
")",
"er1",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_vals",
")",
"good_step",
"=",
"(",
"find_best_step",
"(",
"[",
"self",
".",
"error",
",",
"er1",
"]",
")",
"==",
"1",
")",
"if",
"good_step",
":",
"break",
"else",
":",
"er0",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")",
"CLOG",
".",
"warn",
"(",
"'Stuck!'",
")",
"if",
"np",
".",
"abs",
"(",
"er0",
"-",
"self",
".",
"error",
")",
"/",
"er0",
">",
"1e-7",
":",
"raise",
"RuntimeError",
"(",
"'Function updates are not exact.'",
")",
"#state is updated, now params:",
"if",
"good_step",
":",
"self",
".",
"_last_error",
"=",
"self",
".",
"error",
"self",
".",
"error",
"=",
"er1",
"CLOG",
".",
"debug",
"(",
"'Good step\\t%f\\t%f'",
"%",
"(",
"self",
".",
"_last_error",
",",
"self",
".",
"error",
")",
")",
"self",
".",
"update_param_vals",
"(",
"delta_vals",
",",
"incremental",
"=",
"True",
")",
"self",
".",
"decrease_damping",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
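_run1() above guards against stateful models by re-evaluating at the unchanged parameters after a rejected step and insisting the cached error is reproduced. Factored out for clarity (illustration only, using the attribute names visible in the method bodies):

def check_exact_updates(lm, rtol=1e-7):
    # Re-evaluate the model at the engine's current parameters; if the cached
    # error is not reproduced to within rtol, the model's updates are not
    # exactly reversible and further LM steps cannot be trusted.
    er0 = lm.update_function(lm.param_vals)
    if abs(er0 - lm.error) / er0 > rtol:
        raise RuntimeError('Function updates are not exact.')
    return er0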
LMEngine.do_run_2
|
LM run evaluating 2 steps (damped and not) and choosing the best.
After finding the best of 2 steps, runs with that damping + Broyden
or eigendirection updates, until deciding to do a full-J update.
Only changes damping after full-J updates.
|
peri/opt/optimize.py
|
def do_run_2(self):
"""
LM run evaluating 2 steps (damped and not) and choosing the best.
After finding the best of 2 steps, runs with that damping + Broyden
or eigendirection updates, until deciding to do a full-J update.
Only changes damping after full-J updates.
"""
while not self.check_terminate():
self._has_run = True
self._run2()
self._num_iter += 1
|
def do_run_2(self):
"""
LM run evaluating 2 steps (damped and not) and choosing the best.
After finding the best of 2 steps, runs with that damping + Broyden
or eigendirection updates, until deciding to do a full-J update.
Only changes damping after full-J updates.
"""
while not self.check_terminate():
self._has_run = True
self._run2()
self._num_iter += 1
|
[
"LM",
"run",
"evaluating",
"2",
"steps",
"(",
"damped",
"and",
"not",
")",
"and",
"choosing",
"the",
"best",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L779-L790
|
[
"def",
"do_run_2",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"check_terminate",
"(",
")",
":",
"self",
".",
"_has_run",
"=",
"True",
"self",
".",
"_run2",
"(",
")",
"self",
".",
"_num_iter",
"+=",
"1"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
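do_run_2() delegates the three-way comparison (no step, the step at the current damping, the step at a decreased damping) to find_best_step. The toy stand-in below shows the branching; peri's tie-breaking may differ.

def choose_step(err_now, err_damped, err_less_damped):
    # 0: reject both trial steps (raise damping); 1: accept the step at the
    # current damping; 2: accept the step at the decreased damping.
    triplet = [err_now, err_damped, err_less_damped]
    return triplet.index(min(triplet))

assert choose_step(10.0, 9.0, 9.5) == 1   # good step, keep the current damping
assert choose_step(10.0, 9.0, 8.5) == 2   # even better with less damping
assert choose_step(10.0, 11.0, 12.0) == 0 # both trials bad: increase damping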
LMEngine._run2
|
Workhorse for do_run_2
|
peri/opt/optimize.py
|
def _run2(self):
"""Workhorse for do_run_2"""
if self.check_update_J():
self.update_J()
else:
if self.check_Broyden_J():
self.update_Broyden_J()
if self.check_update_eig_J():
self.update_eig_J()
#0. Find _last_residuals, _last_error, etc:
_last_residuals = self.calc_residuals().copy()
_last_error = 1*self.error
_last_vals = self.param_vals.copy()
#1. Calculate 2 possible steps
delta_params_1 = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False)
self.decrease_damping()
delta_params_2 = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False)
self.decrease_damping(undo_decrease=True)
#2. Check which step is best:
er1 = self.update_function(self.param_vals + delta_params_1)
er2 = self.update_function(self.param_vals + delta_params_2)
triplet = (self.error, er1, er2)
best_step = find_best_step(triplet)
if best_step == 0:
#Both bad steps, put back & increase damping:
_ = self.update_function(self.param_vals.copy())
grad = self.calc_grad()
CLOG.debug('Bad step, increasing damping')
CLOG.debug('%f\t%f\t%f' % triplet)
for _try in range(self._max_inner_loop):
self.increase_damping()
delta_vals = self.find_LM_updates(grad)
er_new = self.update_function(self.param_vals + delta_vals)
good_step = er_new < self.error
if good_step:
#Update params, error, break:
self.update_param_vals(delta_vals, incremental=True)
self.error = er_new
CLOG.debug('Sufficiently increased damping')
CLOG.debug('%f\t%f' % (triplet[0], self.error))
break
else: #for-break-else
#Throw a warning, put back the parameters
CLOG.warn('Stuck!')
self.error = self.update_function(self.param_vals.copy())
elif best_step == 1:
#er1 <= er2:
good_step = True
CLOG.debug('Good step, same damping')
CLOG.debug('%f\t%f\t%f' % triplet)
#Update to er1 params:
er1_1 = self.update_function(self.param_vals + delta_params_1)
if np.abs(er1_1 - er1) > 1e-6:
raise RuntimeError('Function updates are not exact.')
self.update_param_vals(delta_params_1, incremental=True)
self.error = er1
elif best_step == 2:
#er2 < er1:
good_step = True
self.error = er2
CLOG.debug('Good step, decreasing damping')
CLOG.debug('%f\t%f\t%f' % triplet)
#-we're already at the correct parameters
self.update_param_vals(delta_params_2, incremental=True)
self.decrease_damping()
#3. Run with current J, damping; update what we need to::
if good_step:
self._last_residuals = _last_residuals
self._last_error = _last_error
self._last_vals = _last_vals
self.error
self.do_internal_run(initial_count=1)
|
def _run2(self):
"""Workhorse for do_run_2"""
if self.check_update_J():
self.update_J()
else:
if self.check_Broyden_J():
self.update_Broyden_J()
if self.check_update_eig_J():
self.update_eig_J()
#0. Find _last_residuals, _last_error, etc:
_last_residuals = self.calc_residuals().copy()
_last_error = 1*self.error
_last_vals = self.param_vals.copy()
#1. Calculate 2 possible steps
delta_params_1 = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False)
self.decrease_damping()
delta_params_2 = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False)
self.decrease_damping(undo_decrease=True)
#2. Check which step is best:
er1 = self.update_function(self.param_vals + delta_params_1)
er2 = self.update_function(self.param_vals + delta_params_2)
triplet = (self.error, er1, er2)
best_step = find_best_step(triplet)
if best_step == 0:
#Both bad steps, put back & increase damping:
_ = self.update_function(self.param_vals.copy())
grad = self.calc_grad()
CLOG.debug('Bad step, increasing damping')
CLOG.debug('%f\t%f\t%f' % triplet)
for _try in range(self._max_inner_loop):
self.increase_damping()
delta_vals = self.find_LM_updates(grad)
er_new = self.update_function(self.param_vals + delta_vals)
good_step = er_new < self.error
if good_step:
#Update params, error, break:
self.update_param_vals(delta_vals, incremental=True)
self.error = er_new
CLOG.debug('Sufficiently increased damping')
CLOG.debug('%f\t%f' % (triplet[0], self.error))
break
else: #for-break-else
#Throw a warning, put back the parameters
CLOG.warn('Stuck!')
self.error = self.update_function(self.param_vals.copy())
elif best_step == 1:
#er1 <= er2:
good_step = True
CLOG.debug('Good step, same damping')
CLOG.debug('%f\t%f\t%f' % triplet)
#Update to er1 params:
er1_1 = self.update_function(self.param_vals + delta_params_1)
if np.abs(er1_1 - er1) > 1e-6:
raise RuntimeError('Function updates are not exact.')
self.update_param_vals(delta_params_1, incremental=True)
self.error = er1
elif best_step == 2:
#er2 < er1:
good_step = True
self.error = er2
CLOG.debug('Good step, decreasing damping')
CLOG.debug('%f\t%f\t%f' % triplet)
#-we're already at the correct parameters
self.update_param_vals(delta_params_2, incremental=True)
self.decrease_damping()
#3. Run with current J, damping; update what we need to::
if good_step:
self._last_residuals = _last_residuals
self._last_error = _last_error
self._last_vals = _last_vals
self.error
self.do_internal_run(initial_count=1)
|
[
"Workhorse",
"for",
"do_run_2"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L792-L872
|
[
"def",
"_run2",
"(",
"self",
")",
":",
"if",
"self",
".",
"check_update_J",
"(",
")",
":",
"self",
".",
"update_J",
"(",
")",
"else",
":",
"if",
"self",
".",
"check_Broyden_J",
"(",
")",
":",
"self",
".",
"update_Broyden_J",
"(",
")",
"if",
"self",
".",
"check_update_eig_J",
"(",
")",
":",
"self",
".",
"update_eig_J",
"(",
")",
"#0. Find _last_residuals, _last_error, etc:",
"_last_residuals",
"=",
"self",
".",
"calc_residuals",
"(",
")",
".",
"copy",
"(",
")",
"_last_error",
"=",
"1",
"*",
"self",
".",
"error",
"_last_vals",
"=",
"self",
".",
"param_vals",
".",
"copy",
"(",
")",
"#1. Calculate 2 possible steps",
"delta_params_1",
"=",
"self",
".",
"find_LM_updates",
"(",
"self",
".",
"calc_grad",
"(",
")",
",",
"do_correct_damping",
"=",
"False",
")",
"self",
".",
"decrease_damping",
"(",
")",
"delta_params_2",
"=",
"self",
".",
"find_LM_updates",
"(",
"self",
".",
"calc_grad",
"(",
")",
",",
"do_correct_damping",
"=",
"False",
")",
"self",
".",
"decrease_damping",
"(",
"undo_decrease",
"=",
"True",
")",
"#2. Check which step is best:",
"er1",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_params_1",
")",
"er2",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_params_2",
")",
"triplet",
"=",
"(",
"self",
".",
"error",
",",
"er1",
",",
"er2",
")",
"best_step",
"=",
"find_best_step",
"(",
"triplet",
")",
"if",
"best_step",
"==",
"0",
":",
"#Both bad steps, put back & increase damping:",
"_",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
".",
"copy",
"(",
")",
")",
"grad",
"=",
"self",
".",
"calc_grad",
"(",
")",
"CLOG",
".",
"debug",
"(",
"'Bad step, increasing damping'",
")",
"CLOG",
".",
"debug",
"(",
"'%f\\t%f\\t%f'",
"%",
"triplet",
")",
"for",
"_try",
"in",
"range",
"(",
"self",
".",
"_max_inner_loop",
")",
":",
"self",
".",
"increase_damping",
"(",
")",
"delta_vals",
"=",
"self",
".",
"find_LM_updates",
"(",
"grad",
")",
"er_new",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_vals",
")",
"good_step",
"=",
"er_new",
"<",
"self",
".",
"error",
"if",
"good_step",
":",
"#Update params, error, break:",
"self",
".",
"update_param_vals",
"(",
"delta_vals",
",",
"incremental",
"=",
"True",
")",
"self",
".",
"error",
"=",
"er_new",
"CLOG",
".",
"debug",
"(",
"'Sufficiently increased damping'",
")",
"CLOG",
".",
"debug",
"(",
"'%f\\t%f'",
"%",
"(",
"triplet",
"[",
"0",
"]",
",",
"self",
".",
"error",
")",
")",
"break",
"else",
":",
"#for-break-else",
"#Throw a warning, put back the parameters",
"CLOG",
".",
"warn",
"(",
"'Stuck!'",
")",
"self",
".",
"error",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
".",
"copy",
"(",
")",
")",
"elif",
"best_step",
"==",
"1",
":",
"#er1 <= er2:",
"good_step",
"=",
"True",
"CLOG",
".",
"debug",
"(",
"'Good step, same damping'",
")",
"CLOG",
".",
"debug",
"(",
"'%f\\t%f\\t%f'",
"%",
"triplet",
")",
"#Update to er1 params:",
"er1_1",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_params_1",
")",
"if",
"np",
".",
"abs",
"(",
"er1_1",
"-",
"er1",
")",
">",
"1e-6",
":",
"raise",
"RuntimeError",
"(",
"'Function updates are not exact.'",
")",
"self",
".",
"update_param_vals",
"(",
"delta_params_1",
",",
"incremental",
"=",
"True",
")",
"self",
".",
"error",
"=",
"er1",
"elif",
"best_step",
"==",
"2",
":",
"#er2 < er1:",
"good_step",
"=",
"True",
"self",
".",
"error",
"=",
"er2",
"CLOG",
".",
"debug",
"(",
"'Good step, decreasing damping'",
")",
"CLOG",
".",
"debug",
"(",
"'%f\\t%f\\t%f'",
"%",
"triplet",
")",
"#-we're already at the correct parameters",
"self",
".",
"update_param_vals",
"(",
"delta_params_2",
",",
"incremental",
"=",
"True",
")",
"self",
".",
"decrease_damping",
"(",
")",
"#3. Run with current J, damping; update what we need to::",
"if",
"good_step",
":",
"self",
".",
"_last_residuals",
"=",
"_last_residuals",
"self",
".",
"_last_error",
"=",
"_last_error",
"self",
".",
"_last_vals",
"=",
"_last_vals",
"self",
".",
"error",
"self",
".",
"do_internal_run",
"(",
"initial_count",
"=",
"1",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
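Both trial steps in _run2() above come from solving the damped normal equations at two damping levels. Written out generically (a sketch only: the damping matrix peri builds in _calc_damped_jtj, and its sign conventions, are not shown in this record and may differ; the Marquardt-style diagonal scaling below is an assumption):

import numpy as np

def damped_lm_step(J, r, damping):
    # J stored as (n_params, n_data), r the residual vector.
    JTJ = J.dot(J.T)
    grad = J.dot(r)
    damped = JTJ + damping * np.diag(np.diag(JTJ))  # assumed scaling
    return np.linalg.solve(damped, grad)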
LMEngine.do_internal_run
|
Takes more steps without calculating J again.
Given a fixed damping, J, JTJ, iterates calculating steps, with
optional Broyden or eigendirection updates. Iterates either until
a bad step is taken or for self.run_length times.
Called internally by do_run_2() but is also useful on its own.
Parameters
----------
initial_count : Int, optional
The initial count of the run. Default is 0. Increasing from
0 effectively temporarily decreases run_length.
subblock : None or np.ndarray of bools, optional
If not None, a boolean mask which determines which sub-
block of parameters to run over. Default is None, i.e.
all the parameters.
update_derr : Bool, optional
Set to False to not update the variable that determines
delta_err, preventing premature termination through errtol.
Notes
-----
It might be good to do something similar to update_derr with the
parameter values, but this is trickier because of Broyden updates
and _fresh_J.
|
peri/opt/optimize.py
|
def do_internal_run(self, initial_count=0, subblock=None, update_derr=True):
"""
Takes more steps without calculating J again.
Given a fixed damping, J, JTJ, iterates calculating steps, with
optional Broyden or eigendirection updates. Iterates either until
a bad step is taken or for self.run_length times.
Called internally by do_run_2() but is also useful on its own.
Parameters
----------
initial_count : Int, optional
The initial count of the run. Default is 0. Increasing from
0 effectively temporarily decreases run_length.
subblock : None or np.ndarray of bools, optional
If not None, a boolean mask which determines which sub-
block of parameters to run over. Default is None, i.e.
all the parameters.
update_derr : Bool, optional
Set to False to not update the variable that determines
delta_err, preventing premature termination through errtol.
Notes
-----
It might be good to do something similar to update_derr with the
parameter values, but this is trickier because of Broyden updates
and _fresh_J.
"""
self._inner_run_counter = initial_count; good_step = True
n_good_steps = 0
CLOG.debug('Running...')
_last_residuals = self.calc_residuals().copy()
while ((self._inner_run_counter < self.run_length) & good_step &
(not self.check_terminate())):
#1. Checking if we update J
if self.check_Broyden_J() and self._inner_run_counter != 0:
self.update_Broyden_J()
if self.check_update_eig_J() and self._inner_run_counter != 0:
self.update_eig_J()
#2. Getting parameters, error
er0 = 1*self.error
delta_vals = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False, subblock=subblock)
er1 = self.update_function(self.param_vals + delta_vals)
good_step = er1 < er0
if good_step:
n_good_steps += 1
CLOG.debug('%f\t%f' % (er0, er1))
#Updating:
self.update_param_vals(delta_vals, incremental=True)
self._last_residuals = _last_residuals.copy()
if update_derr:
self._last_error = er0
self.error = er1
_last_residuals = self.calc_residuals().copy()
else:
er0_0 = self.update_function(self.param_vals)
CLOG.debug('Bad step!')
if np.abs(er0 - er0_0) > 1e-6:
raise RuntimeError('Function updates are not exact.')
self._inner_run_counter += 1
return n_good_steps
|
def do_internal_run(self, initial_count=0, subblock=None, update_derr=True):
"""
Takes more steps without calculating J again.
Given a fixed damping, J, JTJ, iterates calculating steps, with
optional Broyden or eigendirection updates. Iterates either until
a bad step is taken or for self.run_length times.
Called internally by do_run_2() but is also useful on its own.
Parameters
----------
initial_count : Int, optional
The initial count of the run. Default is 0. Increasing from
0 effectively temporarily decreases run_length.
subblock : None or np.ndarray of bools, optional
If not None, a boolean mask which determines which sub-
block of parameters to run over. Default is None, i.e.
all the parameters.
update_derr : Bool, optional
Set to False to not update the variable that determines
delta_err, preventing premature termination through errtol.
Notes
-----
It might be good to do something similar to update_derr with the
parameter values, but this is trickier because of Broyden updates
and _fresh_J.
"""
self._inner_run_counter = initial_count; good_step = True
n_good_steps = 0
CLOG.debug('Running...')
_last_residuals = self.calc_residuals().copy()
while ((self._inner_run_counter < self.run_length) & good_step &
(not self.check_terminate())):
#1. Checking if we update J
if self.check_Broyden_J() and self._inner_run_counter != 0:
self.update_Broyden_J()
if self.check_update_eig_J() and self._inner_run_counter != 0:
self.update_eig_J()
#2. Getting parameters, error
er0 = 1*self.error
delta_vals = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False, subblock=subblock)
er1 = self.update_function(self.param_vals + delta_vals)
good_step = er1 < er0
if good_step:
n_good_steps += 1
CLOG.debug('%f\t%f' % (er0, er1))
#Updating:
self.update_param_vals(delta_vals, incremental=True)
self._last_residuals = _last_residuals.copy()
if update_derr:
self._last_error = er0
self.error = er1
_last_residuals = self.calc_residuals().copy()
else:
er0_0 = self.update_function(self.param_vals)
CLOG.debug('Bad step!')
if np.abs(er0 - er0_0) > 1e-6:
raise RuntimeError('Function updates are not exact.')
self._inner_run_counter += 1
return n_good_steps
|
[
"Takes",
"more",
"steps",
"without",
"calculating",
"J",
"again",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L874-L940
|
[
"def",
"do_internal_run",
"(",
"self",
",",
"initial_count",
"=",
"0",
",",
"subblock",
"=",
"None",
",",
"update_derr",
"=",
"True",
")",
":",
"self",
".",
"_inner_run_counter",
"=",
"initial_count",
"good_step",
"=",
"True",
"n_good_steps",
"=",
"0",
"CLOG",
".",
"debug",
"(",
"'Running...'",
")",
"_last_residuals",
"=",
"self",
".",
"calc_residuals",
"(",
")",
".",
"copy",
"(",
")",
"while",
"(",
"(",
"self",
".",
"_inner_run_counter",
"<",
"self",
".",
"run_length",
")",
"&",
"good_step",
"&",
"(",
"not",
"self",
".",
"check_terminate",
"(",
")",
")",
")",
":",
"#1. Checking if we update J",
"if",
"self",
".",
"check_Broyden_J",
"(",
")",
"and",
"self",
".",
"_inner_run_counter",
"!=",
"0",
":",
"self",
".",
"update_Broyden_J",
"(",
")",
"if",
"self",
".",
"check_update_eig_J",
"(",
")",
"and",
"self",
".",
"_inner_run_counter",
"!=",
"0",
":",
"self",
".",
"update_eig_J",
"(",
")",
"#2. Getting parameters, error",
"er0",
"=",
"1",
"*",
"self",
".",
"error",
"delta_vals",
"=",
"self",
".",
"find_LM_updates",
"(",
"self",
".",
"calc_grad",
"(",
")",
",",
"do_correct_damping",
"=",
"False",
",",
"subblock",
"=",
"subblock",
")",
"er1",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta_vals",
")",
"good_step",
"=",
"er1",
"<",
"er0",
"if",
"good_step",
":",
"n_good_steps",
"+=",
"1",
"CLOG",
".",
"debug",
"(",
"'%f\\t%f'",
"%",
"(",
"er0",
",",
"er1",
")",
")",
"#Updating:",
"self",
".",
"update_param_vals",
"(",
"delta_vals",
",",
"incremental",
"=",
"True",
")",
"self",
".",
"_last_residuals",
"=",
"_last_residuals",
".",
"copy",
"(",
")",
"if",
"update_derr",
":",
"self",
".",
"_last_error",
"=",
"er0",
"self",
".",
"error",
"=",
"er1",
"_last_residuals",
"=",
"self",
".",
"calc_residuals",
"(",
")",
".",
"copy",
"(",
")",
"else",
":",
"er0_0",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")",
"CLOG",
".",
"debug",
"(",
"'Bad step!'",
")",
"if",
"np",
".",
"abs",
"(",
"er0",
"-",
"er0_0",
")",
">",
"1e-6",
":",
"raise",
"RuntimeError",
"(",
"'Function updates are not exact.'",
")",
"self",
".",
"_inner_run_counter",
"+=",
"1",
"return",
"n_good_steps"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
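do_internal_run() above keeps stepping on a fixed J that is refreshed only by Broyden or eigendirection updates. The Broyden rank-1 refresh, in the (n_params, n_data) storage convention used in these records, looks roughly like this (generic sketch; peri's update_Broyden_J may normalize differently):

import numpy as np

def broyden_update(J, dparams, dresiduals):
    # Rank-1 correction so the updated Jacobian exactly reproduces the
    # observed residual change for the step just taken:
    # (J + correction).T.dot(dparams) == dresiduals.
    predicted = J.T.dot(dparams)
    correction = np.outer(dparams, dresiduals - predicted) / dparams.dot(dparams)
    return J + correction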
LMEngine.find_LM_updates
|
Calculates LM updates, with or without the acceleration correction.
Parameters
----------
grad : numpy.ndarray
The gradient of the model cost.
do_correct_damping : Bool, optional
If `self.use_accel`, then set to True to correct damping
        if the acceleration correction is too big. Default is True.
        Does nothing if `self.use_accel` is False.
subblock : slice, numpy.ndarray, or None, optional
        Set to a slice or a valid numpy.ndarray to use only a
certain subset of the parameters. Default is None, i.e.
use all the parameters.
Returns
-------
delta : numpy.ndarray
        The Levenberg-Marquardt step, relative to the old
parameters. Size is always self.param_vals.size.
|
peri/opt/optimize.py
|
def find_LM_updates(self, grad, do_correct_damping=True, subblock=None):
"""
Calculates LM updates, with or without the acceleration correction.
Parameters
----------
grad : numpy.ndarray
The gradient of the model cost.
do_correct_damping : Bool, optional
If `self.use_accel`, then set to True to correct damping
            if the acceleration correction is too big. Default is True.
            Does nothing if `self.use_accel` is False.
subblock : slice, numpy.ndarray, or None, optional
            Set to a slice or a valid numpy.ndarray to use only a
certain subset of the parameters. Default is None, i.e.
use all the parameters.
Returns
-------
delta : numpy.ndarray
            The Levenberg-Marquardt step, relative to the old
parameters. Size is always self.param_vals.size.
"""
if subblock is not None:
if (subblock.sum() == 0) or (subblock.size == 0):
CLOG.fatal('Empty subblock in find_LM_updates')
raise ValueError('Empty sub-block')
j = self.J[subblock]
JTJ = np.dot(j, j.T)
damped_JTJ = self._calc_damped_jtj(JTJ, subblock=subblock)
grad = grad[subblock] #select the subblock of the grad
else:
damped_JTJ = self._calc_damped_jtj(self.JTJ, subblock=subblock)
delta = self._calc_lm_step(damped_JTJ, grad, subblock=subblock)
if self.use_accel:
accel_correction = self.calc_accel_correction(damped_JTJ, delta)
nrm_d0 = np.sqrt(np.sum(delta**2))
nrm_corr = np.sqrt(np.sum(accel_correction**2))
CLOG.debug('|correction| / |LM step|\t%e' % (nrm_corr/nrm_d0))
if nrm_corr/nrm_d0 < self.max_accel_correction:
delta += accel_correction
elif do_correct_damping:
CLOG.debug('Untrustworthy step! Increasing damping...')
self.increase_damping()
damped_JTJ = self._calc_damped_jtj(self.JTJ, subblock=subblock)
delta = self._calc_lm_step(damped_JTJ, grad, subblock=subblock)
if np.any(np.isnan(delta)):
CLOG.fatal('Calculated steps have nans!?')
raise FloatingPointError('Calculated steps have nans!?')
return delta
|
def find_LM_updates(self, grad, do_correct_damping=True, subblock=None):
"""
Calculates LM updates, with or without the acceleration correction.
Parameters
----------
grad : numpy.ndarray
The gradient of the model cost.
do_correct_damping : Bool, optional
If `self.use_accel`, then set to True to correct damping
if the acceleration correction is too big. Default is True
Does nothing if `self.use_accel` is False
subblock : slice, numpy.ndarray, or None, optional
Set to a slice or a valid numpy.ndarray to use only a
certain subset of the parameters. Default is None, i.e.
use all the parameters.
Returns
-------
delta : numpy.ndarray
The Levenberg-Marquardt step, relative to the old
parameters. Size is always self.param_vals.size.
"""
if subblock is not None:
if (subblock.sum() == 0) or (subblock.size == 0):
CLOG.fatal('Empty subblock in find_LM_updates')
raise ValueError('Empty sub-block')
j = self.J[subblock]
JTJ = np.dot(j, j.T)
damped_JTJ = self._calc_damped_jtj(JTJ, subblock=subblock)
grad = grad[subblock] #select the subblock of the grad
else:
damped_JTJ = self._calc_damped_jtj(self.JTJ, subblock=subblock)
delta = self._calc_lm_step(damped_JTJ, grad, subblock=subblock)
if self.use_accel:
accel_correction = self.calc_accel_correction(damped_JTJ, delta)
nrm_d0 = np.sqrt(np.sum(delta**2))
nrm_corr = np.sqrt(np.sum(accel_correction**2))
CLOG.debug('|correction| / |LM step|\t%e' % (nrm_corr/nrm_d0))
if nrm_corr/nrm_d0 < self.max_accel_correction:
delta += accel_correction
elif do_correct_damping:
CLOG.debug('Untrustworthy step! Increasing damping...')
self.increase_damping()
damped_JTJ = self._calc_damped_jtj(self.JTJ, subblock=subblock)
delta = self._calc_lm_step(damped_JTJ, grad, subblock=subblock)
if np.any(np.isnan(delta)):
CLOG.fatal('Calculated steps have nans!?')
raise FloatingPointError('Calculated steps have nans!?')
return delta
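
For reference, a minimal standalone sketch of the damped LM solve performed above, on toy arrays. The multiplicative damping form and the names `jtj`, `damped_jtj`, `damping` are illustrative assumptions, not the library's `_calc_damped_jtj`:

import numpy as np

# Toy damped-LM solve: J is (n_params, n_data), r the residuals, cost = r.r
rng = np.random.default_rng(0)
J = rng.standard_normal((3, 50))
r = rng.standard_normal(50)

grad = 2 * J.dot(r)                                  # gradient of the cost
jtj = J.dot(J.T)
damping = 1.0
damped_jtj = jtj + damping * np.diag(np.diag(jtj))   # one common damping choice

# LM step: solve (damped JTJ) delta = -0.5 * grad, as in _calc_lm_step
delta, _, rank, s = np.linalg.lstsq(damped_jtj, -0.5 * grad, rcond=None)
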
|
[
"Calculates",
"LM",
"updates",
"with",
"or",
"without",
"the",
"acceleration",
"correction",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L957-L1009
|
[
"def",
"find_LM_updates",
"(",
"self",
",",
"grad",
",",
"do_correct_damping",
"=",
"True",
",",
"subblock",
"=",
"None",
")",
":",
"if",
"subblock",
"is",
"not",
"None",
":",
"if",
"(",
"subblock",
".",
"sum",
"(",
")",
"==",
"0",
")",
"or",
"(",
"subblock",
".",
"size",
"==",
"0",
")",
":",
"CLOG",
".",
"fatal",
"(",
"'Empty subblock in find_LM_updates'",
")",
"raise",
"ValueError",
"(",
"'Empty sub-block'",
")",
"j",
"=",
"self",
".",
"J",
"[",
"subblock",
"]",
"JTJ",
"=",
"np",
".",
"dot",
"(",
"j",
",",
"j",
".",
"T",
")",
"damped_JTJ",
"=",
"self",
".",
"_calc_damped_jtj",
"(",
"JTJ",
",",
"subblock",
"=",
"subblock",
")",
"grad",
"=",
"grad",
"[",
"subblock",
"]",
"#select the subblock of the grad",
"else",
":",
"damped_JTJ",
"=",
"self",
".",
"_calc_damped_jtj",
"(",
"self",
".",
"JTJ",
",",
"subblock",
"=",
"subblock",
")",
"delta",
"=",
"self",
".",
"_calc_lm_step",
"(",
"damped_JTJ",
",",
"grad",
",",
"subblock",
"=",
"subblock",
")",
"if",
"self",
".",
"use_accel",
":",
"accel_correction",
"=",
"self",
".",
"calc_accel_correction",
"(",
"damped_JTJ",
",",
"delta",
")",
"nrm_d0",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"delta",
"**",
"2",
")",
")",
"nrm_corr",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"sum",
"(",
"accel_correction",
"**",
"2",
")",
")",
"CLOG",
".",
"debug",
"(",
"'|correction| / |LM step|\\t%e'",
"%",
"(",
"nrm_corr",
"/",
"nrm_d0",
")",
")",
"if",
"nrm_corr",
"/",
"nrm_d0",
"<",
"self",
".",
"max_accel_correction",
":",
"delta",
"+=",
"accel_correction",
"elif",
"do_correct_damping",
":",
"CLOG",
".",
"debug",
"(",
"'Untrustworthy step! Increasing damping...'",
")",
"self",
".",
"increase_damping",
"(",
")",
"damped_JTJ",
"=",
"self",
".",
"_calc_damped_jtj",
"(",
"self",
".",
"JTJ",
",",
"subblock",
"=",
"subblock",
")",
"delta",
"=",
"self",
".",
"_calc_lm_step",
"(",
"damped_JTJ",
",",
"grad",
",",
"subblock",
"=",
"subblock",
")",
"if",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"delta",
")",
")",
":",
"CLOG",
".",
"fatal",
"(",
"'Calculated steps have nans!?'",
")",
"raise",
"FloatingPointError",
"(",
"'Calculated steps have nans!?'",
")",
"return",
"delta"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine._calc_lm_step
|
Calculates a Levenberg-Marquardt step w/o acceleration
|
peri/opt/optimize.py
|
def _calc_lm_step(self, damped_JTJ, grad, subblock=None):
"""Calculates a Levenberg-Marquard step w/o acceleration"""
delta0, res, rank, s = np.linalg.lstsq(damped_JTJ, -0.5*grad,
rcond=self.min_eigval)
if self._fresh_JTJ:
CLOG.debug('%d degenerate of %d total directions' % (
delta0.size-rank, delta0.size))
if subblock is not None:
delta = np.zeros(self.J.shape[0])
delta[subblock] = delta0
else:
delta = delta0.copy()
return delta
|
def _calc_lm_step(self, damped_JTJ, grad, subblock=None):
"""Calculates a Levenberg-Marquard step w/o acceleration"""
delta0, res, rank, s = np.linalg.lstsq(damped_JTJ, -0.5*grad,
rcond=self.min_eigval)
if self._fresh_JTJ:
CLOG.debug('%d degenerate of %d total directions' % (
delta0.size-rank, delta0.size))
if subblock is not None:
delta = np.zeros(self.J.shape[0])
delta[subblock] = delta0
else:
delta = delta0.copy()
return delta
|
[
"Calculates",
"a",
"Levenberg",
"-",
"Marquard",
"step",
"w",
"/",
"o",
"acceleration"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1011-L1023
|
[
"def",
"_calc_lm_step",
"(",
"self",
",",
"damped_JTJ",
",",
"grad",
",",
"subblock",
"=",
"None",
")",
":",
"delta0",
",",
"res",
",",
"rank",
",",
"s",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"damped_JTJ",
",",
"-",
"0.5",
"*",
"grad",
",",
"rcond",
"=",
"self",
".",
"min_eigval",
")",
"if",
"self",
".",
"_fresh_JTJ",
":",
"CLOG",
".",
"debug",
"(",
"'%d degenerate of %d total directions'",
"%",
"(",
"delta0",
".",
"size",
"-",
"rank",
",",
"delta0",
".",
"size",
")",
")",
"if",
"subblock",
"is",
"not",
"None",
":",
"delta",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"J",
".",
"shape",
"[",
"0",
"]",
")",
"delta",
"[",
"subblock",
"]",
"=",
"delta0",
"else",
":",
"delta",
"=",
"delta0",
".",
"copy",
"(",
")",
"return",
"delta"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.update_param_vals
|
Updates the current set of parameter values and previous values,
sets a flag to re-calculate J.
Parameters
----------
new_vals : numpy.ndarray
The new values to update to
incremental : Bool, optional
Set to True to make it an incremental update relative
to the old parameters. Default is False
|
peri/opt/optimize.py
|
def update_param_vals(self, new_vals, incremental=False):
"""
Updates the current set of parameter values and previous values,
sets a flag to re-calculate J.
Parameters
----------
new_vals : numpy.ndarray
The new values to update to
incremental : Bool, optional
Set to True to make it an incremental update relative
to the old parameters. Default is False
"""
self._last_vals = self.param_vals.copy()
if incremental:
self.param_vals += new_vals
else:
self.param_vals = new_vals.copy()
#And we've updated, so JTJ is no longer valid:
self._fresh_JTJ = False
|
def update_param_vals(self, new_vals, incremental=False):
"""
Updates the current set of parameter values and previous values,
sets a flag to re-calculate J.
Parameters
----------
new_vals : numpy.ndarray
The new values to update to
incremental : Bool, optional
Set to True to make it an incremental update relative
to the old parameters. Default is False
"""
self._last_vals = self.param_vals.copy()
if incremental:
self.param_vals += new_vals
else:
self.param_vals = new_vals.copy()
#And we've updated, so JTJ is no longer valid:
self._fresh_JTJ = False
|
[
"Updates",
"the",
"current",
"set",
"of",
"parameter",
"values",
"and",
"previous",
"values",
"sets",
"a",
"flag",
"to",
"re",
"-",
"calculate",
"J",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1034-L1053
|
[
"def",
"update_param_vals",
"(",
"self",
",",
"new_vals",
",",
"incremental",
"=",
"False",
")",
":",
"self",
".",
"_last_vals",
"=",
"self",
".",
"param_vals",
".",
"copy",
"(",
")",
"if",
"incremental",
":",
"self",
".",
"param_vals",
"+=",
"new_vals",
"else",
":",
"self",
".",
"param_vals",
"=",
"new_vals",
".",
"copy",
"(",
")",
"#And we've updated, so JTJ is no longer valid:",
"self",
".",
"_fresh_JTJ",
"=",
"False"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.find_expected_error
|
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
|
peri/opt/optimize.py
|
def find_expected_error(self, delta_params='calc'):
"""
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
"""
grad = self.calc_grad()
if list(delta_params) in [list('calc'), list('perfect')]:
jtj = (self.JTJ if delta_params == 'perfect' else
self._calc_damped_jtj(self.JTJ))
delta_params = self._calc_lm_step(jtj, self.calc_grad())
#If the model were linear, then the cost would be quadratic,
#with Hessian 2*`self.JTJ` and gradient `grad`
expected_error = (self.error + np.dot(grad, delta_params) +
np.dot(np.dot(self.JTJ, delta_params), delta_params))
return expected_error
|
def find_expected_error(self, delta_params='calc'):
"""
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
"""
grad = self.calc_grad()
if list(delta_params) in [list('calc'), list('perfect')]:
jtj = (self.JTJ if delta_params == 'perfect' else
self._calc_damped_jtj(self.JTJ))
delta_params = self._calc_lm_step(jtj, self.calc_grad())
#If the model were linear, then the cost would be quadratic,
#with Hessian 2*`self.JTJ` and gradient `grad`
expected_error = (self.error + np.dot(grad, delta_params) +
np.dot(np.dot(self.JTJ, delta_params), delta_params))
return expected_error
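
The expected-error expression above is the quadratic expansion of the cost, error + grad.delta + delta.JTJ.delta. A small standalone check on a purely linear toy model, where the expansion is exact (all names here are made up for illustration):

import numpy as np

rng = np.random.default_rng(1)
J = rng.standard_normal((4, 30))        # rows are parameter directions, J = d(residuals)/d(params)
data = rng.standard_normal(30)
p0 = rng.standard_normal(4)

def model(p):
    return -J.T.dot(p)                  # linear toy model so residuals are linear in p

def error(p):
    r = data - model(p)
    return r.dot(r)

r0 = data - model(p0)
grad = 2 * J.dot(r0)                    # as in calc_grad
jtj = J.dot(J.T)

delta = 0.1 * rng.standard_normal(4)
expected = error(p0) + grad.dot(delta) + delta.dot(jtj).dot(delta)
assert np.allclose(expected, error(p0 + delta))
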
|
[
"Returns",
"the",
"error",
"expected",
"after",
"an",
"update",
"if",
"the",
"model",
"were",
"linear",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1055-L1080
|
[
"def",
"find_expected_error",
"(",
"self",
",",
"delta_params",
"=",
"'calc'",
")",
":",
"grad",
"=",
"self",
".",
"calc_grad",
"(",
")",
"if",
"list",
"(",
"delta_params",
")",
"in",
"[",
"list",
"(",
"'calc'",
")",
",",
"list",
"(",
"'perfect'",
")",
"]",
":",
"jtj",
"=",
"(",
"self",
".",
"JTJ",
"if",
"delta_params",
"==",
"'perfect'",
"else",
"self",
".",
"_calc_damped_jtj",
"(",
"self",
".",
"JTJ",
")",
")",
"delta_params",
"=",
"self",
".",
"_calc_lm_step",
"(",
"jtj",
",",
"self",
".",
"calc_grad",
"(",
")",
")",
"#If the model were linear, then the cost would be quadratic,",
"#with Hessian 2*`self.JTJ` and gradient `grad`",
"expected_error",
"=",
"(",
"self",
".",
"error",
"+",
"np",
".",
"dot",
"(",
"grad",
",",
"delta_params",
")",
"+",
"np",
".",
"dot",
"(",
"np",
".",
"dot",
"(",
"self",
".",
"JTJ",
",",
"delta_params",
")",
",",
"delta_params",
")",
")",
"return",
"expected_error"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.calc_model_cosine
|
Calculates the cosine of the residuals with the model.
Parameters
----------
decimate : Int or None, optional
Decimate the residuals by `decimate` pixels. If None, no
decimation is used. Valid only with mode='svd'. Default
is None
mode : {'svd', 'err'}
Which mode to use; see Notes section. Default is 'err'.
Returns
-------
abs_cos : numpy.float64
The absolute value of the model cosine.
Notes
-----
The model cosine is defined in terms of the geometric view of
curve-fitting, as a model manifold embedded in a high-dimensional
space. The model cosine is the cosine of the residuals vector
with its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|`
where :math:`P^T` is the projection operator onto the model manifold
and :math:`r` the residuals. This can be calculated two ways: By
calculating the projection operator P directly with SVD (mode=`svd`),
or by using the expected error if the model were linear to calculate
a model sine first (mode=`err`). Since the SVD of a large matrix is
slow, mode=`err` is faster.
`decimate` allows for every nth pixel only to be counted in the
SVD matrix of J for speed. While this is n x faster, it is
considerably less accurate, so the default is no decimation.
|
peri/opt/optimize.py
|
def calc_model_cosine(self, decimate=None, mode='err'):
"""
Calculates the cosine of the residuals with the model.
Parameters
----------
decimate : Int or None, optional
Decimate the residuals by `decimate` pixels. If None, no
decimation is used. Valid only with mode='svd'. Default
is None
mode : {'svd', 'err'}
Which mode to use; see Notes section. Default is 'err'.
Returns
-------
abs_cos : numpy.float64
The absolute value of the model cosine.
Notes
-----
The model cosine is defined in terms of the geometric view of
curve-fitting, as a model manifold embedded in a high-dimensional
space. The model cosine is the cosine of the residuals vector
with its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|`
where :math:`P^T` is the projection operator onto the model manifold
and :math:`r` the residuals. This can be calculated two ways: By
calculating the projection operator P directly with SVD (mode=`svd`),
or by using the expected error if the model were linear to calculate
a model sine first (mode=`err`). Since the SVD of a large matrix is
slow, mode=`err` is faster.
`decimate` allows for every nth pixel only to be counted in the
SVD matrix of J for speed. While this is n x faster, it is
considerably less accurate, so the default is no decimation.
"""
if mode == 'svd':
slicer = slice(0, None, decimate)
#1. Calculate projection term
u, sig, v = np.linalg.svd(self.J[:,slicer], full_matrices=False) #slow part
# p = np.dot(v.T, v) - memory error, so term-by-term
r = self.calc_residuals()[slicer]
abs_r = np.sqrt((r*r).sum())
v_r = np.dot(v,r/abs_r)
projected = np.dot(v.T, v_r)
abs_cos = np.sqrt((projected*projected).sum())
elif mode == 'err':
expected_error = self.find_expected_error(delta_params='perfect')
model_sine_2 = expected_error / self.error #error = distance^2
abs_cos = np.sqrt(1 - model_sine_2)
else:
raise ValueError('mode must be one of `svd`, `err`')
return abs_cos
|
def calc_model_cosine(self, decimate=None, mode='err'):
"""
Calculates the cosine of the residuals with the model.
Parameters
----------
decimate : Int or None, optional
Decimate the residuals by `decimate` pixels. If None, no
decimation is used. Valid only with mode='svd'. Default
is None
mode : {'svd', 'err'}
Which mode to use; see Notes section. Default is 'err'.
Returns
-------
abs_cos : numpy.float64
The absolute value of the model cosine.
Notes
-----
The model cosine is defined in terms of the geometric view of
curve-fitting, as a model manifold embedded in a high-dimensional
space. The model cosine is the cosine of the residuals vector
with its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|`
where :math:`P^T` is the projection operator onto the model manifold
and :math:`r` the residuals. This can be calculated two ways: By
calculating the projection operator P directly with SVD (mode=`svd`),
or by using the expected error if the model were linear to calculate
a model sine first (mode=`err`). Since the SVD of a large matrix is
slow, mode=`err` is faster.
`decimate` allows for every nth pixel only to be counted in the
SVD matrix of J for speed. While this is n x faster, it is
considerably less accurate, so the default is no decimation.
"""
if mode == 'svd':
slicer = slice(0, None, decimate)
#1. Calculate projection term
u, sig, v = np.linalg.svd(self.J[:,slicer], full_matrices=False) #slow part
# p = np.dot(v.T, v) - memory error, so term-by-term
r = self.calc_residuals()[slicer]
abs_r = np.sqrt((r*r).sum())
v_r = np.dot(v,r/abs_r)
projected = np.dot(v.T, v_r)
abs_cos = np.sqrt((projected*projected).sum())
elif mode == 'err':
expected_error = self.find_expected_error(delta_params='perfect')
model_sine_2 = expected_error / self.error #error = distance^2
abs_cos = np.sqrt(1 - model_sine_2)
else:
raise ValueError('mode must be one of `svd`, `err`')
return abs_cos
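
A standalone sketch of the mode='svd' model cosine on toy arrays (not the library API; shapes and names are illustrative). The rows of v from the SVD span the row space of J, i.e. the tangent directions, and the cosine is the norm of the unit residuals projected onto that space:

import numpy as np

rng = np.random.default_rng(2)
J = rng.standard_normal((3, 200))       # (n_params, n_data)
r = rng.standard_normal(200)

u, sig, v = np.linalg.svd(J, full_matrices=False)   # rows of v span the tangent space
r_hat = r / np.sqrt(r.dot(r))
projected = v.T.dot(v.dot(r_hat))                   # project unit residuals onto the row space
abs_cos = np.sqrt(projected.dot(projected))
print(abs_cos)                                      # between 0 and 1
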
|
[
"Calculates",
"the",
"cosine",
"of",
"the",
"residuals",
"with",
"the",
"model",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1082-L1136
|
[
"def",
"calc_model_cosine",
"(",
"self",
",",
"decimate",
"=",
"None",
",",
"mode",
"=",
"'err'",
")",
":",
"if",
"mode",
"==",
"'svd'",
":",
"slicer",
"=",
"slice",
"(",
"0",
",",
"None",
",",
"decimate",
")",
"#1. Calculate projection term",
"u",
",",
"sig",
",",
"v",
"=",
"np",
".",
"linalg",
".",
"svd",
"(",
"self",
".",
"J",
"[",
":",
",",
"slicer",
"]",
",",
"full_matrices",
"=",
"False",
")",
"#slow part",
"# p = np.dot(v.T, v) - memory error, so term-by-term",
"r",
"=",
"self",
".",
"calc_residuals",
"(",
")",
"[",
"slicer",
"]",
"abs_r",
"=",
"np",
".",
"sqrt",
"(",
"(",
"r",
"*",
"r",
")",
".",
"sum",
"(",
")",
")",
"v_r",
"=",
"np",
".",
"dot",
"(",
"v",
",",
"r",
"/",
"abs_r",
")",
"projected",
"=",
"np",
".",
"dot",
"(",
"v",
".",
"T",
",",
"v_r",
")",
"abs_cos",
"=",
"np",
".",
"sqrt",
"(",
"(",
"projected",
"*",
"projected",
")",
".",
"sum",
"(",
")",
")",
"elif",
"mode",
"==",
"'err'",
":",
"expected_error",
"=",
"self",
".",
"find_expected_error",
"(",
"delta_params",
"=",
"'perfect'",
")",
"model_sine_2",
"=",
"expected_error",
"/",
"self",
".",
"error",
"#error = distance^2",
"abs_cos",
"=",
"np",
".",
"sqrt",
"(",
"1",
"-",
"model_sine_2",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'mode must be one of `svd`, `err`'",
")",
"return",
"abs_cos"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.get_termination_stats
|
Returns a dict of termination statistics
Parameters
----------
get_cos : Bool, optional
Whether or not to calculate the cosine of the residuals
with the tangent plane of the model using the current J.
The calculation may take some time. Default is True
Returns
-------
dict
Has keys
delta_vals : The last change in parameter values.
delta_err : The last change in the error.
exp_err : The expected (last) change in the error.
frac_err : The fractional change in the error.
num_iter : The number of iterations completed.
error : The current error.
|
peri/opt/optimize.py
|
def get_termination_stats(self, get_cos=True):
"""
Returns a dict of termination statistics
Parameters
----------
get_cos : Bool, optional
Whether or not to calculate the cosine of the residuals
with the tangent plane of the model using the current J.
The calculation may take some time. Default is True
Returns
-------
dict
Has keys
delta_vals : The last change in parameter values.
delta_err : The last change in the error.
exp_err : The expected (last) change in the error.
frac_err : The fractional change in the error.
num_iter : The number of iterations completed.
error : The current error.
"""
delta_vals = self._last_vals - self.param_vals
delta_err = self._last_error - self.error
frac_err = delta_err / self.error
to_return = {'delta_vals':delta_vals, 'delta_err':delta_err,
'num_iter':1*self._num_iter, 'frac_err':frac_err,
'error':self.error, 'exp_err':self._exp_err}
if get_cos:
model_cosine = self.calc_model_cosine()
to_return.update({'model_cosine':model_cosine})
return to_return
|
def get_termination_stats(self, get_cos=True):
"""
Returns a dict of termination statistics
Parameters
----------
get_cos : Bool, optional
Whether or not to calculate the cosine of the residuals
with the tangent plane of the model using the current J.
The calculation may take some time. Default is True
Returns
-------
dict
Has keys
delta_vals : The last change in parameter values.
delta_err : The last change in the error.
exp_err : The expected (last) change in the error.
frac_err : The fractional change in the error.
num_iter : The number of iterations completed.
error : The current error.
"""
delta_vals = self._last_vals - self.param_vals
delta_err = self._last_error - self.error
frac_err = delta_err / self.error
to_return = {'delta_vals':delta_vals, 'delta_err':delta_err,
'num_iter':1*self._num_iter, 'frac_err':frac_err,
'error':self.error, 'exp_err':self._exp_err}
if get_cos:
model_cosine = self.calc_model_cosine()
to_return.update({'model_cosine':model_cosine})
return to_return
|
[
"Returns",
"a",
"dict",
"of",
"termination",
"statistics"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1138-L1169
|
[
"def",
"get_termination_stats",
"(",
"self",
",",
"get_cos",
"=",
"True",
")",
":",
"delta_vals",
"=",
"self",
".",
"_last_vals",
"-",
"self",
".",
"param_vals",
"delta_err",
"=",
"self",
".",
"_last_error",
"-",
"self",
".",
"error",
"frac_err",
"=",
"delta_err",
"/",
"self",
".",
"error",
"to_return",
"=",
"{",
"'delta_vals'",
":",
"delta_vals",
",",
"'delta_err'",
":",
"delta_err",
",",
"'num_iter'",
":",
"1",
"*",
"self",
".",
"_num_iter",
",",
"'frac_err'",
":",
"frac_err",
",",
"'error'",
":",
"self",
".",
"error",
",",
"'exp_err'",
":",
"self",
".",
"_exp_err",
"}",
"if",
"get_cos",
":",
"model_cosine",
"=",
"self",
".",
"calc_model_cosine",
"(",
")",
"to_return",
".",
"update",
"(",
"{",
"'model_cosine'",
":",
"model_cosine",
"}",
")",
"return",
"to_return"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.check_completion
|
Returns a Bool of whether the algorithm has found a satisfactory minimum
|
peri/opt/optimize.py
|
def check_completion(self):
"""
Returns a Bool of whether the algorithm has found a satisfactory minimum
"""
terminate = False
term_dict = self.get_termination_stats(get_cos=self.costol is not None)
terminate |= np.all(np.abs(term_dict['delta_vals']) < self.paramtol)
terminate |= (term_dict['delta_err'] < self.errtol)
terminate |= (term_dict['exp_err'] < self.exptol)
terminate |= (term_dict['frac_err'] < self.fractol)
if self.costol is not None:
terminate |= (term_dict['model_cosine'] < self.costol)
return terminate
|
def check_completion(self):
"""
Returns a Bool of whether the algorithm has found a satisfactory minimum
"""
terminate = False
term_dict = self.get_termination_stats(get_cos=self.costol is not None)
terminate |= np.all(np.abs(term_dict['delta_vals']) < self.paramtol)
terminate |= (term_dict['delta_err'] < self.errtol)
terminate |= (term_dict['exp_err'] < self.exptol)
terminate |= (term_dict['frac_err'] < self.fractol)
if self.costol is not None:
terminate |= (term_dict['model_cosine'] < self.costol)
return terminate
|
[
"Returns",
"a",
"Bool",
"of",
"whether",
"the",
"algorithm",
"has",
"found",
"a",
"satisfactory",
"minimum"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1171-L1184
|
[
"def",
"check_completion",
"(",
"self",
")",
":",
"terminate",
"=",
"False",
"term_dict",
"=",
"self",
".",
"get_termination_stats",
"(",
"get_cos",
"=",
"self",
".",
"costol",
"is",
"not",
"None",
")",
"terminate",
"|=",
"np",
".",
"all",
"(",
"np",
".",
"abs",
"(",
"term_dict",
"[",
"'delta_vals'",
"]",
")",
"<",
"self",
".",
"paramtol",
")",
"terminate",
"|=",
"(",
"term_dict",
"[",
"'delta_err'",
"]",
"<",
"self",
".",
"errtol",
")",
"terminate",
"|=",
"(",
"term_dict",
"[",
"'exp_err'",
"]",
"<",
"self",
".",
"exptol",
")",
"terminate",
"|=",
"(",
"term_dict",
"[",
"'frac_err'",
"]",
"<",
"self",
".",
"fractol",
")",
"if",
"self",
".",
"costol",
"is",
"not",
"None",
":",
"terminate",
"|=",
"(",
"curcos",
"<",
"term_dict",
"[",
"'model_cosine'",
"]",
")",
"return",
"terminate"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.check_terminate
|
Returns a Bool of whether to terminate.
Checks whether a satisfactory minimum has been found or whether
too many iterations have occurred.
|
peri/opt/optimize.py
|
def check_terminate(self):
"""
Returns a Bool of whether to terminate.
Checks whether a satisfactory minimum has been found or whether
too many iterations have occurred.
"""
if not self._has_run:
return False
else:
#1-3. errtol, paramtol, model cosine low enough?
terminate = self.check_completion()
#4. too many iterations??
terminate |= (self._num_iter >= self.max_iter)
return terminate
|
def check_terminate(self):
"""
Returns a Bool of whether to terminate.
Checks whether a satisfactory minimum has been found or whether
too many iterations have occurred.
"""
if not self._has_run:
return False
else:
#1-3. errtol, paramtol, model cosine low enough?
terminate = self.check_completion()
#4. too many iterations??
terminate |= (self._num_iter >= self.max_iter)
return terminate
|
[
"Returns",
"a",
"Bool",
"of",
"whether",
"to",
"terminate",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1186-L1201
|
[
"def",
"check_terminate",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_has_run",
":",
"return",
"False",
"else",
":",
"#1-3. errtol, paramtol, model cosine low enough?",
"terminate",
"=",
"self",
".",
"check_completion",
"(",
")",
"#4. too many iterations??",
"terminate",
"|=",
"(",
"self",
".",
"_num_iter",
">=",
"self",
".",
"max_iter",
")",
"return",
"terminate"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.check_update_J
|
Checks if the full J should be updated.
Right now, just updates after update_J_frequency loops
|
peri/opt/optimize.py
|
def check_update_J(self):
"""
Checks if the full J should be updated.
Right now, just updates after update_J_frequency loops
"""
self._J_update_counter += 1
update = self._J_update_counter >= self.update_J_frequency
return update & (not self._fresh_JTJ)
|
def check_update_J(self):
"""
Checks if the full J should be updated.
Right now, just updates after update_J_frequency loops
"""
self._J_update_counter += 1
update = self._J_update_counter >= self.update_J_frequency
return update & (not self._fresh_JTJ)
|
[
"Checks",
"if",
"the",
"full",
"J",
"should",
"be",
"updated",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1203-L1211
|
[
"def",
"check_update_J",
"(",
"self",
")",
":",
"self",
".",
"_J_update_counter",
"+=",
"1",
"update",
"=",
"self",
".",
"_J_update_counter",
">=",
"self",
".",
"update_J_frequency",
"return",
"update",
"&",
"(",
"not",
"self",
".",
"_fresh_JTJ",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.update_J
|
Updates J, JTJ, and internal counters.
|
peri/opt/optimize.py
|
def update_J(self):
"""Updates J, JTJ, and internal counters."""
self.calc_J()
# np.dot(j, j.T) is slightly faster but 2x as much mem
step = np.ceil(1e-2 * self.J.shape[1]).astype('int') # 1% more mem...
self.JTJ = low_mem_sq(self.J, step=step)
#copies still, since J is not C -ordered but a slice of j_e...
#doing self.J.copy() works but takes 2x as much ram..
self._fresh_JTJ = True
self._J_update_counter = 0
if np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
#Update self._exp_err
self._exp_err = self.error - self.find_expected_error(delta_params='perfect')
|
def update_J(self):
"""Updates J, JTJ, and internal counters."""
self.calc_J()
# np.dot(j, j.T) is slightly faster but 2x as much mem
step = np.ceil(1e-2 * self.J.shape[1]).astype('int') # 1% more mem...
self.JTJ = low_mem_sq(self.J, step=step)
#copies still, since J is not C -ordered but a slice of j_e...
#doing self.J.copy() works but takes 2x as much ram..
self._fresh_JTJ = True
self._J_update_counter = 0
if np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
#Update self._exp_err
self._exp_err = self.error - self.find_expected_error(delta_params='perfect')
|
[
"Updates",
"J",
"JTJ",
"and",
"internal",
"counters",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1213-L1226
|
[
"def",
"update_J",
"(",
"self",
")",
":",
"self",
".",
"calc_J",
"(",
")",
"# np.dot(j, j.T) is slightly faster but 2x as much mem",
"step",
"=",
"np",
".",
"ceil",
"(",
"1e-2",
"*",
"self",
".",
"J",
".",
"shape",
"[",
"1",
"]",
")",
".",
"astype",
"(",
"'int'",
")",
"# 1% more mem...",
"self",
".",
"JTJ",
"=",
"low_mem_sq",
"(",
"self",
".",
"J",
",",
"step",
"=",
"step",
")",
"#copies still, since J is not C -ordered but a slice of j_e...",
"#doing self.J.copy() works but takes 2x as much ram..",
"self",
".",
"_fresh_JTJ",
"=",
"True",
"self",
".",
"_J_update_counter",
"=",
"0",
"if",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"self",
".",
"JTJ",
")",
")",
":",
"raise",
"FloatingPointError",
"(",
"'J, JTJ have nans.'",
")",
"#Update self._exp_err",
"self",
".",
"_exp_err",
"=",
"self",
".",
"error",
"-",
"self",
".",
"find_expected_error",
"(",
"delta_params",
"=",
"'perfect'",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.calc_grad
|
The gradient of the cost w.r.t. the parameters.
|
peri/opt/optimize.py
|
def calc_grad(self):
"""The gradient of the cost w.r.t. the parameters."""
residuals = self.calc_residuals()
return 2*np.dot(self.J, residuals)
|
def calc_grad(self):
"""The gradient of the cost w.r.t. the parameters."""
residuals = self.calc_residuals()
return 2*np.dot(self.J, residuals)
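
A quick standalone check that grad = 2*J.dot(residuals) matches a central finite difference of the cost on a linear toy model (the names M, cost, fd are made up for illustration):

import numpy as np

rng = np.random.default_rng(3)
M = rng.standard_normal((40, 3))          # toy model: model(p) = M.dot(p)
data = rng.standard_normal(40)
p = rng.standard_normal(3)

residuals = data - M.dot(p)
J = -M.T                                   # J[a] = d(residuals)/d(p[a])
grad = 2 * J.dot(residuals)

def cost(q):
    r = data - M.dot(q)
    return r.dot(r)

eps = 1e-6
fd = np.array([(cost(p + eps*np.eye(3)[a]) - cost(p - eps*np.eye(3)[a])) / (2*eps)
               for a in range(3)])
assert np.allclose(grad, fd, rtol=1e-4)
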
|
[
"The",
"gradient",
"of",
"the",
"cost",
"w",
".",
"r",
".",
"t",
".",
"the",
"parameters",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1228-L1231
|
[
"def",
"calc_grad",
"(",
"self",
")",
":",
"residuals",
"=",
"self",
".",
"calc_residuals",
"(",
")",
"return",
"2",
"*",
"np",
".",
"dot",
"(",
"self",
".",
"J",
",",
"residuals",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine._rank_1_J_update
|
Does J += np.outer(direction, new_values - old_values) without
using lots of memory
|
peri/opt/optimize.py
|
def _rank_1_J_update(self, direction, values):
"""
Does J += np.outer(direction, new_values - old_values) without
using lots of memory
"""
vals_to_sub = np.dot(direction, self.J)
delta_vals = values - vals_to_sub
for a in range(direction.size):
self.J[a] += direction[a] * delta_vals
|
def _rank_1_J_update(self, direction, values):
"""
Does J += np.outer(direction, new_values - old_values) without
using lots of memory
"""
vals_to_sub = np.dot(direction, self.J)
delta_vals = values - vals_to_sub
for a in range(direction.size):
self.J[a] += direction[a] * delta_vals
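
The loop above is a memory-light form of a single rank-1 outer-product update, J += np.outer(direction, values - direction.dot(J)). A standalone toy check of the equivalence:

import numpy as np

rng = np.random.default_rng(4)
J = rng.standard_normal((5, 100))
direction = rng.standard_normal(5)
values = rng.standard_normal(100)

J_ref = J + np.outer(direction, values - direction.dot(J))

J_low = J.copy()
delta_vals = values - direction.dot(J_low)
for a in range(direction.size):
    J_low[a] += direction[a] * delta_vals   # same update, one row at a time

assert np.allclose(J_ref, J_low)
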
|
[
"Does",
"J",
"+",
"=",
"np",
".",
"outer",
"(",
"direction",
"new_values",
"-",
"old_values",
")",
"without",
"using",
"lots",
"of",
"memory"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1233-L1241
|
[
"def",
"_rank_1_J_update",
"(",
"self",
",",
"direction",
",",
"values",
")",
":",
"vals_to_sub",
"=",
"np",
".",
"dot",
"(",
"direction",
",",
"self",
".",
"J",
")",
"delta_vals",
"=",
"values",
"-",
"vals_to_sub",
"for",
"a",
"in",
"range",
"(",
"direction",
".",
"size",
")",
":",
"self",
".",
"J",
"[",
"a",
"]",
"+=",
"direction",
"[",
"a",
"]",
"*",
"delta_vals"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.update_Broyden_J
|
Execute a Broyden update of J
|
peri/opt/optimize.py
|
def update_Broyden_J(self):
"""Execute a Broyden update of J"""
CLOG.debug('Broyden update.')
delta_vals = self.param_vals - self._last_vals
delta_residuals = self.calc_residuals() - self._last_residuals
nrm = np.sqrt(np.dot(delta_vals, delta_vals))
direction = delta_vals / nrm
vals = delta_residuals / nrm
self._rank_1_J_update(direction, vals)
self.JTJ = np.dot(self.J, self.J.T)
|
def update_Broyden_J(self):
"""Execute a Broyden update of J"""
CLOG.debug('Broyden update.')
delta_vals = self.param_vals - self._last_vals
delta_residuals = self.calc_residuals() - self._last_residuals
nrm = np.sqrt(np.dot(delta_vals, delta_vals))
direction = delta_vals / nrm
vals = delta_residuals / nrm
self._rank_1_J_update(direction, vals)
self.JTJ = np.dot(self.J, self.J.T)
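
A standalone toy check that this Broyden-style rank-1 update enforces the secant condition, i.e. the updated J reproduces the observed residual change along delta_vals exactly (names are illustrative):

import numpy as np

rng = np.random.default_rng(5)
J = rng.standard_normal((4, 60))
delta_vals = rng.standard_normal(4)
delta_residuals = rng.standard_normal(60)

nrm = np.sqrt(delta_vals.dot(delta_vals))
direction = delta_vals / nrm
vals = delta_residuals / nrm
J_new = J + np.outer(direction, vals - direction.dot(J))   # same rank-1 update

# Secant condition: delta_vals . J_new == delta_residuals
assert np.allclose(delta_vals.dot(J_new), delta_residuals)
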
|
[
"Execute",
"a",
"Broyden",
"update",
"of",
"J"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1248-L1257
|
[
"def",
"update_Broyden_J",
"(",
"self",
")",
":",
"CLOG",
".",
"debug",
"(",
"'Broyden update.'",
")",
"delta_vals",
"=",
"self",
".",
"param_vals",
"-",
"self",
".",
"_last_vals",
"delta_residuals",
"=",
"self",
".",
"calc_residuals",
"(",
")",
"-",
"self",
".",
"_last_residuals",
"nrm",
"=",
"np",
".",
"sqrt",
"(",
"np",
".",
"dot",
"(",
"delta_vals",
",",
"delta_vals",
")",
")",
"direction",
"=",
"delta_vals",
"/",
"nrm",
"vals",
"=",
"delta_residuals",
"/",
"nrm",
"self",
".",
"_rank_1_J_update",
"(",
"direction",
",",
"vals",
")",
"self",
".",
"JTJ",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"J",
",",
"self",
".",
"J",
".",
"T",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.update_eig_J
|
Execute an eigen update of J
|
peri/opt/optimize.py
|
def update_eig_J(self):
"""Execute an eigen update of J"""
CLOG.debug('Eigen update.')
vls, vcs = np.linalg.eigh(self.JTJ)
res0 = self.calc_residuals()
for a in range(min([self.num_eig_dirs, vls.size])):
#1. Finding stiff directions
stif_dir = vcs[-(a+1)] #already normalized
#2. Evaluating derivative along that direction, we'll use dl=5e-4:
dl = self.eig_dl #1e-5
_ = self.update_function(self.param_vals + dl*stif_dir)
res1 = self.calc_residuals()
#3. Updating
grad_stif = (res1-res0)/dl
self._rank_1_J_update(stif_dir, grad_stif)
self.JTJ = np.dot(self.J, self.J.T)
#Putting the parameters back:
_ = self.update_function(self.param_vals)
|
def update_eig_J(self):
"""Execute an eigen update of J"""
CLOG.debug('Eigen update.')
vls, vcs = np.linalg.eigh(self.JTJ)
res0 = self.calc_residuals()
for a in range(min([self.num_eig_dirs, vls.size])):
#1. Finding stiff directions
stif_dir = vcs[-(a+1)] #already normalized
#2. Evaluating derivative along that direction, we'll use dl=5e-4:
dl = self.eig_dl #1e-5
_ = self.update_function(self.param_vals + dl*stif_dir)
res1 = self.calc_residuals()
#3. Updating
grad_stif = (res1-res0)/dl
self._rank_1_J_update(stif_dir, grad_stif)
self.JTJ = np.dot(self.J, self.J.T)
#Putting the parameters back:
_ = self.update_function(self.param_vals)
|
[
"Execute",
"an",
"eigen",
"update",
"of",
"J"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1264-L1284
|
[
"def",
"update_eig_J",
"(",
"self",
")",
":",
"CLOG",
".",
"debug",
"(",
"'Eigen update.'",
")",
"vls",
",",
"vcs",
"=",
"np",
".",
"linalg",
".",
"eigh",
"(",
"self",
".",
"JTJ",
")",
"res0",
"=",
"self",
".",
"calc_residuals",
"(",
")",
"for",
"a",
"in",
"range",
"(",
"min",
"(",
"[",
"self",
".",
"num_eig_dirs",
",",
"vls",
".",
"size",
"]",
")",
")",
":",
"#1. Finding stiff directions",
"stif_dir",
"=",
"vcs",
"[",
"-",
"(",
"a",
"+",
"1",
")",
"]",
"#already normalized",
"#2. Evaluating derivative along that direction, we'll use dl=5e-4:",
"dl",
"=",
"self",
".",
"eig_dl",
"#1e-5",
"_",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"dl",
"*",
"stif_dir",
")",
"res1",
"=",
"self",
".",
"calc_residuals",
"(",
")",
"#3. Updating",
"grad_stif",
"=",
"(",
"res1",
"-",
"res0",
")",
"/",
"dl",
"self",
".",
"_rank_1_J_update",
"(",
"stif_dir",
",",
"grad_stif",
")",
"self",
".",
"JTJ",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"J",
",",
"self",
".",
"J",
".",
"T",
")",
"#Putting the parameters back:",
"_",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.calc_accel_correction
|
Geodesic acceleration correction to the LM step.
Parameters
----------
damped_JTJ : numpy.ndarray
The damped JTJ used to calculate the initial step.
delta0 : numpy.ndarray
The initial LM step.
Returns
-------
corr : numpy.ndarray
The correction to the original LM step.
|
peri/opt/optimize.py
|
def calc_accel_correction(self, damped_JTJ, delta0):
"""
Geodesic acceleration correction to the LM step.
Parameters
----------
damped_JTJ : numpy.ndarray
The damped JTJ used to calculate the initial step.
delta0 : numpy.ndarray
The initial LM step.
Returns
-------
corr : numpy.ndarray
The correction to the original LM step.
"""
#Get the derivative:
_ = self.update_function(self.param_vals)
rm0 = self.calc_residuals().copy()
_ = self.update_function(self.param_vals + delta0)
rm1 = self.calc_residuals().copy()
_ = self.update_function(self.param_vals - delta0)
rm2 = self.calc_residuals().copy()
der2 = (rm2 + rm1 - 2*rm0)
corr, res, rank, s = np.linalg.lstsq(damped_JTJ, np.dot(self.J, der2),
rcond=self.min_eigval)
corr *= -0.5
return corr
|
def calc_accel_correction(self, damped_JTJ, delta0):
"""
Geodesic acceleration correction to the LM step.
Parameters
----------
damped_JTJ : numpy.ndarray
The damped JTJ used to calculate the initial step.
delta0 : numpy.ndarray
The initial LM step.
Returns
-------
corr : numpy.ndarray
The correction to the original LM step.
"""
#Get the derivative:
_ = self.update_function(self.param_vals)
rm0 = self.calc_residuals().copy()
_ = self.update_function(self.param_vals + delta0)
rm1 = self.calc_residuals().copy()
_ = self.update_function(self.param_vals - delta0)
rm2 = self.calc_residuals().copy()
der2 = (rm2 + rm1 - 2*rm0)
corr, res, rank, s = np.linalg.lstsq(damped_JTJ, np.dot(self.J, der2),
rcond=self.min_eigval)
corr *= -0.5
return corr
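
The der2 line above is a central finite difference for the second directional derivative of the residuals along delta0. A standalone toy check on quadratic residuals, where the difference is exact up to roundoff (the per-residual Hessians A are made up for illustration):

import numpy as np

rng = np.random.default_rng(6)
A = rng.standard_normal((30, 3, 3))
A = A + A.transpose(0, 2, 1)              # symmetric Hessian for each residual
p0 = rng.standard_normal(3)
delta0 = 0.05 * rng.standard_normal(3)

def residuals(p):
    return 0.5 * np.einsum('iab,a,b->i', A, p, p)

der2 = residuals(p0 + delta0) + residuals(p0 - delta0) - 2 * residuals(p0)
exact = np.einsum('iab,a,b->i', A, delta0, delta0)
assert np.allclose(der2, exact)
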
|
[
"Geodesic",
"acceleration",
"correction",
"to",
"the",
"LM",
"step",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1286-L1314
|
[
"def",
"calc_accel_correction",
"(",
"self",
",",
"damped_JTJ",
",",
"delta0",
")",
":",
"#Get the derivative:",
"_",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")",
"rm0",
"=",
"self",
".",
"calc_residuals",
"(",
")",
".",
"copy",
"(",
")",
"_",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"+",
"delta0",
")",
"rm1",
"=",
"self",
".",
"calc_residuals",
"(",
")",
".",
"copy",
"(",
")",
"_",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
"-",
"delta0",
")",
"rm2",
"=",
"self",
".",
"calc_residuals",
"(",
")",
".",
"copy",
"(",
")",
"der2",
"=",
"(",
"rm2",
"+",
"rm1",
"-",
"2",
"*",
"rm0",
")",
"corr",
",",
"res",
",",
"rank",
",",
"s",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"damped_JTJ",
",",
"np",
".",
"dot",
"(",
"self",
".",
"J",
",",
"der2",
")",
",",
"rcond",
"=",
"self",
".",
"min_eigval",
")",
"corr",
"*=",
"-",
"0.5",
"return",
"corr"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMEngine.update_select_J
|
Updates J only for certain parameters, described by the boolean
mask `blk`.
|
peri/opt/optimize.py
|
def update_select_J(self, blk):
"""
Updates J only for certain parameters, described by the boolean
mask `blk`.
"""
p0 = self.param_vals.copy()
self.update_function(p0) #in case things are not put back...
r0 = self.calc_residuals().copy()
dl = np.zeros(p0.size, dtype='float')
blk_J = []
for i in np.nonzero(blk)[0]:
dl *= 0; dl[i] = self.eig_dl
self.update_function(p0 + dl)
r1 = self.calc_residuals().copy()
blk_J.append((r1-r0)/self.eig_dl)
self.J[blk] = np.array(blk_J)
self.update_function(p0)
#Then we also need to update JTJ:
self.JTJ = np.dot(self.J, self.J.T)
if np.any(np.isnan(self.J)) or np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
|
def update_select_J(self, blk):
"""
Updates J only for certain parameters, described by the boolean
mask `blk`.
"""
p0 = self.param_vals.copy()
self.update_function(p0) #in case things are not put back...
r0 = self.calc_residuals().copy()
dl = np.zeros(p0.size, dtype='float')
blk_J = []
for i in np.nonzero(blk)[0]:
dl *= 0; dl[i] = self.eig_dl
self.update_function(p0 + dl)
r1 = self.calc_residuals().copy()
blk_J.append((r1-r0)/self.eig_dl)
self.J[blk] = np.array(blk_J)
self.update_function(p0)
#Then we also need to update JTJ:
self.JTJ = np.dot(self.J, self.J.T)
if np.any(np.isnan(self.J)) or np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
|
[
"Updates",
"J",
"only",
"for",
"certain",
"parameters",
"described",
"by",
"the",
"boolean",
"mask",
"blk",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1316-L1336
|
[
"def",
"update_select_J",
"(",
"self",
",",
"blk",
")",
":",
"p0",
"=",
"self",
".",
"param_vals",
".",
"copy",
"(",
")",
"self",
".",
"update_function",
"(",
"p0",
")",
"#in case things are not put back...",
"r0",
"=",
"self",
".",
"calc_residuals",
"(",
")",
".",
"copy",
"(",
")",
"dl",
"=",
"np",
".",
"zeros",
"(",
"p0",
".",
"size",
",",
"dtype",
"=",
"'float'",
")",
"blk_J",
"=",
"[",
"]",
"for",
"i",
"in",
"np",
".",
"nonzero",
"(",
"blk",
")",
"[",
"0",
"]",
":",
"dl",
"*=",
"0",
"dl",
"[",
"i",
"]",
"=",
"self",
".",
"eig_dl",
"self",
".",
"update_function",
"(",
"p0",
"+",
"dl",
")",
"r1",
"=",
"self",
".",
"calc_residuals",
"(",
")",
".",
"copy",
"(",
")",
"blk_J",
".",
"append",
"(",
"(",
"r1",
"-",
"r0",
")",
"/",
"self",
".",
"eig_dl",
")",
"self",
".",
"J",
"[",
"blk",
"]",
"=",
"np",
".",
"array",
"(",
"blk_J",
")",
"self",
".",
"update_function",
"(",
"p0",
")",
"#Then we also need to update JTJ:",
"self",
".",
"JTJ",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"J",
",",
"self",
".",
"J",
".",
"T",
")",
"if",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"self",
".",
"J",
")",
")",
"or",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"self",
".",
"JTJ",
")",
")",
":",
"raise",
"FloatingPointError",
"(",
"'J, JTJ have nans.'",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMFunction._set_err_paramvals
|
Must update:
self.error, self._last_error, self.param_vals, self._last_vals
|
peri/opt/optimize.py
|
def _set_err_paramvals(self):
"""
Must update:
self.error, self._last_error, self.param_vals, self._last_vals
"""
# self.param_vals = p0 #sloppy...
self._last_vals = self.param_vals.copy()
self.error = self.update_function(self.param_vals)
self._last_error = (1 + 2*self.fractol) * self.error
|
def _set_err_paramvals(self):
"""
Must update:
self.error, self._last_error, self.param_vals, self._last_vals
"""
# self.param_vals = p0 #sloppy...
self._last_vals = self.param_vals.copy()
self.error = self.update_function(self.param_vals)
self._last_error = (1 + 2*self.fractol) * self.error
|
[
"Must",
"update",
":",
"self",
".",
"error",
"self",
".",
"_last_error",
"self",
".",
"param_vals",
"self",
".",
"_last_vals"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1387-L1395
|
[
"def",
"_set_err_paramvals",
"(",
"self",
")",
":",
"# self.param_vals = p0 #sloppy...",
"self",
".",
"_last_vals",
"=",
"self",
".",
"param_vals",
".",
"copy",
"(",
")",
"self",
".",
"error",
"=",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")",
"self",
".",
"_last_error",
"=",
"(",
"1",
"+",
"2",
"*",
"self",
".",
"fractol",
")",
"*",
"self",
".",
"error"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMFunction.calc_J
|
Updates self.J, returns nothing
|
peri/opt/optimize.py
|
def calc_J(self):
"""Updates self.J, returns nothing"""
del self.J
self.J = np.zeros([self.param_vals.size, self.data.size])
dp = np.zeros_like(self.param_vals)
f0 = self.model.copy()
for a in range(self.param_vals.size):
dp *= 0
dp[a] = self.dl[a]
f1 = self.func(self.param_vals + dp, *self.func_args, **self.func_kwargs)
grad_func = (f1 - f0) / dp[a]
#J = grad(residuals) = -grad(model)
self.J[a] = -grad_func
|
def calc_J(self):
"""Updates self.J, returns nothing"""
del self.J
self.J = np.zeros([self.param_vals.size, self.data.size])
dp = np.zeros_like(self.param_vals)
f0 = self.model.copy()
for a in range(self.param_vals.size):
dp *= 0
dp[a] = self.dl[a]
f1 = self.func(self.param_vals + dp, *self.func_args, **self.func_kwargs)
grad_func = (f1 - f0) / dp[a]
#J = grad(residuals) = -grad(model)
self.J[a] = -grad_func
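
A standalone sketch of the same forward-difference Jacobian pattern on a toy exponential model (the names func, data, dl here are hypothetical stand-ins, not the class attributes):

import numpy as np

x = np.linspace(0, 1, 50)
func = lambda p: p[0] * np.exp(-x / p[1])   # toy model
data = func(np.array([2.0, 0.3]))
p = np.array([1.5, 0.4])
dl = np.array([1e-6, 1e-6])

f0 = func(p)
J = np.zeros((p.size, data.size))
for a in range(p.size):
    dp = np.zeros_like(p)
    dp[a] = dl[a]
    J[a] = -(func(p + dp) - f0) / dl[a]     # J = grad(residuals) = -grad(model)
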
|
[
"Updates",
"self",
".",
"J",
"returns",
"nothing"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1397-L1409
|
[
"def",
"calc_J",
"(",
"self",
")",
":",
"del",
"self",
".",
"J",
"self",
".",
"J",
"=",
"np",
".",
"zeros",
"(",
"[",
"self",
".",
"param_vals",
".",
"size",
",",
"self",
".",
"data",
".",
"size",
"]",
")",
"dp",
"=",
"np",
".",
"zeros_like",
"(",
"self",
".",
"param_vals",
")",
"f0",
"=",
"self",
".",
"model",
".",
"copy",
"(",
")",
"for",
"a",
"in",
"range",
"(",
"self",
".",
"param_vals",
".",
"size",
")",
":",
"dp",
"*=",
"0",
"dp",
"[",
"a",
"]",
"=",
"self",
".",
"dl",
"[",
"a",
"]",
"f1",
"=",
"self",
".",
"func",
"(",
"self",
".",
"param_vals",
"+",
"dp",
",",
"*",
"self",
".",
"func_args",
",",
"*",
"*",
"self",
".",
"func_kwargs",
")",
"grad_func",
"=",
"(",
"f1",
"-",
"f0",
")",
"/",
"dp",
"[",
"a",
"]",
"#J = grad(residuals) = -grad(model)",
"self",
".",
"J",
"[",
"a",
"]",
"=",
"-",
"grad_func"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMFunction.update_function
|
Takes an array param_vals, updates function, returns the new error
|
peri/opt/optimize.py
|
def update_function(self, param_vals):
"""Takes an array param_vals, updates function, returns the new error"""
self.model = self.func(param_vals, *self.func_args, **self.func_kwargs)
d = self.calc_residuals()
return np.dot(d.flat, d.flat)
|
def update_function(self, param_vals):
"""Takes an array param_vals, updates function, returns the new error"""
self.model = self.func(param_vals, *self.func_args, **self.func_kwargs)
d = self.calc_residuals()
return np.dot(d.flat, d.flat)
|
[
"Takes",
"an",
"array",
"param_vals",
"updates",
"function",
"returns",
"the",
"new",
"error"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1414-L1418
|
[
"def",
"update_function",
"(",
"self",
",",
"param_vals",
")",
":",
"self",
".",
"model",
"=",
"self",
".",
"func",
"(",
"param_vals",
",",
"*",
"self",
".",
"func_args",
",",
"*",
"*",
"self",
".",
"func_kwargs",
")",
"d",
"=",
"self",
".",
"calc_residuals",
"(",
")",
"return",
"np",
".",
"dot",
"(",
"d",
".",
"flat",
",",
"d",
".",
"flat",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMOptObj.update_function
|
Updates the opt_obj, returns new error.
|
peri/opt/optimize.py
|
def update_function(self, param_vals):
"""Updates the opt_obj, returns new error."""
self.opt_obj.update_function(param_vals)
return self.opt_obj.get_error()
|
def update_function(self, param_vals):
"""Updates the opt_obj, returns new error."""
self.opt_obj.update_function(param_vals)
return self.opt_obj.get_error()
|
[
"Updates",
"the",
"opt_obj",
"returns",
"new",
"error",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1469-L1472
|
[
"def",
"update_function",
"(",
"self",
",",
"param_vals",
")",
":",
"self",
".",
"opt_obj",
".",
"update_function",
"(",
"param_vals",
")",
"return",
"self",
".",
"opt_obj",
".",
"get_error",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
OptState.update_function
|
Updates with param_vals[i] = distance from self.p0 along self.direction[i].
|
peri/opt/optimize.py
|
def update_function(self, param_vals):
"""Updates with param_vals[i] = distance from self.p0 along self.direction[i]."""
dp = np.zeros(self.p0.size)
for a in range(param_vals.size):
dp += param_vals[a] * self.directions[a]
self.state.update(self.state.params, self.p0 + dp)
self.param_vals[:] = param_vals
return None
|
def update_function(self, param_vals):
"""Updates with param_vals[i] = distance from self.p0 along self.direction[i]."""
dp = np.zeros(self.p0.size)
for a in range(param_vals.size):
dp += param_vals[a] * self.directions[a]
self.state.update(self.state.params, self.p0 + dp)
self.param_vals[:] = param_vals
return None
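
The loop above forms a linear combination of the stored search directions; with the directions stacked as rows it reduces to a single matrix product (standalone toy check, illustrative names):

import numpy as np

rng = np.random.default_rng(7)
p0 = rng.standard_normal(10)
directions = rng.standard_normal((3, 10))   # rows are search directions
param_vals = np.array([0.1, -0.2, 0.05])    # distances along each direction

dp_loop = np.zeros(p0.size)
for a in range(param_vals.size):
    dp_loop += param_vals[a] * directions[a]

dp_vec = param_vals.dot(directions)         # same displacement in one product
assert np.allclose(dp_loop, dp_vec)
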
|
[
"Updates",
"with",
"param_vals",
"[",
"i",
"]",
"=",
"distance",
"from",
"self",
".",
"p0",
"along",
"self",
".",
"direction",
"[",
"i",
"]",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1550-L1557
|
[
"def",
"update_function",
"(",
"self",
",",
"param_vals",
")",
":",
"dp",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"p0",
".",
"size",
")",
"for",
"a",
"in",
"range",
"(",
"param_vals",
".",
"size",
")",
":",
"dp",
"+=",
"param_vals",
"[",
"a",
"]",
"*",
"self",
".",
"directions",
"[",
"a",
"]",
"self",
".",
"state",
".",
"update",
"(",
"self",
".",
"state",
".",
"params",
",",
"self",
".",
"p0",
"+",
"dp",
")",
"self",
".",
"param_vals",
"[",
":",
"]",
"=",
"param_vals",
"return",
"None"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
OptState.calc_J
|
Calculates J along the direction.
|
peri/opt/optimize.py
|
def calc_J(self):
"""Calculates J along the direction."""
r0 = self.state.residuals.copy().ravel()
dl = np.zeros(self.param_vals.size)
p0 = self.param_vals.copy()
J = []
for a in range(self.param_vals.size):
dl *= 0
dl[a] += self.dl
self.update_function(p0 + dl)
r1 = self.state.residuals.copy().ravel()
J.append( (r1-r0)/self.dl)
self.update_function(p0)
return np.array(J)
|
def calc_J(self):
"""Calculates J along the direction."""
r0 = self.state.residuals.copy().ravel()
dl = np.zeros(self.param_vals.size)
p0 = self.param_vals.copy()
J = []
for a in range(self.param_vals.size):
dl *= 0
dl[a] += self.dl
self.update_function(p0 + dl)
r1 = self.state.residuals.copy().ravel()
J.append( (r1-r0)/self.dl)
self.update_function(p0)
return np.array(J)
|
[
"Calculates",
"J",
"along",
"the",
"direction",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1570-L1583
|
[
"def",
"calc_J",
"(",
"self",
")",
":",
"r0",
"=",
"self",
".",
"state",
".",
"residuals",
".",
"copy",
"(",
")",
".",
"ravel",
"(",
")",
"dl",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"param_vals",
".",
"size",
")",
"p0",
"=",
"self",
".",
"param_vals",
".",
"copy",
"(",
")",
"J",
"=",
"[",
"]",
"for",
"a",
"in",
"range",
"(",
"self",
".",
"param_vals",
".",
"size",
")",
":",
"dl",
"*=",
"0",
"dl",
"[",
"a",
"]",
"+=",
"self",
".",
"dl",
"self",
".",
"update_function",
"(",
"p0",
"+",
"dl",
")",
"r1",
"=",
"self",
".",
"state",
".",
"residuals",
".",
"copy",
"(",
")",
".",
"ravel",
"(",
")",
"J",
".",
"append",
"(",
"(",
"r1",
"-",
"r0",
")",
"/",
"self",
".",
"dl",
")",
"self",
".",
"update_function",
"(",
"p0",
")",
"return",
"np",
".",
"array",
"(",
"J",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMGlobals.update_select_J
|
Updates J only for certain parameters, described by the boolean
mask blk.
|
peri/opt/optimize.py
|
def update_select_J(self, blk):
"""
Updates J only for certain parameters, described by the boolean
mask blk.
"""
self.update_function(self.param_vals)
params = np.array(self.param_names)[blk].tolist()
blk_J = -self.state.gradmodel(params=params, inds=self._inds, flat=False)
self.J[blk] = blk_J
#Then we also need to update JTJ:
self.JTJ = np.dot(self.J, self.J.T)
if np.any(np.isnan(self.J)) or np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
|
def update_select_J(self, blk):
"""
Updates J only for certain parameters, described by the boolean
mask blk.
"""
self.update_function(self.param_vals)
params = np.array(self.param_names)[blk].tolist()
blk_J = -self.state.gradmodel(params=params, inds=self._inds, flat=False)
self.J[blk] = blk_J
#Then we also need to update JTJ:
self.JTJ = np.dot(self.J, self.J.T)
if np.any(np.isnan(self.J)) or np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
|
[
"Updates",
"J",
"only",
"for",
"certain",
"parameters",
"described",
"by",
"the",
"boolean",
"mask",
"blk",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1670-L1682
|
[
"def",
"update_select_J",
"(",
"self",
",",
"blk",
")",
":",
"self",
".",
"update_function",
"(",
"self",
".",
"param_vals",
")",
"params",
"=",
"np",
".",
"array",
"(",
"self",
".",
"param_names",
")",
"[",
"blk",
"]",
".",
"tolist",
"(",
")",
"blk_J",
"=",
"-",
"self",
".",
"state",
".",
"gradmodel",
"(",
"params",
"=",
"params",
",",
"inds",
"=",
"self",
".",
"_inds",
",",
"flat",
"=",
"False",
")",
"self",
".",
"J",
"[",
"blk",
"]",
"=",
"blk_J",
"#Then we also need to update JTJ:",
"self",
".",
"JTJ",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"J",
",",
"self",
".",
"J",
".",
"T",
")",
"if",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"self",
".",
"J",
")",
")",
"or",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"self",
".",
"JTJ",
")",
")",
":",
"raise",
"FloatingPointError",
"(",
"'J, JTJ have nans.'",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMGlobals.find_expected_error
|
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
|
peri/opt/optimize.py
|
def find_expected_error(self, delta_params='calc', adjust=True):
"""
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
"""
expected_error = super(LMGlobals, self).find_expected_error(
delta_params=delta_params)
if adjust:
#adjust for num_pix
derr = (expected_error - self.error) * (self.state.residuals.size /
float(self.num_pix))
expected_error = self.error + derr
return expected_error
|
def find_expected_error(self, delta_params='calc', adjust=True):
"""
Returns the error expected after an update if the model were linear.
Parameters
----------
delta_params : {numpy.ndarray, 'calc', or 'perfect'}, optional
The relative change in parameters. If 'calc', uses update
calculated from the current damping, J, etc; if 'perfect',
uses the update calculated with zero damping.
Returns
-------
numpy.float64
The expected error after the update with `delta_params`
"""
expected_error = super(LMGlobals, self).find_expected_error(
delta_params=delta_params)
if adjust:
#adjust for num_pix
derr = (expected_error - self.error) * (self.state.residuals.size /
float(self.num_pix))
expected_error = self.error + derr
return expected_error
|
[
"Returns",
"the",
"error",
"expected",
"after",
"an",
"update",
"if",
"the",
"model",
"were",
"linear",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1684-L1707
|
[
"def",
"find_expected_error",
"(",
"self",
",",
"delta_params",
"=",
"'calc'",
",",
"adjust",
"=",
"True",
")",
":",
"expected_error",
"=",
"super",
"(",
"LMGlobals",
",",
"self",
")",
".",
"find_expected_error",
"(",
"delta_params",
"=",
"delta_params",
")",
"if",
"adjust",
":",
"#adjust for num_pix",
"derr",
"=",
"(",
"expected_error",
"-",
"self",
".",
"error",
")",
"*",
"(",
"self",
".",
"state",
".",
"residuals",
".",
"size",
"/",
"float",
"(",
"self",
".",
"num_pix",
")",
")",
"expected_error",
"=",
"self",
".",
"error",
"+",
"derr",
"return",
"expected_error"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
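
A quick arithmetic sketch of the `adjust` rescaling in `find_expected_error` above. The numbers below are made up purely for illustration: the point is that an error decrease measured on the sampled `num_pix` pixels is scaled up by `residuals.size / num_pix` before being applied to the full error.

error = 100.0              # current full-image error (assumed value)
expected_error_sub = 99.0  # linear-model prediction on the sampled pixels (assumed value)
residuals_size = 1000000   # pixels in the full residuals array (assumed)
num_pix = 50000            # pixels actually sampled for J (assumed)

derr = (expected_error_sub - error) * (residuals_size / float(num_pix))
print(error + derr)        # 80.0 -- a decrease of 1 on the sample becomes 20 on the full image
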
|
valid
|
LMGlobals.calc_model_cosine
|
Calculates the cosine of the residuals with the model.
Parameters
----------
decimate : Int or None, optional
Decimate the residuals by `decimate` pixels. If None, no
decimation is used. Valid only with mode='svd'. Default
is None
mode : {'svd', 'err'}
Which mode to use; see Notes section. Default is 'err'.
Returns
-------
abs_cos : numpy.float64
The absolute value of the model cosine.
Notes
-----
The model cosine is defined in terms of the geometric view of
curve-fitting, as a model manifold embedded in a high-dimensional
space. The model cosine is the cosine of the residuals vector
with its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|`
where :math:`P^T` is the projection operator onto the model manifold
and :math:`r` the residuals. This can be calculated two ways: By
calculating the projection operator P directly with SVD (mode=`svd`),
or by using the expected error if the model were linear to calculate
a model sine first (mode=`err`). Since the SVD of a large matrix is
slow, mode=`err` is faster.
`decimate` allows for every nth pixel only to be counted in the
SVD matrix of J for speed. While this is n x faster, it is
considerably less accurate, so the default is no decimation.
|
peri/opt/optimize.py
|
def calc_model_cosine(self, decimate=None, mode='err'):
"""
Calculates the cosine of the residuals with the model.
Parameters
----------
decimate : Int or None, optional
Decimate the residuals by `decimate` pixels. If None, no
decimation is used. Valid only with mode='svd'. Default
is None
mode : {'svd', 'err'}
Which mode to use; see Notes section. Default is 'err'.
Returns
-------
abs_cos : numpy.float64
The absolute value of the model cosine.
Notes
-----
The model cosine is defined in terms of the geometric view of
curve-fitting, as a model manifold embedded in a high-dimensional
space. The model cosine is the cosine of the residuals vector
with its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|`
where :math:`P^T` is the projection operator onto the model manifold
and :math:`r` the residuals. This can be calculated two ways: By
calculating the projection operator P directly with SVD (mode=`svd`),
or by using the expected error if the model were linear to calculate
a model sine first (mode=`err`). Since the SVD of a large matrix is
slow, mode=`err` is faster.
`decimate` allows for every nth pixel only to be counted in the
SVD matrix of J for speed. While this is n x faster, it is
considerably less accurate, so the default is no decimation.
"""
#we calculate the model cosine only in the data space of the
#sampled indices
if mode == 'err':
expected_error = self.find_expected_error(delta_params='perfect',
adjust=False)
derr = self.error - expected_error
residuals_err = lambda r: np.dot(r,r).sum()
current_partial_error = residuals_err(self.calc_residuals())
expected_partial_error = current_partial_error - derr
model_sine_2 = expected_partial_error / current_partial_error
abs_cos = np.sqrt(1 - model_sine_2)
else:
#superclass is fine
abs_cos = super(self.__class__, self).calc_model_cosine(decimate=
decimate, mode=mode)
return abs_cos
|
def calc_model_cosine(self, decimate=None, mode='err'):
"""
Calculates the cosine of the residuals with the model.
Parameters
----------
decimate : Int or None, optional
Decimate the residuals by `decimate` pixels. If None, no
decimation is used. Valid only with mode='svd'. Default
is None
mode : {'svd', 'err'}
Which mode to use; see Notes section. Default is 'err'.
Returns
-------
abs_cos : numpy.float64
The absolute value of the model cosine.
Notes
-----
The model cosine is defined in terms of the geometric view of
curve-fitting, as a model manifold embedded in a high-dimensional
space. The model cosine is the cosine of the residuals vector
with its projection on the tangent space: :math:`cos(phi) = |P^T r|/|r|`
where :math:`P^T` is the projection operator onto the model manifold
and :math:`r` the residuals. This can be calculated two ways: By
calculating the projection operator P directly with SVD (mode=`svd`),
or by using the expected error if the model were linear to calculate
a model sine first (mode=`err`). Since the SVD of a large matrix is
slow, mode=`err` is faster.
`decimate` allows for every nth pixel only to be counted in the
SVD matrix of J for speed. While this is n x faster, it is
considerably less accurate, so the default is no decimation.
"""
#we calculate the model cosine only in the data space of the
#sampled indices
if mode == 'err':
expected_error = self.find_expected_error(delta_params='perfect',
adjust=False)
derr = self.error - expected_error
residuals_err = lambda r: np.dot(r,r).sum()
current_partial_error = residuals_err(self.calc_residuals())
expected_partial_error = current_partial_error - derr
model_sine_2 = expected_partial_error / current_partial_error
abs_cos = np.sqrt(1 - model_sine_2)
else:
#superclass is fine
abs_cos = super(self.__class__, self).calc_model_cosine(decimate=
decimate, mode=mode)
return abs_cos
|
[
"Calculates",
"the",
"cosine",
"of",
"the",
"residuals",
"with",
"the",
"model",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1709-L1759
|
[
"def",
"calc_model_cosine",
"(",
"self",
",",
"decimate",
"=",
"None",
",",
"mode",
"=",
"'err'",
")",
":",
"#we calculate the model cosine only in the data space of the",
"#sampled indices",
"if",
"mode",
"==",
"'err'",
":",
"expected_error",
"=",
"self",
".",
"find_expected_error",
"(",
"delta_params",
"=",
"'perfect'",
",",
"adjust",
"=",
"False",
")",
"derr",
"=",
"self",
".",
"error",
"-",
"expected_error",
"residuals_err",
"=",
"lambda",
"r",
":",
"np",
".",
"dot",
"(",
"r",
",",
"r",
")",
".",
"sum",
"(",
")",
"current_partial_error",
"=",
"residuals_err",
"(",
"self",
".",
"calc_residuals",
"(",
")",
")",
"expected_partial_error",
"=",
"current_partial_error",
"-",
"derr",
"model_sine_2",
"=",
"expected_partial_error",
"/",
"current_partial_error",
"abs_cos",
"=",
"np",
".",
"sqrt",
"(",
"1",
"-",
"model_sine_2",
")",
"else",
":",
"#superclass is fine",
"abs_cos",
"=",
"super",
"(",
"self",
".",
"__class__",
",",
"self",
")",
".",
"calc_model_cosine",
"(",
"decimate",
"=",
"decimate",
",",
"mode",
"=",
"mode",
")",
"return",
"abs_cos"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
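
A hedged numeric sketch of the `mode='err'` branch of `calc_model_cosine` above: the ratio of the expected (linear-model) partial error to the current partial error is the squared model sine, and the cosine follows from sin^2 + cos^2 = 1. The numbers are invented for illustration only.

import numpy as np

current_partial_error = 10.0    # sum of squared sampled residuals now (assumed)
expected_partial_error = 0.4    # linearized-model prediction after a perfect step (assumed)

model_sine_2 = expected_partial_error / current_partial_error
abs_cos = np.sqrt(1 - model_sine_2)
print(abs_cos)                  # ~0.98: the residuals lie nearly in the model's tangent plane
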
|
valid
|
LMGlobals.calc_grad
|
The gradient of the cost w.r.t. the parameters.
|
peri/opt/optimize.py
|
def calc_grad(self):
"""The gradient of the cost w.r.t. the parameters."""
if self._fresh_JTJ:
return self._graderr
else:
residuals = self.calc_residuals()
return 2*np.dot(self.J, residuals)
|
def calc_grad(self):
"""The gradient of the cost w.r.t. the parameters."""
if self._fresh_JTJ:
return self._graderr
else:
residuals = self.calc_residuals()
return 2*np.dot(self.J, residuals)
|
[
"The",
"gradient",
"of",
"the",
"cost",
"w",
".",
"r",
".",
"t",
".",
"the",
"parameters",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1761-L1767
|
[
"def",
"calc_grad",
"(",
"self",
")",
":",
"if",
"self",
".",
"_fresh_JTJ",
":",
"return",
"self",
".",
"_graderr",
"else",
":",
"residuals",
"=",
"self",
".",
"calc_residuals",
"(",
")",
"return",
"2",
"*",
"np",
".",
"dot",
"(",
"self",
".",
"J",
",",
"residuals",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
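
A self-contained check (not part of peri) of the relation used by `calc_grad` above: for a cost C(p) = sum(r(p)**2), the gradient is 2*J.dot(r) when J is stored with one row per parameter, J[i] = dr/dp_i. The toy residual model below is an assumption chosen only so the result can be verified against finite differences.

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(5, 3))
d = rng.normal(size=5)
residuals = lambda p: d - A.dot(p)          # toy linear residual model (assumed)
p0 = rng.normal(size=3)
dl = 1e-6

# J rows are dr/dp_i, shape (n_params, n_data), matching the layout used above
J = np.array([(residuals(p0 + dl*e) - residuals(p0)) / dl for e in np.eye(3)])
grad_analytic = 2 * J.dot(residuals(p0))

cost = lambda p: np.dot(residuals(p), residuals(p))
grad_fd = np.array([(cost(p0 + dl*e) - cost(p0)) / dl for e in np.eye(3)])
print(np.allclose(grad_analytic, grad_fd, atol=1e-4))   # True
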
|
valid
|
LMParticleGroupCollection.reset
|
Resets the particle groups and optionally the region size and damping.
Parameters
----------
new_region_size : Int or 3-element list-like of ints, optional
The region size for sub-blocking particles. Default is 40
do_calc_size : Bool, optional
If True, calculates the region size internally based on
the maximum allowed memory. Default is True
new_damping : Float or None, optional
The new damping of the optimizer. Set to None to leave
as the default for LMParticles. Default is None.
new_max_mem : Numeric, optional
The maximum allowed memory for J to occupy. Default is 1e9
|
peri/opt/optimize.py
|
def reset(self, new_region_size=None, do_calc_size=True, new_damping=None,
new_max_mem=None):
"""
Resets the particle groups and optionally the region size and damping.
Parameters
----------
new_region_size : : Int or 3-element list-like of ints, optional
The region size for sub-blocking particles. Default is 40
do_calc_size : Bool, optional
If True, calculates the region size internally based on
the maximum allowed memory. Default is True
new_damping : Float or None, optional
The new damping of the optimizer. Set to None to leave
as the default for LMParticles. Default is None.
new_max_mem : Numeric, optional
The maximum allowed memory for J to occupy. Default is 1e9
"""
if new_region_size is not None:
self.region_size = new_region_size
if new_max_mem != None:
self.max_mem = new_max_mem
if do_calc_size:
self.region_size = calc_particle_group_region_size(self.state,
region_size=self.region_size, max_mem=self.max_mem)
self.stats = []
self.particle_groups = separate_particles_into_groups(self.state,
self.region_size, doshift='rand')
if new_damping is not None:
self._kwargs.update({'damping':new_damping})
if self.save_J:
if len(self.particle_groups) > 90:
CLOG.warn('Attempting to create many open files. Consider increasing max_mem and/or region_size to avoid crashes.')
self._tempfiles = []
self._has_saved_J = []
for a in range(len(self.particle_groups)):
#TemporaryFile is automatically deleted
for _ in ['j','tile']:
self._tempfiles.append(tempfile.TemporaryFile(dir=os.getcwd()))
self._has_saved_J.append(False)
|
def reset(self, new_region_size=None, do_calc_size=True, new_damping=None,
new_max_mem=None):
"""
Resets the particle groups and optionally the region size and damping.
Parameters
----------
new_region_size : : Int or 3-element list-like of ints, optional
The region size for sub-blocking particles. Default is 40
do_calc_size : Bool, optional
If True, calculates the region size internally based on
the maximum allowed memory. Default is True
new_damping : Float or None, optional
The new damping of the optimizer. Set to None to leave
as the default for LMParticles. Default is None.
new_max_mem : Numeric, optional
The maximum allowed memory for J to occupy. Default is 1e9
"""
if new_region_size is not None:
self.region_size = new_region_size
if new_max_mem != None:
self.max_mem = new_max_mem
if do_calc_size:
self.region_size = calc_particle_group_region_size(self.state,
region_size=self.region_size, max_mem=self.max_mem)
self.stats = []
self.particle_groups = separate_particles_into_groups(self.state,
self.region_size, doshift='rand')
if new_damping is not None:
self._kwargs.update({'damping':new_damping})
if self.save_J:
if len(self.particle_groups) > 90:
CLOG.warn('Attempting to create many open files. Consider increasing max_mem and/or region_size to avoid crashes.')
self._tempfiles = []
self._has_saved_J = []
for a in range(len(self.particle_groups)):
#TemporaryFile is automatically deleted
for _ in ['j','tile']:
self._tempfiles.append(tempfile.TemporaryFile(dir=os.getcwd()))
self._has_saved_J.append(False)
|
[
"Resets",
"the",
"particle",
"groups",
"and",
"optionally",
"the",
"region",
"size",
"and",
"damping",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1984-L2023
|
[
"def",
"reset",
"(",
"self",
",",
"new_region_size",
"=",
"None",
",",
"do_calc_size",
"=",
"True",
",",
"new_damping",
"=",
"None",
",",
"new_max_mem",
"=",
"None",
")",
":",
"if",
"new_region_size",
"is",
"not",
"None",
":",
"self",
".",
"region_size",
"=",
"new_region_size",
"if",
"new_max_mem",
"!=",
"None",
":",
"self",
".",
"max_mem",
"=",
"new_max_mem",
"if",
"do_calc_size",
":",
"self",
".",
"region_size",
"=",
"calc_particle_group_region_size",
"(",
"self",
".",
"state",
",",
"region_size",
"=",
"self",
".",
"region_size",
",",
"max_mem",
"=",
"self",
".",
"max_mem",
")",
"self",
".",
"stats",
"=",
"[",
"]",
"self",
".",
"particle_groups",
"=",
"separate_particles_into_groups",
"(",
"self",
".",
"state",
",",
"self",
".",
"region_size",
",",
"doshift",
"=",
"'rand'",
")",
"if",
"new_damping",
"is",
"not",
"None",
":",
"self",
".",
"_kwargs",
".",
"update",
"(",
"{",
"'damping'",
":",
"new_damping",
"}",
")",
"if",
"self",
".",
"save_J",
":",
"if",
"len",
"(",
"self",
".",
"particle_groups",
")",
">",
"90",
":",
"CLOG",
".",
"warn",
"(",
"'Attempting to create many open files. Consider increasing max_mem and/or region_size to avoid crashes.'",
")",
"self",
".",
"_tempfiles",
"=",
"[",
"]",
"self",
".",
"_has_saved_J",
"=",
"[",
"]",
"for",
"a",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"particle_groups",
")",
")",
":",
"#TemporaryFile is automatically deleted",
"for",
"_",
"in",
"[",
"'j'",
",",
"'tile'",
"]",
":",
"self",
".",
"_tempfiles",
".",
"append",
"(",
"tempfile",
".",
"TemporaryFile",
"(",
"dir",
"=",
"os",
".",
"getcwd",
"(",
")",
")",
")",
"self",
".",
"_has_saved_J",
".",
"append",
"(",
"False",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
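
A rough, illustrative estimate of why `max_mem` constrains the region size in `reset` above: J needs one float64 per (parameter, pixel) entry, so its footprint grows with both the number of particles in a group and the region volume. The particle count and region size below are assumptions for the example, not peri defaults.

n_particles = 100              # particles in one group (assumed)
n_params = 4 * n_particles     # z, y, x, radius per particle
n_pixels = 40 ** 3             # a 40x40x40 region (assumed)
print(n_params * n_pixels * 8 / 1e9, 'GB')   # ~0.2 GB of J for this one group
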
|
valid
|
LMParticleGroupCollection._do_run
|
workhorse for the self.do_run_xx methods.
|
peri/opt/optimize.py
|
def _do_run(self, mode='1'):
"""workhorse for the self.do_run_xx methods."""
for a in range(len(self.particle_groups)):
group = self.particle_groups[a]
lp = LMParticles(self.state, group, **self._kwargs)
if mode == 'internal':
lp.J, lp.JTJ, lp._dif_tile = self._load_j_diftile(a)
if mode == '1':
lp.do_run_1()
if mode == '2':
lp.do_run_2()
if mode == 'internal':
lp.do_internal_run()
self.stats.append(lp.get_termination_stats(get_cos=self.get_cos))
if self.save_J and (mode != 'internal'):
self._dump_j_diftile(a, lp.J, lp._dif_tile)
self._has_saved_J[a] = True
|
def _do_run(self, mode='1'):
"""workhorse for the self.do_run_xx methods."""
for a in range(len(self.particle_groups)):
group = self.particle_groups[a]
lp = LMParticles(self.state, group, **self._kwargs)
if mode == 'internal':
lp.J, lp.JTJ, lp._dif_tile = self._load_j_diftile(a)
if mode == '1':
lp.do_run_1()
if mode == '2':
lp.do_run_2()
if mode == 'internal':
lp.do_internal_run()
self.stats.append(lp.get_termination_stats(get_cos=self.get_cos))
if self.save_J and (mode != 'internal'):
self._dump_j_diftile(a, lp.J, lp._dif_tile)
self._has_saved_J[a] = True
|
[
"workhorse",
"for",
"the",
"self",
".",
"do_run_xx",
"methods",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2045-L2063
|
[
"def",
"_do_run",
"(",
"self",
",",
"mode",
"=",
"'1'",
")",
":",
"for",
"a",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"particle_groups",
")",
")",
":",
"group",
"=",
"self",
".",
"particle_groups",
"[",
"a",
"]",
"lp",
"=",
"LMParticles",
"(",
"self",
".",
"state",
",",
"group",
",",
"*",
"*",
"self",
".",
"_kwargs",
")",
"if",
"mode",
"==",
"'internal'",
":",
"lp",
".",
"J",
",",
"lp",
".",
"JTJ",
",",
"lp",
".",
"_dif_tile",
"=",
"self",
".",
"_load_j_diftile",
"(",
"a",
")",
"if",
"mode",
"==",
"'1'",
":",
"lp",
".",
"do_run_1",
"(",
")",
"if",
"mode",
"==",
"'2'",
":",
"lp",
".",
"do_run_2",
"(",
")",
"if",
"mode",
"==",
"'internal'",
":",
"lp",
".",
"do_internal_run",
"(",
")",
"self",
".",
"stats",
".",
"append",
"(",
"lp",
".",
"get_termination_stats",
"(",
"get_cos",
"=",
"self",
".",
"get_cos",
")",
")",
"if",
"self",
".",
"save_J",
"and",
"(",
"mode",
"!=",
"'internal'",
")",
":",
"self",
".",
"_dump_j_diftile",
"(",
"a",
",",
"lp",
".",
"J",
",",
"lp",
".",
"_dif_tile",
")",
"self",
".",
"_has_saved_J",
"[",
"a",
"]",
"=",
"True"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMParticleGroupCollection.do_internal_run
|
Calls LMParticles.do_internal_run for each group of particles.
|
peri/opt/optimize.py
|
def do_internal_run(self):
"""Calls LMParticles.do_internal_run for each group of particles."""
if not self.save_J:
raise RuntimeError('self.save_J=True required for do_internal_run()')
if not np.all(self._has_saved_J):
raise RuntimeError('J, JTJ have not been pre-computed. Call do_run_1 or do_run_2')
self._do_run(mode='internal')
|
def do_internal_run(self):
"""Calls LMParticles.do_internal_run for each group of particles."""
if not self.save_J:
raise RuntimeError('self.save_J=True required for do_internal_run()')
if not np.all(self._has_saved_J):
raise RuntimeError('J, JTJ have not been pre-computed. Call do_run_1 or do_run_2')
self._do_run(mode='internal')
|
[
"Calls",
"LMParticles",
".",
"do_internal_run",
"for",
"each",
"group",
"of",
"particles",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2073-L2079
|
[
"def",
"do_internal_run",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"save_J",
":",
"raise",
"RuntimeError",
"(",
"'self.save_J=True required for do_internal_run()'",
")",
"if",
"not",
"np",
".",
"all",
"(",
"self",
".",
"_has_saved_J",
")",
":",
"raise",
"RuntimeError",
"(",
"'J, JTJ have not been pre-computed. Call do_run_1 or do_run_2'",
")",
"self",
".",
"_do_run",
"(",
"mode",
"=",
"'internal'",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
AugmentedState.reset
|
Resets the initial radii used for updating the particles. Call
if any of the particle radii or positions have been changed
external to the augmented state.
|
peri/opt/optimize.py
|
def reset(self):
"""
Resets the initial radii used for updating the particles. Call
if any of the particle radii or positions have been changed
external to the augmented state.
"""
inds = list(range(self.state.obj_get_positions().shape[0]))
self._rad_nms = self.state.param_particle_rad(inds)
self._pos_nms = self.state.param_particle_pos(inds)
self._initial_rad = np.copy(self.state.state[self._rad_nms])
self._initial_pos = np.copy(self.state.state[self._pos_nms]).reshape((-1,3))
self.param_vals[self.rscale_mask] = 0
|
def reset(self):
"""
Resets the initial radii used for updating the particles. Call
if any of the particle radii or positions have been changed
external to the augmented state.
"""
inds = list(range(self.state.obj_get_positions().shape[0]))
self._rad_nms = self.state.param_particle_rad(inds)
self._pos_nms = self.state.param_particle_pos(inds)
self._initial_rad = np.copy(self.state.state[self._rad_nms])
self._initial_pos = np.copy(self.state.state[self._pos_nms]).reshape((-1,3))
self.param_vals[self.rscale_mask] = 0
|
[
"Resets",
"the",
"initial",
"radii",
"used",
"for",
"updating",
"the",
"particles",
".",
"Call",
"if",
"any",
"of",
"the",
"particle",
"radii",
"or",
"positions",
"have",
"been",
"changed",
"external",
"to",
"the",
"augmented",
"state",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2143-L2154
|
[
"def",
"reset",
"(",
"self",
")",
":",
"inds",
"=",
"list",
"(",
"range",
"(",
"self",
".",
"state",
".",
"obj_get_positions",
"(",
")",
".",
"shape",
"[",
"0",
"]",
")",
")",
"self",
".",
"_rad_nms",
"=",
"self",
".",
"state",
".",
"param_particle_rad",
"(",
"inds",
")",
"self",
".",
"_pos_nms",
"=",
"self",
".",
"state",
".",
"param_particle_pos",
"(",
"inds",
")",
"self",
".",
"_initial_rad",
"=",
"np",
".",
"copy",
"(",
"self",
".",
"state",
".",
"state",
"[",
"self",
".",
"_rad_nms",
"]",
")",
"self",
".",
"_initial_pos",
"=",
"np",
".",
"copy",
"(",
"self",
".",
"state",
".",
"state",
"[",
"self",
".",
"_pos_nms",
"]",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"3",
")",
")",
"self",
".",
"param_vals",
"[",
"self",
".",
"rscale_mask",
"]",
"=",
"0"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
AugmentedState._poly
|
Right now legval(z)
|
peri/opt/optimize.py
|
def _poly(self, z):
"""Right now legval(z)"""
shp = self.state.oshape.shape
zmax = float(shp[0])
zmin = 0.0
zmid = zmax * 0.5
coeffs = self.param_vals[self.rscale_mask].copy()
if coeffs.size == 0:
ans = 0*z
else:
ans = np.polynomial.legendre.legval((z-zmid)/zmid,
self.param_vals[self.rscale_mask])
return ans
|
def _poly(self, z):
"""Right now legval(z)"""
shp = self.state.oshape.shape
zmax = float(shp[0])
zmin = 0.0
zmid = zmax * 0.5
coeffs = self.param_vals[self.rscale_mask].copy()
if coeffs.size == 0:
ans = 0*z
else:
ans = np.polynomial.legendre.legval((z-zmid)/zmid,
self.param_vals[self.rscale_mask])
return ans
|
[
"Right",
"now",
"legval",
"(",
"z",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2160-L2173
|
[
"def",
"_poly",
"(",
"self",
",",
"z",
")",
":",
"shp",
"=",
"self",
".",
"state",
".",
"oshape",
".",
"shape",
"zmax",
"=",
"float",
"(",
"shp",
"[",
"0",
"]",
")",
"zmin",
"=",
"0.0",
"zmid",
"=",
"zmax",
"*",
"0.5",
"coeffs",
"=",
"self",
".",
"param_vals",
"[",
"self",
".",
"rscale_mask",
"]",
".",
"copy",
"(",
")",
"if",
"coeffs",
".",
"size",
"==",
"0",
":",
"ans",
"=",
"0",
"*",
"z",
"else",
":",
"ans",
"=",
"np",
".",
"polynomial",
".",
"legendre",
".",
"legval",
"(",
"(",
"z",
"-",
"zmid",
")",
"/",
"zmid",
",",
"self",
".",
"param_vals",
"[",
"self",
".",
"rscale_mask",
"]",
")",
"return",
"ans"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
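
A small stand-alone sketch of the Legendre evaluation `_poly` performs above: z is mapped from [0, zmax] onto roughly [-1, 1] before `legval` is applied. The zmax and coefficient values are made up for the example.

import numpy as np

zmax = 50.0
zmid = zmax * 0.5
coeffs = np.array([0.0, 0.02])      # hypothetical rscale(z) coefficients
z = np.linspace(0, zmax, 5)
print(np.polynomial.legendre.legval((z - zmid) / zmid, coeffs))
# [-0.02 -0.01  0.    0.01  0.02]  -- a gentle linear tilt along z
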
|
valid
|
AugmentedState.update
|
Updates all the parameters of the state + rscale(z)
|
peri/opt/optimize.py
|
def update(self, param_vals):
"""Updates all the parameters of the state + rscale(z)"""
self.update_rscl_x_params(param_vals[self.rscale_mask])
self.state.update(self.param_names, param_vals[self.globals_mask])
self.param_vals[:] = param_vals.copy()
if np.any(np.isnan(self.state.residuals)):
raise FloatingPointError('state update caused nans in residuals')
|
def update(self, param_vals):
"""Updates all the parameters of the state + rscale(z)"""
self.update_rscl_x_params(param_vals[self.rscale_mask])
self.state.update(self.param_names, param_vals[self.globals_mask])
self.param_vals[:] = param_vals.copy()
if np.any(np.isnan(self.state.residuals)):
raise FloatingPointError('state update caused nans in residuals')
|
[
"Updates",
"all",
"the",
"parameters",
"of",
"the",
"state",
"+",
"rscale",
"(",
"z",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2175-L2181
|
[
"def",
"update",
"(",
"self",
",",
"param_vals",
")",
":",
"self",
".",
"update_rscl_x_params",
"(",
"param_vals",
"[",
"self",
".",
"rscale_mask",
"]",
")",
"self",
".",
"state",
".",
"update",
"(",
"self",
".",
"param_names",
",",
"param_vals",
"[",
"self",
".",
"globals_mask",
"]",
")",
"self",
".",
"param_vals",
"[",
":",
"]",
"=",
"param_vals",
".",
"copy",
"(",
")",
"if",
"np",
".",
"any",
"(",
"np",
".",
"isnan",
"(",
"self",
".",
"state",
".",
"residuals",
")",
")",
":",
"raise",
"FloatingPointError",
"(",
"'state update caused nans in residuals'",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
LMAugmentedState.reset
|
Resets the aug_state and the LMEngine
|
peri/opt/optimize.py
|
def reset(self, **kwargs):
"""Resets the aug_state and the LMEngine"""
self.aug_state.reset()
super(LMAugmentedState, self).reset(**kwargs)
|
def reset(self, **kwargs):
"""Resets the aug_state and the LMEngine"""
self.aug_state.reset()
super(LMAugmentedState, self).reset(**kwargs)
|
[
"Resets",
"the",
"aug_state",
"and",
"the",
"LMEngine"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L2298-L2301
|
[
"def",
"reset",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"aug_state",
".",
"reset",
"(",
")",
"super",
"(",
"LMAugmentedState",
",",
"self",
")",
".",
"reset",
"(",
"*",
"*",
"kwargs",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
Link.get_shares
|
Returns an object with the number of shares a link has had using
Buffer.
www will be stripped, but other subdomains will not.
|
buffpy/models/link.py
|
def get_shares(self):
'''
Returns an object with a the numbers of shares a link has had using
Buffer.
www will be stripped, but other subdomains will not.
'''
self.shares = self.api.get(url=PATHS['GET_SHARES'] % self.url)['shares']
return self.shares
|
def get_shares(self):
'''
Returns an object with a the numbers of shares a link has had using
Buffer.
www will be stripped, but other subdomains will not.
'''
self.shares = self.api.get(url=PATHS['GET_SHARES'] % self.url)['shares']
return self.shares
|
[
"Returns",
"an",
"object",
"with",
"a",
"the",
"numbers",
"of",
"shares",
"a",
"link",
"has",
"had",
"using",
"Buffer",
"."
] |
vtemian/buffpy
|
python
|
https://github.com/vtemian/buffpy/blob/6c9236fd3b6a8f9e2d70dbf1bc01529242b73075/buffpy/models/link.py#L18-L28
|
[
"def",
"get_shares",
"(",
"self",
")",
":",
"self",
".",
"shares",
"=",
"self",
".",
"api",
".",
"get",
"(",
"url",
"=",
"PATHS",
"[",
"'GET_SHARES'",
"]",
"%",
"self",
".",
"url",
")",
"[",
"'shares'",
"]",
"return",
"self",
".",
"shares"
] |
6c9236fd3b6a8f9e2d70dbf1bc01529242b73075
|
valid
|
sample
|
Take a sample from a field given flat indices or a shaped slice
Parameters
-----------
inds : list of indices
One dimensional (raveled) indices to return from the field
slicer : slice object
A shaped (3D) slicer that returns a section of image
flat : boolean
Whether to flatten the sampled item before returning
|
peri/states.py
|
def sample(field, inds=None, slicer=None, flat=True):
"""
Take a sample from a field given flat indices or a shaped slice
Parameters
-----------
inds : list of indices
One dimensional (raveled) indices to return from the field
slicer : slice object
A shaped (3D) slicer that returns a section of image
flat : boolean
Whether to flatten the sampled item before returning
"""
if inds is not None:
out = field.ravel()[inds]
elif slicer is not None:
out = field[slicer].ravel()
else:
out = field
if flat:
return out.ravel()
return out
|
def sample(field, inds=None, slicer=None, flat=True):
"""
Take a sample from a field given flat indices or a shaped slice
Parameters
-----------
inds : list of indices
One dimensional (raveled) indices to return from the field
slicer : slice object
A shaped (3D) slicer that returns a section of image
flat : boolean
Whether to flatten the sampled item before returning
"""
if inds is not None:
out = field.ravel()[inds]
elif slicer is not None:
out = field[slicer].ravel()
else:
out = field
if flat:
return out.ravel()
return out
|
[
"Take",
"a",
"sample",
"from",
"a",
"field",
"given",
"flat",
"indices",
"or",
"a",
"shaped",
"slice"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L20-L44
|
[
"def",
"sample",
"(",
"field",
",",
"inds",
"=",
"None",
",",
"slicer",
"=",
"None",
",",
"flat",
"=",
"True",
")",
":",
"if",
"inds",
"is",
"not",
"None",
":",
"out",
"=",
"field",
".",
"ravel",
"(",
")",
"[",
"inds",
"]",
"elif",
"slicer",
"is",
"not",
"None",
":",
"out",
"=",
"field",
"[",
"slicer",
"]",
".",
"ravel",
"(",
")",
"else",
":",
"out",
"=",
"field",
"if",
"flat",
":",
"return",
"out",
".",
"ravel",
"(",
")",
"return",
"out"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
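
A short usage sketch of `sample` above. The function is restated inline so the snippet runs on its own; in practice it would come from `peri.states`.

import numpy as np

def sample(field, inds=None, slicer=None, flat=True):
    # same logic as above: flat indices take precedence over a shaped slicer
    if inds is not None:
        out = field.ravel()[inds]
    elif slicer is not None:
        out = field[slicer].ravel()
    else:
        out = field
    return out.ravel() if flat else out

field = np.arange(27.0).reshape(3, 3, 3)
print(sample(field, inds=[0, 4, 8]))                   # [0. 4. 8.]
print(sample(field, slicer=np.s_[0:1, :, 0:2]).shape)  # (6,)
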
|
valid
|
save
|
Save the current state with extra information (for example samples and LL
from the optimization procedure).
Parameters
----------
state : peri.states.ImageState
the state object which to save
filename : string
if provided, will override the default that is constructed based on
the state's raw image file. If there is no filename and the state has
a RawImage, then it is saved to RawImage.filename + "-peri-save.pkl"
desc : string
if provided, will augment the default filename to be
RawImage.filename + '-peri-' + desc + '.pkl'
extra : list of pickleable objects
if provided, will be saved with the state
|
peri/states.py
|
def save(state, filename=None, desc='', extra=None):
"""
Save the current state with extra information (for example samples and LL
from the optimization procedure).
Parameters
----------
state : peri.states.ImageState
the state object which to save
filename : string
if provided, will override the default that is constructed based on
the state's raw image file. If there is no filename and the state has
a RawImage, the it is saved to RawImage.filename + "-peri-save.pkl"
desc : string
if provided, will augment the default filename to be
RawImage.filename + '-peri-' + desc + '.pkl'
extra : list of pickleable objects
if provided, will be saved with the state
"""
if isinstance(state.image, util.RawImage):
desc = desc or 'save'
filename = filename or state.image.filename + '-peri-' + desc + '.pkl'
else:
if not filename:
raise AttributeError("Must provide filename since RawImage is not used")
if extra is None:
save = state
else:
save = [state] + extra
if os.path.exists(filename):
ff = "{}-tmp-for-copy".format(filename)
if os.path.exists(ff):
os.remove(ff)
os.rename(filename, ff)
pickle.dump(save, open(filename, 'wb'), protocol=2)
|
def save(state, filename=None, desc='', extra=None):
"""
Save the current state with extra information (for example samples and LL
from the optimization procedure).
Parameters
----------
state : peri.states.ImageState
the state object which to save
filename : string
if provided, will override the default that is constructed based on
the state's raw image file. If there is no filename and the state has
a RawImage, the it is saved to RawImage.filename + "-peri-save.pkl"
desc : string
if provided, will augment the default filename to be
RawImage.filename + '-peri-' + desc + '.pkl'
extra : list of pickleable objects
if provided, will be saved with the state
"""
if isinstance(state.image, util.RawImage):
desc = desc or 'save'
filename = filename or state.image.filename + '-peri-' + desc + '.pkl'
else:
if not filename:
raise AttributeError("Must provide filename since RawImage is not used")
if extra is None:
save = state
else:
save = [state] + extra
if os.path.exists(filename):
ff = "{}-tmp-for-copy".format(filename)
if os.path.exists(ff):
os.remove(ff)
os.rename(filename, ff)
pickle.dump(save, open(filename, 'wb'), protocol=2)
|
[
"Save",
"the",
"current",
"state",
"with",
"extra",
"information",
"(",
"for",
"example",
"samples",
"and",
"LL",
"from",
"the",
"optimization",
"procedure",
")",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L850-L892
|
[
"def",
"save",
"(",
"state",
",",
"filename",
"=",
"None",
",",
"desc",
"=",
"''",
",",
"extra",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"state",
".",
"image",
",",
"util",
".",
"RawImage",
")",
":",
"desc",
"=",
"desc",
"or",
"'save'",
"filename",
"=",
"filename",
"or",
"state",
".",
"image",
".",
"filename",
"+",
"'-peri-'",
"+",
"desc",
"+",
"'.pkl'",
"else",
":",
"if",
"not",
"filename",
":",
"raise",
"AttributeError",
"(",
"\"Must provide filename since RawImage is not used\"",
")",
"if",
"extra",
"is",
"None",
":",
"save",
"=",
"state",
"else",
":",
"save",
"=",
"[",
"state",
"]",
"+",
"extra",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"ff",
"=",
"\"{}-tmp-for-copy\"",
".",
"format",
"(",
"filename",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"ff",
")",
":",
"os",
".",
"remove",
"(",
"ff",
")",
"os",
".",
"rename",
"(",
"filename",
",",
"ff",
")",
"pickle",
".",
"dump",
"(",
"save",
",",
"open",
"(",
"filename",
",",
"'wb'",
")",
",",
"protocol",
"=",
"2",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
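
A self-contained sketch of the overwrite pattern `save` uses above: an existing file is first parked under a temporary name, so an interrupted pickle dump does not destroy the previous save. The filename and payload here are placeholders, not peri conventions.

import os
import pickle

def safe_pickle(obj, filename):
    if os.path.exists(filename):
        backup = "{}-tmp-for-copy".format(filename)
        if os.path.exists(backup):
            os.remove(backup)
        os.rename(filename, backup)      # park the old file before writing the new one
    with open(filename, 'wb') as f:
        pickle.dump(obj, f, protocol=2)

safe_pickle({'params': [1.0, 2.0]}, 'demo-peri-save.pkl')
with open('demo-peri-save.pkl', 'rb') as f:
    print(pickle.load(f))                # {'params': [1.0, 2.0]}
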
|
valid
|
load
|
Load the state from the given file, moving to the file's directory during
load (temporarily, moving back after loaded)
Parameters
----------
filename : string
name of the file to open, should be a .pkl file
|
peri/states.py
|
def load(filename):
"""
Load the state from the given file, moving to the file's directory during
load (temporarily, moving back after loaded)
Parameters
----------
filename : string
name of the file to open, should be a .pkl file
"""
path, name = os.path.split(filename)
path = path or '.'
with util.indir(path):
return pickle.load(open(name, 'rb'))
|
def load(filename):
"""
Load the state from the given file, moving to the file's directory during
load (temporarily, moving back after loaded)
Parameters
----------
filename : string
name of the file to open, should be a .pkl file
"""
path, name = os.path.split(filename)
path = path or '.'
with util.indir(path):
return pickle.load(open(name, 'rb'))
|
[
"Load",
"the",
"state",
"from",
"the",
"given",
"file",
"moving",
"to",
"the",
"file",
"s",
"directory",
"during",
"load",
"(",
"temporarily",
"moving",
"back",
"after",
"loaded",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L894-L908
|
[
"def",
"load",
"(",
"filename",
")",
":",
"path",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"path",
"=",
"path",
"or",
"'.'",
"with",
"util",
".",
"indir",
"(",
"path",
")",
":",
"return",
"pickle",
".",
"load",
"(",
"open",
"(",
"name",
",",
"'rb'",
")",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State.error
|
Class property: Sum of the squared errors,
:math:`E = \sum_i (D_i - M_i(\\theta))^2`
|
peri/states.py
|
def error(self):
"""
Class property: Sum of the squared errors,
:math:`E = \sum_i (D_i - M_i(\\theta))^2`
"""
r = self.residuals.ravel()
return np.dot(r,r)
|
def error(self):
"""
Class property: Sum of the squared errors,
:math:`E = \sum_i (D_i - M_i(\\theta))^2`
"""
r = self.residuals.ravel()
return np.dot(r,r)
|
[
"Class",
"property",
":",
"Sum",
"of",
"the",
"squared",
"errors",
":",
"math",
":",
"E",
"=",
"\\",
"sum_i",
"(",
"D_i",
"-",
"M_i",
"(",
"\\\\",
"theta",
"))",
"^2"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L166-L172
|
[
"def",
"error",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"residuals",
".",
"ravel",
"(",
")",
"return",
"np",
".",
"dot",
"(",
"r",
",",
"r",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State.loglikelihood
|
Class property: loglikelihood calculated by the model error,
:math:`\\mathcal{L} = - \\frac{1}{2} \\sum\\left[
\\left(\\frac{D_i - M_i(\\theta)}{\sigma}\\right)^2
+ \\log{(2\pi \sigma^2)} \\right]`
|
peri/states.py
|
def loglikelihood(self):
"""
Class property: loglikelihood calculated by the model error,
:math:`\\mathcal{L} = - \\frac{1}{2} \\sum\\left[
\\left(\\frac{D_i - M_i(\\theta)}{\sigma}\\right)^2
+ \\log{(2\pi \sigma^2)} \\right]`
"""
sig = self.hyper_parameters.get_values('sigma')
err = self.error
N = np.size(self.data)
return -0.5*err/sig**2 - np.log(np.sqrt(2*np.pi)*sig)*N
|
def loglikelihood(self):
"""
Class property: loglikelihood calculated by the model error,
:math:`\\mathcal{L} = - \\frac{1}{2} \\sum\\left[
\\left(\\frac{D_i - M_i(\\theta)}{\sigma}\\right)^2
+ \\log{(2\pi \sigma^2)} \\right]`
"""
sig = self.hyper_parameters.get_values('sigma')
err = self.error
N = np.size(self.data)
return -0.5*err/sig**2 - np.log(np.sqrt(2*np.pi)*sig)*N
|
[
"Class",
"property",
":",
"loglikelihood",
"calculated",
"by",
"the",
"model",
"error",
":",
"math",
":",
"\\\\",
"mathcal",
"{",
"L",
"}",
"=",
"-",
"\\\\",
"frac",
"{",
"1",
"}",
"{",
"2",
"}",
"\\\\",
"sum",
"\\\\",
"left",
"[",
"\\\\",
"left",
"(",
"\\\\",
"frac",
"{",
"D_i",
"-",
"M_i",
"(",
"\\\\",
"theta",
")",
"}",
"{",
"\\",
"sigma",
"}",
"\\\\",
"right",
")",
"^2",
"+",
"\\\\",
"log",
"{",
"(",
"2",
"\\",
"pi",
"\\",
"sigma^2",
")",
"}",
"\\\\",
"right",
"]"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L175-L185
|
[
"def",
"loglikelihood",
"(",
"self",
")",
":",
"sig",
"=",
"self",
".",
"hyper_parameters",
".",
"get_values",
"(",
"'sigma'",
")",
"err",
"=",
"self",
".",
"error",
"N",
"=",
"np",
".",
"size",
"(",
"self",
".",
"data",
")",
"return",
"-",
"0.5",
"*",
"err",
"/",
"sig",
"**",
"2",
"-",
"np",
".",
"log",
"(",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"*",
"sig",
")",
"*",
"N"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
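
A hedged sanity check of the Gaussian log-likelihood formula in `loglikelihood` above: it should equal the sum of independent normal log-densities of the residuals at scale sigma. The residuals and sigma below are invented for the check.

import numpy as np

rng = np.random.default_rng(1)
r = rng.normal(scale=0.05, size=1000)    # stand-in residuals
sig = 0.05

err = np.dot(r, r)
ll_formula = -0.5 * err / sig**2 - np.log(np.sqrt(2*np.pi) * sig) * r.size
ll_direct = np.sum(-0.5 * (r/sig)**2 - 0.5 * np.log(2*np.pi * sig**2))
print(np.allclose(ll_formula, ll_direct))   # True
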
|
valid
|
State.update
|
Update a single parameter or group of parameters ``params``
with ``values``.
Parameters
----------
params : string or list of strings
Parameter names which to update
value : number or list of numbers
Values of those parameters which to update
|
peri/states.py
|
def update(self, params, values):
"""
Update a single parameter or group of parameters ``params``
with ``values``.
Parameters
----------
params : string or list of strings
Parameter names which to update
value : number or list of numbers
Values of those parameters which to update
"""
return super(State, self).update(params, values)
|
def update(self, params, values):
"""
Update a single parameter or group of parameters ``params``
with ``values``.
Parameters
----------
params : string or list of strings
Parameter names which to update
value : number or list of numbers
Values of those parameters which to update
"""
return super(State, self).update(params, values)
|
[
"Update",
"a",
"single",
"parameter",
"or",
"group",
"of",
"parameters",
"params",
"with",
"values",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L204-L217
|
[
"def",
"update",
"(",
"self",
",",
"params",
",",
"values",
")",
":",
"return",
"super",
"(",
"State",
",",
"self",
")",
".",
"update",
"(",
"params",
",",
"values",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State.push_update
|
Perform a parameter update and keep track of the change on the state.
Same call structure as :func:`peri.states.States.update`
|
peri/states.py
|
def push_update(self, params, values):
"""
Perform a parameter update and keep track of the change on the state.
Same call structure as :func:`peri.states.States.update`
"""
curr = self.get_values(params)
self.stack.append((params, curr))
self.update(params, values)
|
def push_update(self, params, values):
"""
Perform a parameter update and keep track of the change on the state.
Same call structure as :func:`peri.states.States.update`
"""
curr = self.get_values(params)
self.stack.append((params, curr))
self.update(params, values)
|
[
"Perform",
"a",
"parameter",
"update",
"and",
"keep",
"track",
"of",
"the",
"change",
"on",
"the",
"state",
".",
"Same",
"call",
"structure",
"as",
":",
"func",
":",
"peri",
".",
"states",
".",
"States",
".",
"update"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L238-L245
|
[
"def",
"push_update",
"(",
"self",
",",
"params",
",",
"values",
")",
":",
"curr",
"=",
"self",
".",
"get_values",
"(",
"params",
")",
"self",
".",
"stack",
".",
"append",
"(",
"(",
"params",
",",
"curr",
")",
")",
"self",
".",
"update",
"(",
"params",
",",
"values",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State.pop_update
|
Pop the last update from the stack pushed by
:func:`peri.states.States.push_update` by undoing the change last
performed.
|
peri/states.py
|
def pop_update(self):
"""
Pop the last update from the stack push by
:func:`peri.states.States.push_update` by undoing the chnage last
performed.
"""
params, values = self.stack.pop()
self.update(params, values)
|
def pop_update(self):
"""
Pop the last update from the stack push by
:func:`peri.states.States.push_update` by undoing the chnage last
performed.
"""
params, values = self.stack.pop()
self.update(params, values)
|
[
"Pop",
"the",
"last",
"update",
"from",
"the",
"stack",
"push",
"by",
":",
"func",
":",
"peri",
".",
"states",
".",
"States",
".",
"push_update",
"by",
"undoing",
"the",
"chnage",
"last",
"performed",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L247-L254
|
[
"def",
"pop_update",
"(",
"self",
")",
":",
"params",
",",
"values",
"=",
"self",
".",
"stack",
".",
"pop",
"(",
")",
"self",
".",
"update",
"(",
"params",
",",
"values",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State.temp_update
|
Context manager to temporarily perform a parameter update (by using the
stack structure). To use:
with state.temp_update(params, values):
# measure the cost or something
state.error
|
peri/states.py
|
def temp_update(self, params, values):
"""
Context manager to temporarily perform a parameter update (by using the
stack structure). To use:
with state.temp_update(params, values):
# measure the cost or something
state.error
"""
self.push_update(params, values)
yield
self.pop_update()
|
def temp_update(self, params, values):
"""
Context manager to temporarily perform a parameter update (by using the
stack structure). To use:
with state.temp_update(params, values):
# measure the cost or something
state.error
"""
self.push_update(params, values)
yield
self.pop_update()
|
[
"Context",
"manager",
"to",
"temporarily",
"perform",
"a",
"parameter",
"update",
"(",
"by",
"using",
"the",
"stack",
"structure",
")",
".",
"To",
"use",
":"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L257-L268
|
[
"def",
"temp_update",
"(",
"self",
",",
"params",
",",
"values",
")",
":",
"self",
".",
"push_update",
"(",
"params",
",",
"values",
")",
"yield",
"self",
".",
"pop_update",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
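
A minimal stand-alone sketch of the push/pop bookkeeping behind `push_update`, `pop_update`, and `temp_update` above. The real methods live on the State class; the tiny class and parameter names here are illustrative only.

from contextlib import contextmanager

class TinyState(object):
    def __init__(self):
        self.values = {'a': 1.0}
        self.stack = []

    def update(self, param, value):
        self.values[param] = value

    def push_update(self, param, value):
        # remember the current value, then apply the new one
        self.stack.append((param, self.values[param]))
        self.update(param, value)

    def pop_update(self):
        # undo the change last performed
        param, old = self.stack.pop()
        self.update(param, old)

    @contextmanager
    def temp_update(self, param, value):
        self.push_update(param, value)
        yield
        self.pop_update()

s = TinyState()
with s.temp_update('a', 5.0):
    print(s.values['a'])    # 5.0 inside the block
print(s.values['a'])        # back to 1.0 afterwards
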
|
valid
|
State._grad_one_param
|
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
|
peri/states.py
|
def _grad_one_param(self, funct, p, dl=2e-5, rts=False, nout=1, **kwargs):
"""
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
"""
vals = self.get_values(p)
f0 = funct(**kwargs)
self.update(p, vals+dl)
f1 = funct(**kwargs)
if rts:
self.update(p, vals)
if nout == 1:
return (f1 - f0) / dl
else:
return [(f1[i] - f0[i]) / dl for i in range(nout)]
|
def _grad_one_param(self, funct, p, dl=2e-5, rts=False, nout=1, **kwargs):
"""
Gradient of `func` wrt a single parameter `p`. (see _graddoc)
"""
vals = self.get_values(p)
f0 = funct(**kwargs)
self.update(p, vals+dl)
f1 = funct(**kwargs)
if rts:
self.update(p, vals)
if nout == 1:
return (f1 - f0) / dl
else:
return [(f1[i] - f0[i]) / dl for i in range(nout)]
|
[
"Gradient",
"of",
"func",
"wrt",
"a",
"single",
"parameter",
"p",
".",
"(",
"see",
"_graddoc",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L273-L288
|
[
"def",
"_grad_one_param",
"(",
"self",
",",
"funct",
",",
"p",
",",
"dl",
"=",
"2e-5",
",",
"rts",
"=",
"False",
",",
"nout",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"vals",
"=",
"self",
".",
"get_values",
"(",
"p",
")",
"f0",
"=",
"funct",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"update",
"(",
"p",
",",
"vals",
"+",
"dl",
")",
"f1",
"=",
"funct",
"(",
"*",
"*",
"kwargs",
")",
"if",
"rts",
":",
"self",
".",
"update",
"(",
"p",
",",
"vals",
")",
"if",
"nout",
"==",
"1",
":",
"return",
"(",
"f1",
"-",
"f0",
")",
"/",
"dl",
"else",
":",
"return",
"[",
"(",
"f1",
"[",
"i",
"]",
"-",
"f0",
"[",
"i",
"]",
")",
"/",
"dl",
"for",
"i",
"in",
"range",
"(",
"nout",
")",
"]"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
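
A quick numeric check of the forward-difference stencil `_grad_one_param` uses above, (f(p+dl) - f(p)) / dl, on a toy function; the function and step size are assumptions chosen for the check.

import numpy as np

f = lambda p: np.exp(0.5 * p)          # toy scalar function of one parameter
p0, dl = 1.2, 2e-5

num = (f(p0 + dl) - f(p0)) / dl
exact = 0.5 * np.exp(0.5 * p0)
print(num, exact)                      # agree to roughly one part in 1e5
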
|
valid
|
State._hess_two_param
|
Hessian of `func` wrt two parameters `p0` and `p1`. (see _graddoc)
|
peri/states.py
|
def _hess_two_param(self, funct, p0, p1, dl=2e-5, rts=False, **kwargs):
"""
Hessian of `func` wrt two parameters `p0` and `p1`. (see _graddoc)
"""
vals0 = self.get_values(p0)
vals1 = self.get_values(p1)
f00 = funct(**kwargs)
self.update(p0, vals0+dl)
f10 = funct(**kwargs)
self.update(p1, vals1+dl)
f11 = funct(**kwargs)
self.update(p0, vals0)
f01 = funct(**kwargs)
if rts:
self.update(p0, vals0)
self.update(p1, vals1)
return (f11 - f10 - f01 + f00) / (dl**2)
|
def _hess_two_param(self, funct, p0, p1, dl=2e-5, rts=False, **kwargs):
"""
Hessian of `func` wrt two parameters `p0` and `p1`. (see _graddoc)
"""
vals0 = self.get_values(p0)
vals1 = self.get_values(p1)
f00 = funct(**kwargs)
self.update(p0, vals0+dl)
f10 = funct(**kwargs)
self.update(p1, vals1+dl)
f11 = funct(**kwargs)
self.update(p0, vals0)
f01 = funct(**kwargs)
if rts:
self.update(p0, vals0)
self.update(p1, vals1)
return (f11 - f10 - f01 + f00) / (dl**2)
|
[
"Hessian",
"of",
"func",
"wrt",
"two",
"parameters",
"p0",
"and",
"p1",
".",
"(",
"see",
"_graddoc",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L290-L311
|
[
"def",
"_hess_two_param",
"(",
"self",
",",
"funct",
",",
"p0",
",",
"p1",
",",
"dl",
"=",
"2e-5",
",",
"rts",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"vals0",
"=",
"self",
".",
"get_values",
"(",
"p0",
")",
"vals1",
"=",
"self",
".",
"get_values",
"(",
"p1",
")",
"f00",
"=",
"funct",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"update",
"(",
"p0",
",",
"vals0",
"+",
"dl",
")",
"f10",
"=",
"funct",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"update",
"(",
"p1",
",",
"vals1",
"+",
"dl",
")",
"f11",
"=",
"funct",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"update",
"(",
"p0",
",",
"vals0",
")",
"f01",
"=",
"funct",
"(",
"*",
"*",
"kwargs",
")",
"if",
"rts",
":",
"self",
".",
"update",
"(",
"p0",
",",
"vals0",
")",
"self",
".",
"update",
"(",
"p1",
",",
"vals1",
")",
"return",
"(",
"f11",
"-",
"f10",
"-",
"f01",
"+",
"f00",
")",
"/",
"(",
"dl",
"**",
"2",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
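
A hedged numeric check of the mixed-partial stencil `_hess_two_param` implements above: d2f/dp0 dp1 ~ (f11 - f10 - f01 + f00) / dl**2. The toy function is an assumption chosen so the analytic answer is known.

import numpy as np

f = lambda x, y: np.sin(x) * np.cos(y)   # analytic mixed partial: -cos(x)*sin(y)
x0, y0, h = 0.3, 0.7, 1e-4

num = (f(x0+h, y0+h) - f(x0+h, y0) - f(x0, y0+h) + f(x0, y0)) / h**2
exact = -np.cos(x0) * np.sin(y0)
print(num, exact)                        # agree to about 1e-4
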
|
valid
|
State._grad
|
Gradient of `func` wrt a set of parameters params. (see _graddoc)
|
peri/states.py
|
def _grad(self, funct, params=None, dl=2e-5, rts=False, nout=1, out=None,
**kwargs):
"""
Gradient of `func` wrt a set of parameters params. (see _graddoc)
"""
if params is None:
params = self.param_all()
ps = util.listify(params)
f0 = funct(**kwargs)
# get the shape of the entire gradient to return and make an array
calc_shape = (
lambda ar: (len(ps),) + (ar.shape if isinstance(
ar, np.ndarray) else (1,)))
if out is not None:
grad = out # reference
elif nout == 1:
shape = calc_shape(f0)
grad = np.zeros(shape) # must be preallocated for mem reasons
else:
shape = [calc_shape(f0[i]) for i in range(nout)]
grad = [np.zeros(shp) for shp in shape]
for i, p in enumerate(ps):
if nout == 1:
grad[i] = self._grad_one_param(funct, p, dl=dl, rts=rts,
nout=nout, **kwargs)
else:
stuff = self._grad_one_param(funct, p, dl=dl, rts=rts,
nout=nout, **kwargs)
for a in range(nout): grad[a][i] = stuff[a]
return grad
|
def _grad(self, funct, params=None, dl=2e-5, rts=False, nout=1, out=None,
**kwargs):
"""
Gradient of `func` wrt a set of parameters params. (see _graddoc)
"""
if params is None:
params = self.param_all()
ps = util.listify(params)
f0 = funct(**kwargs)
# get the shape of the entire gradient to return and make an array
calc_shape = (
lambda ar: (len(ps),) + (ar.shape if isinstance(
ar, np.ndarray) else (1,)))
if out is not None:
grad = out # reference
elif nout == 1:
shape = calc_shape(f0)
grad = np.zeros(shape) # must be preallocated for mem reasons
else:
shape = [calc_shape(f0[i]) for i in range(nout)]
grad = [np.zeros(shp) for shp in shape]
for i, p in enumerate(ps):
if nout == 1:
grad[i] = self._grad_one_param(funct, p, dl=dl, rts=rts,
nout=nout, **kwargs)
else:
stuff = self._grad_one_param(funct, p, dl=dl, rts=rts,
nout=nout, **kwargs)
for a in range(nout): grad[a][i] = stuff[a]
return grad
|
[
"Gradient",
"of",
"func",
"wrt",
"a",
"set",
"of",
"parameters",
"params",
".",
"(",
"see",
"_graddoc",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L313-L345
|
[
"def",
"_grad",
"(",
"self",
",",
"funct",
",",
"params",
"=",
"None",
",",
"dl",
"=",
"2e-5",
",",
"rts",
"=",
"False",
",",
"nout",
"=",
"1",
",",
"out",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"self",
".",
"param_all",
"(",
")",
"ps",
"=",
"util",
".",
"listify",
"(",
"params",
")",
"f0",
"=",
"funct",
"(",
"*",
"*",
"kwargs",
")",
"# get the shape of the entire gradient to return and make an array",
"calc_shape",
"=",
"(",
"lambda",
"ar",
":",
"(",
"len",
"(",
"ps",
")",
",",
")",
"+",
"(",
"ar",
".",
"shape",
"if",
"isinstance",
"(",
"ar",
",",
"np",
".",
"ndarray",
")",
"else",
"(",
"1",
",",
")",
")",
")",
"if",
"out",
"is",
"not",
"None",
":",
"grad",
"=",
"out",
"# reference",
"elif",
"nout",
"==",
"1",
":",
"shape",
"=",
"calc_shape",
"(",
"f0",
")",
"grad",
"=",
"np",
".",
"zeros",
"(",
"shape",
")",
"# must be preallocated for mem reasons",
"else",
":",
"shape",
"=",
"[",
"calc_shape",
"(",
"f0",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"nout",
")",
"]",
"grad",
"=",
"[",
"np",
".",
"zeros",
"(",
"shp",
")",
"for",
"shp",
"in",
"shape",
"]",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"ps",
")",
":",
"if",
"nout",
"==",
"1",
":",
"grad",
"[",
"i",
"]",
"=",
"self",
".",
"_grad_one_param",
"(",
"funct",
",",
"p",
",",
"dl",
"=",
"dl",
",",
"rts",
"=",
"rts",
",",
"nout",
"=",
"nout",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"stuff",
"=",
"self",
".",
"_grad_one_param",
"(",
"funct",
",",
"p",
",",
"dl",
"=",
"dl",
",",
"rts",
"=",
"rts",
",",
"nout",
"=",
"nout",
",",
"*",
"*",
"kwargs",
")",
"for",
"a",
"in",
"range",
"(",
"nout",
")",
":",
"grad",
"[",
"a",
"]",
"[",
"i",
"]",
"=",
"stuff",
"[",
"a",
"]",
"return",
"grad"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State._jtj
|
jTj of a `func` wrt parameters `params`. (see _graddoc)
|
peri/states.py
|
def _jtj(self, funct, params=None, dl=2e-5, rts=False, **kwargs):
"""
jTj of a `func` wrt to parmaeters `params`. (see _graddoc)
"""
grad = self._grad(funct=funct, params=params, dl=dl, rts=rts, **kwargs)
return np.dot(grad, grad.T)
|
def _jtj(self, funct, params=None, dl=2e-5, rts=False, **kwargs):
"""
jTj of a `func` wrt to parmaeters `params`. (see _graddoc)
"""
grad = self._grad(funct=funct, params=params, dl=dl, rts=rts, **kwargs)
return np.dot(grad, grad.T)
|
[
"jTj",
"of",
"a",
"func",
"wrt",
"to",
"parmaeters",
"params",
".",
"(",
"see",
"_graddoc",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L348-L353
|
[
"def",
"_jtj",
"(",
"self",
",",
"funct",
",",
"params",
"=",
"None",
",",
"dl",
"=",
"2e-5",
",",
"rts",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"grad",
"=",
"self",
".",
"_grad",
"(",
"funct",
"=",
"funct",
",",
"params",
"=",
"params",
",",
"dl",
"=",
"dl",
",",
"rts",
"=",
"rts",
",",
"*",
"*",
"kwargs",
")",
"return",
"np",
".",
"dot",
"(",
"grad",
",",
"grad",
".",
"T",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State._hess
|
Hessian of a `func` wrt parameters `params`. (see _graddoc)
|
peri/states.py
|
def _hess(self, funct, params=None, dl=2e-5, rts=False, **kwargs):
"""
Hessian of a `func` wrt to parmaeters `params`. (see _graddoc)
"""
if params is None:
params = self.param_all()
ps = util.listify(params)
f0 = funct(**kwargs)
# get the shape of the entire hessian, allocate an array
shape = f0.shape if isinstance(f0, np.ndarray) else (1,)
shape = (len(ps), len(ps)) + shape
hess = np.zeros(shape)
for i, pi in enumerate(ps):
for j, pj in enumerate(ps[i:]):
J = j + i
thess = self._hess_two_param(funct, pi, pj, dl=dl, rts=rts, **kwargs)
hess[i][J] = thess
hess[J][i] = thess
return np.squeeze(hess)
|
def _hess(self, funct, params=None, dl=2e-5, rts=False, **kwargs):
"""
Hessian of a `func` wrt to parmaeters `params`. (see _graddoc)
"""
if params is None:
params = self.param_all()
ps = util.listify(params)
f0 = funct(**kwargs)
# get the shape of the entire hessian, allocate an array
shape = f0.shape if isinstance(f0, np.ndarray) else (1,)
shape = (len(ps), len(ps)) + shape
hess = np.zeros(shape)
for i, pi in enumerate(ps):
for j, pj in enumerate(ps[i:]):
J = j + i
thess = self._hess_two_param(funct, pi, pj, dl=dl, rts=rts, **kwargs)
hess[i][J] = thess
hess[J][i] = thess
return np.squeeze(hess)
|
[
"Hessian",
"of",
"a",
"func",
"wrt",
"to",
"parmaeters",
"params",
".",
"(",
"see",
"_graddoc",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L355-L376
|
[
"def",
"_hess",
"(",
"self",
",",
"funct",
",",
"params",
"=",
"None",
",",
"dl",
"=",
"2e-5",
",",
"rts",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"self",
".",
"param_all",
"(",
")",
"ps",
"=",
"util",
".",
"listify",
"(",
"params",
")",
"f0",
"=",
"funct",
"(",
"*",
"*",
"kwargs",
")",
"# get the shape of the entire hessian, allocate an array",
"shape",
"=",
"f0",
".",
"shape",
"if",
"isinstance",
"(",
"f0",
",",
"np",
".",
"ndarray",
")",
"else",
"(",
"1",
",",
")",
"shape",
"=",
"(",
"len",
"(",
"ps",
")",
",",
"len",
"(",
"ps",
")",
")",
"+",
"shape",
"hess",
"=",
"np",
".",
"zeros",
"(",
"shape",
")",
"for",
"i",
",",
"pi",
"in",
"enumerate",
"(",
"ps",
")",
":",
"for",
"j",
",",
"pj",
"in",
"enumerate",
"(",
"ps",
"[",
"i",
":",
"]",
")",
":",
"J",
"=",
"j",
"+",
"i",
"thess",
"=",
"self",
".",
"_hess_two_param",
"(",
"funct",
",",
"pi",
",",
"pj",
",",
"dl",
"=",
"dl",
",",
"rts",
"=",
"rts",
",",
"*",
"*",
"kwargs",
")",
"hess",
"[",
"i",
"]",
"[",
"J",
"]",
"=",
"thess",
"hess",
"[",
"J",
"]",
"[",
"i",
"]",
"=",
"thess",
"return",
"np",
".",
"squeeze",
"(",
"hess",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State.build_funcs
|
Here, we build gradient and hessian functions based on the properties
of a state that are generally wanted. For each one, we fill in _grad or
_hess with a function that takes care of various options such as
slicing and flattening. For example, `m` below takes the model, selects
different indices from it, maybe flattens it and copies it. This is
then used in the fisherinformation, gradmodel, and hessmodel functions.
|
peri/states.py
|
def build_funcs(self):
"""
Here, we build gradient and hessian functions based on the properties
of a state that are generally wanted. For each one, we fill in _grad or
_hess with a function that takes care of various options such as
slicing and flattening. For example, `m` below takes the model, selects
different indices from it, maybe flattens it and copies it. This is
then used in the fisherinformation, gradmodel, and hessmodel functions.
"""
# create essentially lambda functions, but with a nice signature
def m(inds=None, slicer=None, flat=True):
return sample(self.model, inds=inds, slicer=slicer, flat=flat).copy()
def r(inds=None, slicer=None, flat=True):
return sample(self.residuals, inds=inds, slicer=slicer, flat=flat).copy()
def l():
return self.loglikelihood
def r_e(**kwargs):
"""sliced etc residuals, with state.error appended on"""
return r(**kwargs), np.copy(self.error)
def m_e(**kwargs):
"""sliced etc residuals, with state.error appended on"""
return m(**kwargs), np.copy(self.error)
# set the member functions using partial
self.fisherinformation = partial(self._jtj, funct=m)
self.gradloglikelihood = partial(self._grad, funct=l)
self.hessloglikelihood = partial(self._hess, funct=l)
self.gradmodel = partial(self._grad, funct=m)
self.hessmodel = partial(self._hess, funct=m)
self.JTJ = partial(self._jtj, funct=r)
self.J = partial(self._grad, funct=r)
self.J_e = partial(self._grad, funct=r_e, nout=2)
self.gradmodel_e = partial(self._grad, funct=m_e, nout=2)
# add the appropriate documentation to the following functions
self.fisherinformation.__doc__ = _graddoc + _sampledoc
self.gradloglikelihood.__doc__ = _graddoc
self.hessloglikelihood.__doc__ = _graddoc
self.gradmodel.__doc__ = _graddoc + _sampledoc
self.hessmodel.__doc__ = _graddoc + _sampledoc
self.JTJ.__doc__ = _graddoc + _sampledoc
self.J.__doc__ = _graddoc + _sampledoc
# add documentation to the private functions as well. this is done
# slightly differently, hence the function call
self._dograddoc(self._grad_one_param)
self._dograddoc(self._hess_two_param)
self._dograddoc(self._grad)
self._dograddoc(self._hess)
# the state object is a workaround so that other interfaces still
# work. this should probably be removed in the long run
class _Statewrap(object):
def __init__(self, obj):
self.obj = obj
def __getitem__(self, d=None):
if d is None:
d = self.obj.params
return util.delistify(self.obj.get_values(d), d)
self.state = _Statewrap(self)
|
def build_funcs(self):
"""
Here, we build gradient and hessian functions based on the properties
of a state that are generally wanted. For each one, we fill in _grad or
_hess with a function that takes care of various options such as
slicing and flattening. For example, `m` below takes the model, selects
different indices from it, maybe flattens it and copies it. This is
then used in the fisherinformation, gradmodel, and hessmodel functions.
"""
# create essentially lambda functions, but with a nice signature
def m(inds=None, slicer=None, flat=True):
return sample(self.model, inds=inds, slicer=slicer, flat=flat).copy()
def r(inds=None, slicer=None, flat=True):
return sample(self.residuals, inds=inds, slicer=slicer, flat=flat).copy()
def l():
return self.loglikelihood
def r_e(**kwargs):
"""sliced etc residuals, with state.error appended on"""
return r(**kwargs), np.copy(self.error)
def m_e(**kwargs):
"""sliced etc residuals, with state.error appended on"""
return m(**kwargs), np.copy(self.error)
# set the member functions using partial
self.fisherinformation = partial(self._jtj, funct=m)
self.gradloglikelihood = partial(self._grad, funct=l)
self.hessloglikelihood = partial(self._hess, funct=l)
self.gradmodel = partial(self._grad, funct=m)
self.hessmodel = partial(self._hess, funct=m)
self.JTJ = partial(self._jtj, funct=r)
self.J = partial(self._grad, funct=r)
self.J_e = partial(self._grad, funct=r_e, nout=2)
self.gradmodel_e = partial(self._grad, funct=m_e, nout=2)
# add the appropriate documentation to the following functions
self.fisherinformation.__doc__ = _graddoc + _sampledoc
self.gradloglikelihood.__doc__ = _graddoc
self.hessloglikelihood.__doc__ = _graddoc
self.gradmodel.__doc__ = _graddoc + _sampledoc
self.hessmodel.__doc__ = _graddoc + _sampledoc
self.JTJ.__doc__ = _graddoc + _sampledoc
self.J.__doc__ = _graddoc + _sampledoc
# add documentation to the private functions as well. this is done
# slightly differently, hence the function call
self._dograddoc(self._grad_one_param)
self._dograddoc(self._hess_two_param)
self._dograddoc(self._grad)
self._dograddoc(self._hess)
# the state object is a workaround so that other interfaces still
# work. this should probably be removed in the long run
class _Statewrap(object):
def __init__(self, obj):
self.obj = obj
def __getitem__(self, d=None):
if d is None:
d = self.obj.params
return util.delistify(self.obj.get_values(d), d)
self.state = _Statewrap(self)
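A minimal, self-contained sketch of the `functools.partial` pattern used above: one generic routine is specialized into several named members by pre-binding its `funct` argument. The names `grad_fd` and `loss` below are illustrative only and are not part of peri.
from functools import partial
import numpy as np

def grad_fd(x, funct, dl=1e-6):
    # central finite-difference gradient of a scalar function `funct` at x
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    for i in range(x.size):
        xp, xm = x.copy(), x.copy()
        xp[i] += dl
        xm[i] -= dl
        out[i] = (funct(xp) - funct(xm)) / (2 * dl)
    return out

def loss(x):
    return np.sum(x ** 2)

# analogous to self.gradloglikelihood = partial(self._grad, funct=l) above
gradloss = partial(grad_fd, funct=loss)
print(gradloss(np.array([1.0, 2.0])))   # approximately [2., 4.]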
|
[
"Here",
"we",
"build",
"gradient",
"and",
"hessian",
"functions",
"based",
"on",
"the",
"properties",
"of",
"a",
"state",
"that",
"are",
"generally",
"wanted",
".",
"For",
"each",
"one",
"we",
"fill",
"in",
"_grad",
"or",
"_hess",
"with",
"a",
"function",
"that",
"takes",
"care",
"of",
"various",
"options",
"such",
"as",
"slicing",
"and",
"flattening",
".",
"For",
"example",
"m",
"below",
"takes",
"the",
"model",
"selects",
"different",
"indices",
"from",
"it",
"maybe",
"flattens",
"it",
"and",
"copies",
"it",
".",
"This",
"is",
"then",
"used",
"in",
"the",
"fisherinformation",
"gradmodel",
"and",
"hessmodel",
"functions",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L381-L445
|
[
"def",
"build_funcs",
"(",
"self",
")",
":",
"# create essentially lambda functions, but with a nice signature",
"def",
"m",
"(",
"inds",
"=",
"None",
",",
"slicer",
"=",
"None",
",",
"flat",
"=",
"True",
")",
":",
"return",
"sample",
"(",
"self",
".",
"model",
",",
"inds",
"=",
"inds",
",",
"slicer",
"=",
"slicer",
",",
"flat",
"=",
"flat",
")",
".",
"copy",
"(",
")",
"def",
"r",
"(",
"inds",
"=",
"None",
",",
"slicer",
"=",
"None",
",",
"flat",
"=",
"True",
")",
":",
"return",
"sample",
"(",
"self",
".",
"residuals",
",",
"inds",
"=",
"inds",
",",
"slicer",
"=",
"slicer",
",",
"flat",
"=",
"flat",
")",
".",
"copy",
"(",
")",
"def",
"l",
"(",
")",
":",
"return",
"self",
".",
"loglikelihood",
"def",
"r_e",
"(",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"sliced etc residuals, with state.error appended on\"\"\"",
"return",
"r",
"(",
"*",
"*",
"kwargs",
")",
",",
"np",
".",
"copy",
"(",
"self",
".",
"error",
")",
"def",
"m_e",
"(",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"sliced etc residuals, with state.error appended on\"\"\"",
"return",
"m",
"(",
"*",
"*",
"kwargs",
")",
",",
"np",
".",
"copy",
"(",
"self",
".",
"error",
")",
"# set the member functions using partial",
"self",
".",
"fisherinformation",
"=",
"partial",
"(",
"self",
".",
"_jtj",
",",
"funct",
"=",
"m",
")",
"self",
".",
"gradloglikelihood",
"=",
"partial",
"(",
"self",
".",
"_grad",
",",
"funct",
"=",
"l",
")",
"self",
".",
"hessloglikelihood",
"=",
"partial",
"(",
"self",
".",
"_hess",
",",
"funct",
"=",
"l",
")",
"self",
".",
"gradmodel",
"=",
"partial",
"(",
"self",
".",
"_grad",
",",
"funct",
"=",
"m",
")",
"self",
".",
"hessmodel",
"=",
"partial",
"(",
"self",
".",
"_hess",
",",
"funct",
"=",
"m",
")",
"self",
".",
"JTJ",
"=",
"partial",
"(",
"self",
".",
"_jtj",
",",
"funct",
"=",
"r",
")",
"self",
".",
"J",
"=",
"partial",
"(",
"self",
".",
"_grad",
",",
"funct",
"=",
"r",
")",
"self",
".",
"J_e",
"=",
"partial",
"(",
"self",
".",
"_grad",
",",
"funct",
"=",
"r_e",
",",
"nout",
"=",
"2",
")",
"self",
".",
"gradmodel_e",
"=",
"partial",
"(",
"self",
".",
"_grad",
",",
"funct",
"=",
"m_e",
",",
"nout",
"=",
"2",
")",
"# add the appropriate documentation to the following functions",
"self",
".",
"fisherinformation",
".",
"__doc__",
"=",
"_graddoc",
"+",
"_sampledoc",
"self",
".",
"gradloglikelihood",
".",
"__doc__",
"=",
"_graddoc",
"self",
".",
"hessloglikelihood",
".",
"__doc__",
"=",
"_graddoc",
"self",
".",
"gradmodel",
".",
"__doc__",
"=",
"_graddoc",
"+",
"_sampledoc",
"self",
".",
"hessmodel",
".",
"__doc__",
"=",
"_graddoc",
"+",
"_sampledoc",
"self",
".",
"JTJ",
".",
"__doc__",
"=",
"_graddoc",
"+",
"_sampledoc",
"self",
".",
"J",
".",
"__doc__",
"=",
"_graddoc",
"+",
"_sampledoc",
"# add documentation to the private functions as well. this is done",
"# slightly differently, hence the function call",
"self",
".",
"_dograddoc",
"(",
"self",
".",
"_grad_one_param",
")",
"self",
".",
"_dograddoc",
"(",
"self",
".",
"_hess_two_param",
")",
"self",
".",
"_dograddoc",
"(",
"self",
".",
"_grad",
")",
"self",
".",
"_dograddoc",
"(",
"self",
".",
"_hess",
")",
"# the state object is a workaround so that other interfaces still",
"# work. this should probably be removed in the long run",
"class",
"_Statewrap",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"obj",
")",
":",
"self",
".",
"obj",
"=",
"obj",
"def",
"__getitem__",
"(",
"self",
",",
"d",
"=",
"None",
")",
":",
"if",
"d",
"is",
"None",
":",
"d",
"=",
"self",
".",
"obj",
".",
"params",
"return",
"util",
".",
"delistify",
"(",
"self",
".",
"obj",
".",
"get_values",
"(",
"d",
")",
",",
"d",
")",
"self",
".",
"state",
"=",
"_Statewrap",
"(",
"self",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
State.crb
|
Calculate the diagonal elements of the minimum covariance of the model
with respect to parameters params. ``*args`` and ``**kwargs`` go to
``fisherinformation``.
|
peri/states.py
|
def crb(self, params=None, *args, **kwargs):
"""
Calculate the diagonal elements of the minimum covariance of the model
with respect to parameters params. ``*args`` and ``**kwargs`` go to
``fisherinformation``.
"""
fish = self.fisherinformation(params=params, *args, **kwargs)
return np.sqrt(np.diag(np.linalg.inv(fish))) * self.sigma
|
def crb(self, params=None, *args, **kwargs):
"""
Calculate the diagonal elements of the minimum covariance of the model
with respect to parameters params. ``*args`` and ``**kwargs`` go to
``fisherinformation``.
"""
fish = self.fisherinformation(params=params, *args, **kwargs)
return np.sqrt(np.diag(np.linalg.inv(fish))) * self.sigma
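The return line implements sqrt(diag(F^-1)) * sigma. A self-contained NumPy sketch of the same formula on a toy Jacobian (not peri's actual Jacobian) shows the shape of the calculation:
import numpy as np

J = np.random.randn(10000, 3)          # toy Jacobian: data points x parameters
sigma = 0.05                           # noise level, playing the role of self.sigma
fisher = np.dot(J.T, J)                # Fisher information up to the 1/sigma**2 factor
crb = np.sqrt(np.diag(np.linalg.inv(fisher))) * sigma
print(crb)                             # per-parameter lower bound on the fit uncertainty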
|
[
"Calculate",
"the",
"diagonal",
"elements",
"of",
"the",
"minimum",
"covariance",
"of",
"the",
"model",
"with",
"respect",
"to",
"parameters",
"params",
".",
"*",
"args",
"and",
"**",
"kwargs",
"go",
"to",
"fisherinformation",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L447-L454
|
[
"def",
"crb",
"(",
"self",
",",
"params",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"fish",
"=",
"self",
".",
"fisherinformation",
"(",
"params",
"=",
"params",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"diag",
"(",
"np",
".",
"linalg",
".",
"inv",
"(",
"fish",
")",
")",
")",
"*",
"self",
".",
"sigma"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState.set_model
|
Set up the image model formation equation and attach the corresponding
component objects, one attribute per category. `mdl` is a `peri.models.Model` object

|
peri/states.py
|
def set_model(self, mdl):
"""
Setup the image model formation equation and corresponding objects into
their various objects. `mdl` is a `peri.models.Model` object
"""
self.mdl = mdl
self.mdl.check_inputs(self.comps)
for c in self.comps:
setattr(self, '_comp_'+c.category, c)
|
def set_model(self, mdl):
"""
Setup the image model formation equation and corresponding objects into
their various objects. `mdl` is a `peri.models.Model` object
"""
self.mdl = mdl
self.mdl.check_inputs(self.comps)
for c in self.comps:
setattr(self, '_comp_'+c.category, c)
|
[
"Setup",
"the",
"image",
"model",
"formation",
"equation",
"and",
"corresponding",
"objects",
"into",
"their",
"various",
"objects",
".",
"mdl",
"is",
"a",
"peri",
".",
"models",
".",
"Model",
"object"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L558-L567
|
[
"def",
"set_model",
"(",
"self",
",",
"mdl",
")",
":",
"self",
".",
"mdl",
"=",
"mdl",
"self",
".",
"mdl",
".",
"check_inputs",
"(",
"self",
".",
"comps",
")",
"for",
"c",
"in",
"self",
".",
"comps",
":",
"setattr",
"(",
"self",
",",
"'_comp_'",
"+",
"c",
".",
"category",
",",
"c",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState.set_image
|
Update the current comparison (real) image
|
peri/states.py
|
def set_image(self, image):
"""
Update the current comparison (real) image
"""
if isinstance(image, np.ndarray):
image = util.Image(image)
if isinstance(image, util.NullImage):
self.model_as_data = True
else:
self.model_as_data = False
self.image = image
self._data = self.image.get_padded_image(self.pad)
# set up various slicers and Tiles associated with the image and pad
self.oshape = util.Tile(self._data.shape)
self.ishape = self.oshape.pad(-self.pad)
self.inner = self.ishape.slicer
for c in self.comps:
c.set_shape(self.oshape, self.ishape)
self._model = np.zeros(self._data.shape, dtype=np.float64)
self._residuals = np.zeros(self._data.shape, dtype=np.float64)
self.calculate_model()
|
def set_image(self, image):
"""
Update the current comparison (real) image
"""
if isinstance(image, np.ndarray):
image = util.Image(image)
if isinstance(image, util.NullImage):
self.model_as_data = True
else:
self.model_as_data = False
self.image = image
self._data = self.image.get_padded_image(self.pad)
# set up various slicers and Tiles associated with the image and pad
self.oshape = util.Tile(self._data.shape)
self.ishape = self.oshape.pad(-self.pad)
self.inner = self.ishape.slicer
for c in self.comps:
c.set_shape(self.oshape, self.ishape)
self._model = np.zeros(self._data.shape, dtype=np.float64)
self._residuals = np.zeros(self._data.shape, dtype=np.float64)
self.calculate_model()
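A hedged usage sketch: a plain ndarray is wrapped into a `util.Image` before the padded data, slicers, and model buffers are rebuilt. Here `st` is assumed to be an already-constructed `ImageState` and `raw` is placeholder data.
import numpy as np
from peri import util

raw = np.zeros((32, 64, 64))        # placeholder 3D data
st.set_image(util.Image(raw))       # `st` must have been built elsewhere
print(st.oshape.shape)              # padded shape of the new data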
|
[
"Update",
"the",
"current",
"comparison",
"(",
"real",
")",
"image"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L569-L594
|
[
"def",
"set_image",
"(",
"self",
",",
"image",
")",
":",
"if",
"isinstance",
"(",
"image",
",",
"np",
".",
"ndarray",
")",
":",
"image",
"=",
"util",
".",
"Image",
"(",
"image",
")",
"if",
"isinstance",
"(",
"image",
",",
"util",
".",
"NullImage",
")",
":",
"self",
".",
"model_as_data",
"=",
"True",
"else",
":",
"self",
".",
"model_as_data",
"=",
"False",
"self",
".",
"image",
"=",
"image",
"self",
".",
"_data",
"=",
"self",
".",
"image",
".",
"get_padded_image",
"(",
"self",
".",
"pad",
")",
"# set up various slicers and Tiles associated with the image and pad",
"self",
".",
"oshape",
"=",
"util",
".",
"Tile",
"(",
"self",
".",
"_data",
".",
"shape",
")",
"self",
".",
"ishape",
"=",
"self",
".",
"oshape",
".",
"pad",
"(",
"-",
"self",
".",
"pad",
")",
"self",
".",
"inner",
"=",
"self",
".",
"ishape",
".",
"slicer",
"for",
"c",
"in",
"self",
".",
"comps",
":",
"c",
".",
"set_shape",
"(",
"self",
".",
"oshape",
",",
"self",
".",
"ishape",
")",
"self",
".",
"_model",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"_data",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"self",
".",
"_residuals",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"_data",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"self",
".",
"calculate_model",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState.model_to_data
|
Switch out the data for the model's recreation of the data.
|
peri/states.py
|
def model_to_data(self, sigma=0.0):
""" Switch out the data for the model's recreation of the data. """
im = self.model.copy()
im += sigma*np.random.randn(*im.shape)
self.set_image(util.NullImage(image=im))
|
def model_to_data(self, sigma=0.0):
""" Switch out the data for the model's recreation of the data. """
im = self.model.copy()
im += sigma*np.random.randn(*im.shape)
self.set_image(util.NullImage(image=im))
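A hedged usage sketch for generating synthetic ground-truth data: the current model (plus optional Gaussian noise) replaces the data, and `model_as_data` is set by the `NullImage` path of `set_image`. `st` is assumed to be an existing `ImageState`.
st.model_to_data(sigma=0.03)    # data := model + N(0, 0.03**2) noise
print(st.model_as_data)         # True, since set_image received a NullImage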
|
[
"Switch",
"out",
"the",
"data",
"for",
"the",
"model",
"s",
"recreation",
"of",
"the",
"data",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L599-L603
|
[
"def",
"model_to_data",
"(",
"self",
",",
"sigma",
"=",
"0.0",
")",
":",
"im",
"=",
"self",
".",
"model",
".",
"copy",
"(",
")",
"im",
"+=",
"sigma",
"*",
"np",
".",
"random",
".",
"randn",
"(",
"*",
"im",
".",
"shape",
")",
"self",
".",
"set_image",
"(",
"util",
".",
"NullImage",
"(",
"image",
"=",
"im",
")",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState.get_update_io_tiles
|
Get the tiles corresponding to a particular section of image needed to
be updated. Inputs are the parameters and values. Returned are the
padded tile, the inner tile, and the slicer between them, accounting for
wrap at the edge of the image as necessary.
|
peri/states.py
|
def get_update_io_tiles(self, params, values):
"""
Get the tiles corresponding to a particular section of image needed to
be updated. Inputs are the parameters and values. Returned is the
padded tile, inner tile, and slicer to go between, but accounting for
wrap with the edge of the image as necessary.
"""
# get the affected area of the model image
otile = self.get_update_tile(params, values)
if otile is None:
return [None]*3
ptile = self.get_padding_size(otile) or util.Tile(0, dim=otile.dim)
otile = util.Tile.intersection(otile, self.oshape)
if (otile.shape <= 0).any():
raise UpdateError("update triggered invalid tile size")
if (ptile.shape < 0).any() or (ptile.shape > self.oshape.shape).any():
raise UpdateError("update triggered invalid padding tile size")
# now remove the part of the tile that is outside the image and pad the
# interior part with that overhang. reflect the necessary padding back
# into the image itself for the outer slice which we will call outer
outer = otile.pad((ptile.shape+1)//2)
inner, outer = outer.reflect_overhang(self.oshape)
iotile = inner.translate(-outer.l)
outer = util.Tile.intersection(outer, self.oshape)
inner = util.Tile.intersection(inner, self.oshape)
return outer, inner, iotile
|
def get_update_io_tiles(self, params, values):
"""
Get the tiles corresponding to a particular section of image needed to
be updated. Inputs are the parameters and values. Returned is the
padded tile, inner tile, and slicer to go between, but accounting for
wrap with the edge of the image as necessary.
"""
# get the affected area of the model image
otile = self.get_update_tile(params, values)
if otile is None:
return [None]*3
ptile = self.get_padding_size(otile) or util.Tile(0, dim=otile.dim)
otile = util.Tile.intersection(otile, self.oshape)
if (otile.shape <= 0).any():
raise UpdateError("update triggered invalid tile size")
if (ptile.shape < 0).any() or (ptile.shape > self.oshape.shape).any():
raise UpdateError("update triggered invalid padding tile size")
# now remove the part of the tile that is outside the image and pad the
# interior part with that overhang. reflect the necessary padding back
# into the image itself for the outer slice which we will call outer
outer = otile.pad((ptile.shape+1)//2)
inner, outer = outer.reflect_overhang(self.oshape)
iotile = inner.translate(-outer.l)
outer = util.Tile.intersection(outer, self.oshape)
inner = util.Tile.intersection(inner, self.oshape)
return outer, inner, iotile
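A hedged usage sketch; the parameter name below is hypothetical and `st` is an existing `ImageState`. `outer` is the padded region the components recompute, `inner` is the region written back into the model, and `iotile` maps `inner` into `outer`'s coordinates.
outer, inner, iotile = st.get_update_io_tiles(['sph-0-x'], [12.0])
if outer is not None:                  # all three are None when nothing is affected
    print(outer.shape, inner.shape)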
|
[
"Get",
"the",
"tiles",
"corresponding",
"to",
"a",
"particular",
"section",
"of",
"image",
"needed",
"to",
"be",
"updated",
".",
"Inputs",
"are",
"the",
"parameters",
"and",
"values",
".",
"Returned",
"is",
"the",
"padded",
"tile",
"inner",
"tile",
"and",
"slicer",
"to",
"go",
"between",
"but",
"accounting",
"for",
"wrap",
"with",
"the",
"edge",
"of",
"the",
"image",
"as",
"necessary",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L634-L664
|
[
"def",
"get_update_io_tiles",
"(",
"self",
",",
"params",
",",
"values",
")",
":",
"# get the affected area of the model image",
"otile",
"=",
"self",
".",
"get_update_tile",
"(",
"params",
",",
"values",
")",
"if",
"otile",
"is",
"None",
":",
"return",
"[",
"None",
"]",
"*",
"3",
"ptile",
"=",
"self",
".",
"get_padding_size",
"(",
"otile",
")",
"or",
"util",
".",
"Tile",
"(",
"0",
",",
"dim",
"=",
"otile",
".",
"dim",
")",
"otile",
"=",
"util",
".",
"Tile",
".",
"intersection",
"(",
"otile",
",",
"self",
".",
"oshape",
")",
"if",
"(",
"otile",
".",
"shape",
"<=",
"0",
")",
".",
"any",
"(",
")",
":",
"raise",
"UpdateError",
"(",
"\"update triggered invalid tile size\"",
")",
"if",
"(",
"ptile",
".",
"shape",
"<",
"0",
")",
".",
"any",
"(",
")",
"or",
"(",
"ptile",
".",
"shape",
">",
"self",
".",
"oshape",
".",
"shape",
")",
".",
"any",
"(",
")",
":",
"raise",
"UpdateError",
"(",
"\"update triggered invalid padding tile size\"",
")",
"# now remove the part of the tile that is outside the image and pad the",
"# interior part with that overhang. reflect the necessary padding back",
"# into the image itself for the outer slice which we will call outer",
"outer",
"=",
"otile",
".",
"pad",
"(",
"(",
"ptile",
".",
"shape",
"+",
"1",
")",
"//",
"2",
")",
"inner",
",",
"outer",
"=",
"outer",
".",
"reflect_overhang",
"(",
"self",
".",
"oshape",
")",
"iotile",
"=",
"inner",
".",
"translate",
"(",
"-",
"outer",
".",
"l",
")",
"outer",
"=",
"util",
".",
"Tile",
".",
"intersection",
"(",
"outer",
",",
"self",
".",
"oshape",
")",
"inner",
"=",
"util",
".",
"Tile",
".",
"intersection",
"(",
"inner",
",",
"self",
".",
"oshape",
")",
"return",
"outer",
",",
"inner",
",",
"iotile"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState.update
|
Actually perform an image (etc) update based on a set of params and
values. These parameters can be any of those present in the components, in any
number. If only one component is affected, then difference-image
updates will be employed.
|
peri/states.py
|
def update(self, params, values):
"""
Actually perform an image (etc) update based on a set of params and
values. These parameter can be any present in the components in any
number. If there is only one component affected then difference image
updates will be employed.
"""
# FIXME needs to update priors
comps = self.affected_components(params)
if len(comps) == 0:
return False
# get the affected area of the model image
otile, itile, iotile = self.get_update_io_tiles(params, values)
if otile is None:
return False
# have all components update their tiles
self.set_tile(otile)
oldmodel = self._model[itile.slicer].copy()
# here we diverge depending if there is only one component update
# (so that we may calculate a variation / difference image) or if many
# parameters are being update (should just update the whole model).
if len(comps) == 1 and self.mdl.get_difference_model(comps[0].category):
comp = comps[0]
model0 = copy.deepcopy(comp.get())
super(ImageState, self).update(params, values)
model1 = copy.deepcopy(comp.get())
diff = model1 - model0
diff = self.mdl.evaluate(
self.comps, 'get', diffmap={comp.category: diff}
)
if isinstance(model0, (float, int)):
self._model[itile.slicer] += diff
else:
self._model[itile.slicer] += diff[iotile.slicer]
else:
super(ImageState, self).update(params, values)
# allow the model to be evaluated using our components
diff = self.mdl.evaluate(self.comps, 'get')
self._model[itile.slicer] = diff[iotile.slicer]
newmodel = self._model[itile.slicer].copy()
# use the model image update to modify other class variables which
# are hard to compute globally for small local updates
self.update_from_model_change(oldmodel, newmodel, itile)
return True
|
def update(self, params, values):
"""
Actually perform an image (etc) update based on a set of params and
values. These parameter can be any present in the components in any
number. If there is only one component affected then difference image
updates will be employed.
"""
# FIXME needs to update priors
comps = self.affected_components(params)
if len(comps) == 0:
return False
# get the affected area of the model image
otile, itile, iotile = self.get_update_io_tiles(params, values)
if otile is None:
return False
# have all components update their tiles
self.set_tile(otile)
oldmodel = self._model[itile.slicer].copy()
# here we diverge depending if there is only one component update
# (so that we may calculate a variation / difference image) or if many
# parameters are being update (should just update the whole model).
if len(comps) == 1 and self.mdl.get_difference_model(comps[0].category):
comp = comps[0]
model0 = copy.deepcopy(comp.get())
super(ImageState, self).update(params, values)
model1 = copy.deepcopy(comp.get())
diff = model1 - model0
diff = self.mdl.evaluate(
self.comps, 'get', diffmap={comp.category: diff}
)
if isinstance(model0, (float, int)):
self._model[itile.slicer] += diff
else:
self._model[itile.slicer] += diff[iotile.slicer]
else:
super(ImageState, self).update(params, values)
# allow the model to be evaluated using our components
diff = self.mdl.evaluate(self.comps, 'get')
self._model[itile.slicer] = diff[iotile.slicer]
newmodel = self._model[itile.slicer].copy()
# use the model image update to modify other class variables which
# are hard to compute globally for small local updates
self.update_from_model_change(oldmodel, newmodel, itile)
return True
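A hedged usage sketch; the parameter name is hypothetical and `st` is an existing `ImageState`. A single-component change takes the fast difference-image path, and the return value reports whether anything was actually updated.
changed = st.update(['ilm-off-value'], [0.05])   # hypothetical parameter name
print(changed, st.error)                         # False means no component was affected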
|
[
"Actually",
"perform",
"an",
"image",
"(",
"etc",
")",
"update",
"based",
"on",
"a",
"set",
"of",
"params",
"and",
"values",
".",
"These",
"parameter",
"can",
"be",
"any",
"present",
"in",
"the",
"components",
"in",
"any",
"number",
".",
"If",
"there",
"is",
"only",
"one",
"component",
"affected",
"then",
"difference",
"image",
"updates",
"will",
"be",
"employed",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L666-L720
|
[
"def",
"update",
"(",
"self",
",",
"params",
",",
"values",
")",
":",
"# FIXME needs to update priors",
"comps",
"=",
"self",
".",
"affected_components",
"(",
"params",
")",
"if",
"len",
"(",
"comps",
")",
"==",
"0",
":",
"return",
"False",
"# get the affected area of the model image",
"otile",
",",
"itile",
",",
"iotile",
"=",
"self",
".",
"get_update_io_tiles",
"(",
"params",
",",
"values",
")",
"if",
"otile",
"is",
"None",
":",
"return",
"False",
"# have all components update their tiles",
"self",
".",
"set_tile",
"(",
"otile",
")",
"oldmodel",
"=",
"self",
".",
"_model",
"[",
"itile",
".",
"slicer",
"]",
".",
"copy",
"(",
")",
"# here we diverge depending if there is only one component update",
"# (so that we may calculate a variation / difference image) or if many",
"# parameters are being update (should just update the whole model).",
"if",
"len",
"(",
"comps",
")",
"==",
"1",
"and",
"self",
".",
"mdl",
".",
"get_difference_model",
"(",
"comps",
"[",
"0",
"]",
".",
"category",
")",
":",
"comp",
"=",
"comps",
"[",
"0",
"]",
"model0",
"=",
"copy",
".",
"deepcopy",
"(",
"comp",
".",
"get",
"(",
")",
")",
"super",
"(",
"ImageState",
",",
"self",
")",
".",
"update",
"(",
"params",
",",
"values",
")",
"model1",
"=",
"copy",
".",
"deepcopy",
"(",
"comp",
".",
"get",
"(",
")",
")",
"diff",
"=",
"model1",
"-",
"model0",
"diff",
"=",
"self",
".",
"mdl",
".",
"evaluate",
"(",
"self",
".",
"comps",
",",
"'get'",
",",
"diffmap",
"=",
"{",
"comp",
".",
"category",
":",
"diff",
"}",
")",
"if",
"isinstance",
"(",
"model0",
",",
"(",
"float",
",",
"int",
")",
")",
":",
"self",
".",
"_model",
"[",
"itile",
".",
"slicer",
"]",
"+=",
"diff",
"else",
":",
"self",
".",
"_model",
"[",
"itile",
".",
"slicer",
"]",
"+=",
"diff",
"[",
"iotile",
".",
"slicer",
"]",
"else",
":",
"super",
"(",
"ImageState",
",",
"self",
")",
".",
"update",
"(",
"params",
",",
"values",
")",
"# allow the model to be evaluated using our components",
"diff",
"=",
"self",
".",
"mdl",
".",
"evaluate",
"(",
"self",
".",
"comps",
",",
"'get'",
")",
"self",
".",
"_model",
"[",
"itile",
".",
"slicer",
"]",
"=",
"diff",
"[",
"iotile",
".",
"slicer",
"]",
"newmodel",
"=",
"self",
".",
"_model",
"[",
"itile",
".",
"slicer",
"]",
".",
"copy",
"(",
")",
"# use the model image update to modify other class variables which",
"# are hard to compute globally for small local updates",
"self",
".",
"update_from_model_change",
"(",
"oldmodel",
",",
"newmodel",
",",
"itile",
")",
"return",
"True"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState.get
|
Return component by category name
|
peri/states.py
|
def get(self, name):
""" Return component by category name """
for c in self.comps:
if c.category == name:
return c
return None
|
def get(self, name):
""" Return component by category name """
for c in self.comps:
if c.category == name:
return c
return None
|
[
"Return",
"component",
"by",
"category",
"name"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L722-L727
|
[
"def",
"get",
"(",
"self",
",",
"name",
")",
":",
"for",
"c",
"in",
"self",
".",
"comps",
":",
"if",
"c",
".",
"category",
"==",
"name",
":",
"return",
"c",
"return",
"None"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState._calc_loglikelihood
|
Allows for fast local updates of log-likelihood
|
peri/states.py
|
def _calc_loglikelihood(self, model=None, tile=None):
"""Allows for fast local updates of log-likelihood"""
if model is None:
res = self.residuals
else:
res = model - self._data[tile.slicer]
sig, isig = self.sigma, 1.0/self.sigma
nlogs = -np.log(np.sqrt(2*np.pi)*sig)*res.size
return -0.5*isig*isig*np.dot(res.flat, res.flat) + nlogs
|
def _calc_loglikelihood(self, model=None, tile=None):
"""Allows for fast local updates of log-likelihood"""
if model is None:
res = self.residuals
else:
res = model - self._data[tile.slicer]
sig, isig = self.sigma, 1.0/self.sigma
nlogs = -np.log(np.sqrt(2*np.pi)*sig)*res.size
return -0.5*isig*isig*np.dot(res.flat, res.flat) + nlogs
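The return line is the Gaussian log-likelihood of the residuals. A self-contained check of the same formula written two equivalent ways:
import numpy as np

res = 0.05 * np.random.randn(1000)     # toy residuals
sigma = 0.05
norm = -np.log(np.sqrt(2 * np.pi) * sigma) * res.size
loglike = -0.5 * np.dot(res, res) / sigma ** 2 + norm
per_pixel = np.sum(-0.5 * (res / sigma) ** 2 - np.log(np.sqrt(2 * np.pi) * sigma))
assert np.allclose(loglike, per_pixel)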
|
[
"Allows",
"for",
"fast",
"local",
"updates",
"of",
"log",
"-",
"likelihood"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L745-L754
|
[
"def",
"_calc_loglikelihood",
"(",
"self",
",",
"model",
"=",
"None",
",",
"tile",
"=",
"None",
")",
":",
"if",
"model",
"is",
"None",
":",
"res",
"=",
"self",
".",
"residuals",
"else",
":",
"res",
"=",
"model",
"-",
"self",
".",
"_data",
"[",
"tile",
".",
"slicer",
"]",
"sig",
",",
"isig",
"=",
"self",
".",
"sigma",
",",
"1.0",
"/",
"self",
".",
"sigma",
"nlogs",
"=",
"-",
"np",
".",
"log",
"(",
"np",
".",
"sqrt",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"*",
"sig",
")",
"*",
"res",
".",
"size",
"return",
"-",
"0.5",
"*",
"isig",
"*",
"isig",
"*",
"np",
".",
"dot",
"(",
"res",
".",
"flat",
",",
"res",
".",
"flat",
")",
"+",
"nlogs"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState.update_from_model_change
|
Update various internal variables from a model update from oldmodel to
newmodel for the tile `tile`
|
peri/states.py
|
def update_from_model_change(self, oldmodel, newmodel, tile):
"""
Update various internal variables from a model update from oldmodel to
newmodel for the tile `tile`
"""
self._loglikelihood -= self._calc_loglikelihood(oldmodel, tile=tile)
self._loglikelihood += self._calc_loglikelihood(newmodel, tile=tile)
self._residuals[tile.slicer] = self._data[tile.slicer] - newmodel
|
def update_from_model_change(self, oldmodel, newmodel, tile):
"""
Update various internal variables from a model update from oldmodel to
newmodel for the tile `tile`
"""
self._loglikelihood -= self._calc_loglikelihood(oldmodel, tile=tile)
self._loglikelihood += self._calc_loglikelihood(newmodel, tile=tile)
self._residuals[tile.slicer] = self._data[tile.slicer] - newmodel
|
[
"Update",
"various",
"internal",
"variables",
"from",
"a",
"model",
"update",
"from",
"oldmodel",
"to",
"newmodel",
"for",
"the",
"tile",
"tile"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L761-L768
|
[
"def",
"update_from_model_change",
"(",
"self",
",",
"oldmodel",
",",
"newmodel",
",",
"tile",
")",
":",
"self",
".",
"_loglikelihood",
"-=",
"self",
".",
"_calc_loglikelihood",
"(",
"oldmodel",
",",
"tile",
"=",
"tile",
")",
"self",
".",
"_loglikelihood",
"+=",
"self",
".",
"_calc_loglikelihood",
"(",
"newmodel",
",",
"tile",
"=",
"tile",
")",
"self",
".",
"_residuals",
"[",
"tile",
".",
"slicer",
"]",
"=",
"self",
".",
"_data",
"[",
"tile",
".",
"slicer",
"]",
"-",
"newmodel"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ImageState.set_mem_level
|
Sets the memory usage level of the state.
Parameters
----------
mem_level : string
Can be set to one of:
* hi : all mem's are np.float64
* med-hi : image, platonic are float32, rest are float64
* med : all mem's are float32
* med-lo : image, platonic are float16, rest float32
* lo : all are float16, which is bad for accuracy.
Notes
-----
Right now the PSF is not affected by the mem-level changes, which is
OK for mem but it means that self._model, self._residuals are always
float64, which can be a chunk of mem.
|
peri/states.py
|
def set_mem_level(self, mem_level='hi'):
"""
Sets the memory usage level of the state.
Parameters
----------
mem_level : string
Can be set to one of:
* hi : all mem's are np.float64
* med-hi : image, platonic are float32, rest are float64
* med : all mem's are float32
* med-lo : image, platonic are float16, rest float32
* lo : all are float16, which is bad for accuracy.
Notes
-----
Right now the PSF is not affected by the mem-level changes, which is
OK for mem but it means that self._model, self._residuals are always
float64, which can be a chunk of mem.
"""
#A little thing to parse strings for convenience:
key = ''.join([c if c in 'mlh' else '' for c in mem_level])
if key not in ['h','mh','m','ml','m', 'l']:
raise ValueError('mem_level must be one of hi, med-hi, med, med-lo, lo.')
mem_levels = { 'h': [np.float64, np.float64],
'mh': [np.float64, np.float32],
'm': [np.float32, np.float32],
'ml': [np.float32, np.float16],
'l': [np.float16, np.float16]
}
hi_lvl, lo_lvl = mem_levels[key]
cat_lvls = {'obj':lo_lvl,
'ilm':hi_lvl,
'bkg':lo_lvl
} #no psf...
self.image.float_precision = hi_lvl
self.image.image = self.image.image.astype(lo_lvl)
self.set_image(self.image)
for cat in cat_lvls.keys():
obj = self.get(cat)
#check if it's a component collection
if hasattr(obj, 'comps'):
for c in obj.comps:
c.float_precision = lo_lvl
else:
obj.float_precision = lo_lvl
self._model = self._model.astype(hi_lvl)
self._residuals = self._model.astype(hi_lvl)
self.reset()
|
def set_mem_level(self, mem_level='hi'):
"""
Sets the memory usage level of the state.
Parameters
----------
mem_level : string
Can be set to one of:
* hi : all mem's are np.float64
* med-hi : image, platonic are float32, rest are float64
* med : all mem's are float32
* med-lo : image, platonic are float16, rest float32
* lo : all are float16, which is bad for accuracy.
Notes
-----
Right now the PSF is not affected by the mem-level changes, which is
OK for mem but it means that self._model, self._residuals are always
float64, which can be a chunk of mem.
"""
#A little thing to parse strings for convenience:
key = ''.join([c if c in 'mlh' else '' for c in mem_level])
if key not in ['h','mh','m','ml','m', 'l']:
raise ValueError('mem_level must be one of hi, med-hi, med, med-lo, lo.')
mem_levels = { 'h': [np.float64, np.float64],
'mh': [np.float64, np.float32],
'm': [np.float32, np.float32],
'ml': [np.float32, np.float16],
'l': [np.float16, np.float16]
}
hi_lvl, lo_lvl = mem_levels[key]
cat_lvls = {'obj':lo_lvl,
'ilm':hi_lvl,
'bkg':lo_lvl
} #no psf...
self.image.float_precision = hi_lvl
self.image.image = self.image.image.astype(lo_lvl)
self.set_image(self.image)
for cat in cat_lvls.keys():
obj = self.get(cat)
#check if it's a component collection
if hasattr(obj, 'comps'):
for c in obj.comps:
c.float_precision = lo_lvl
else:
obj.float_precision = lo_lvl
self._model = self._model.astype(hi_lvl)
self._residuals = self._model.astype(hi_lvl)
self.reset()
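A self-contained sketch of the key parsing used above: only the characters 'm', 'l', 'h' survive, which maps the human-readable level names onto short keys. (The membership check above lists 'm' twice, which is redundant but harmless.)
for level in ['hi', 'med-hi', 'med', 'med-lo', 'lo']:
    key = ''.join([c if c in 'mlh' else '' for c in level])
    print(level, '->', key)    # hi->h, med-hi->mh, med->m, med-lo->ml, lo->l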
|
[
"Sets",
"the",
"memory",
"usage",
"level",
"of",
"the",
"state",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/states.py#L797-L847
|
[
"def",
"set_mem_level",
"(",
"self",
",",
"mem_level",
"=",
"'hi'",
")",
":",
"#A little thing to parse strings for convenience:",
"key",
"=",
"''",
".",
"join",
"(",
"[",
"c",
"if",
"c",
"in",
"'mlh'",
"else",
"''",
"for",
"c",
"in",
"mem_level",
"]",
")",
"if",
"key",
"not",
"in",
"[",
"'h'",
",",
"'mh'",
",",
"'m'",
",",
"'ml'",
",",
"'m'",
",",
"'l'",
"]",
":",
"raise",
"ValueError",
"(",
"'mem_level must be one of hi, med-hi, med, med-lo, lo.'",
")",
"mem_levels",
"=",
"{",
"'h'",
":",
"[",
"np",
".",
"float64",
",",
"np",
".",
"float64",
"]",
",",
"'mh'",
":",
"[",
"np",
".",
"float64",
",",
"np",
".",
"float32",
"]",
",",
"'m'",
":",
"[",
"np",
".",
"float32",
",",
"np",
".",
"float32",
"]",
",",
"'ml'",
":",
"[",
"np",
".",
"float32",
",",
"np",
".",
"float16",
"]",
",",
"'l'",
":",
"[",
"np",
".",
"float16",
",",
"np",
".",
"float16",
"]",
"}",
"hi_lvl",
",",
"lo_lvl",
"=",
"mem_levels",
"[",
"key",
"]",
"cat_lvls",
"=",
"{",
"'obj'",
":",
"lo_lvl",
",",
"'ilm'",
":",
"hi_lvl",
",",
"'bkg'",
":",
"lo_lvl",
"}",
"#no psf...",
"self",
".",
"image",
".",
"float_precision",
"=",
"hi_lvl",
"self",
".",
"image",
".",
"image",
"=",
"self",
".",
"image",
".",
"image",
".",
"astype",
"(",
"lo_lvl",
")",
"self",
".",
"set_image",
"(",
"self",
".",
"image",
")",
"for",
"cat",
"in",
"cat_lvls",
".",
"keys",
"(",
")",
":",
"obj",
"=",
"self",
".",
"get",
"(",
"cat",
")",
"#check if it's a component collection",
"if",
"hasattr",
"(",
"obj",
",",
"'comps'",
")",
":",
"for",
"c",
"in",
"obj",
".",
"comps",
":",
"c",
".",
"float_precision",
"=",
"lo_lvl",
"else",
":",
"obj",
".",
"float_precision",
"=",
"lo_lvl",
"self",
".",
"_model",
"=",
"self",
".",
"_model",
".",
"astype",
"(",
"hi_lvl",
")",
"self",
".",
"_residuals",
"=",
"self",
".",
"_model",
".",
"astype",
"(",
"hi_lvl",
")",
"self",
".",
"reset",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
scramble_positions
|
randomly deletes particles and adds 1-px noise for a realistic
initial featuring guess
|
scripts/tutorial.py
|
def scramble_positions(p, delete_frac=0.1):
"""randomly deletes particles and adds 1-px noise for a realistic
initial featuring guess"""
probs = [1-delete_frac, delete_frac]
m = np.random.choice([True, False], p.shape[0], p=probs)
jumble = np.random.randn(m.sum(), 3)
return p[m] + jumble
|
def scramble_positions(p, delete_frac=0.1):
"""randomly deletes particles and adds 1-px noise for a realistic
initial featuring guess"""
probs = [1-delete_frac, delete_frac]
m = np.random.choice([True, False], p.shape[0], p=probs)
jumble = np.random.randn(m.sum(), 3)
return p[m] + jumble
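A self-contained sketch (assuming `scramble_positions` is defined as above): with `delete_frac=0.1` roughly 90% of the rows are kept, each jittered by ~1 px of Gaussian noise.
import numpy as np

pos = np.random.uniform(0, 64, size=(100, 3))    # toy particle positions
guess = scramble_positions(pos, delete_frac=0.1)
print(pos.shape, guess.shape)                    # about 10% of rows dropped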
|
[
"randomly",
"deletes",
"particles",
"and",
"adds",
"1",
"-",
"px",
"noise",
"for",
"a",
"realistic",
"initial",
"featuring",
"guess"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/scripts/tutorial.py#L82-L88
|
[
"def",
"scramble_positions",
"(",
"p",
",",
"delete_frac",
"=",
"0.1",
")",
":",
"probs",
"=",
"[",
"1",
"-",
"delete_frac",
",",
"delete_frac",
"]",
"m",
"=",
"np",
".",
"random",
".",
"choice",
"(",
"[",
"True",
",",
"False",
"]",
",",
"p",
".",
"shape",
"[",
"0",
"]",
",",
"p",
"=",
"probs",
")",
"jumble",
"=",
"np",
".",
"random",
".",
"randn",
"(",
"m",
".",
"sum",
"(",
")",
",",
"3",
")",
"return",
"p",
"[",
"m",
"]",
"+",
"jumble"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
create_img
|
Creates an image, as a `peri.util.Image`, which is similar
to the image in the tutorial
|
scripts/tutorial.py
|
def create_img():
"""Creates an image, as a `peri.util.Image`, which is similar
to the image in the tutorial"""
# 1. particles + coverslip
rad = 0.5 * np.random.randn(POS.shape[0]) + 4.5 # 4.5 +- 0.5 px particles
part = objs.PlatonicSpheresCollection(POS, rad, zscale=0.89)
slab = objs.Slab(zpos=4.92, angles=(-4.7e-3, -7.3e-4))
objects = comp.ComponentCollection([part, slab], category='obj')
# 2. psf, ilm
p = exactpsf.FixedSSChebLinePSF(kfki=1.07, zslab=-29.3, alpha=1.17,
n2n1=0.98, sigkf=-0.33, zscale=0.89, laser_wavelength=0.45)
i = ilms.BarnesStreakLegPoly2P1D(npts=(16,10,8,4), zorder=8)
b = ilms.LegendrePoly2P1D(order=(7,2,2), category='bkg')
off = comp.GlobalScalar(name='offset', value=-2.11)
mdl = models.ConfocalImageModel()
st = states.ImageState(util.NullImage(shape=[48,64,64]),
[objects, p, i, b, off], mdl=mdl, model_as_data=True)
b.update(b.params, BKGVALS)
i.update(i.params, ILMVALS)
im = st.model + np.random.randn(*st.model.shape) * 0.03
return util.Image(im)
|
def create_img():
"""Creates an image, as a `peri.util.Image`, which is similar
to the image in the tutorial"""
# 1. particles + coverslip
rad = 0.5 * np.random.randn(POS.shape[0]) + 4.5 # 4.5 +- 0.5 px particles
part = objs.PlatonicSpheresCollection(POS, rad, zscale=0.89)
slab = objs.Slab(zpos=4.92, angles=(-4.7e-3, -7.3e-4))
objects = comp.ComponentCollection([part, slab], category='obj')
# 2. psf, ilm
p = exactpsf.FixedSSChebLinePSF(kfki=1.07, zslab=-29.3, alpha=1.17,
n2n1=0.98, sigkf=-0.33, zscale=0.89, laser_wavelength=0.45)
i = ilms.BarnesStreakLegPoly2P1D(npts=(16,10,8,4), zorder=8)
b = ilms.LegendrePoly2P1D(order=(7,2,2), category='bkg')
off = comp.GlobalScalar(name='offset', value=-2.11)
mdl = models.ConfocalImageModel()
st = states.ImageState(util.NullImage(shape=[48,64,64]),
[objects, p, i, b, off], mdl=mdl, model_as_data=True)
b.update(b.params, BKGVALS)
i.update(i.params, ILMVALS)
im = st.model + np.random.randn(*st.model.shape) * 0.03
return util.Image(im)
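A hedged usage note: `create_img` relies on module-level constants (POS, ILMVALS, BKGVALS) defined earlier in scripts/tutorial.py, so it is intended to be called from within that script.
img = create_img()          # a peri.util.Image wrapping the noisy synthetic model
print(img.image.shape)      # should match the (48, 64, 64) NullImage used above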
|
[
"Creates",
"an",
"image",
"as",
"a",
"peri",
".",
"util",
".",
"Image",
"which",
"is",
"similar",
"to",
"the",
"image",
"in",
"the",
"tutorial"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/scripts/tutorial.py#L91-L112
|
[
"def",
"create_img",
"(",
")",
":",
"# 1. particles + coverslip",
"rad",
"=",
"0.5",
"*",
"np",
".",
"random",
".",
"randn",
"(",
"POS",
".",
"shape",
"[",
"0",
"]",
")",
"+",
"4.5",
"# 4.5 +- 0.5 px particles",
"part",
"=",
"objs",
".",
"PlatonicSpheresCollection",
"(",
"POS",
",",
"rad",
",",
"zscale",
"=",
"0.89",
")",
"slab",
"=",
"objs",
".",
"Slab",
"(",
"zpos",
"=",
"4.92",
",",
"angles",
"=",
"(",
"-",
"4.7e-3",
",",
"-",
"7.3e-4",
")",
")",
"objects",
"=",
"comp",
".",
"ComponentCollection",
"(",
"[",
"part",
",",
"slab",
"]",
",",
"category",
"=",
"'obj'",
")",
"# 2. psf, ilm",
"p",
"=",
"exactpsf",
".",
"FixedSSChebLinePSF",
"(",
"kfki",
"=",
"1.07",
",",
"zslab",
"=",
"-",
"29.3",
",",
"alpha",
"=",
"1.17",
",",
"n2n1",
"=",
"0.98",
",",
"sigkf",
"=",
"-",
"0.33",
",",
"zscale",
"=",
"0.89",
",",
"laser_wavelength",
"=",
"0.45",
")",
"i",
"=",
"ilms",
".",
"BarnesStreakLegPoly2P1D",
"(",
"npts",
"=",
"(",
"16",
",",
"10",
",",
"8",
",",
"4",
")",
",",
"zorder",
"=",
"8",
")",
"b",
"=",
"ilms",
".",
"LegendrePoly2P1D",
"(",
"order",
"=",
"(",
"7",
",",
"2",
",",
"2",
")",
",",
"category",
"=",
"'bkg'",
")",
"off",
"=",
"comp",
".",
"GlobalScalar",
"(",
"name",
"=",
"'offset'",
",",
"value",
"=",
"-",
"2.11",
")",
"mdl",
"=",
"models",
".",
"ConfocalImageModel",
"(",
")",
"st",
"=",
"states",
".",
"ImageState",
"(",
"util",
".",
"NullImage",
"(",
"shape",
"=",
"[",
"48",
",",
"64",
",",
"64",
"]",
")",
",",
"[",
"objects",
",",
"p",
",",
"i",
",",
"b",
",",
"off",
"]",
",",
"mdl",
"=",
"mdl",
",",
"model_as_data",
"=",
"True",
")",
"b",
".",
"update",
"(",
"b",
".",
"params",
",",
"BKGVALS",
")",
"i",
".",
"update",
"(",
"i",
".",
"params",
",",
"ILMVALS",
")",
"im",
"=",
"st",
".",
"model",
"+",
"np",
".",
"random",
".",
"randn",
"(",
"*",
"st",
".",
"model",
".",
"shape",
")",
"*",
"0.03",
"return",
"util",
".",
"Image",
"(",
"im",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ParameterGroup.get_values
|
Get the value of a list or single parameter.
Parameters
----------
params : string, list of string
name(s) of the parameters to retrieve
|
peri/comp/comp.py
|
def get_values(self, params):
"""
Get the value of a list or single parameter.
Parameters
----------
params : string, list of string
name of parameters which to retrieve
"""
return util.delistify(
[self.param_dict[p] for p in util.listify(params)], params
)
|
def get_values(self, params):
"""
Get the value of a list or single parameter.
Parameters
----------
params : string, list of string
name of parameters which to retrieve
"""
return util.delistify(
[self.param_dict[p] for p in util.listify(params)], params
)
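A hedged sketch of the listify/delistify convention, assuming `c` is any ParameterGroup (or Component) instance; the parameter names are illustrative. `set_values` (the next record) writes the same dictionary without triggering a recalculation.
val = c.get_values('offset')                  # single name -> single value
vals = c.get_values(['offset', 'sigma'])      # list of names -> list of values
c.set_values('offset', val + 0.1)             # updates param_dict only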
|
[
"Get",
"the",
"value",
"of",
"a",
"list",
"or",
"single",
"parameter",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L88-L99
|
[
"def",
"get_values",
"(",
"self",
",",
"params",
")",
":",
"return",
"util",
".",
"delistify",
"(",
"[",
"self",
".",
"param_dict",
"[",
"p",
"]",
"for",
"p",
"in",
"util",
".",
"listify",
"(",
"params",
")",
"]",
",",
"params",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ParameterGroup.set_values
|
Directly set the values corresponding to certain parameters.
This does not necessarily trigger an update of the calculation.
See also
--------
:func:`~peri.comp.comp.ParameterGroup.update` : full update func
|
peri/comp/comp.py
|
def set_values(self, params, values):
"""
Directly set the values corresponding to certain parameters.
This does not necessarily trigger and update of the calculation,
See also
--------
:func:`~peri.comp.comp.ParameterGroup.update` : full update func
"""
for p, v in zip(util.listify(params), util.listify(values)):
self.param_dict[p] = v
|
def set_values(self, params, values):
"""
Directly set the values corresponding to certain parameters.
This does not necessarily trigger and update of the calculation,
See also
--------
:func:`~peri.comp.comp.ParameterGroup.update` : full update func
"""
for p, v in zip(util.listify(params), util.listify(values)):
self.param_dict[p] = v
|
[
"Directly",
"set",
"the",
"values",
"corresponding",
"to",
"certain",
"parameters",
".",
"This",
"does",
"not",
"necessarily",
"trigger",
"and",
"update",
"of",
"the",
"calculation",
"See",
"also",
"--------",
":",
"func",
":",
"~peri",
".",
"comp",
".",
"comp",
".",
"ParameterGroup",
".",
"update",
":",
"full",
"update",
"func"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L101-L111
|
[
"def",
"set_values",
"(",
"self",
",",
"params",
",",
"values",
")",
":",
"for",
"p",
",",
"v",
"in",
"zip",
"(",
"util",
".",
"listify",
"(",
"params",
")",
",",
"util",
".",
"listify",
"(",
"values",
")",
")",
":",
"self",
".",
"param_dict",
"[",
"p",
"]",
"=",
"v"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
Component.set_shape
|
Set the overall shape of the calculation area. `shape` is the total region
that the calculation can possibly occupy, in pixels. The second argument, `inner`,
is the region of interest within the image.
|
peri/comp/comp.py
|
def set_shape(self, shape, inner):
"""
Set the overall shape of the calculation area. The total shape of that
the calculation can possibly occupy, in pixels. The second, inner, is
the region of interest within the image.
"""
if self.shape != shape or self.inner != inner:
self.shape = shape
self.inner = inner
self.initialize()
|
def set_shape(self, shape, inner):
"""
Set the overall shape of the calculation area. The total shape of that
the calculation can possibly occupy, in pixels. The second, inner, is
the region of interest within the image.
"""
if self.shape != shape or self.inner != inner:
self.shape = shape
self.inner = inner
self.initialize()
|
[
"Set",
"the",
"overall",
"shape",
"of",
"the",
"calculation",
"area",
".",
"The",
"total",
"shape",
"of",
"that",
"the",
"calculation",
"can",
"possibly",
"occupy",
"in",
"pixels",
".",
"The",
"second",
"inner",
"is",
"the",
"region",
"of",
"interest",
"within",
"the",
"image",
"."
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L263-L272
|
[
"def",
"set_shape",
"(",
"self",
",",
"shape",
",",
"inner",
")",
":",
"if",
"self",
".",
"shape",
"!=",
"shape",
"or",
"self",
".",
"inner",
"!=",
"inner",
":",
"self",
".",
"shape",
"=",
"shape",
"self",
".",
"inner",
"=",
"inner",
"self",
".",
"initialize",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
Component.trigger_update
|
Notify parent of a parameter change
|
peri/comp/comp.py
|
def trigger_update(self, params, values):
""" Notify parent of a parameter change """
if self._parent:
self._parent.trigger_update(params, values)
else:
self.update(params, values)
|
def trigger_update(self, params, values):
""" Notify parent of a parameter change """
if self._parent:
self._parent.trigger_update(params, values)
else:
self.update(params, values)
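A self-contained sketch of the delegation pattern: a component handles the update itself until it acquires a parent, after which changes are routed upward. None of these class names are part of peri.
class Child(object):
    def __init__(self):
        self._parent = None
        self.seen = []
    def update(self, params, values):
        self.seen.append((params, values))
    def trigger_update(self, params, values):
        if self._parent:
            self._parent.trigger_update(params, values)
        else:
            self.update(params, values)

class Parent(object):
    def __init__(self, child):
        self.seen = []
        child._parent = self
    def trigger_update(self, params, values):
        self.seen.append((params, values))   # e.g. re-dispatch to all children

c = Child()
c.trigger_update('a', 1)          # no parent yet -> handled locally
p = Parent(c)
c.trigger_update('b', 2)          # now routed through the parent
print(c.seen, p.seen)             # [('a', 1)] [('b', 2)]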
|
[
"Notify",
"parent",
"of",
"a",
"parameter",
"change"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L303-L308
|
[
"def",
"trigger_update",
"(",
"self",
",",
"params",
",",
"values",
")",
":",
"if",
"self",
".",
"_parent",
":",
"self",
".",
"_parent",
".",
"trigger_update",
"(",
"params",
",",
"values",
")",
"else",
":",
"self",
".",
"update",
"(",
"params",
",",
"values",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ComponentCollection.split_params
|
Split params, values into groups that correspond to the ordering in
self.comps. For example, given a sphere collection and slab::
[
(spheres) [pos rad etc] [pos val, rad val, etc]
(slab) [slab params] [slab vals]
]
|
peri/comp/comp.py
|
def split_params(self, params, values=None):
"""
Split params, values into groups that correspond to the ordering in
self.comps. For example, given a sphere collection and slab::
[
(spheres) [pos rad etc] [pos val, rad val, etc]
(slab) [slab params] [slab vals]
]
"""
pc, vc = [], []
returnvalues = values is not None
if values is None:
values = [0]*len(util.listify(params))
for c in self.comps:
tp, tv = [], []
for p,v in zip(util.listify(params), util.listify(values)):
if not p in self.lmap:
raise NotAParameterError("%r does not belong to %r" % (p, self))
if c in self.pmap[p]:
tp.append(p)
tv.append(v)
pc.append(tp)
vc.append(tv)
if returnvalues:
return pc, vc
return pc
|
def split_params(self, params, values=None):
"""
Split params, values into groups that correspond to the ordering in
self.comps. For example, given a sphere collection and slab::
[
(spheres) [pos rad etc] [pos val, rad val, etc]
(slab) [slab params] [slab vals]
]
"""
pc, vc = [], []
returnvalues = values is not None
if values is None:
values = [0]*len(util.listify(params))
for c in self.comps:
tp, tv = [], []
for p,v in zip(util.listify(params), util.listify(values)):
if not p in self.lmap:
raise NotAParameterError("%r does not belong to %r" % (p, self))
if c in self.pmap[p]:
tp.append(p)
tv.append(v)
pc.append(tp)
vc.append(tv)
if returnvalues:
return pc, vc
return pc
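A hedged usage sketch, assuming `coll` is a ComponentCollection holding a sphere collection and a slab; the parameter names are purely illustrative.
groups = coll.split_params(['sph-0-x', 'slab-zpos'])
# -> [['sph-0-x'], ['slab-zpos']]   one sub-list per component, in self.comps order
pgroups, vgroups = coll.split_params(['sph-0-x', 'slab-zpos'], values=[12.0, 5.1])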
|
[
"Split",
"params",
"values",
"into",
"groups",
"that",
"correspond",
"to",
"the",
"ordering",
"in",
"self",
".",
"comps",
".",
"For",
"example",
"given",
"a",
"sphere",
"collection",
"and",
"slab",
"::"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L413-L444
|
[
"def",
"split_params",
"(",
"self",
",",
"params",
",",
"values",
"=",
"None",
")",
":",
"pc",
",",
"vc",
"=",
"[",
"]",
",",
"[",
"]",
"returnvalues",
"=",
"values",
"is",
"not",
"None",
"if",
"values",
"is",
"None",
":",
"values",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"util",
".",
"listify",
"(",
"params",
")",
")",
"for",
"c",
"in",
"self",
".",
"comps",
":",
"tp",
",",
"tv",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"p",
",",
"v",
"in",
"zip",
"(",
"util",
".",
"listify",
"(",
"params",
")",
",",
"util",
".",
"listify",
"(",
"values",
")",
")",
":",
"if",
"not",
"p",
"in",
"self",
".",
"lmap",
":",
"raise",
"NotAParameterError",
"(",
"\"%r does not belong to %r\"",
"%",
"(",
"p",
",",
"self",
")",
")",
"if",
"c",
"in",
"self",
".",
"pmap",
"[",
"p",
"]",
":",
"tp",
".",
"append",
"(",
"p",
")",
"tv",
".",
"append",
"(",
"v",
")",
"pc",
".",
"append",
"(",
"tp",
")",
"vc",
".",
"append",
"(",
"tv",
")",
"if",
"returnvalues",
":",
"return",
"pc",
",",
"vc",
"return",
"pc"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ComponentCollection.get
|
Combine the fields from all components
|
peri/comp/comp.py
|
def get(self):
""" Combine the fields from all components """
fields = [c.get() for c in self.comps]
return self.field_reduce_func(fields)
|
def get(self):
""" Combine the fields from all components """
fields = [c.get() for c in self.comps]
return self.field_reduce_func(fields)
|
[
"Combine",
"the",
"fields",
"from",
"all",
"components"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L522-L525
|
[
"def",
"get",
"(",
"self",
")",
":",
"fields",
"=",
"[",
"c",
".",
"get",
"(",
")",
"for",
"c",
"in",
"self",
".",
"comps",
"]",
"return",
"self",
".",
"field_reduce_func",
"(",
"fields",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ComponentCollection.set_shape
|
Set the shape for all components
|
peri/comp/comp.py
|
def set_shape(self, shape, inner):
""" Set the shape for all components """
for c in self.comps:
c.set_shape(shape, inner)
|
def set_shape(self, shape, inner):
""" Set the shape for all components """
for c in self.comps:
c.set_shape(shape, inner)
|
[
"Set",
"the",
"shape",
"for",
"all",
"components"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L532-L535
|
[
"def",
"set_shape",
"(",
"self",
",",
"shape",
",",
"inner",
")",
":",
"for",
"c",
"in",
"self",
".",
"comps",
":",
"c",
".",
"set_shape",
"(",
"shape",
",",
"inner",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ComponentCollection.sync_params
|
Ensure that shared parameters are the same value everywhere
|
peri/comp/comp.py
|
def sync_params(self):
""" Ensure that shared parameters are the same value everywhere """
def _normalize(comps, param):
vals = [c.get_values(param) for c in comps]
diff = any([vals[i] != vals[i+1] for i in range(len(vals)-1)])
if diff:
for c in comps:
c.set_values(param, vals[0])
for param, comps in iteritems(self.lmap):
if isinstance(comps, list) and len(comps) > 1:
_normalize(comps, param)
|
def sync_params(self):
""" Ensure that shared parameters are the same value everywhere """
def _normalize(comps, param):
vals = [c.get_values(param) for c in comps]
diff = any([vals[i] != vals[i+1] for i in range(len(vals)-1)])
if diff:
for c in comps:
c.set_values(param, vals[0])
for param, comps in iteritems(self.lmap):
if isinstance(comps, list) and len(comps) > 1:
_normalize(comps, param)
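A self-contained sketch of the normalization rule applied per shared parameter: if the owning components disagree, every one of them is reset to the first component's value.
vals = [1.0, 1.0, 2.0]                 # values reported by three components
if any(vals[i] != vals[i + 1] for i in range(len(vals) - 1)):
    vals = [vals[0]] * len(vals)
print(vals)                            # [1.0, 1.0, 1.0]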
|
[
"Ensure",
"that",
"shared",
"parameters",
"are",
"the",
"same",
"value",
"everywhere"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L537-L549
|
[
"def",
"sync_params",
"(",
"self",
")",
":",
"def",
"_normalize",
"(",
"comps",
",",
"param",
")",
":",
"vals",
"=",
"[",
"c",
".",
"get_values",
"(",
"param",
")",
"for",
"c",
"in",
"comps",
"]",
"diff",
"=",
"any",
"(",
"[",
"vals",
"[",
"i",
"]",
"!=",
"vals",
"[",
"i",
"+",
"1",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"vals",
")",
"-",
"1",
")",
"]",
")",
"if",
"diff",
":",
"for",
"c",
"in",
"comps",
":",
"c",
".",
"set_values",
"(",
"param",
",",
"vals",
"[",
"0",
"]",
")",
"for",
"param",
",",
"comps",
"in",
"iteritems",
"(",
"self",
".",
"lmap",
")",
":",
"if",
"isinstance",
"(",
"comps",
",",
"list",
")",
"and",
"len",
"(",
"comps",
")",
">",
"1",
":",
"_normalize",
"(",
"comps",
",",
"param",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
ComponentCollection.setup_passthroughs
|
Inherit some functions from the components that we own. In particular,
let's grab all functions that begin with `param_` so the super class
knows how to get parameter groups. Also, take anything that is listed
under Component.exports and rename with the category type, i.e.,
SphereCollection.add_particle -> Component.obj_add_particle
|
peri/comp/comp.py
|
def setup_passthroughs(self):
"""
Inherit some functions from the components that we own. In particular,
let's grab all functions that begin with `param_` so the super class
knows how to get parameter groups. Also, take anything that is listed
under Component.exports and rename with the category type, i.e.,
SphereCollection.add_particle -> Component.obj_add_particle
"""
self._nopickle = []
for c in self.comps:
# take all member functions that start with 'param_'
funcs = inspect.getmembers(c, predicate=inspect.ismethod)
for func in funcs:
if func[0].startswith('param_'):
setattr(self, func[0], func[1])
self._nopickle.append(func[0])
# add everything from exports
funcs = c.exports()
for func in funcs:
newname = c.category + '_' + func.__func__.__name__
setattr(self, newname, func)
self._nopickle.append(newname)
|
def setup_passthroughs(self):
"""
Inherit some functions from the components that we own. In particular,
let's grab all functions that begin with `param_` so the super class
knows how to get parameter groups. Also, take anything that is listed
under Component.exports and rename with the category type, i.e.,
SphereCollection.add_particle -> Component.obj_add_particle
"""
self._nopickle = []
for c in self.comps:
# take all member functions that start with 'param_'
funcs = inspect.getmembers(c, predicate=inspect.ismethod)
for func in funcs:
if func[0].startswith('param_'):
setattr(self, func[0], func[1])
self._nopickle.append(func[0])
# add everything from exports
funcs = c.exports()
for func in funcs:
newname = c.category + '_' + func.__func__.__name__
setattr(self, newname, func)
self._nopickle.append(newname)
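A self-contained sketch of the attribute-lifting pattern: bound methods whose names start with a prefix are re-exposed on the owner, and an "exported" method is renamed with the component's category (the classes here are illustrative, not peri's).
import inspect

class Sphere(object):
    category = 'obj'
    def param_radii(self):
        return ['r0', 'r1']
    def add_particle(self):
        return 'added'

class Owner(object):
    def __init__(self, comp):
        for name, meth in inspect.getmembers(comp, predicate=inspect.ismethod):
            if name.startswith('param_'):
                setattr(self, name, meth)
        # rename the "exported" method with the component's category
        setattr(self, comp.category + '_' + 'add_particle', comp.add_particle)

o = Owner(Sphere())
print(o.param_radii(), o.obj_add_particle())    # ['r0', 'r1'] added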
|
[
"Inherit",
"some",
"functions",
"from",
"the",
"components",
"that",
"we",
"own",
".",
"In",
"particular",
"let",
"s",
"grab",
"all",
"functions",
"that",
"begin",
"with",
"param_",
"so",
"the",
"super",
"class",
"knows",
"how",
"to",
"get",
"parameter",
"groups",
".",
"Also",
"take",
"anything",
"that",
"is",
"listed",
"under",
"Component",
".",
"exports",
"and",
"rename",
"with",
"the",
"category",
"type",
"i",
".",
"e",
".",
"SphereCollection",
".",
"add_particle",
"-",
">",
"Component",
".",
"obj_add_particle"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L558-L581
|
[
"def",
"setup_passthroughs",
"(",
"self",
")",
":",
"self",
".",
"_nopickle",
"=",
"[",
"]",
"for",
"c",
"in",
"self",
".",
"comps",
":",
"# take all member functions that start with 'param_'",
"funcs",
"=",
"inspect",
".",
"getmembers",
"(",
"c",
",",
"predicate",
"=",
"inspect",
".",
"ismethod",
")",
"for",
"func",
"in",
"funcs",
":",
"if",
"func",
"[",
"0",
"]",
".",
"startswith",
"(",
"'param_'",
")",
":",
"setattr",
"(",
"self",
",",
"func",
"[",
"0",
"]",
",",
"func",
"[",
"1",
"]",
")",
"self",
".",
"_nopickle",
".",
"append",
"(",
"func",
"[",
"0",
"]",
")",
"# add everything from exports",
"funcs",
"=",
"c",
".",
"exports",
"(",
")",
"for",
"func",
"in",
"funcs",
":",
"newname",
"=",
"c",
".",
"category",
"+",
"'_'",
"+",
"func",
".",
"__func__",
".",
"__name__",
"setattr",
"(",
"self",
",",
"newname",
",",
"func",
")",
"self",
".",
"_nopickle",
".",
"append",
"(",
"newname",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
get_conf_filename
|
The configuration file either lives in ~/.peri.json or is specified on the
command line via the environment variables PERI_CONF_FILE
|
peri/conf.py
|
def get_conf_filename():
"""
The configuration file either lives in ~/.peri.json or is specified on the
command line via the environment variables PERI_CONF_FILE
"""
default = os.path.join(os.path.expanduser("~"), ".peri.json")
return os.environ.get('PERI_CONF_FILE', default)
|
def get_conf_filename():
"""
The configuration file either lives in ~/.peri.json or is specified on the
command line via the environment variables PERI_CONF_FILE
"""
default = os.path.join(os.path.expanduser("~"), ".peri.json")
return os.environ.get('PERI_CONF_FILE', default)
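
A hypothetical interactive check of the override behaviour; the printed paths are illustrative only.

import os
from peri.conf import get_conf_filename

# With no override set, the default path under the home directory is used.
os.environ.pop('PERI_CONF_FILE', None)
print(get_conf_filename())                     # e.g. /home/user/.peri.json

# Setting PERI_CONF_FILE redirects every later lookup of the config file.
os.environ['PERI_CONF_FILE'] = '/tmp/peri-test.json'
print(get_conf_filename())                     # /tmp/peri-test.json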
|
[
"The",
"configuration",
"file",
"either",
"lives",
"in",
"~",
"/",
".",
"peri",
".",
"json",
"or",
"is",
"specified",
"on",
"the",
"command",
"line",
"via",
"the",
"environment",
"variables",
"PERI_CONF_FILE"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/conf.py#L37-L43
|
[
"def",
"get_conf_filename",
"(",
")",
":",
"default",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
",",
"\".peri.json\"",
")",
"return",
"os",
".",
"environ",
".",
"get",
"(",
"'PERI_CONF_FILE'",
",",
"default",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
read_environment
|
Read all environment variables to see if they contain PERI
|
peri/conf.py
|
def read_environment():
""" Read all environment variables to see if they contain PERI """
out = {}
for k,v in iteritems(os.environ):
if transform(k) in default_conf:
out[transform(k)] = v
return out
|
def read_environment():
""" Read all environment variables to see if they contain PERI """
out = {}
for k,v in iteritems(os.environ):
if transform(k) in default_conf:
out[transform(k)] = v
return out
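
A hedged usage sketch: transform() and default_conf live elsewhere in peri/conf.py and are not shown here, so the exact variable-to-key mapping below is an assumption.

import os
from peri import conf

# Assumed: 'PERI_FFTW_THREADS' transforms to a key present in default_conf;
# only variables that survive that membership test are returned.
os.environ['PERI_FFTW_THREADS'] = '4'
overrides = conf.read_environment()
print(overrides)        # a dict containing only recognised, transformed keys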
|
[
"Read",
"all",
"environment",
"variables",
"to",
"see",
"if",
"they",
"contain",
"PERI"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/conf.py#L55-L61
|
[
"def",
"read_environment",
"(",
")",
":",
"out",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"os",
".",
"environ",
")",
":",
"if",
"transform",
"(",
"k",
")",
"in",
"default_conf",
":",
"out",
"[",
"transform",
"(",
"k",
")",
"]",
"=",
"v",
"return",
"out"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
load_conf
|
Load the configuration with the priority:
1. environment variables
2. configuration file
3. defaults here (default_conf)
|
peri/conf.py
|
def load_conf():
"""
Load the configuration with the priority:
1. environment variables
2. configuration file
3. defaults here (default_conf)
"""
try:
conf = copy.copy(default_conf)
conf.update(json.load(open(get_conf_filename())))
conf.update(read_environment())
return conf
except IOError as e:
create_default_conf()
return load_conf()
|
def load_conf():
"""
Load the configuration with the priority:
1. environment variables
2. configuration file
3. defaults here (default_conf)
"""
try:
conf = copy.copy(default_conf)
conf.update(json.load(open(get_conf_filename())))
conf.update(read_environment())
return conf
except IOError as e:
create_default_conf()
return load_conf()
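
A hypothetical walk-through of the priority order; if the file is missing, the IOError branch calls create_default_conf() and retries via recursion. The 'verbosity' key and the PERI_VERBOSITY mapping are assumptions about default_conf and transform(), which are defined elsewhere in peri/conf.py.

import json
import os
import tempfile
from peri import conf

# Point the config file at a throwaway location so the sketch is self-contained.
tmp_path = os.path.join(tempfile.mkdtemp(), 'peri-test.json')
os.environ['PERI_CONF_FILE'] = tmp_path

with open(tmp_path, 'w') as f:
    json.dump({'verbosity': 'vvv'}, f)          # file layer (middle priority)

os.environ['PERI_VERBOSITY'] = 'v'              # environment layer (top priority)
print(conf.load_conf()['verbosity'])            # expected: 'v', the environment wins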
|
[
"Load",
"the",
"configuration",
"with",
"the",
"priority",
":",
"1",
".",
"environment",
"variables",
"2",
".",
"configuration",
"file",
"3",
".",
"defaults",
"here",
"(",
"default_conf",
")"
] |
peri-source/peri
|
python
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/conf.py#L68-L82
|
[
"def",
"load_conf",
"(",
")",
":",
"try",
":",
"conf",
"=",
"copy",
".",
"copy",
"(",
"default_conf",
")",
"conf",
".",
"update",
"(",
"json",
".",
"load",
"(",
"open",
"(",
"get_conf_filename",
"(",
")",
")",
")",
")",
"conf",
".",
"update",
"(",
"read_environment",
"(",
")",
")",
"return",
"conf",
"except",
"IOError",
"as",
"e",
":",
"create_default_conf",
"(",
")",
"return",
"load_conf",
"(",
")"
] |
61beed5deaaf978ab31ed716e8470d86ba639867
|
valid
|
get_group_name
|
Used for breadcrumb dynamic_list_constructor.
|
invenio_groups/views.py
|
def get_group_name(id_group):
"""Used for breadcrumb dynamic_list_constructor."""
group = Group.query.get(id_group)
if group is not None:
return group.name
|
def get_group_name(id_group):
"""Used for breadcrumb dynamic_list_constructor."""
group = Group.query.get(id_group)
if group is not None:
return group.name
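
Per the docstring this feeds Flask-Breadcrumbs' dynamic_list_constructor; a hypothetical direct call (inside an application context, with a seeded database) would behave like this:

# Hypothetical values; Group.query.get returns None for unknown ids, so the
# helper quietly yields None rather than raising.
print(get_group_name(1))        # e.g. 'quantum-optics-lab'
print(get_group_name(424242))   # None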
|
[
"Used",
"for",
"breadcrumb",
"dynamic_list_constructor",
"."
] |
inveniosoftware-contrib/invenio-groups
|
python
|
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/views.py#L52-L56
|
[
"def",
"get_group_name",
"(",
"id_group",
")",
":",
"group",
"=",
"Group",
".",
"query",
".",
"get",
"(",
"id_group",
")",
"if",
"group",
"is",
"not",
"None",
":",
"return",
"group",
".",
"name"
] |
109481d6b02701db00b72223dd4a65e167c589a6
|
valid
|
index
|
List all user memberships.
|
invenio_groups/views.py
|
def index():
"""List all user memberships."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
q = request.args.get('q', '')
groups = Group.query_by_user(current_user, eager=True)
if q:
groups = Group.search(groups, q)
groups = groups.paginate(page, per_page=per_page)
requests = Membership.query_requests(current_user).count()
invitations = Membership.query_invitations(current_user).count()
return render_template(
'invenio_groups/index.html',
groups=groups,
requests=requests,
invitations=invitations,
page=page,
per_page=per_page,
q=q
)
|
def index():
"""List all user memberships."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
q = request.args.get('q', '')
groups = Group.query_by_user(current_user, eager=True)
if q:
groups = Group.search(groups, q)
groups = groups.paginate(page, per_page=per_page)
requests = Membership.query_requests(current_user).count()
invitations = Membership.query_invitations(current_user).count()
return render_template(
'invenio_groups/index.html',
groups=groups,
requests=requests,
invitations=invitations,
page=page,
per_page=per_page,
q=q
)
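
A hypothetical pytest-style check; the `app` fixture (a Flask app with the invenio_groups blueprint registered and an authenticated user) and the '/groups/' mount point are assumptions, not taken from the code above.

def test_index_lists_memberships(app):
    # q filters groups by name; page and per_page drive Group.paginate above.
    with app.test_client() as client:
        resp = client.get('/groups/?page=2&per_page=10&q=physics')
        assert resp.status_code == 200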
|
[
"List",
"all",
"user",
"memberships",
"."
] |
inveniosoftware-contrib/invenio-groups
|
python
|
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/views.py#L69-L91
|
[
"def",
"index",
"(",
")",
":",
"page",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'page'",
",",
"1",
",",
"type",
"=",
"int",
")",
"per_page",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'per_page'",
",",
"5",
",",
"type",
"=",
"int",
")",
"q",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'q'",
",",
"''",
")",
"groups",
"=",
"Group",
".",
"query_by_user",
"(",
"current_user",
",",
"eager",
"=",
"True",
")",
"if",
"q",
":",
"groups",
"=",
"Group",
".",
"search",
"(",
"groups",
",",
"q",
")",
"groups",
"=",
"groups",
".",
"paginate",
"(",
"page",
",",
"per_page",
"=",
"per_page",
")",
"requests",
"=",
"Membership",
".",
"query_requests",
"(",
"current_user",
")",
".",
"count",
"(",
")",
"invitations",
"=",
"Membership",
".",
"query_invitations",
"(",
"current_user",
")",
".",
"count",
"(",
")",
"return",
"render_template",
"(",
"'invenio_groups/index.html'",
",",
"groups",
"=",
"groups",
",",
"requests",
"=",
"requests",
",",
"invitations",
"=",
"invitations",
",",
"page",
"=",
"page",
",",
"per_page",
"=",
"per_page",
",",
"q",
"=",
"q",
")"
] |
109481d6b02701db00b72223dd4a65e167c589a6
|
valid
|
requests
|
List all pending memberships, listed only for group admins.
|
invenio_groups/views.py
|
def requests():
"""List all pending memberships, listed only for group admins."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
memberships = Membership.query_requests(current_user, eager=True).all()
return render_template(
'invenio_groups/pending.html',
memberships=memberships,
requests=True,
page=page,
per_page=per_page,
)
|
def requests():
"""List all pending memberships, listed only for group admins."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
memberships = Membership.query_requests(current_user, eager=True).all()
return render_template(
'invenio_groups/pending.html',
memberships=memberships,
requests=True,
page=page,
per_page=per_page,
)
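
A similar hypothetical test for the admin-facing pending list, using the same assumed fixture and mount point; note that page and per_page are read from the query string but the membership query is materialised with .all(), so they only reach the template for display.

def test_requests_page_renders(app):
    with app.test_client() as client:
        resp = client.get('/groups/requests?page=1&per_page=5')
        assert resp.status_code == 200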
|
[
"List",
"all",
"pending",
"memberships",
"listed",
"only",
"for",
"group",
"admins",
"."
] |
inveniosoftware-contrib/invenio-groups
|
python
|
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/views.py#L97-L109
|
[
"def",
"requests",
"(",
")",
":",
"page",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'page'",
",",
"1",
",",
"type",
"=",
"int",
")",
"per_page",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'per_page'",
",",
"5",
",",
"type",
"=",
"int",
")",
"memberships",
"=",
"Membership",
".",
"query_requests",
"(",
"current_user",
",",
"eager",
"=",
"True",
")",
".",
"all",
"(",
")",
"return",
"render_template",
"(",
"'invenio_groups/pending.html'",
",",
"memberships",
"=",
"memberships",
",",
"requests",
"=",
"True",
",",
"page",
"=",
"page",
",",
"per_page",
"=",
"per_page",
",",
")"
] |
109481d6b02701db00b72223dd4a65e167c589a6
|
valid
|
invitations
|
List all user pending memberships.
|
invenio_groups/views.py
|
def invitations():
"""List all user pending memberships."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
memberships = Membership.query_invitations(current_user, eager=True).all()
return render_template(
'invenio_groups/pending.html',
memberships=memberships,
page=page,
per_page=per_page,
)
|
def invitations():
"""List all user pending memberships."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
memberships = Membership.query_invitations(current_user, eager=True).all()
return render_template(
'invenio_groups/pending.html',
memberships=memberships,
page=page,
per_page=per_page,
)
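
The invitations view mirrors the one above but lists memberships awaiting the current user's acceptance; a matching hypothetical check under the same assumptions:

def test_invitations_page_renders(app):
    with app.test_client() as client:
        resp = client.get('/groups/invitations')
        assert resp.status_code == 200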
|
[
"List",
"all",
"user",
"pending",
"memberships",
"."
] |
inveniosoftware-contrib/invenio-groups
|
python
|
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/views.py#L115-L126
|
[
"def",
"invitations",
"(",
")",
":",
"page",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'page'",
",",
"1",
",",
"type",
"=",
"int",
")",
"per_page",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'per_page'",
",",
"5",
",",
"type",
"=",
"int",
")",
"memberships",
"=",
"Membership",
".",
"query_invitations",
"(",
"current_user",
",",
"eager",
"=",
"True",
")",
".",
"all",
"(",
")",
"return",
"render_template",
"(",
"'invenio_groups/pending.html'",
",",
"memberships",
"=",
"memberships",
",",
"page",
"=",
"page",
",",
"per_page",
"=",
"per_page",
",",
")"
] |
109481d6b02701db00b72223dd4a65e167c589a6
|
valid
|
new
|
Create new group.
|
invenio_groups/views.py
|
def new():
"""Create new group."""
form = GroupForm(request.form)
if form.validate_on_submit():
try:
group = Group.create(admins=[current_user], **form.data)
flash(_('Group "%(name)s" created', name=group.name), 'success')
return redirect(url_for(".index"))
except IntegrityError:
flash(_('Group creation failure'), 'error')
return render_template(
"invenio_groups/new.html",
form=form,
)
|
def new():
"""Create new group."""
form = GroupForm(request.form)
if form.validate_on_submit():
try:
group = Group.create(admins=[current_user], **form.data)
flash(_('Group "%(name)s" created', name=group.name), 'success')
return redirect(url_for(".index"))
except IntegrityError:
flash(_('Group creation failure'), 'error')
return render_template(
"invenio_groups/new.html",
form=form,
)
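
A hypothetical form submission; the '/groups/new' URL and the 'name' field on GroupForm are assumptions. A duplicate name would raise IntegrityError and flash 'Group creation failure' instead of redirecting.

def test_new_group_creation(app):
    with app.test_client() as client:
        resp = client.post('/groups/new', data={'name': 'my-group'},
                           follow_redirects=True)
        assert resp.status_code == 200   # redirected back to the index view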
|
[
"Create",
"new",
"group",
"."
] |
inveniosoftware-contrib/invenio-groups
|
python
|
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/views.py#L132-L148
|
[
"def",
"new",
"(",
")",
":",
"form",
"=",
"GroupForm",
"(",
"request",
".",
"form",
")",
"if",
"form",
".",
"validate_on_submit",
"(",
")",
":",
"try",
":",
"group",
"=",
"Group",
".",
"create",
"(",
"admins",
"=",
"[",
"current_user",
"]",
",",
"*",
"*",
"form",
".",
"data",
")",
"flash",
"(",
"_",
"(",
"'Group \"%(name)s\" created'",
",",
"name",
"=",
"group",
".",
"name",
")",
",",
"'success'",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"\".index\"",
")",
")",
"except",
"IntegrityError",
":",
"flash",
"(",
"_",
"(",
"'Group creation failure'",
")",
",",
"'error'",
")",
"return",
"render_template",
"(",
"\"invenio_groups/new.html\"",
",",
"form",
"=",
"form",
",",
")"
] |
109481d6b02701db00b72223dd4a65e167c589a6
|
valid
|
manage
|
Manage your group.
|
invenio_groups/views.py
|
def manage(group_id):
"""Manage your group."""
group = Group.query.get_or_404(group_id)
form = GroupForm(request.form, obj=group)
if form.validate_on_submit():
if group.can_edit(current_user):
try:
group.update(**form.data)
flash(_('Group "%(name)s" was updated', name=group.name),
'success')
except Exception as e:
flash(str(e), 'error')
return render_template(
"invenio_groups/new.html",
form=form,
group=group,
)
else:
flash(
_(
'You cannot edit group %(group_name)s',
group_name=group.name
),
'error'
)
return render_template(
"invenio_groups/new.html",
form=form,
group=group,
)
|
def manage(group_id):
"""Manage your group."""
group = Group.query.get_or_404(group_id)
form = GroupForm(request.form, obj=group)
if form.validate_on_submit():
if group.can_edit(current_user):
try:
group.update(**form.data)
flash(_('Group "%(name)s" was updated', name=group.name),
'success')
except Exception as e:
flash(str(e), 'error')
return render_template(
"invenio_groups/new.html",
form=form,
group=group,
)
else:
flash(
_(
'You cannot edit group %(group_name)s',
group_name=group.name
),
'error'
)
return render_template(
"invenio_groups/new.html",
form=form,
group=group,
)
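
A hypothetical edit by a group admin; the URL pattern and form field are assumptions. A non-admin hits the else branch, gets the 'You cannot edit group ...' flash, and sees the same template re-rendered.

def test_manage_renames_group(app):
    with app.test_client() as client:
        resp = client.post('/groups/1/manage', data={'name': 'renamed-group'})
        assert resp.status_code == 200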
|
[
"Manage",
"your",
"group",
"."
] |
inveniosoftware-contrib/invenio-groups
|
python
|
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/views.py#L160-L191
|
[
"def",
"manage",
"(",
"group_id",
")",
":",
"group",
"=",
"Group",
".",
"query",
".",
"get_or_404",
"(",
"group_id",
")",
"form",
"=",
"GroupForm",
"(",
"request",
".",
"form",
",",
"obj",
"=",
"group",
")",
"if",
"form",
".",
"validate_on_submit",
"(",
")",
":",
"if",
"group",
".",
"can_edit",
"(",
"current_user",
")",
":",
"try",
":",
"group",
".",
"update",
"(",
"*",
"*",
"form",
".",
"data",
")",
"flash",
"(",
"_",
"(",
"'Group \"%(name)s\" was updated'",
",",
"name",
"=",
"group",
".",
"name",
")",
",",
"'success'",
")",
"except",
"Exception",
"as",
"e",
":",
"flash",
"(",
"str",
"(",
"e",
")",
",",
"'error'",
")",
"return",
"render_template",
"(",
"\"invenio_groups/new.html\"",
",",
"form",
"=",
"form",
",",
"group",
"=",
"group",
",",
")",
"else",
":",
"flash",
"(",
"_",
"(",
"'You cannot edit group %(group_name)s'",
",",
"group_name",
"=",
"group",
".",
"name",
")",
",",
"'error'",
")",
"return",
"render_template",
"(",
"\"invenio_groups/new.html\"",
",",
"form",
"=",
"form",
",",
"group",
"=",
"group",
",",
")"
] |
109481d6b02701db00b72223dd4a65e167c589a6
|
valid
|
delete
|
Delete group.
|
invenio_groups/views.py
|
def delete(group_id):
"""Delete group."""
group = Group.query.get_or_404(group_id)
if group.can_edit(current_user):
try:
group.delete()
except Exception as e:
flash(str(e), "error")
return redirect(url_for(".index"))
flash(_('Successfully removed group "%(group_name)s"',
group_name=group.name), 'success')
return redirect(url_for(".index"))
flash(
_(
'You cannot delete the group %(group_name)s',
group_name=group.name
),
'error'
)
return redirect(url_for(".index"))
|
def delete(group_id):
"""Delete group."""
group = Group.query.get_or_404(group_id)
if group.can_edit(current_user):
try:
group.delete()
except Exception as e:
flash(str(e), "error")
return redirect(url_for(".index"))
flash(_('Successfully removed group "%(group_name)s"',
group_name=group.name), 'success')
return redirect(url_for(".index"))
flash(
_(
'You cannot delete the group %(group_name)s',
group_name=group.name
),
'error'
)
return redirect(url_for(".index"))
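
A hypothetical deletion request; the URL and HTTP method are assumptions. Every branch, success or failure, ends in a redirect back to the index view.

def test_delete_redirects_to_index(app):
    with app.test_client() as client:
        resp = client.post('/groups/1/delete')
        assert resp.status_code == 302   # all code paths redirect to ".index"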
|
[
"Delete",
"group",
"."
] |
inveniosoftware-contrib/invenio-groups
|
python
|
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/invenio_groups/views.py#L196-L218
|
[
"def",
"delete",
"(",
"group_id",
")",
":",
"group",
"=",
"Group",
".",
"query",
".",
"get_or_404",
"(",
"group_id",
")",
"if",
"group",
".",
"can_edit",
"(",
"current_user",
")",
":",
"try",
":",
"group",
".",
"delete",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"flash",
"(",
"str",
"(",
"e",
")",
",",
"\"error\"",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"\".index\"",
")",
")",
"flash",
"(",
"_",
"(",
"'Successfully removed group \"%(group_name)s\"'",
",",
"group_name",
"=",
"group",
".",
"name",
")",
",",
"'success'",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"\".index\"",
")",
")",
"flash",
"(",
"_",
"(",
"'You cannot delete the group %(group_name)s'",
",",
"group_name",
"=",
"group",
".",
"name",
")",
",",
"'error'",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"\".index\"",
")",
")"
] |
109481d6b02701db00b72223dd4a65e167c589a6
|