column            type            lengths / values
partition         stringclasses   3 values
func_name         stringlengths   1 to 134
docstring         stringlengths   1 to 46.9k
path              stringlengths   4 to 223
original_string   stringlengths   75 to 104k
code              stringlengths   75 to 104k
docstring_tokens  listlengths     1 to 1.97k
repo              stringlengths   7 to 55
language          stringclasses   1 value
url               stringlengths   87 to 315
code_tokens       listlengths     19 to 28.4k
sha               stringlengths   40 to 40
valid
fit_lens_data_with_sensitivity_tracers
Fit lens data with a normal tracer and sensitivity tracer, to determine our sensitivity to a selection of \ galaxy components. This factory automatically determines the type of fit based on the properties of the galaxies \ in the tracers. Parameters ----------- lens_data : lens_data.LensData or lens_data.LensDataHyper The lens-images that is fitted. tracer_normal : ray_tracing.AbstractTracer A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \ lens data that we are fitting. tracer_sensitive : ray_tracing.AbstractTracerNonStack A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \ lens data that we are fitting, but also addition components (e.g. mass clumps) which we measure \ how sensitive we are too.
autolens/lens/sensitivity_fit.py
def fit_lens_data_with_sensitivity_tracers(lens_data, tracer_normal, tracer_sensitive):
    """Fit lens data with a normal tracer and sensitivity tracer, to determine our sensitivity to a selection of \
    galaxy components. This factory automatically determines the type of fit based on the properties of the galaxies \
    in the tracers.

    Parameters
    -----------
    lens_data : lens_data.LensData or lens_data.LensDataHyper
        The lens-images that is fitted.
    tracer_normal : ray_tracing.AbstractTracer
        A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \
        lens data that we are fitting.
    tracer_sensitive : ray_tracing.AbstractTracerNonStack
        A tracer whose galaxies have the same model components (e.g. light profiles, mass profiles) as the \
        lens data that we are fitting, but also addition components (e.g. mass clumps) which we measure \
        how sensitive we are too.
    """
    if (tracer_normal.has_light_profile and tracer_sensitive.has_light_profile) and \
            (not tracer_normal.has_pixelization and not tracer_sensitive.has_pixelization):

        return SensitivityProfileFit(lens_data=lens_data, tracer_normal=tracer_normal,
                                     tracer_sensitive=tracer_sensitive)

    elif (not tracer_normal.has_light_profile and not tracer_sensitive.has_light_profile) and \
            (tracer_normal.has_pixelization and tracer_sensitive.has_pixelization):

        return SensitivityInversionFit(lens_data=lens_data, tracer_normal=tracer_normal,
                                       tracer_sensitive=tracer_sensitive)

    else:

        raise exc.FittingException('The sensitivity_fit routine did not call a SensitivityFit class - check the '
                                   'properties of the tracers')
[ "Fit", "lens", "data", "with", "a", "normal", "tracer", "and", "sensitivity", "tracer", "to", "determine", "our", "sensitivity", "to", "a", "selection", "of", "\\", "galaxy", "components", ".", "This", "factory", "automatically", "determines", "the", "type", "of", "fit", "based", "on", "the", "properties", "of", "the", "galaxies", "\\", "in", "the", "tracers", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/lens/sensitivity_fit.py#L5-L35
[ "def", "fit_lens_data_with_sensitivity_tracers", "(", "lens_data", ",", "tracer_normal", ",", "tracer_sensitive", ")", ":", "if", "(", "tracer_normal", ".", "has_light_profile", "and", "tracer_sensitive", ".", "has_light_profile", ")", "and", "(", "not", "tracer_normal", ".", "has_pixelization", "and", "not", "tracer_sensitive", ".", "has_pixelization", ")", ":", "return", "SensitivityProfileFit", "(", "lens_data", "=", "lens_data", ",", "tracer_normal", "=", "tracer_normal", ",", "tracer_sensitive", "=", "tracer_sensitive", ")", "elif", "(", "not", "tracer_normal", ".", "has_light_profile", "and", "not", "tracer_sensitive", ".", "has_light_profile", ")", "and", "(", "tracer_normal", ".", "has_pixelization", "and", "tracer_sensitive", ".", "has_pixelization", ")", ":", "return", "SensitivityInversionFit", "(", "lens_data", "=", "lens_data", ",", "tracer_normal", "=", "tracer_normal", ",", "tracer_sensitive", "=", "tracer_sensitive", ")", "else", ":", "raise", "exc", ".", "FittingException", "(", "'The sensitivity_fit routine did not call a SensitivityFit class - check the '", "'properties of the tracers'", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
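The dispatch rule in the record above is easier to see in isolation. The snippet below is an illustrative, self-contained sketch only: it re-states the branch conditions using stand-in named tuples rather than PyAutoLens's real tracer, lens-data and fit classes.

from collections import namedtuple

# Stand-in for a tracer: only the two flags the factory inspects.
Tracer = namedtuple('Tracer', ['has_light_profile', 'has_pixelization'])

def which_sensitivity_fit(tracer_normal, tracer_sensitive):
    # Both tracers use light profiles and neither uses a pixelization -> profile fit.
    if (tracer_normal.has_light_profile and tracer_sensitive.has_light_profile) and \
            (not tracer_normal.has_pixelization and not tracer_sensitive.has_pixelization):
        return 'SensitivityProfileFit'
    # Neither uses light profiles and both use a pixelization -> inversion fit.
    elif (not tracer_normal.has_light_profile and not tracer_sensitive.has_light_profile) and \
            (tracer_normal.has_pixelization and tracer_sensitive.has_pixelization):
        return 'SensitivityInversionFit'
    raise ValueError('tracer properties do not match either SensitivityFit class')

print(which_sensitivity_fit(Tracer(True, False), Tracer(True, False)))   # SensitivityProfileFit
print(which_sensitivity_fit(Tracer(False, True), Tracer(False, True)))   # SensitivityInversionFit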
valid
Mask.unmasked_for_shape_and_pixel_scale
Setup a mask where all pixels are unmasked. Parameters ---------- shape : (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel.
autolens/data/array/mask.py
def unmasked_for_shape_and_pixel_scale(cls, shape, pixel_scale, invert=False):
    """Setup a mask where all pixels are unmasked.

    Parameters
    ----------
    shape : (int, int)
        The (y,x) shape of the mask in units of pixels.
    pixel_scale: float
        The arc-second to pixel conversion factor of each pixel.
    """
    mask = np.full(tuple(map(lambda d: int(d), shape)), False)
    if invert:
        mask = np.invert(mask)
    return cls(array=mask, pixel_scale=pixel_scale)
[ "Setup", "a", "mask", "where", "all", "pixels", "are", "unmasked", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L54-L66
[ "def", "unmasked_for_shape_and_pixel_scale", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "invert", "=", "False", ")", ":", "mask", "=", "np", ".", "full", "(", "tuple", "(", "map", "(", "lambda", "d", ":", "int", "(", "d", ")", ",", "shape", ")", ")", ",", "False", ")", "if", "invert", ":", "mask", "=", "np", ".", "invert", "(", "mask", ")", "return", "cls", "(", "array", "=", "mask", ",", "pixel_scale", "=", "pixel_scale", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
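A quick plain-NumPy illustration of the construction in the record above (not the PyAutoLens classes themselves): every pixel is False, i.e. unmasked, and invert=True would flip the whole array.

import numpy as np

shape = (3, 3)
mask = np.full(tuple(map(lambda d: int(d), shape)), False)
print(mask.dtype, mask.sum())   # bool 0 -> no pixel is masked
print(np.invert(mask).all())    # True  -> the inverted mask masks every pixel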
valid
Mask.circular
Setup a mask where unmasked pixels are within a circle of an input arc second radius and centre. Parameters ---------- shape: (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel. radius_arcsec : float The radius (in arc seconds) of the circle within which pixels unmasked. centre: (float, float) The centre of the circle used to mask pixels.
autolens/data/array/mask.py
def circular(cls, shape, pixel_scale, radius_arcsec, centre=(0., 0.), invert=False):
    """Setup a mask where unmasked pixels are within a circle of an input arc second radius and centre.

    Parameters
    ----------
    shape: (int, int)
        The (y,x) shape of the mask in units of pixels.
    pixel_scale: float
        The arc-second to pixel conversion factor of each pixel.
    radius_arcsec : float
        The radius (in arc seconds) of the circle within which pixels unmasked.
    centre: (float, float)
        The centre of the circle used to mask pixels.
    """
    mask = mask_util.mask_circular_from_shape_pixel_scale_and_radius(shape, pixel_scale, radius_arcsec, centre)
    if invert:
        mask = np.invert(mask)
    return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
[ "Setup", "a", "mask", "where", "unmasked", "pixels", "are", "within", "a", "circle", "of", "an", "input", "arc", "second", "radius", "and", "centre", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L69-L86
[ "def", "circular", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "radius_arcsec", ",", "centre", "=", "(", "0.", ",", "0.", ")", ",", "invert", "=", "False", ")", ":", "mask", "=", "mask_util", ".", "mask_circular_from_shape_pixel_scale_and_radius", "(", "shape", ",", "pixel_scale", ",", "radius_arcsec", ",", "centre", ")", "if", "invert", ":", "mask", "=", "np", ".", "invert", "(", "mask", ")", "return", "cls", "(", "array", "=", "mask", ".", "astype", "(", "'bool'", ")", ",", "pixel_scale", "=", "pixel_scale", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
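The geometry in the record above is delegated to mask_util.mask_circular_from_shape_pixel_scale_and_radius, which is not part of this row. The snippet below is a hedged, illustrative re-implementation of the rule in plain NumPy; conventions such as the exact centring may differ from the library's own util.

import numpy as np

def circular_mask(shape, pixel_scale, radius_arcsec, centre=(0., 0.)):
    # Pixel-centre coordinates in arc seconds, measured from the mask centre.
    ys, xs = np.indices(shape).astype('float')
    y_arcsec = (ys - (shape[0] - 1) / 2.0) * pixel_scale - centre[0]
    x_arcsec = (xs - (shape[1] - 1) / 2.0) * pixel_scale - centre[1]
    radii = np.sqrt(y_arcsec ** 2 + x_arcsec ** 2)
    return radii > radius_arcsec   # True = masked, False = unmasked

mask = circular_mask(shape=(5, 5), pixel_scale=1.0, radius_arcsec=1.5)
print(mask.astype(int))   # the 0s form a small disc of unmasked pixels at the centre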
valid
Mask.circular_annular
Setup a mask where unmasked pixels are within an annulus of input inner and outer arc second radii and \ centre. Parameters ---------- shape : (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel. inner_radius_arcsec : float The radius (in arc seconds) of the inner circle outside of which pixels are unmasked. outer_radius_arcsec : float The radius (in arc seconds) of the outer circle within which pixels are unmasked. centre: (float, float) The centre of the annulus used to mask pixels.
autolens/data/array/mask.py
def circular_annular(cls, shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec, centre=(0., 0.),
                     invert=False):
    """Setup a mask where unmasked pixels are within an annulus of input inner and outer arc second radii and \
    centre.

    Parameters
    ----------
    shape : (int, int)
        The (y,x) shape of the mask in units of pixels.
    pixel_scale: float
        The arc-second to pixel conversion factor of each pixel.
    inner_radius_arcsec : float
        The radius (in arc seconds) of the inner circle outside of which pixels are unmasked.
    outer_radius_arcsec : float
        The radius (in arc seconds) of the outer circle within which pixels are unmasked.
    centre: (float, float)
        The centre of the annulus used to mask pixels.
    """
    mask = mask_util.mask_circular_annular_from_shape_pixel_scale_and_radii(shape, pixel_scale, inner_radius_arcsec,
                                                                            outer_radius_arcsec, centre)
    if invert:
        mask = np.invert(mask)
    return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
[ "Setup", "a", "mask", "where", "unmasked", "pixels", "are", "within", "an", "annulus", "of", "input", "inner", "and", "outer", "arc", "second", "radii", "and", "\\", "centre", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L89-L110
[ "def", "circular_annular", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "inner_radius_arcsec", ",", "outer_radius_arcsec", ",", "centre", "=", "(", "0.", ",", "0.", ")", ",", "invert", "=", "False", ")", ":", "mask", "=", "mask_util", ".", "mask_circular_annular_from_shape_pixel_scale_and_radii", "(", "shape", ",", "pixel_scale", ",", "inner_radius_arcsec", ",", "outer_radius_arcsec", ",", "centre", ")", "if", "invert", ":", "mask", "=", "np", ".", "invert", "(", "mask", ")", "return", "cls", "(", "array", "=", "mask", ".", "astype", "(", "'bool'", ")", ",", "pixel_scale", "=", "pixel_scale", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
Mask.circular_anti_annular
Setup a mask where unmasked pixels are outside an annulus of input inner and outer arc second radii, but \ within a second outer radius, and at a given centre. This mask there has two distinct unmasked regions (an inner circle and outer annulus), with an inner annulus \ of masked pixels. Parameters ---------- shape : (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel. inner_radius_arcsec : float The radius (in arc seconds) of the inner circle inside of which pixels are unmasked. outer_radius_arcsec : float The radius (in arc seconds) of the outer circle within which pixels are masked and outside of which they \ are unmasked. outer_radius_2_arcsec : float The radius (in arc seconds) of the second outer circle within which pixels are unmasked and outside of \ which they masked. centre: (float, float) The centre of the anti-annulus used to mask pixels.
autolens/data/array/mask.py
def circular_anti_annular(cls, shape, pixel_scale, inner_radius_arcsec, outer_radius_arcsec, outer_radius_2_arcsec,
                          centre=(0., 0.), invert=False):
    """Setup a mask where unmasked pixels are outside an annulus of input inner and outer arc second radii, but \
    within a second outer radius, and at a given centre.

    This mask there has two distinct unmasked regions (an inner circle and outer annulus), with an inner annulus \
    of masked pixels.

    Parameters
    ----------
    shape : (int, int)
        The (y,x) shape of the mask in units of pixels.
    pixel_scale: float
        The arc-second to pixel conversion factor of each pixel.
    inner_radius_arcsec : float
        The radius (in arc seconds) of the inner circle inside of which pixels are unmasked.
    outer_radius_arcsec : float
        The radius (in arc seconds) of the outer circle within which pixels are masked and outside of which they \
        are unmasked.
    outer_radius_2_arcsec : float
        The radius (in arc seconds) of the second outer circle within which pixels are unmasked and outside of \
        which they masked.
    centre: (float, float)
        The centre of the anti-annulus used to mask pixels.
    """
    mask = mask_util.mask_circular_anti_annular_from_shape_pixel_scale_and_radii(shape, pixel_scale,
                                                                                 inner_radius_arcsec,
                                                                                 outer_radius_arcsec,
                                                                                 outer_radius_2_arcsec, centre)
    if invert:
        mask = np.invert(mask)
    return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
[ "Setup", "a", "mask", "where", "unmasked", "pixels", "are", "outside", "an", "annulus", "of", "input", "inner", "and", "outer", "arc", "second", "radii", "but", "\\", "within", "a", "second", "outer", "radius", "and", "at", "a", "given", "centre", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L113-L142
[ "def", "circular_anti_annular", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "inner_radius_arcsec", ",", "outer_radius_arcsec", ",", "outer_radius_2_arcsec", ",", "centre", "=", "(", "0.", ",", "0.", ")", ",", "invert", "=", "False", ")", ":", "mask", "=", "mask_util", ".", "mask_circular_anti_annular_from_shape_pixel_scale_and_radii", "(", "shape", ",", "pixel_scale", ",", "inner_radius_arcsec", ",", "outer_radius_arcsec", ",", "outer_radius_2_arcsec", ",", "centre", ")", "if", "invert", ":", "mask", "=", "np", ".", "invert", "(", "mask", ")", "return", "cls", "(", "array", "=", "mask", ".", "astype", "(", "'bool'", ")", ",", "pixel_scale", "=", "pixel_scale", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
Mask.elliptical
Setup a mask where unmasked pixels are within an ellipse of an input arc second major-axis and centre. Parameters ---------- shape: (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel. major_axis_radius_arcsec : float The major-axis (in arc seconds) of the ellipse within which pixels are unmasked. axis_ratio : float The axis-ratio of the ellipse within which pixels are unmasked. phi : float The rotation angle of the ellipse within which pixels are unmasked, (counter-clockwise from the positive \ x-axis). centre: (float, float) The centre of the ellipse used to mask pixels.
autolens/data/array/mask.py
def elliptical(cls, shape, pixel_scale, major_axis_radius_arcsec, axis_ratio, phi, centre=(0., 0.), invert=False):
    """Setup a mask where unmasked pixels are within an ellipse of an input arc second major-axis and centre.

    Parameters
    ----------
    shape: (int, int)
        The (y,x) shape of the mask in units of pixels.
    pixel_scale: float
        The arc-second to pixel conversion factor of each pixel.
    major_axis_radius_arcsec : float
        The major-axis (in arc seconds) of the ellipse within which pixels are unmasked.
    axis_ratio : float
        The axis-ratio of the ellipse within which pixels are unmasked.
    phi : float
        The rotation angle of the ellipse within which pixels are unmasked, (counter-clockwise from the positive \
        x-axis).
    centre: (float, float)
        The centre of the ellipse used to mask pixels.
    """
    mask = mask_util.mask_elliptical_from_shape_pixel_scale_and_radius(shape, pixel_scale, major_axis_radius_arcsec,
                                                                       axis_ratio, phi, centre)
    if invert:
        mask = np.invert(mask)
    return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
[ "Setup", "a", "mask", "where", "unmasked", "pixels", "are", "within", "an", "ellipse", "of", "an", "input", "arc", "second", "major", "-", "axis", "and", "centre", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L145-L168
[ "def", "elliptical", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "major_axis_radius_arcsec", ",", "axis_ratio", ",", "phi", ",", "centre", "=", "(", "0.", ",", "0.", ")", ",", "invert", "=", "False", ")", ":", "mask", "=", "mask_util", ".", "mask_elliptical_from_shape_pixel_scale_and_radius", "(", "shape", ",", "pixel_scale", ",", "major_axis_radius_arcsec", ",", "axis_ratio", ",", "phi", ",", "centre", ")", "if", "invert", ":", "mask", "=", "np", ".", "invert", "(", "mask", ")", "return", "cls", "(", "array", "=", "mask", ".", "astype", "(", "'bool'", ")", ",", "pixel_scale", "=", "pixel_scale", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
Mask.elliptical_annular
Setup a mask where unmasked pixels are within an elliptical annulus of input inner and outer arc second \ major-axis and centre. Parameters ---------- shape: (int, int) The (y,x) shape of the mask in units of pixels. pixel_scale: float The arc-second to pixel conversion factor of each pixel. inner_major_axis_radius_arcsec : float The major-axis (in arc seconds) of the inner ellipse within which pixels are masked. inner_axis_ratio : float The axis-ratio of the inner ellipse within which pixels are masked. inner_phi : float The rotation angle of the inner ellipse within which pixels are masked, (counter-clockwise from the \ positive x-axis). outer_major_axis_radius_arcsec : float The major-axis (in arc seconds) of the outer ellipse within which pixels are unmasked. outer_axis_ratio : float The axis-ratio of the outer ellipse within which pixels are unmasked. outer_phi : float The rotation angle of the outer ellipse within which pixels are unmasked, (counter-clockwise from the \ positive x-axis). centre: (float, float) The centre of the elliptical annuli used to mask pixels.
autolens/data/array/mask.py
def elliptical_annular(cls, shape, pixel_scale, inner_major_axis_radius_arcsec, inner_axis_ratio, inner_phi,
                       outer_major_axis_radius_arcsec, outer_axis_ratio, outer_phi, centre=(0.0, 0.0), invert=False):
    """Setup a mask where unmasked pixels are within an elliptical annulus of input inner and outer arc second \
    major-axis and centre.

    Parameters
    ----------
    shape: (int, int)
        The (y,x) shape of the mask in units of pixels.
    pixel_scale: float
        The arc-second to pixel conversion factor of each pixel.
    inner_major_axis_radius_arcsec : float
        The major-axis (in arc seconds) of the inner ellipse within which pixels are masked.
    inner_axis_ratio : float
        The axis-ratio of the inner ellipse within which pixels are masked.
    inner_phi : float
        The rotation angle of the inner ellipse within which pixels are masked, (counter-clockwise from the \
        positive x-axis).
    outer_major_axis_radius_arcsec : float
        The major-axis (in arc seconds) of the outer ellipse within which pixels are unmasked.
    outer_axis_ratio : float
        The axis-ratio of the outer ellipse within which pixels are unmasked.
    outer_phi : float
        The rotation angle of the outer ellipse within which pixels are unmasked, (counter-clockwise from the \
        positive x-axis).
    centre: (float, float)
        The centre of the elliptical annuli used to mask pixels.
    """
    mask = mask_util.mask_elliptical_annular_from_shape_pixel_scale_and_radius(shape, pixel_scale,
                                                                               inner_major_axis_radius_arcsec,
                                                                               inner_axis_ratio, inner_phi,
                                                                               outer_major_axis_radius_arcsec,
                                                                               outer_axis_ratio, outer_phi, centre)
    if invert:
        mask = np.invert(mask)
    return cls(array=mask.astype('bool'), pixel_scale=pixel_scale)
[ "Setup", "a", "mask", "where", "unmasked", "pixels", "are", "within", "an", "elliptical", "annulus", "of", "input", "inner", "and", "outer", "arc", "second", "\\", "major", "-", "axis", "and", "centre", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L171-L204
[ "def", "elliptical_annular", "(", "cls", ",", "shape", ",", "pixel_scale", ",", "inner_major_axis_radius_arcsec", ",", "inner_axis_ratio", ",", "inner_phi", ",", "outer_major_axis_radius_arcsec", ",", "outer_axis_ratio", ",", "outer_phi", ",", "centre", "=", "(", "0.0", ",", "0.0", ")", ",", "invert", "=", "False", ")", ":", "mask", "=", "mask_util", ".", "mask_elliptical_annular_from_shape_pixel_scale_and_radius", "(", "shape", ",", "pixel_scale", ",", "inner_major_axis_radius_arcsec", ",", "inner_axis_ratio", ",", "inner_phi", ",", "outer_major_axis_radius_arcsec", ",", "outer_axis_ratio", ",", "outer_phi", ",", "centre", ")", "if", "invert", ":", "mask", "=", "np", ".", "invert", "(", "mask", ")", "return", "cls", "(", "array", "=", "mask", ".", "astype", "(", "'bool'", ")", ",", "pixel_scale", "=", "pixel_scale", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
Mask.map_2d_array_to_masked_1d_array
For a 2D array (e.g. an image, noise_map, etc.) map it to a masked 1D array of valuees using this mask. Parameters ---------- array_2d : ndarray | None | float The 2D array to be mapped to a masked 1D array.
autolens/data/array/mask.py
def map_2d_array_to_masked_1d_array(self, array_2d):
    """For a 2D array (e.g. an image, noise_map, etc.) map it to a masked 1D array of valuees using this mask.

    Parameters
    ----------
    array_2d : ndarray | None | float
        The 2D array to be mapped to a masked 1D array.
    """
    if array_2d is None or isinstance(array_2d, float):
        return array_2d
    return mapping_util.map_2d_array_to_masked_1d_array_from_array_2d_and_mask(self, array_2d)
[ "For", "a", "2D", "array", "(", "e", ".", "g", ".", "an", "image", "noise_map", "etc", ".", ")", "map", "it", "to", "a", "masked", "1D", "array", "of", "valuees", "using", "this", "mask", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L231-L241
[ "def", "map_2d_array_to_masked_1d_array", "(", "self", ",", "array_2d", ")", ":", "if", "array_2d", "is", "None", "or", "isinstance", "(", "array_2d", ",", "float", ")", ":", "return", "array_2d", "return", "mapping_util", ".", "map_2d_array_to_masked_1d_array_from_array_2d_and_mask", "(", "self", ",", "array_2d", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
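A plain-NumPy equivalent of the mapping described in the record above (illustrative only; the mapping_util implementation itself is not part of this row): keep the values whose mask entry is False, flattened in row-major order.

import numpy as np

mask = np.array([[True, False],
                 [False, True]])
array_2d = np.array([[1.0, 2.0],
                     [3.0, 4.0]])
array_1d = array_2d[~mask]
print(array_1d)   # [2. 3.] -- only the unmasked pixels survive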
valid
Mask.blurring_mask_for_psf_shape
Compute a blurring mask, which represents all masked pixels whose light will be blurred into unmasked \ pixels via PSF convolution (see grid_stack.RegularGrid.blurring_grid_from_mask_and_psf_shape). Parameters ---------- psf_shape : (int, int) The shape of the psf which defines the blurring region (e.g. the shape of the PSF)
autolens/data/array/mask.py
def blurring_mask_for_psf_shape(self, psf_shape):
    """Compute a blurring mask, which represents all masked pixels whose light will be blurred into unmasked \
    pixels via PSF convolution (see grid_stack.RegularGrid.blurring_grid_from_mask_and_psf_shape).

    Parameters
    ----------
    psf_shape : (int, int)
        The shape of the psf which defines the blurring region (e.g. the shape of the PSF)
    """
    if psf_shape[0] % 2 == 0 or psf_shape[1] % 2 == 0:
        raise exc.MaskException("psf_size of exterior region must be odd")

    blurring_mask = mask_util.mask_blurring_from_mask_and_psf_shape(self, psf_shape)

    return Mask(blurring_mask, self.pixel_scale)
[ "Compute", "a", "blurring", "mask", "which", "represents", "all", "masked", "pixels", "whose", "light", "will", "be", "blurred", "into", "unmasked", "\\", "pixels", "via", "PSF", "convolution", "(", "see", "grid_stack", ".", "RegularGrid", ".", "blurring_grid_from_mask_and_psf_shape", ")", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L244-L259
[ "def", "blurring_mask_for_psf_shape", "(", "self", ",", "psf_shape", ")", ":", "if", "psf_shape", "[", "0", "]", "%", "2", "==", "0", "or", "psf_shape", "[", "1", "]", "%", "2", "==", "0", ":", "raise", "exc", ".", "MaskException", "(", "\"psf_size of exterior region must be odd\"", ")", "blurring_mask", "=", "mask_util", ".", "mask_blurring_from_mask_and_psf_shape", "(", "self", ",", "psf_shape", ")", "return", "Mask", "(", "blurring_mask", ",", "self", ".", "pixel_scale", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
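A conceptual, pure-NumPy re-implementation of the blurring-mask idea from the record above. The real mask_util.mask_blurring_from_mask_and_psf_shape is not shown in this row, so details such as edge handling here are assumptions: a masked pixel joins the blurring mask (False = included) if it lies within the PSF footprint of any unmasked pixel, because its light blurs into the region being fitted.

import numpy as np

def blurring_mask(mask, psf_shape):
    blurring = np.full(mask.shape, True)
    py, px = psf_shape[0] // 2, psf_shape[1] // 2
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if not mask[y, x]:   # unmasked pixel: flag its masked PSF neighbours
                for dy in range(-py, py + 1):
                    for dx in range(-px, px + 1):
                        yy, xx = y + dy, x + dx
                        if 0 <= yy < mask.shape[0] and 0 <= xx < mask.shape[1] and mask[yy, xx]:
                            blurring[yy, xx] = False
    return blurring

mask = np.full((5, 5), True)
mask[2, 2] = False                                 # a single unmasked pixel in the centre
print(blurring_mask(mask, (3, 3)).astype(int))     # its 8 neighbours enter the blurring mask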
valid
Mask.zoom_region
The zoomed rectangular region corresponding to the square encompassing all unmasked values. This is used to zoom in on the region of an image that is used in an analysis for visualization.
autolens/data/array/mask.py
def zoom_region(self):
    """The zoomed rectangular region corresponding to the square encompassing all unmasked values. This is used
    to zoom in on the region of an image that is used in an analysis for visualization."""

    # Have to convert mask to bool for invert function to work.
    where = np.array(np.where(np.invert(self.astype('bool'))))
    y0, x0 = np.amin(where, axis=1)
    y1, x1 = np.amax(where, axis=1)
    return [y0, y1 + 1, x0, x1 + 1]
[ "The", "zoomed", "rectangular", "region", "corresponding", "to", "the", "square", "encompassing", "all", "unmasked", "values", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/mask.py#L301-L310
[ "def", "zoom_region", "(", "self", ")", ":", "# Have to convert mask to bool for invert function to work.", "where", "=", "np", ".", "array", "(", "np", ".", "where", "(", "np", ".", "invert", "(", "self", ".", "astype", "(", "'bool'", ")", ")", ")", ")", "y0", ",", "x0", "=", "np", ".", "amin", "(", "where", ",", "axis", "=", "1", ")", "y1", ",", "x1", "=", "np", ".", "amax", "(", "where", ",", "axis", "=", "1", ")", "return", "[", "y0", ",", "y1", "+", "1", ",", "x0", ",", "x1", "+", "1", "]" ]
91e50369c7a9c048c83d217625578b72423cd5a7
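The bounding-box logic in the record above, replayed on a small example mask with plain NumPy: the returned indices are slice bounds enclosing every unmasked (False) pixel.

import numpy as np

mask = np.full((6, 6), True)
mask[2:4, 1:5] = False                       # an unmasked rectangle (rows 2-3, columns 1-4)
where = np.array(np.where(np.invert(mask)))
y0, x0 = np.amin(where, axis=1)
y1, x1 = np.amax(where, axis=1)
print([int(y0), int(y1) + 1, int(x0), int(x1) + 1])   # [2, 4, 1, 5]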
valid
data_vector_from_blurred_mapping_matrix_and_data
Compute the hyper vector *D* from a blurred mapping matrix *f* and the 1D image *d* and 1D noise-map *\sigma* \ (see Warren & Dye 2003). Parameters ----------- blurred_mapping_matrix : ndarray The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels. image_1d : ndarray Flattened 1D array of the observed image the inversion is fitting. noise_map_1d : ndarray Flattened 1D array of the noise-map used by the inversion during the fit.
autolens/model/inversion/util/inversion_util.py
def data_vector_from_blurred_mapping_matrix_and_data(blurred_mapping_matrix, image_1d, noise_map_1d):
    """Compute the hyper vector *D* from a blurred mapping matrix *f* and the 1D image *d* and 1D noise-map *\sigma* \
    (see Warren & Dye 2003).

    Parameters
    -----------
    blurred_mapping_matrix : ndarray
        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
    image_1d : ndarray
        Flattened 1D array of the observed image the inversion is fitting.
    noise_map_1d : ndarray
        Flattened 1D array of the noise-map used by the inversion during the fit.
    """
    mapping_shape = blurred_mapping_matrix.shape

    data_vector = np.zeros(mapping_shape[1])

    for image_index in range(mapping_shape[0]):
        for pix_index in range(mapping_shape[1]):
            data_vector[pix_index] += image_1d[image_index] * \
                                      blurred_mapping_matrix[image_index, pix_index] / (noise_map_1d[image_index] ** 2.0)

    return data_vector
[ "Compute", "the", "hyper", "vector", "*", "D", "*", "from", "a", "blurred", "mapping", "matrix", "*", "f", "*", "and", "the", "1D", "image", "*", "d", "*", "and", "1D", "noise", "-", "map", "*", "\\", "sigma", "*", "\\", "(", "see", "Warren", "&", "Dye", "2003", ")", ".", "Parameters", "-----------", "blurred_mapping_matrix", ":", "ndarray", "The", "matrix", "representing", "the", "blurred", "mappings", "between", "sub", "-", "grid", "pixels", "and", "pixelization", "pixels", ".", "image_1d", ":", "ndarray", "Flattened", "1D", "array", "of", "the", "observed", "image", "the", "inversion", "is", "fitting", ".", "noise_map_1d", ":", "ndarray", "Flattened", "1D", "array", "of", "the", "noise", "-", "map", "used", "by", "the", "inversion", "during", "the", "fit", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/util/inversion_util.py#L5-L28
[ "def", "data_vector_from_blurred_mapping_matrix_and_data", "(", "blurred_mapping_matrix", ",", "image_1d", ",", "noise_map_1d", ")", ":", "mapping_shape", "=", "blurred_mapping_matrix", ".", "shape", "data_vector", "=", "np", ".", "zeros", "(", "mapping_shape", "[", "1", "]", ")", "for", "image_index", "in", "range", "(", "mapping_shape", "[", "0", "]", ")", ":", "for", "pix_index", "in", "range", "(", "mapping_shape", "[", "1", "]", ")", ":", "data_vector", "[", "pix_index", "]", "+=", "image_1d", "[", "image_index", "]", "*", "blurred_mapping_matrix", "[", "image_index", ",", "pix_index", "]", "/", "(", "noise_map_1d", "[", "image_index", "]", "**", "2.0", ")", "return", "data_vector" ]
91e50369c7a9c048c83d217625578b72423cd5a7
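The double loop in the record above computes D_j = sum_i d_i * f_ij / sigma_i^2. A vectorised NumPy equivalent on random toy data (an illustration, not how the library itself is written):

import numpy as np

rng = np.random.default_rng(0)
blurred_mapping_matrix = rng.random((4, 3))   # 4 image pixels, 3 pixelization pixels
image_1d = rng.random(4)
noise_map_1d = rng.random(4) + 0.5

data_vector = blurred_mapping_matrix.T @ (image_1d / noise_map_1d ** 2.0)
print(data_vector.shape)   # (3,) -- one entry per pixelization pixel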
valid
curvature_matrix_from_blurred_mapping_matrix
Compute the curvature matrix *F* from a blurred mapping matrix *f* and the 1D noise-map *\sigma* \ (see Warren & Dye 2003). Parameters ----------- blurred_mapping_matrix : ndarray The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels. noise_map_1d : ndarray Flattened 1D array of the noise-map used by the inversion during the fit.
autolens/model/inversion/util/inversion_util.py
def curvature_matrix_from_blurred_mapping_matrix(blurred_mapping_matrix, noise_map_1d):
    """Compute the curvature matrix *F* from a blurred mapping matrix *f* and the 1D noise-map *\sigma* \
    (see Warren & Dye 2003).

    Parameters
    -----------
    blurred_mapping_matrix : ndarray
        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
    noise_map_1d : ndarray
        Flattened 1D array of the noise-map used by the inversion during the fit.
    """
    flist = np.zeros(blurred_mapping_matrix.shape[0])
    iflist = np.zeros(blurred_mapping_matrix.shape[0], dtype='int')
    return curvature_matrix_from_blurred_mapping_matrix_jit(blurred_mapping_matrix, noise_map_1d, flist, iflist)
[ "Compute", "the", "curvature", "matrix", "*", "F", "*", "from", "a", "blurred", "mapping", "matrix", "*", "f", "*", "and", "the", "1D", "noise", "-", "map", "*", "\\", "sigma", "*", "\\", "(", "see", "Warren", "&", "Dye", "2003", ")", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/util/inversion_util.py#L30-L44
[ "def", "curvature_matrix_from_blurred_mapping_matrix", "(", "blurred_mapping_matrix", ",", "noise_map_1d", ")", ":", "flist", "=", "np", ".", "zeros", "(", "blurred_mapping_matrix", ".", "shape", "[", "0", "]", ")", "iflist", "=", "np", ".", "zeros", "(", "blurred_mapping_matrix", ".", "shape", "[", "0", "]", ",", "dtype", "=", "'int'", ")", "return", "curvature_matrix_from_blurred_mapping_matrix_jit", "(", "blurred_mapping_matrix", ",", "noise_map_1d", ",", "flist", ",", "iflist", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
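The record above (and the jitted helper it delegates to, shown in the next record) builds F_jk = sum_i f_ij f_ik / sigma_i^2. An equivalent vectorised sketch on toy data, for illustration only: divide each row of f by its noise value, then form the Gram matrix.

import numpy as np

rng = np.random.default_rng(1)
blurred_mapping_matrix = rng.random((4, 3))
noise_map_1d = rng.random(4) + 0.5

f_over_sigma = blurred_mapping_matrix / noise_map_1d[:, None]
curvature_matrix = f_over_sigma.T @ f_over_sigma
print(curvature_matrix.shape)   # (3, 3), symmetric by construction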
valid
curvature_matrix_from_blurred_mapping_matrix_jit
Compute the curvature matrix *F* from a blurred mapping matrix *f* and the 1D noise-map *\sigma* \ (see Warren & Dye 2003). Parameters ----------- blurred_mapping_matrix : ndarray The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels. noise_map_1d : ndarray Flattened 1D array of the noise-map used by the inversion during the fit. flist : ndarray NumPy array of floats used to store mappings for efficienctly calculation. iflist : ndarray NumPy array of integers used to store mappings for efficienctly calculation.
autolens/model/inversion/util/inversion_util.py
def curvature_matrix_from_blurred_mapping_matrix_jit(blurred_mapping_matrix, noise_map_1d, flist, iflist):
    """Compute the curvature matrix *F* from a blurred mapping matrix *f* and the 1D noise-map *\sigma* \
    (see Warren & Dye 2003).

    Parameters
    -----------
    blurred_mapping_matrix : ndarray
        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
    noise_map_1d : ndarray
        Flattened 1D array of the noise-map used by the inversion during the fit.
    flist : ndarray
        NumPy array of floats used to store mappings for efficienctly calculation.
    iflist : ndarray
        NumPy array of integers used to store mappings for efficienctly calculation.
    """
    curvature_matrix = np.zeros((blurred_mapping_matrix.shape[1], blurred_mapping_matrix.shape[1]))

    for image_index in range(blurred_mapping_matrix.shape[0]):
        index = 0
        for pixel_index in range(blurred_mapping_matrix.shape[1]):
            if blurred_mapping_matrix[image_index, pixel_index] > 0.0:
                flist[index] = blurred_mapping_matrix[image_index, pixel_index] / noise_map_1d[image_index]
                iflist[index] = pixel_index
                index += 1

        if index > 0:
            for i1 in range(index):
                for j1 in range(index):
                    ix = iflist[i1]
                    iy = iflist[j1]
                    curvature_matrix[ix, iy] += flist[i1] * flist[j1]

    for i in range(blurred_mapping_matrix.shape[1]):
        for j in range(blurred_mapping_matrix.shape[1]):
            curvature_matrix[i, j] = curvature_matrix[j, i]

    return curvature_matrix
[ "Compute", "the", "curvature", "matrix", "*", "F", "*", "from", "a", "blurred", "mapping", "matrix", "*", "f", "*", "and", "the", "1D", "noise", "-", "map", "*", "\\", "sigma", "*", "\\", "(", "see", "Warren", "&", "Dye", "2003", ")", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/util/inversion_util.py#L47-L83
[ "def", "curvature_matrix_from_blurred_mapping_matrix_jit", "(", "blurred_mapping_matrix", ",", "noise_map_1d", ",", "flist", ",", "iflist", ")", ":", "curvature_matrix", "=", "np", ".", "zeros", "(", "(", "blurred_mapping_matrix", ".", "shape", "[", "1", "]", ",", "blurred_mapping_matrix", ".", "shape", "[", "1", "]", ")", ")", "for", "image_index", "in", "range", "(", "blurred_mapping_matrix", ".", "shape", "[", "0", "]", ")", ":", "index", "=", "0", "for", "pixel_index", "in", "range", "(", "blurred_mapping_matrix", ".", "shape", "[", "1", "]", ")", ":", "if", "blurred_mapping_matrix", "[", "image_index", ",", "pixel_index", "]", ">", "0.0", ":", "flist", "[", "index", "]", "=", "blurred_mapping_matrix", "[", "image_index", ",", "pixel_index", "]", "/", "noise_map_1d", "[", "image_index", "]", "iflist", "[", "index", "]", "=", "pixel_index", "index", "+=", "1", "if", "index", ">", "0", ":", "for", "i1", "in", "range", "(", "index", ")", ":", "for", "j1", "in", "range", "(", "index", ")", ":", "ix", "=", "iflist", "[", "i1", "]", "iy", "=", "iflist", "[", "j1", "]", "curvature_matrix", "[", "ix", ",", "iy", "]", "+=", "flist", "[", "i1", "]", "*", "flist", "[", "j1", "]", "for", "i", "in", "range", "(", "blurred_mapping_matrix", ".", "shape", "[", "1", "]", ")", ":", "for", "j", "in", "range", "(", "blurred_mapping_matrix", ".", "shape", "[", "1", "]", ")", ":", "curvature_matrix", "[", "i", ",", "j", "]", "=", "curvature_matrix", "[", "j", ",", "i", "]", "return", "curvature_matrix" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
reconstructed_data_vector_from_blurred_mapping_matrix_and_solution_vector
Compute the reconstructed hyper vector from the blurrred mapping matrix *f* and solution vector *S*. Parameters ----------- blurred_mapping_matrix : ndarray The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
autolens/model/inversion/util/inversion_util.py
def reconstructed_data_vector_from_blurred_mapping_matrix_and_solution_vector(blurred_mapping_matrix, solution_vector):
    """Compute the reconstructed hyper vector from the blurrred mapping matrix *f* and solution vector *S*.

    Parameters
    -----------
    blurred_mapping_matrix : ndarray
        The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels.
    """
    reconstructed_data_vector = np.zeros(blurred_mapping_matrix.shape[0])
    for i in range(blurred_mapping_matrix.shape[0]):
        for j in range(solution_vector.shape[0]):
            reconstructed_data_vector[i] += solution_vector[j] * blurred_mapping_matrix[i, j]

    return reconstructed_data_vector
[ "Compute", "the", "reconstructed", "hyper", "vector", "from", "the", "blurrred", "mapping", "matrix", "*", "f", "*", "and", "solution", "vector", "*", "S", "*", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/util/inversion_util.py#L86-L100
[ "def", "reconstructed_data_vector_from_blurred_mapping_matrix_and_solution_vector", "(", "blurred_mapping_matrix", ",", "solution_vector", ")", ":", "reconstructed_data_vector", "=", "np", ".", "zeros", "(", "blurred_mapping_matrix", ".", "shape", "[", "0", "]", ")", "for", "i", "in", "range", "(", "blurred_mapping_matrix", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "solution_vector", ".", "shape", "[", "0", "]", ")", ":", "reconstructed_data_vector", "[", "i", "]", "+=", "solution_vector", "[", "j", "]", "*", "blurred_mapping_matrix", "[", "i", ",", "j", "]", "return", "reconstructed_data_vector" ]
91e50369c7a9c048c83d217625578b72423cd5a7
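The reconstruction in the record above is simply the matrix product f @ S; a toy-data sketch (illustrative only):

import numpy as np

rng = np.random.default_rng(2)
blurred_mapping_matrix = rng.random((4, 3))
solution_vector = rng.random(3)

reconstructed_data_vector = blurred_mapping_matrix @ solution_vector
print(reconstructed_data_vector.shape)   # (4,) -- one model value per image pixel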
valid
Inversion.regularization_term
Compute the regularization term of an inversion. This term represents the sum of the difference in flux \ between every pair of neighboring pixels. This is computed as: s_T * H * s = solution_vector.T * regularization_matrix * solution_vector The term is referred to as *G_l* in Warren & Dye 2003, Nightingale & Dye 2015. The above works include the regularization_matrix coefficient (lambda) in this calculation. In PyAutoLens, \ this is already in the regularization matrix and thus implicitly included in the matrix multiplication.
autolens/model/inversion/inversions.py
def regularization_term(self):
    """Compute the regularization term of an inversion. This term represents the sum of the difference in flux \
    between every pair of neighboring pixels. This is computed as:

    s_T * H * s = solution_vector.T * regularization_matrix * solution_vector

    The term is referred to as *G_l* in Warren & Dye 2003, Nightingale & Dye 2015.

    The above works include the regularization_matrix coefficient (lambda) in this calculation. In PyAutoLens, \
    this is already in the regularization matrix and thus implicitly included in the matrix multiplication.
    """
    return np.matmul(self.solution_vector.T, np.matmul(self.regularization_matrix, self.solution_vector))
[ "Compute", "the", "regularization", "term", "of", "an", "inversion", ".", "This", "term", "represents", "the", "sum", "of", "the", "difference", "in", "flux", "\\", "between", "every", "pair", "of", "neighboring", "pixels", ".", "This", "is", "computed", "as", ":" ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/inversions.py#L77-L88
[ "def", "regularization_term", "(", "self", ")", ":", "return", "np", ".", "matmul", "(", "self", ".", "solution_vector", ".", "T", ",", "np", ".", "matmul", "(", "self", ".", "regularization_matrix", ",", "self", ".", "solution_vector", ")", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
Inversion.log_determinant_of_matrix_cholesky
There are two terms in the inversion's Bayesian likelihood function which require the log determinant of \ a matrix. These are (Nightingale & Dye 2015, Nightingale, Dye and Massey 2018): ln[det(F + H)] = ln[det(curvature_reg_matrix)] ln[det(H)] = ln[det(regularization_matrix)] The curvature_reg_matrix is positive-definite, which means the above log determinants can be computed \ efficiently (compared to using np.det) by using a Cholesky decomposition first and summing the log of each \ diagonal term. Parameters ----------- matrix : ndarray The positive-definite matrix the log determinant is computed for.
autolens/model/inversion/inversions.py
def log_determinant_of_matrix_cholesky(matrix):
    """There are two terms in the inversion's Bayesian likelihood function which require the log determinant of \
    a matrix. These are (Nightingale & Dye 2015, Nightingale, Dye and Massey 2018):

    ln[det(F + H)] = ln[det(curvature_reg_matrix)]
    ln[det(H)]     = ln[det(regularization_matrix)]

    The curvature_reg_matrix is positive-definite, which means the above log determinants can be computed \
    efficiently (compared to using np.det) by using a Cholesky decomposition first and summing the log of each \
    diagonal term.

    Parameters
    -----------
    matrix : ndarray
        The positive-definite matrix the log determinant is computed for.
    """
    try:
        return 2.0 * np.sum(np.log(np.diag(np.linalg.cholesky(matrix))))
    except np.linalg.LinAlgError:
        raise exc.InversionException()
[ "There", "are", "two", "terms", "in", "the", "inversion", "s", "Bayesian", "likelihood", "function", "which", "require", "the", "log", "determinant", "of", "\\", "a", "matrix", ".", "These", "are", "(", "Nightingale", "&", "Dye", "2015", "Nightingale", "Dye", "and", "Massey", "2018", ")", ":" ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/inversion/inversions.py#L99-L118
[ "def", "log_determinant_of_matrix_cholesky", "(", "matrix", ")", ":", "try", ":", "return", "2.0", "*", "np", ".", "sum", "(", "np", ".", "log", "(", "np", ".", "diag", "(", "np", ".", "linalg", ".", "cholesky", "(", "matrix", ")", ")", ")", ")", "except", "np", ".", "linalg", ".", "LinAlgError", ":", "raise", "exc", ".", "InversionException", "(", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
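A quick numerical check of the Cholesky identity used in the record above, against NumPy's slogdet (illustrative, on a random positive-definite matrix): for M = L L^T, ln det M = 2 * sum(ln diag(L)).

import numpy as np

rng = np.random.default_rng(3)
A = rng.random((4, 4))
M = A @ A.T + 4.0 * np.eye(4)            # make M comfortably positive-definite

log_det_cholesky = 2.0 * np.sum(np.log(np.diag(np.linalg.cholesky(M))))
print(np.isclose(log_det_cholesky, np.linalg.slogdet(M)[1]))   # True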
valid
GalaxyModel.constant_light_profiles
Returns ------- light_profiles: [light_profiles.LightProfile] Light profiles with set variables
autolens/model/galaxy/galaxy_model.py
def constant_light_profiles(self):
    """
    Returns
    -------
    light_profiles: [light_profiles.LightProfile]
        Light profiles with set variables
    """
    return [value for value in self.__dict__.values() if galaxy.is_light_profile(value)]
[ "Returns", "-------", "light_profiles", ":", "[", "light_profiles", ".", "LightProfile", "]", "Light", "profiles", "with", "set", "variables" ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L147-L154
[ "def", "constant_light_profiles", "(", "self", ")", ":", "return", "[", "value", "for", "value", "in", "self", ".", "__dict__", ".", "values", "(", ")", "if", "galaxy", ".", "is_light_profile", "(", "value", ")", "]" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
GalaxyModel.constant_mass_profiles
Returns ------- mass_profiles: [mass_profiles.MassProfile] Mass profiles with set variables
autolens/model/galaxy/galaxy_model.py
def constant_mass_profiles(self): """ Returns ------- mass_profiles: [mass_profiles.MassProfile] Mass profiles with set variables """ return [value for value in self.__dict__.values() if galaxy.is_mass_profile(value)]
def constant_mass_profiles(self): """ Returns ------- mass_profiles: [mass_profiles.MassProfile] Mass profiles with set variables """ return [value for value in self.__dict__.values() if galaxy.is_mass_profile(value)]
[ "Returns", "-------", "mass_profiles", ":", "[", "mass_profiles", ".", "MassProfile", "]", "Mass", "profiles", "with", "set", "variables" ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L157-L164
[ "def", "constant_mass_profiles", "(", "self", ")", ":", "return", "[", "value", "for", "value", "in", "self", ".", "__dict__", ".", "values", "(", ")", "if", "galaxy", ".", "is_mass_profile", "(", "value", ")", "]" ]
91e50369c7a9c048c83d217625578b72423cd5a7
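The two properties above share one pattern: the galaxy model keeps its fixed profiles as plain instance attributes and recovers them by filtering self.__dict__ with a type predicate. A toy sketch of that pattern, using hypothetical stand-in classes rather than the real autolens profile types:

class ToyLightProfile:
    """Stand-in for a light profile with fixed (constant) parameters."""

class ToyMassProfile:
    """Stand-in for a mass profile with fixed (constant) parameters."""

class ToyGalaxyModel:
    def __init__(self):
        # Profiles are stored as ordinary attributes, so they appear in __dict__.
        self.bulge = ToyLightProfile()
        self.disk = ToyLightProfile()
        self.halo = ToyMassProfile()

    @property
    def constant_light_profiles(self):
        return [value for value in self.__dict__.values()
                if isinstance(value, ToyLightProfile)]

    @property
    def constant_mass_profiles(self):
        return [value for value in self.__dict__.values()
                if isinstance(value, ToyMassProfile)]

model = ToyGalaxyModel()
assert len(model.constant_light_profiles) == 2
assert len(model.constant_mass_profiles) == 1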
valid
GalaxyModel.prior_models
Returns ------- prior_models: [model_mapper.PriorModel] A list of the prior models (e.g. variable profiles) attached to this galaxy prior
autolens/model/galaxy/galaxy_model.py
def prior_models(self): """ Returns ------- prior_models: [model_mapper.PriorModel] A list of the prior models (e.g. variable profiles) attached to this galaxy prior """ return [value for _, value in filter(lambda t: isinstance(t[1], pm.PriorModel), self.__dict__.items())]
def prior_models(self): """ Returns ------- prior_models: [model_mapper.PriorModel] A list of the prior models (e.g. variable profiles) attached to this galaxy prior """ return [value for _, value in filter(lambda t: isinstance(t[1], pm.PriorModel), self.__dict__.items())]
[ "Returns", "-------", "prior_models", ":", "[", "model_mapper", ".", "PriorModel", "]", "A", "list", "of", "the", "prior", "models", "(", "e", ".", "g", ".", "variable", "profiles", ")", "attached", "to", "this", "galaxy", "prior" ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L167-L175
[ "def", "prior_models", "(", "self", ")", ":", "return", "[", "value", "for", "_", ",", "value", "in", "filter", "(", "lambda", "t", ":", "isinstance", "(", "t", "[", "1", "]", ",", "pm", ".", "PriorModel", ")", ",", "self", ".", "__dict__", ".", "items", "(", ")", ")", "]" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
GalaxyModel.profile_prior_model_dict
Returns ------- profile_prior_model_dict: {str: PriorModel} A dictionary mapping_matrix instance variable names to variable profiles.
autolens/model/galaxy/galaxy_model.py
def profile_prior_model_dict(self): """ Returns ------- profile_prior_model_dict: {str: PriorModel} A dictionary mapping_matrix instance variable names to variable profiles. """ return {key: value for key, value in filter(lambda t: isinstance(t[1], pm.PriorModel) and is_profile_class(t[1].cls), self.__dict__.items())}
def profile_prior_model_dict(self): """ Returns ------- profile_prior_model_dict: {str: PriorModel} A dictionary mapping_matrix instance variable names to variable profiles. """ return {key: value for key, value in filter(lambda t: isinstance(t[1], pm.PriorModel) and is_profile_class(t[1].cls), self.__dict__.items())}
[ "Returns", "-------", "profile_prior_model_dict", ":", "{", "str", ":", "PriorModel", "}", "A", "dictionary", "mapping_matrix", "instance", "variable", "names", "to", "variable", "profiles", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L178-L187
[ "def", "profile_prior_model_dict", "(", "self", ")", ":", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "filter", "(", "lambda", "t", ":", "isinstance", "(", "t", "[", "1", "]", ",", "pm", ".", "PriorModel", ")", "and", "is_profile_class", "(", "t", "[", "1", "]", ".", "cls", ")", ",", "self", ".", "__dict__", ".", "items", "(", ")", ")", "}" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
GalaxyModel.constant_profile_dict
Returns ------- constant_profile_dict: {str: geometry_profiles.GeometryProfile} A dictionary mapping_matrix instance variable names to profiles with set variables.
autolens/model/galaxy/galaxy_model.py
def constant_profile_dict(self): """ Returns ------- constant_profile_dict: {str: geometry_profiles.GeometryProfile} A dictionary mapping_matrix instance variable names to profiles with set variables. """ return {key: value for key, value in self.__dict__.items() if galaxy.is_light_profile(value) or galaxy.is_mass_profile(value)}
def constant_profile_dict(self): """ Returns ------- constant_profile_dict: {str: geometry_profiles.GeometryProfile} A dictionary mapping_matrix instance variable names to profiles with set variables. """ return {key: value for key, value in self.__dict__.items() if galaxy.is_light_profile(value) or galaxy.is_mass_profile(value)}
[ "Returns", "-------", "constant_profile_dict", ":", "{", "str", ":", "geometry_profiles", ".", "GeometryProfile", "}", "A", "dictionary", "mapping_matrix", "instance", "variable", "names", "to", "profiles", "with", "set", "variables", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L200-L208
[ "def", "constant_profile_dict", "(", "self", ")", ":", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "self", ".", "__dict__", ".", "items", "(", ")", "if", "galaxy", ".", "is_light_profile", "(", "value", ")", "or", "galaxy", ".", "is_mass_profile", "(", "value", ")", "}" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
GalaxyModel.prior_class_dict
Returns ------- prior_class_dict: {Prior: class} A dictionary mapping_matrix priors to the class associated with their prior model.
autolens/model/galaxy/galaxy_model.py
def prior_class_dict(self): """ Returns ------- prior_class_dict: {Prior: class} A dictionary mapping_matrix priors to the class associated with their prior model. """ return {prior: cls for prior_model in self.prior_models for prior, cls in prior_model.prior_class_dict.items()}
def prior_class_dict(self): """ Returns ------- prior_class_dict: {Prior: class} A dictionary mapping_matrix priors to the class associated with their prior model. """ return {prior: cls for prior_model in self.prior_models for prior, cls in prior_model.prior_class_dict.items()}
[ "Returns", "-------", "prior_class_dict", ":", "{", "Prior", ":", "class", "}", "A", "dictionary", "mapping_matrix", "priors", "to", "the", "class", "associated", "with", "their", "prior", "model", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L233-L241
[ "def", "prior_class_dict", "(", "self", ")", ":", "return", "{", "prior", ":", "cls", "for", "prior_model", "in", "self", ".", "prior_models", "for", "prior", ",", "cls", "in", "prior_model", ".", "prior_class_dict", ".", "items", "(", ")", "}" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
GalaxyModel.instance_for_arguments
Create an instance of the associated class for a set of arguments Parameters ---------- arguments: {Prior: value} Dictionary mapping_matrix priors to attribute analysis_path and value pairs Returns ------- An instance of the class
autolens/model/galaxy/galaxy_model.py
def instance_for_arguments(self, arguments): """ Create an instance of the associated class for a set of arguments Parameters ---------- arguments: {Prior: value} Dictionary mapping_matrix priors to attribute analysis_path and value pairs Returns ------- An instance of the class """ profiles = {**{key: value.instance_for_arguments(arguments) for key, value in self.profile_prior_model_dict.items()}, **self.constant_profile_dict} try: redshift = self.redshift.instance_for_arguments(arguments) except AttributeError: redshift = self.redshift pixelization = self.pixelization.instance_for_arguments(arguments) \ if isinstance(self.pixelization, pm.PriorModel) \ else self.pixelization regularization = self.regularization.instance_for_arguments(arguments) \ if isinstance(self.regularization, pm.PriorModel) \ else self.regularization hyper_galaxy = self.hyper_galaxy.instance_for_arguments(arguments) \ if isinstance(self.hyper_galaxy, pm.PriorModel) \ else self.hyper_galaxy return galaxy.Galaxy(redshift=redshift, pixelization=pixelization, regularization=regularization, hyper_galaxy=hyper_galaxy, **profiles)
def instance_for_arguments(self, arguments): """ Create an instance of the associated class for a set of arguments Parameters ---------- arguments: {Prior: value} Dictionary mapping_matrix priors to attribute analysis_path and value pairs Returns ------- An instance of the class """ profiles = {**{key: value.instance_for_arguments(arguments) for key, value in self.profile_prior_model_dict.items()}, **self.constant_profile_dict} try: redshift = self.redshift.instance_for_arguments(arguments) except AttributeError: redshift = self.redshift pixelization = self.pixelization.instance_for_arguments(arguments) \ if isinstance(self.pixelization, pm.PriorModel) \ else self.pixelization regularization = self.regularization.instance_for_arguments(arguments) \ if isinstance(self.regularization, pm.PriorModel) \ else self.regularization hyper_galaxy = self.hyper_galaxy.instance_for_arguments(arguments) \ if isinstance(self.hyper_galaxy, pm.PriorModel) \ else self.hyper_galaxy return galaxy.Galaxy(redshift=redshift, pixelization=pixelization, regularization=regularization, hyper_galaxy=hyper_galaxy, **profiles)
[ "Create", "an", "instance", "of", "the", "associated", "class", "for", "a", "set", "of", "arguments" ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L243-L275
[ "def", "instance_for_arguments", "(", "self", ",", "arguments", ")", ":", "profiles", "=", "{", "*", "*", "{", "key", ":", "value", ".", "instance_for_arguments", "(", "arguments", ")", "for", "key", ",", "value", "in", "self", ".", "profile_prior_model_dict", ".", "items", "(", ")", "}", ",", "*", "*", "self", ".", "constant_profile_dict", "}", "try", ":", "redshift", "=", "self", ".", "redshift", ".", "instance_for_arguments", "(", "arguments", ")", "except", "AttributeError", ":", "redshift", "=", "self", ".", "redshift", "pixelization", "=", "self", ".", "pixelization", ".", "instance_for_arguments", "(", "arguments", ")", "if", "isinstance", "(", "self", ".", "pixelization", ",", "pm", ".", "PriorModel", ")", "else", "self", ".", "pixelization", "regularization", "=", "self", ".", "regularization", ".", "instance_for_arguments", "(", "arguments", ")", "if", "isinstance", "(", "self", ".", "regularization", ",", "pm", ".", "PriorModel", ")", "else", "self", ".", "regularization", "hyper_galaxy", "=", "self", ".", "hyper_galaxy", ".", "instance_for_arguments", "(", "arguments", ")", "if", "isinstance", "(", "self", ".", "hyper_galaxy", ",", "pm", ".", "PriorModel", ")", "else", "self", ".", "hyper_galaxy", "return", "galaxy", ".", "Galaxy", "(", "redshift", "=", "redshift", ",", "pixelization", "=", "pixelization", ",", "regularization", "=", "regularization", ",", "hyper_galaxy", "=", "hyper_galaxy", ",", "*", "*", "profiles", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
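instance_for_arguments mixes freshly resolved variable profiles with already-constant ones by merging two dicts and unpacking the result as keyword arguments to the Galaxy constructor. A stripped-down sketch of just that merge step, with hypothetical placeholder values standing in for real profile instances:

# Profiles whose parameters were just resolved from the sampler's arguments.
resolved_profiles = {"bulge": "resolved-bulge", "disk": "resolved-disk"}

# Profiles that were fixed (constant) from the start.
constant_profiles = {"halo": "constant-halo"}

# The {**a, **b} merge yields a single dict; later keys would win on collision.
profiles = {**resolved_profiles, **constant_profiles}

def make_galaxy(redshift, **profiles):
    # Stand-in for galaxy.Galaxy(...): simply record what it received.
    return {"redshift": redshift, **profiles}

galaxy_instance = make_galaxy(redshift=0.5, **profiles)
assert set(galaxy_instance) == {"redshift", "bulge", "disk", "halo"}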
valid
GalaxyModel.gaussian_prior_model_for_arguments
Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior models with new arguments. Parameters ---------- arguments: dict A dictionary mapping_matrix between old priors and their replacements. Returns ------- new_model: GalaxyModel A model with some or all priors replaced.
autolens/model/galaxy/galaxy_model.py
def gaussian_prior_model_for_arguments(self, arguments): """ Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior models with new arguments. Parameters ---------- arguments: dict A dictionary mapping_matrix between old priors and their replacements. Returns ------- new_model: GalaxyModel A model with some or all priors replaced. """ new_model = copy.deepcopy(self) for key, value in filter(lambda t: isinstance(t[1], pm.PriorModel), self.__dict__.items()): setattr(new_model, key, value.gaussian_prior_model_for_arguments(arguments)) return new_model
def gaussian_prior_model_for_arguments(self, arguments): """ Create a new galaxy prior from a set of arguments, replacing the priors of some of this galaxy prior's prior models with new arguments. Parameters ---------- arguments: dict A dictionary mapping_matrix between old priors and their replacements. Returns ------- new_model: GalaxyModel A model with some or all priors replaced. """ new_model = copy.deepcopy(self) for key, value in filter(lambda t: isinstance(t[1], pm.PriorModel), self.__dict__.items()): setattr(new_model, key, value.gaussian_prior_model_for_arguments(arguments)) return new_model
[ "Create", "a", "new", "galaxy", "prior", "from", "a", "set", "of", "arguments", "replacing", "the", "priors", "of", "some", "of", "this", "galaxy", "prior", "s", "prior", "models", "with", "new", "arguments", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/galaxy/galaxy_model.py#L277-L297
[ "def", "gaussian_prior_model_for_arguments", "(", "self", ",", "arguments", ")", ":", "new_model", "=", "copy", ".", "deepcopy", "(", "self", ")", "for", "key", ",", "value", "in", "filter", "(", "lambda", "t", ":", "isinstance", "(", "t", "[", "1", "]", ",", "pm", ".", "PriorModel", ")", ",", "self", ".", "__dict__", ".", "items", "(", ")", ")", ":", "setattr", "(", "new_model", ",", "key", ",", "value", ".", "gaussian_prior_model_for_arguments", "(", "arguments", ")", ")", "return", "new_model" ]
91e50369c7a9c048c83d217625578b72423cd5a7
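gaussian_prior_model_for_arguments deep-copies the whole model and then swaps each PriorModel attribute for its updated counterpart, so the original model is never mutated. A generic sketch of that copy-and-replace idiom, using illustrative toy classes rather than the real PriorModel:

import copy

class ToyPriorModel:
    def __init__(self, width):
        self.width = width

    def updated(self, factor):
        # Stand-in for replacing priors on a single prior model.
        return ToyPriorModel(self.width * factor)

class ToyGalaxyModel:
    def __init__(self):
        self.bulge = ToyPriorModel(width=1.0)
        self.label = "lens galaxy"  # non-PriorModel attributes are left alone

old_model = ToyGalaxyModel()
new_model = copy.deepcopy(old_model)
for key, value in old_model.__dict__.items():
    if isinstance(value, ToyPriorModel):
        setattr(new_model, key, value.updated(factor=0.5))

assert old_model.bulge.width == 1.0   # original unchanged
assert new_model.bulge.width == 0.5   # clone carries the replaced prior
assert new_model.label == "lens galaxy"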
valid
plot_image
Plot the observed image of the ccd data. See *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below. Parameters ----------- image : ScaledSquarePixelArray The image of the data. plot_origin : bool If true, the origin of the data's coordinate system is plotted as a 'x'. image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \ over the image.
autolens/data/plotters/data_plotters.py
def plot_image( image, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, should_plot_border=False, positions=None, as_subplot=False, units='arcsec', kpc_per_arcsec=None, figsize=(7, 7), aspect='square', cmap='jet', norm='linear', norm_min=None, norm_max=None, linthresh=0.05, linscale=0.01, cb_ticksize=10, cb_fraction=0.047, cb_pad=0.01, cb_tick_values=None, cb_tick_labels=None, title='Image', titlesize=16, xlabelsize=16, ylabelsize=16, xyticksize=16, mask_pointsize=10, position_pointsize=30, grid_pointsize=1, output_path=None, output_format='show', output_filename='image'): """Plot the observed image of the ccd data. Set *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below. Parameters ----------- image : ScaledSquarePixelArray The image of the data. plot_origin : True If true, the origin of the data's coordinate system is plotted as a 'x'. image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \ over the immage. """ origin = get_origin(array=image, plot_origin=plot_origin) array_plotters.plot_array( array=image, origin=origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, should_plot_border=should_plot_border, positions=positions, as_subplot=as_subplot, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize, output_path=output_path, output_format=output_format, output_filename=output_filename)
def plot_image( image, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, should_plot_border=False, positions=None, as_subplot=False, units='arcsec', kpc_per_arcsec=None, figsize=(7, 7), aspect='square', cmap='jet', norm='linear', norm_min=None, norm_max=None, linthresh=0.05, linscale=0.01, cb_ticksize=10, cb_fraction=0.047, cb_pad=0.01, cb_tick_values=None, cb_tick_labels=None, title='Image', titlesize=16, xlabelsize=16, ylabelsize=16, xyticksize=16, mask_pointsize=10, position_pointsize=30, grid_pointsize=1, output_path=None, output_format='show', output_filename='image'): """Plot the observed image of the ccd data. Set *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below. Parameters ----------- image : ScaledSquarePixelArray The image of the data. plot_origin : True If true, the origin of the data's coordinate system is plotted as a 'x'. image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \ over the immage. """ origin = get_origin(array=image, plot_origin=plot_origin) array_plotters.plot_array( array=image, origin=origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, should_plot_border=should_plot_border, positions=positions, as_subplot=as_subplot, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, title=title, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize, output_path=output_path, output_format=output_format, output_filename=output_filename)
[ "Plot", "the", "observed", "image", "of", "the", "ccd", "data", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/plotters/data_plotters.py#L4-L39
[ "def", "plot_image", "(", "image", ",", "plot_origin", "=", "True", ",", "mask", "=", "None", ",", "extract_array_from_mask", "=", "False", ",", "zoom_around_mask", "=", "False", ",", "should_plot_border", "=", "False", ",", "positions", "=", "None", ",", "as_subplot", "=", "False", ",", "units", "=", "'arcsec'", ",", "kpc_per_arcsec", "=", "None", ",", "figsize", "=", "(", "7", ",", "7", ")", ",", "aspect", "=", "'square'", ",", "cmap", "=", "'jet'", ",", "norm", "=", "'linear'", ",", "norm_min", "=", "None", ",", "norm_max", "=", "None", ",", "linthresh", "=", "0.05", ",", "linscale", "=", "0.01", ",", "cb_ticksize", "=", "10", ",", "cb_fraction", "=", "0.047", ",", "cb_pad", "=", "0.01", ",", "cb_tick_values", "=", "None", ",", "cb_tick_labels", "=", "None", ",", "title", "=", "'Image'", ",", "titlesize", "=", "16", ",", "xlabelsize", "=", "16", ",", "ylabelsize", "=", "16", ",", "xyticksize", "=", "16", ",", "mask_pointsize", "=", "10", ",", "position_pointsize", "=", "30", ",", "grid_pointsize", "=", "1", ",", "output_path", "=", "None", ",", "output_format", "=", "'show'", ",", "output_filename", "=", "'image'", ")", ":", "origin", "=", "get_origin", "(", "array", "=", "image", ",", "plot_origin", "=", "plot_origin", ")", "array_plotters", ".", "plot_array", "(", "array", "=", "image", ",", "origin", "=", "origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "should_plot_border", "=", "should_plot_border", ",", "positions", "=", "positions", ",", "as_subplot", "=", "as_subplot", ",", "units", "=", "units", ",", "kpc_per_arcsec", "=", "kpc_per_arcsec", ",", "figsize", "=", "figsize", ",", "aspect", "=", "aspect", ",", "cmap", "=", "cmap", ",", "norm", "=", "norm", ",", "norm_min", "=", "norm_min", ",", "norm_max", "=", "norm_max", ",", "linthresh", "=", "linthresh", ",", "linscale", "=", "linscale", ",", "cb_ticksize", "=", "cb_ticksize", ",", "cb_fraction", "=", "cb_fraction", ",", "cb_pad", "=", "cb_pad", ",", "cb_tick_values", "=", "cb_tick_values", ",", "cb_tick_labels", "=", "cb_tick_labels", ",", "title", "=", "title", ",", "titlesize", "=", "titlesize", ",", "xlabelsize", "=", "xlabelsize", ",", "ylabelsize", "=", "ylabelsize", ",", "xyticksize", "=", "xyticksize", ",", "mask_pointsize", "=", "mask_pointsize", ",", "position_pointsize", "=", "position_pointsize", ",", "grid_pointsize", "=", "grid_pointsize", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ",", "output_filename", "=", "output_filename", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
plot_ccd_subplot
Plot the ccd data as a sub-plot of all its quantities (e.g. the data, noise-map, PSF, signal-to-noise map, \ etc). See *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below. Parameters ----------- ccd_data : data.CCDData The ccd data, which includes the observed data, noise-map, PSF, signal-to-noise map, etc. plot_origin : bool If true, the origin of the data's coordinate system is plotted as a 'x'. image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \ over the image. ignore_config : bool If *False*, the config file general.ini is used to determine whether the subplot is plotted. If *True*, the \ config file is ignored.
autolens/data/plotters/ccd_plotters.py
def plot_ccd_subplot( ccd_data, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, should_plot_border=False, positions=None, units='arcsec', kpc_per_arcsec=None, figsize=None, aspect='square', cmap='jet', norm='linear', norm_min=None, norm_max=None, linthresh=0.05, linscale=0.01, cb_ticksize=10, cb_fraction=0.047, cb_pad=0.01, cb_tick_values=None, cb_tick_labels=None, titlesize=10, xlabelsize=10, ylabelsize=10, xyticksize=10, mask_pointsize=10, position_pointsize=30, grid_pointsize=1, output_path=None, output_filename='ccd_data', output_format='show'): """Plot the ccd data as a sub-plot of all its quantites (e.g. the data, noise_map-map, PSF, Signal-to_noise-map, \ etc). Set *autolens.data.array.plotters.array_plotters* for a description of all innput parameters not described below. Parameters ----------- ccd_data : data.CCDData The ccd data, which includes the observed data, noise_map-map, PSF, signal-to-noise_map-map, etc. plot_origin : True If true, the origin of the data's coordinate system is plotted as a 'x'. image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \ over the immage. ignore_config : bool If *False*, the config file general.ini is used to determine whether the subpot is plotted. If *True*, the \ config file is ignored. """ rows, columns, figsize_tool = plotter_util.get_subplot_rows_columns_figsize(number_subplots=6) if figsize is None: figsize = figsize_tool plt.figure(figsize=figsize) plt.subplot(rows, columns, 1) plot_image( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, should_plot_border=should_plot_border, positions=positions, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize, output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 2) plot_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 3) plot_psf( ccd_data=ccd_data, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, 
output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 4) plot_signal_to_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 5) plot_absolute_signal_to_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 6) plot_potential_chi_squared_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, output_path=output_path, output_format=output_format) plotter_util.output_subplot_array(output_path=output_path, output_filename=output_filename, output_format=output_format) plt.close()
def plot_ccd_subplot( ccd_data, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, should_plot_border=False, positions=None, units='arcsec', kpc_per_arcsec=None, figsize=None, aspect='square', cmap='jet', norm='linear', norm_min=None, norm_max=None, linthresh=0.05, linscale=0.01, cb_ticksize=10, cb_fraction=0.047, cb_pad=0.01, cb_tick_values=None, cb_tick_labels=None, titlesize=10, xlabelsize=10, ylabelsize=10, xyticksize=10, mask_pointsize=10, position_pointsize=30, grid_pointsize=1, output_path=None, output_filename='ccd_data', output_format='show'): """Plot the ccd data as a sub-plot of all its quantites (e.g. the data, noise_map-map, PSF, Signal-to_noise-map, \ etc). Set *autolens.data.array.plotters.array_plotters* for a description of all innput parameters not described below. Parameters ----------- ccd_data : data.CCDData The ccd data, which includes the observed data, noise_map-map, PSF, signal-to-noise_map-map, etc. plot_origin : True If true, the origin of the data's coordinate system is plotted as a 'x'. image_plane_pix_grid : ndarray or data.array.grid_stacks.PixGrid If an adaptive pixelization whose pixels are formed by tracing pixels from the data, this plots those pixels \ over the immage. ignore_config : bool If *False*, the config file general.ini is used to determine whether the subpot is plotted. If *True*, the \ config file is ignored. """ rows, columns, figsize_tool = plotter_util.get_subplot_rows_columns_figsize(number_subplots=6) if figsize is None: figsize = figsize_tool plt.figure(figsize=figsize) plt.subplot(rows, columns, 1) plot_image( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, should_plot_border=should_plot_border, positions=positions, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, position_pointsize=position_pointsize, grid_pointsize=grid_pointsize, output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 2) plot_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 3) plot_psf( ccd_data=ccd_data, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, 
output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 4) plot_signal_to_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 5) plot_absolute_signal_to_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, output_path=output_path, output_format=output_format) plt.subplot(rows, columns, 6) plot_potential_chi_squared_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, as_subplot=True, units=units, kpc_per_arcsec=kpc_per_arcsec, figsize=figsize, aspect=aspect, cmap=cmap, norm=norm, norm_min=norm_min, norm_max=norm_max, linthresh=linthresh, linscale=linscale, cb_ticksize=cb_ticksize, cb_fraction=cb_fraction, cb_pad=cb_pad, cb_tick_values=cb_tick_values, cb_tick_labels=cb_tick_labels, titlesize=titlesize, xlabelsize=xlabelsize, ylabelsize=ylabelsize, xyticksize=xyticksize, mask_pointsize=mask_pointsize, output_path=output_path, output_format=output_format) plotter_util.output_subplot_array(output_path=output_path, output_filename=output_filename, output_format=output_format) plt.close()
[ "Plot", "the", "ccd", "data", "as", "a", "sub", "-", "plot", "of", "all", "its", "quantites", "(", "e", ".", "g", ".", "the", "data", "noise_map", "-", "map", "PSF", "Signal", "-", "to_noise", "-", "map", "\\", "etc", ")", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/plotters/ccd_plotters.py#L7-L120
[ "def", "plot_ccd_subplot", "(", "ccd_data", ",", "plot_origin", "=", "True", ",", "mask", "=", "None", ",", "extract_array_from_mask", "=", "False", ",", "zoom_around_mask", "=", "False", ",", "should_plot_border", "=", "False", ",", "positions", "=", "None", ",", "units", "=", "'arcsec'", ",", "kpc_per_arcsec", "=", "None", ",", "figsize", "=", "None", ",", "aspect", "=", "'square'", ",", "cmap", "=", "'jet'", ",", "norm", "=", "'linear'", ",", "norm_min", "=", "None", ",", "norm_max", "=", "None", ",", "linthresh", "=", "0.05", ",", "linscale", "=", "0.01", ",", "cb_ticksize", "=", "10", ",", "cb_fraction", "=", "0.047", ",", "cb_pad", "=", "0.01", ",", "cb_tick_values", "=", "None", ",", "cb_tick_labels", "=", "None", ",", "titlesize", "=", "10", ",", "xlabelsize", "=", "10", ",", "ylabelsize", "=", "10", ",", "xyticksize", "=", "10", ",", "mask_pointsize", "=", "10", ",", "position_pointsize", "=", "30", ",", "grid_pointsize", "=", "1", ",", "output_path", "=", "None", ",", "output_filename", "=", "'ccd_data'", ",", "output_format", "=", "'show'", ")", ":", "rows", ",", "columns", ",", "figsize_tool", "=", "plotter_util", ".", "get_subplot_rows_columns_figsize", "(", "number_subplots", "=", "6", ")", "if", "figsize", "is", "None", ":", "figsize", "=", "figsize_tool", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "plt", ".", "subplot", "(", "rows", ",", "columns", ",", "1", ")", "plot_image", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "should_plot_border", "=", "should_plot_border", ",", "positions", "=", "positions", ",", "as_subplot", "=", "True", ",", "units", "=", "units", ",", "kpc_per_arcsec", "=", "kpc_per_arcsec", ",", "figsize", "=", "figsize", ",", "aspect", "=", "aspect", ",", "cmap", "=", "cmap", ",", "norm", "=", "norm", ",", "norm_min", "=", "norm_min", ",", "norm_max", "=", "norm_max", ",", "linthresh", "=", "linthresh", ",", "linscale", "=", "linscale", ",", "cb_ticksize", "=", "cb_ticksize", ",", "cb_fraction", "=", "cb_fraction", ",", "cb_pad", "=", "cb_pad", ",", "cb_tick_values", "=", "cb_tick_values", ",", "cb_tick_labels", "=", "cb_tick_labels", ",", "titlesize", "=", "titlesize", ",", "xlabelsize", "=", "xlabelsize", ",", "ylabelsize", "=", "ylabelsize", ",", "xyticksize", "=", "xyticksize", ",", "mask_pointsize", "=", "mask_pointsize", ",", "position_pointsize", "=", "position_pointsize", ",", "grid_pointsize", "=", "grid_pointsize", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "plt", ".", "subplot", "(", "rows", ",", "columns", ",", "2", ")", "plot_noise_map", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "as_subplot", "=", "True", ",", "units", "=", "units", ",", "kpc_per_arcsec", "=", "kpc_per_arcsec", ",", "figsize", "=", "figsize", ",", "aspect", "=", "aspect", ",", "cmap", "=", "cmap", ",", "norm", "=", "norm", ",", "norm_min", "=", "norm_min", ",", "norm_max", "=", "norm_max", ",", "linthresh", "=", "linthresh", ",", "linscale", "=", "linscale", ",", "cb_ticksize", "=", "cb_ticksize", ",", "cb_fraction", "=", "cb_fraction", ",", "cb_pad", "=", "cb_pad", ",", "cb_tick_values", "=", "cb_tick_values", ",", "cb_tick_labels", "=", "cb_tick_labels", ",", 
"titlesize", "=", "titlesize", ",", "xlabelsize", "=", "xlabelsize", ",", "ylabelsize", "=", "ylabelsize", ",", "xyticksize", "=", "xyticksize", ",", "mask_pointsize", "=", "mask_pointsize", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "plt", ".", "subplot", "(", "rows", ",", "columns", ",", "3", ")", "plot_psf", "(", "ccd_data", "=", "ccd_data", ",", "as_subplot", "=", "True", ",", "units", "=", "units", ",", "kpc_per_arcsec", "=", "kpc_per_arcsec", ",", "figsize", "=", "figsize", ",", "aspect", "=", "aspect", ",", "cmap", "=", "cmap", ",", "norm", "=", "norm", ",", "norm_min", "=", "norm_min", ",", "norm_max", "=", "norm_max", ",", "linthresh", "=", "linthresh", ",", "linscale", "=", "linscale", ",", "cb_ticksize", "=", "cb_ticksize", ",", "cb_fraction", "=", "cb_fraction", ",", "cb_pad", "=", "cb_pad", ",", "cb_tick_values", "=", "cb_tick_values", ",", "cb_tick_labels", "=", "cb_tick_labels", ",", "titlesize", "=", "titlesize", ",", "xlabelsize", "=", "xlabelsize", ",", "ylabelsize", "=", "ylabelsize", ",", "xyticksize", "=", "xyticksize", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "plt", ".", "subplot", "(", "rows", ",", "columns", ",", "4", ")", "plot_signal_to_noise_map", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "as_subplot", "=", "True", ",", "units", "=", "units", ",", "kpc_per_arcsec", "=", "kpc_per_arcsec", ",", "figsize", "=", "figsize", ",", "aspect", "=", "aspect", ",", "cmap", "=", "cmap", ",", "norm", "=", "norm", ",", "norm_min", "=", "norm_min", ",", "norm_max", "=", "norm_max", ",", "linthresh", "=", "linthresh", ",", "linscale", "=", "linscale", ",", "cb_ticksize", "=", "cb_ticksize", ",", "cb_fraction", "=", "cb_fraction", ",", "cb_pad", "=", "cb_pad", ",", "cb_tick_values", "=", "cb_tick_values", ",", "cb_tick_labels", "=", "cb_tick_labels", ",", "titlesize", "=", "titlesize", ",", "xlabelsize", "=", "xlabelsize", ",", "ylabelsize", "=", "ylabelsize", ",", "xyticksize", "=", "xyticksize", ",", "mask_pointsize", "=", "mask_pointsize", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "plt", ".", "subplot", "(", "rows", ",", "columns", ",", "5", ")", "plot_absolute_signal_to_noise_map", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "as_subplot", "=", "True", ",", "units", "=", "units", ",", "kpc_per_arcsec", "=", "kpc_per_arcsec", ",", "figsize", "=", "figsize", ",", "aspect", "=", "aspect", ",", "cmap", "=", "cmap", ",", "norm", "=", "norm", ",", "norm_min", "=", "norm_min", ",", "norm_max", "=", "norm_max", ",", "linthresh", "=", "linthresh", ",", "linscale", "=", "linscale", ",", "cb_ticksize", "=", "cb_ticksize", ",", "cb_fraction", "=", "cb_fraction", ",", "cb_pad", "=", "cb_pad", ",", "cb_tick_values", "=", "cb_tick_values", ",", "cb_tick_labels", "=", "cb_tick_labels", ",", "titlesize", "=", "titlesize", ",", "xlabelsize", "=", "xlabelsize", ",", "ylabelsize", "=", "ylabelsize", ",", "xyticksize", "=", "xyticksize", ",", "mask_pointsize", "=", "mask_pointsize", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "plt", ".", "subplot", "(", "rows", ",", 
"columns", ",", "6", ")", "plot_potential_chi_squared_map", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "as_subplot", "=", "True", ",", "units", "=", "units", ",", "kpc_per_arcsec", "=", "kpc_per_arcsec", ",", "figsize", "=", "figsize", ",", "aspect", "=", "aspect", ",", "cmap", "=", "cmap", ",", "norm", "=", "norm", ",", "norm_min", "=", "norm_min", ",", "norm_max", "=", "norm_max", ",", "linthresh", "=", "linthresh", ",", "linscale", "=", "linscale", ",", "cb_ticksize", "=", "cb_ticksize", ",", "cb_fraction", "=", "cb_fraction", ",", "cb_pad", "=", "cb_pad", ",", "cb_tick_values", "=", "cb_tick_values", ",", "cb_tick_labels", "=", "cb_tick_labels", ",", "titlesize", "=", "titlesize", ",", "xlabelsize", "=", "xlabelsize", ",", "ylabelsize", "=", "ylabelsize", ",", "xyticksize", "=", "xyticksize", ",", "mask_pointsize", "=", "mask_pointsize", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "plotter_util", ".", "output_subplot_array", "(", "output_path", "=", "output_path", ",", "output_filename", "=", "output_filename", ",", "output_format", "=", "output_format", ")", "plt", ".", "close", "(", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
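plot_ccd_subplot lays its six panels onto one figure by calling plt.subplot(rows, columns, index) before each per-quantity plotting call. A bare matplotlib sketch of that layout loop, using random placeholder arrays instead of real ccd data; the titles and settings below are illustrative, not the exact autolens defaults.

import numpy as np
import matplotlib
matplotlib.use("Agg")  # assumption: no display available; render off-screen
import matplotlib.pyplot as plt

panels = {
    "Image": np.random.rand(20, 20),
    "Noise Map": np.random.rand(20, 20),
    "PSF": np.random.rand(5, 5),
    "Signal-to-Noise Map": np.random.rand(20, 20),
    "Absolute S/N Map": np.random.rand(20, 20),
    "Potential Chi-Squared Map": np.random.rand(20, 20),
}

rows, columns = 2, 3
plt.figure(figsize=(12, 8))
for index, (title, array) in enumerate(panels.items(), start=1):
    plt.subplot(rows, columns, index)   # select the next panel in the grid
    plt.imshow(array, cmap="jet")
    plt.title(title, fontsize=10)
    plt.colorbar(fraction=0.047, pad=0.01)

plt.savefig("ccd_data_subplot.png")
plt.close()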
valid
plot_ccd_individual
Plot each attribute of the ccd data as individual figures one by one (e.g. the data, noise-map, PSF, \ signal-to-noise map, etc). See *autolens.data.array.plotters.array_plotters* for a description of all input parameters not described below. Parameters ----------- ccd_data : data.CCDData The ccd data, which includes the observed data, noise-map, PSF, signal-to-noise map, etc. plot_origin : bool If true, the origin of the data's coordinate system is plotted as a 'x'.
autolens/data/plotters/ccd_plotters.py
def plot_ccd_individual( ccd_data, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, should_plot_image=False, should_plot_noise_map=False, should_plot_psf=False, should_plot_signal_to_noise_map=False, should_plot_absolute_signal_to_noise_map=False, should_plot_potential_chi_squared_map=False, units='arcsec', output_path=None, output_format='png'): """Plot each attribute of the ccd data as individual figures one by one (e.g. the data, noise_map-map, PSF, \ Signal-to_noise-map, etc). Set *autolens.data.array.plotters.array_plotters* for a description of all innput parameters not described below. Parameters ----------- ccd_data : data.CCDData The ccd data, which includes the observed data, noise_map-map, PSF, signal-to-noise_map-map, etc. plot_origin : True If true, the origin of the data's coordinate system is plotted as a 'x'. """ if should_plot_image: plot_image( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, positions=positions, units=units, output_path=output_path, output_format=output_format) if should_plot_noise_map: plot_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) if should_plot_psf: plot_psf( ccd_data=ccd_data, plot_origin=plot_origin, output_path=output_path, output_format=output_format) if should_plot_signal_to_noise_map: plot_signal_to_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) if should_plot_absolute_signal_to_noise_map: plot_absolute_signal_to_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) if should_plot_potential_chi_squared_map: plot_potential_chi_squared_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format)
def plot_ccd_individual( ccd_data, plot_origin=True, mask=None, extract_array_from_mask=False, zoom_around_mask=False, positions=None, should_plot_image=False, should_plot_noise_map=False, should_plot_psf=False, should_plot_signal_to_noise_map=False, should_plot_absolute_signal_to_noise_map=False, should_plot_potential_chi_squared_map=False, units='arcsec', output_path=None, output_format='png'): """Plot each attribute of the ccd data as individual figures one by one (e.g. the data, noise_map-map, PSF, \ Signal-to_noise-map, etc). Set *autolens.data.array.plotters.array_plotters* for a description of all innput parameters not described below. Parameters ----------- ccd_data : data.CCDData The ccd data, which includes the observed data, noise_map-map, PSF, signal-to-noise_map-map, etc. plot_origin : True If true, the origin of the data's coordinate system is plotted as a 'x'. """ if should_plot_image: plot_image( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, positions=positions, units=units, output_path=output_path, output_format=output_format) if should_plot_noise_map: plot_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) if should_plot_psf: plot_psf( ccd_data=ccd_data, plot_origin=plot_origin, output_path=output_path, output_format=output_format) if should_plot_signal_to_noise_map: plot_signal_to_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) if should_plot_absolute_signal_to_noise_map: plot_absolute_signal_to_noise_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format) if should_plot_potential_chi_squared_map: plot_potential_chi_squared_map( ccd_data=ccd_data, plot_origin=plot_origin, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, units=units, output_path=output_path, output_format=output_format)
[ "Plot", "each", "attribute", "of", "the", "ccd", "data", "as", "individual", "figures", "one", "by", "one", "(", "e", ".", "g", ".", "the", "data", "noise_map", "-", "map", "PSF", "\\", "Signal", "-", "to_noise", "-", "map", "etc", ")", "." ]
Jammy2211/PyAutoLens
python
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/plotters/ccd_plotters.py#L123-L190
[ "def", "plot_ccd_individual", "(", "ccd_data", ",", "plot_origin", "=", "True", ",", "mask", "=", "None", ",", "extract_array_from_mask", "=", "False", ",", "zoom_around_mask", "=", "False", ",", "positions", "=", "None", ",", "should_plot_image", "=", "False", ",", "should_plot_noise_map", "=", "False", ",", "should_plot_psf", "=", "False", ",", "should_plot_signal_to_noise_map", "=", "False", ",", "should_plot_absolute_signal_to_noise_map", "=", "False", ",", "should_plot_potential_chi_squared_map", "=", "False", ",", "units", "=", "'arcsec'", ",", "output_path", "=", "None", ",", "output_format", "=", "'png'", ")", ":", "if", "should_plot_image", ":", "plot_image", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "positions", "=", "positions", ",", "units", "=", "units", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "if", "should_plot_noise_map", ":", "plot_noise_map", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "units", "=", "units", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "if", "should_plot_psf", ":", "plot_psf", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "if", "should_plot_signal_to_noise_map", ":", "plot_signal_to_noise_map", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "units", "=", "units", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "if", "should_plot_absolute_signal_to_noise_map", ":", "plot_absolute_signal_to_noise_map", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "units", "=", "units", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")", "if", "should_plot_potential_chi_squared_map", ":", "plot_potential_chi_squared_map", "(", "ccd_data", "=", "ccd_data", ",", "plot_origin", "=", "plot_origin", ",", "mask", "=", "mask", ",", "extract_array_from_mask", "=", "extract_array_from_mask", ",", "zoom_around_mask", "=", "zoom_around_mask", ",", "units", "=", "units", ",", "output_path", "=", "output_path", ",", "output_format", "=", "output_format", ")" ]
91e50369c7a9c048c83d217625578b72423cd5a7
valid
norm_and_check
Normalise and check a backend path. Ensure that the requested backend path is specified as a relative path, and resolves to a location under the given source tree. Return an absolute version of the requested path.
pep517/wrappers.py
def norm_and_check(source_tree, requested): """Normalise and check a backend path. Ensure that the requested backend path is specified as a relative path, and resolves to a location under the given source tree. Return an absolute version of the requested path. """ if os.path.isabs(requested): raise ValueError("paths must be relative") abs_source = os.path.abspath(source_tree) abs_requested = os.path.normpath(os.path.join(abs_source, requested)) # We have to use commonprefix for Python 2.7 compatibility. So we # normalise case to avoid problems because commonprefix is a character # based comparison :-( norm_source = os.path.normcase(abs_source) norm_requested = os.path.normcase(abs_requested) if os.path.commonprefix([norm_source, norm_requested]) != norm_source: raise ValueError("paths must be inside source tree") return abs_requested
def norm_and_check(source_tree, requested): """Normalise and check a backend path. Ensure that the requested backend path is specified as a relative path, and resolves to a location under the given source tree. Return an absolute version of the requested path. """ if os.path.isabs(requested): raise ValueError("paths must be relative") abs_source = os.path.abspath(source_tree) abs_requested = os.path.normpath(os.path.join(abs_source, requested)) # We have to use commonprefix for Python 2.7 compatibility. So we # normalise case to avoid problems because commonprefix is a character # based comparison :-( norm_source = os.path.normcase(abs_source) norm_requested = os.path.normcase(abs_requested) if os.path.commonprefix([norm_source, norm_requested]) != norm_source: raise ValueError("paths must be inside source tree") return abs_requested
[ "Normalise", "and", "check", "a", "backend", "path", "." ]
pypa/pep517
python
https://github.com/pypa/pep517/blob/ecd511e8fc85251d0496716939ebbe109e395924/pep517/wrappers.py#L52-L73
[ "def", "norm_and_check", "(", "source_tree", ",", "requested", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "requested", ")", ":", "raise", "ValueError", "(", "\"paths must be relative\"", ")", "abs_source", "=", "os", ".", "path", ".", "abspath", "(", "source_tree", ")", "abs_requested", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "abs_source", ",", "requested", ")", ")", "# We have to use commonprefix for Python 2.7 compatibility. So we", "# normalise case to avoid problems because commonprefix is a character", "# based comparison :-(", "norm_source", "=", "os", ".", "path", ".", "normcase", "(", "abs_source", ")", "norm_requested", "=", "os", ".", "path", ".", "normcase", "(", "abs_requested", ")", "if", "os", ".", "path", ".", "commonprefix", "(", "[", "norm_source", ",", "norm_requested", "]", ")", "!=", "norm_source", ":", "raise", "ValueError", "(", "\"paths must be inside source tree\"", ")", "return", "abs_requested" ]
ecd511e8fc85251d0496716939ebbe109e395924
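A small usage sketch of norm_and_check's contract: a relative path that stays inside the source tree resolves to an absolute path, while a path that escapes the tree is rejected. The function body is repeated so the sketch runs standalone; the directory names are made up.

import os

def norm_and_check(source_tree, requested):
    # Same logic as the wrapper above, repeated so this example is self-contained.
    if os.path.isabs(requested):
        raise ValueError("paths must be relative")
    abs_source = os.path.abspath(source_tree)
    abs_requested = os.path.normpath(os.path.join(abs_source, requested))
    norm_source = os.path.normcase(abs_source)
    norm_requested = os.path.normcase(abs_requested)
    if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
        raise ValueError("paths must be inside source tree")
    return abs_requested

print(norm_and_check("project", "backends"))   # resolves under project/
try:
    norm_and_check("project", "../outside")    # escapes the source tree
except ValueError as error:
    print(error)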
valid
contained_in
Test if a file is located within the given directory.
pep517/_in_process.py
def contained_in(filename, directory): """Test if a file is located within the given directory.""" filename = os.path.normcase(os.path.abspath(filename)) directory = os.path.normcase(os.path.abspath(directory)) return os.path.commonprefix([filename, directory]) == directory
def contained_in(filename, directory): """Test if a file is located within the given directory.""" filename = os.path.normcase(os.path.abspath(filename)) directory = os.path.normcase(os.path.abspath(directory)) return os.path.commonprefix([filename, directory]) == directory
[ "Test", "if", "a", "file", "is", "located", "within", "the", "given", "directory", "." ]
pypa/pep517
python
https://github.com/pypa/pep517/blob/ecd511e8fc85251d0496716939ebbe109e395924/pep517/_in_process.py#L41-L45
[ "def", "contained_in", "(", "filename", ",", "directory", ")", ":", "filename", "=", "os", ".", "path", ".", "normcase", "(", "os", ".", "path", ".", "abspath", "(", "filename", ")", ")", "directory", "=", "os", ".", "path", ".", "normcase", "(", "os", ".", "path", ".", "abspath", "(", "directory", ")", ")", "return", "os", ".", "path", ".", "commonprefix", "(", "[", "filename", ",", "directory", "]", ")", "==", "directory" ]
ecd511e8fc85251d0496716939ebbe109e395924
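A tiny sketch of contained_in's behaviour, including the character-level edge case that the comment in norm_and_check alludes to: because commonprefix compares characters rather than path components, a sibling directory that merely shares a name prefix also passes the check. The paths below are illustrative.

import os

def contained_in(filename, directory):
    filename = os.path.normcase(os.path.abspath(filename))
    directory = os.path.normcase(os.path.abspath(directory))
    return os.path.commonprefix([filename, directory]) == directory

# Typical use: a module inside the directory.
assert contained_in("/srv/backend/hooks.py", "/srv/backend")

# Character-based prefix comparison: a sibling directory sharing the name
# prefix also satisfies the test, even though it is a different directory.
assert contained_in("/srv/backend2/hooks.py", "/srv/backend")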
valid
_build_backend
Find and load the build backend
pep517/_in_process.py
def _build_backend(): """Find and load the build backend""" # Add in-tree backend directories to the front of sys.path. backend_path = os.environ.get('PEP517_BACKEND_PATH') if backend_path: extra_pathitems = backend_path.split(os.pathsep) sys.path[:0] = extra_pathitems ep = os.environ['PEP517_BUILD_BACKEND'] mod_path, _, obj_path = ep.partition(':') try: obj = import_module(mod_path) except ImportError: raise BackendUnavailable(traceback.format_exc()) if backend_path: if not any( contained_in(obj.__file__, path) for path in extra_pathitems ): raise BackendInvalid("Backend was not loaded from backend-path") if obj_path: for path_part in obj_path.split('.'): obj = getattr(obj, path_part) return obj
def _build_backend(): """Find and load the build backend""" # Add in-tree backend directories to the front of sys.path. backend_path = os.environ.get('PEP517_BACKEND_PATH') if backend_path: extra_pathitems = backend_path.split(os.pathsep) sys.path[:0] = extra_pathitems ep = os.environ['PEP517_BUILD_BACKEND'] mod_path, _, obj_path = ep.partition(':') try: obj = import_module(mod_path) except ImportError: raise BackendUnavailable(traceback.format_exc()) if backend_path: if not any( contained_in(obj.__file__, path) for path in extra_pathitems ): raise BackendInvalid("Backend was not loaded from backend-path") if obj_path: for path_part in obj_path.split('.'): obj = getattr(obj, path_part) return obj
[ "Find", "and", "load", "the", "build", "backend" ]
pypa/pep517
python
https://github.com/pypa/pep517/blob/ecd511e8fc85251d0496716939ebbe109e395924/pep517/_in_process.py#L48-L73
[ "def", "_build_backend", "(", ")", ":", "# Add in-tree backend directories to the front of sys.path.", "backend_path", "=", "os", ".", "environ", ".", "get", "(", "'PEP517_BACKEND_PATH'", ")", "if", "backend_path", ":", "extra_pathitems", "=", "backend_path", ".", "split", "(", "os", ".", "pathsep", ")", "sys", ".", "path", "[", ":", "0", "]", "=", "extra_pathitems", "ep", "=", "os", ".", "environ", "[", "'PEP517_BUILD_BACKEND'", "]", "mod_path", ",", "_", ",", "obj_path", "=", "ep", ".", "partition", "(", "':'", ")", "try", ":", "obj", "=", "import_module", "(", "mod_path", ")", "except", "ImportError", ":", "raise", "BackendUnavailable", "(", "traceback", ".", "format_exc", "(", ")", ")", "if", "backend_path", ":", "if", "not", "any", "(", "contained_in", "(", "obj", ".", "__file__", ",", "path", ")", "for", "path", "in", "extra_pathitems", ")", ":", "raise", "BackendInvalid", "(", "\"Backend was not loaded from backend-path\"", ")", "if", "obj_path", ":", "for", "path_part", "in", "obj_path", ".", "split", "(", "'.'", ")", ":", "obj", "=", "getattr", "(", "obj", ",", "path_part", ")", "return", "obj" ]
ecd511e8fc85251d0496716939ebbe109e395924
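_build_backend is driven entirely by environment variables that the parent build frontend is expected to set before this module runs in a child process. A hedged sketch of those variables (the backend string and path are illustrative):

import os
from pep517._in_process import _build_backend

os.environ['PEP517_BUILD_BACKEND'] = 'setuptools.build_meta'   # "module" or "module:object"
os.environ['PEP517_BACKEND_PATH'] = 'backends'                 # optional in-tree backend dirs, os.pathsep-separated

backend = _build_backend()   # imports the module, then resolves the optional ":object" attribute path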
valid
build_sdist
Invoke the mandatory build_sdist hook.
pep517/_in_process.py
def build_sdist(sdist_directory, config_settings):
    """Invoke the mandatory build_sdist hook."""
    backend = _build_backend()
    try:
        return backend.build_sdist(sdist_directory, config_settings)
    except getattr(backend, 'UnsupportedOperation', _DummyException):
        raise GotUnsupportedOperation(traceback.format_exc())
def build_sdist(sdist_directory, config_settings):
    """Invoke the mandatory build_sdist hook."""
    backend = _build_backend()
    try:
        return backend.build_sdist(sdist_directory, config_settings)
    except getattr(backend, 'UnsupportedOperation', _DummyException):
        raise GotUnsupportedOperation(traceback.format_exc())
[ "Invoke", "the", "mandatory", "build_sdist", "hook", "." ]
pypa/pep517
python
https://github.com/pypa/pep517/blob/ecd511e8fc85251d0496716939ebbe109e395924/pep517/_in_process.py#L201-L207
[ "def", "build_sdist", "(", "sdist_directory", ",", "config_settings", ")", ":", "backend", "=", "_build_backend", "(", ")", "try", ":", "return", "backend", ".", "build_sdist", "(", "sdist_directory", ",", "config_settings", ")", "except", "getattr", "(", "backend", ",", "'UnsupportedOperation'", ",", "_DummyException", ")", ":", "raise", "GotUnsupportedOperation", "(", "traceback", ".", "format_exc", "(", ")", ")" ]
ecd511e8fc85251d0496716939ebbe109e395924
valid
API.log_error
Print errors. Stop travis-ci from leaking api keys :param e: The error :return: None
socialreaper/apis.py
def log_error(self, e): """ Print errors. Stop travis-ci from leaking api keys :param e: The error :return: None """ if not environ.get('CI'): self.log_function(e) if hasattr(e, 'response') and hasattr(e.response, 'text'): self.log_function(e.response.text)
def log_error(self, e): """ Print errors. Stop travis-ci from leaking api keys :param e: The error :return: None """ if not environ.get('CI'): self.log_function(e) if hasattr(e, 'response') and hasattr(e.response, 'text'): self.log_function(e.response.text)
[ "Print", "errors", ".", "Stop", "travis", "-", "ci", "from", "leaking", "api", "keys", ":", "param", "e", ":", "The", "error", ":", "return", ":", "None" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/apis.py#L27-L38
[ "def", "log_error", "(", "self", ",", "e", ")", ":", "if", "not", "environ", ".", "get", "(", "'CI'", ")", ":", "self", ".", "log_function", "(", "e", ")", "if", "hasattr", "(", "e", ",", "'response'", ")", "and", "hasattr", "(", "e", ".", "response", ",", "'text'", ")", ":", "self", ".", "log_function", "(", "e", ".", "response", ".", "text", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
valid
API._sleep
Sleep between requests, but don't force asynchronous code to wait :param seconds: The number of seconds to sleep :return: None
socialreaper/apis.py
def _sleep(self, seconds):
    """
    Sleep between requests, but don't force asynchronous code to wait

    :param seconds: The number of seconds to sleep
    :return: None
    """
    for _ in range(int(seconds)):
        if not self.force_stop:
            sleep(1)
def _sleep(self, seconds):
    """
    Sleep between requests, but don't force asynchronous code to wait

    :param seconds: The number of seconds to sleep
    :return: None
    """
    for _ in range(int(seconds)):
        if not self.force_stop:
            sleep(1)
[ "Sleep", "between", "requests", "but", "don", "t", "force", "asynchronous", "code", "to", "wait", ":", "param", "seconds", ":", "The", "number", "of", "seconds", "to", "sleep", ":", "return", ":", "None" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/apis.py#L40-L49
[ "def", "_sleep", "(", "self", ",", "seconds", ")", ":", "for", "_", "in", "range", "(", "int", "(", "seconds", ")", ")", ":", "if", "not", "self", ".", "force_stop", ":", "sleep", "(", "1", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
valid
API.get
An interface for get requests that handles errors more gracefully to prevent data loss
socialreaper/apis.py
def get(self, *args, **kwargs): """ An interface for get requests that handles errors more gracefully to prevent data loss """ try: req_func = self.session.get if self.session else requests.get req = req_func(*args, **kwargs) req.raise_for_status() self.failed_last = False return req except requests.exceptions.RequestException as e: self.log_error(e) for i in range(1, self.num_retries): sleep_time = self.retry_rate * i self.log_function("Retrying in %s seconds" % sleep_time) self._sleep(sleep_time) try: req = requests.get(*args, **kwargs) req.raise_for_status() self.log_function("New request successful") return req except requests.exceptions.RequestException: self.log_function("New request failed") # Allows for the api to ignore one potentially bad request if not self.failed_last: self.failed_last = True raise ApiError(e) else: raise FatalApiError(e)
def get(self, *args, **kwargs): """ An interface for get requests that handles errors more gracefully to prevent data loss """ try: req_func = self.session.get if self.session else requests.get req = req_func(*args, **kwargs) req.raise_for_status() self.failed_last = False return req except requests.exceptions.RequestException as e: self.log_error(e) for i in range(1, self.num_retries): sleep_time = self.retry_rate * i self.log_function("Retrying in %s seconds" % sleep_time) self._sleep(sleep_time) try: req = requests.get(*args, **kwargs) req.raise_for_status() self.log_function("New request successful") return req except requests.exceptions.RequestException: self.log_function("New request failed") # Allows for the api to ignore one potentially bad request if not self.failed_last: self.failed_last = True raise ApiError(e) else: raise FatalApiError(e)
[ "An", "interface", "for", "get", "requests", "that", "handles", "errors", "more", "gracefully", "to", "prevent", "data", "loss" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/apis.py#L57-L90
[ "def", "get", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "req_func", "=", "self", ".", "session", ".", "get", "if", "self", ".", "session", "else", "requests", ".", "get", "req", "=", "req_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "req", ".", "raise_for_status", "(", ")", "self", ".", "failed_last", "=", "False", "return", "req", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "e", ":", "self", ".", "log_error", "(", "e", ")", "for", "i", "in", "range", "(", "1", ",", "self", ".", "num_retries", ")", ":", "sleep_time", "=", "self", ".", "retry_rate", "*", "i", "self", ".", "log_function", "(", "\"Retrying in %s seconds\"", "%", "sleep_time", ")", "self", ".", "_sleep", "(", "sleep_time", ")", "try", ":", "req", "=", "requests", ".", "get", "(", "*", "args", ",", "*", "*", "kwargs", ")", "req", ".", "raise_for_status", "(", ")", "self", ".", "log_function", "(", "\"New request successful\"", ")", "return", "req", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "self", ".", "log_function", "(", "\"New request failed\"", ")", "# Allows for the api to ignore one potentially bad request\r", "if", "not", "self", ".", "failed_last", ":", "self", ".", "failed_last", "=", "True", "raise", "ApiError", "(", "e", ")", "else", ":", "raise", "FatalApiError", "(", "e", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
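A sketch of how the retry wrapper is used in place of a bare requests.get, assuming an already-constructed API subclass instance (here called api) and that ApiError is accessible alongside it; the endpoint is made up:

try:
    resp = api.get("https://example.com/v1/items", params={"page": 1})
    items = resp.json()
except ApiError:
    # The first exhausted retry cycle raises ApiError; a second consecutive
    # failure raises FatalApiError instead.
    items = []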
valid
Facebook.node_edge
:param node: :param edge: :param fields: :param params: :return:
socialreaper/apis.py
def node_edge(self, node, edge, fields=None, params=None): """ :param node: :param edge: :param fields: :param params: :return: """ if fields: fields = ",".join(fields) parameters = {"fields": fields, "access_token": self.key} parameters = self.merge_params(parameters, params) return self.api_call('%s/%s' % (node, edge), parameters)
def node_edge(self, node, edge, fields=None, params=None): """ :param node: :param edge: :param fields: :param params: :return: """ if fields: fields = ",".join(fields) parameters = {"fields": fields, "access_token": self.key} parameters = self.merge_params(parameters, params) return self.api_call('%s/%s' % (node, edge), parameters)
[ ":", "param", "node", ":", ":", "param", "edge", ":", ":", "param", "fields", ":", ":", "param", "params", ":", ":", "return", ":" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/apis.py#L410-L427
[ "def", "node_edge", "(", "self", ",", "node", ",", "edge", ",", "fields", "=", "None", ",", "params", "=", "None", ")", ":", "if", "fields", ":", "fields", "=", "\",\"", ".", "join", "(", "fields", ")", "parameters", "=", "{", "\"fields\"", ":", "fields", ",", "\"access_token\"", ":", "self", ".", "key", "}", "parameters", "=", "self", ".", "merge_params", "(", "parameters", ",", "params", ")", "return", "self", ".", "api_call", "(", "'%s/%s'", "%", "(", "node", ",", "edge", ")", ",", "parameters", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
valid
Facebook.post
:param post_id: :param fields: :param params: :return:
socialreaper/apis.py
def post(self, post_id, fields=None, **params): """ :param post_id: :param fields: :param params: :return: """ if fields: fields = ",".join(fields) parameters = {"fields": fields, "access_token": self.key} parameters = self.merge_params(parameters, params) return self.api_call('%s' % post_id, parameters)
def post(self, post_id, fields=None, **params): """ :param post_id: :param fields: :param params: :return: """ if fields: fields = ",".join(fields) parameters = {"fields": fields, "access_token": self.key} parameters = self.merge_params(parameters, params) return self.api_call('%s' % post_id, parameters)
[ ":", "param", "post_id", ":", ":", "param", "fields", ":", ":", "param", "params", ":", ":", "return", ":" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/apis.py#L429-L445
[ "def", "post", "(", "self", ",", "post_id", ",", "fields", "=", "None", ",", "*", "*", "params", ")", ":", "if", "fields", ":", "fields", "=", "\",\"", ".", "join", "(", "fields", ")", "parameters", "=", "{", "\"fields\"", ":", "fields", ",", "\"access_token\"", ":", "self", ".", "key", "}", "parameters", "=", "self", ".", "merge_params", "(", "parameters", ",", "params", ")", "return", "self", ".", "api_call", "(", "'%s'", "%", "post_id", ",", "parameters", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
valid
Facebook.page_posts
:param page_id: :param after: :param post_type: Can be 'posts', 'feed', 'tagged', 'promotable_posts' :param include_hidden: :param fields: :param params: :return:
socialreaper/apis.py
def page_posts(self, page_id, after='', post_type="posts", include_hidden=False, fields=None, **params): """ :param page_id: :param after: :param post_type: Can be 'posts', 'feed', 'tagged', 'promotable_posts' :param include_hidden: :param fields: :param params: :return: """ if fields: fields = ",".join(fields) parameters = {"access_token": self.key, "after": after, "fields": fields, "include_hidden": include_hidden} parameters = self.merge_params(parameters, params) return self.api_call('%s/%s' % (page_id, post_type), parameters)
def page_posts(self, page_id, after='', post_type="posts", include_hidden=False, fields=None, **params): """ :param page_id: :param after: :param post_type: Can be 'posts', 'feed', 'tagged', 'promotable_posts' :param include_hidden: :param fields: :param params: :return: """ if fields: fields = ",".join(fields) parameters = {"access_token": self.key, "after": after, "fields": fields, "include_hidden": include_hidden} parameters = self.merge_params(parameters, params) return self.api_call('%s/%s' % (page_id, post_type), parameters)
[ ":", "param", "page_id", ":", ":", "param", "after", ":", ":", "param", "post_type", ":", "Can", "be", "posts", "feed", "tagged", "promotable_posts", ":", "param", "include_hidden", ":", ":", "param", "fields", ":", ":", "param", "params", ":", ":", "return", ":" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/apis.py#L447-L469
[ "def", "page_posts", "(", "self", ",", "page_id", ",", "after", "=", "''", ",", "post_type", "=", "\"posts\"", ",", "include_hidden", "=", "False", ",", "fields", "=", "None", ",", "*", "*", "params", ")", ":", "if", "fields", ":", "fields", "=", "\",\"", ".", "join", "(", "fields", ")", "parameters", "=", "{", "\"access_token\"", ":", "self", ".", "key", ",", "\"after\"", ":", "after", ",", "\"fields\"", ":", "fields", ",", "\"include_hidden\"", ":", "include_hidden", "}", "parameters", "=", "self", ".", "merge_params", "(", "parameters", ",", "params", ")", "return", "self", ".", "api_call", "(", "'%s/%s'", "%", "(", "page_id", ",", "post_type", ")", ",", "parameters", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
valid
Facebook.post_comments
:param post_id: :param after: :param order: Can be 'ranked', 'chronological', 'reverse_chronological' :param filter: Can be 'stream', 'toplevel' :param fields: Can be 'id', 'application', 'attachment', 'can_comment', 'can_remove', 'can_hide', 'can_like', 'can_reply_privately', 'comments', 'comment_count', 'created_time', 'from', 'likes', 'like_count', 'live_broadcast_timestamp', 'message', 'message_tags', 'object', 'parent', 'private_reply_conversation', 'user_likes' :param params: :return:
socialreaper/apis.py
def post_comments(self, post_id, after='', order="chronological", filter="stream", fields=None, **params): """ :param post_id: :param after: :param order: Can be 'ranked', 'chronological', 'reverse_chronological' :param filter: Can be 'stream', 'toplevel' :param fields: Can be 'id', 'application', 'attachment', 'can_comment', 'can_remove', 'can_hide', 'can_like', 'can_reply_privately', 'comments', 'comment_count', 'created_time', 'from', 'likes', 'like_count', 'live_broadcast_timestamp', 'message', 'message_tags', 'object', 'parent', 'private_reply_conversation', 'user_likes' :param params: :return: """ if fields: fields = ",".join(fields) parameters = {"access_token": self.key, "after": after, "order": order, "fields": fields, "filter": filter} parameters = self.merge_params(parameters, params) return self.api_call('%s/comments' % post_id, parameters)
def post_comments(self, post_id, after='', order="chronological", filter="stream", fields=None, **params): """ :param post_id: :param after: :param order: Can be 'ranked', 'chronological', 'reverse_chronological' :param filter: Can be 'stream', 'toplevel' :param fields: Can be 'id', 'application', 'attachment', 'can_comment', 'can_remove', 'can_hide', 'can_like', 'can_reply_privately', 'comments', 'comment_count', 'created_time', 'from', 'likes', 'like_count', 'live_broadcast_timestamp', 'message', 'message_tags', 'object', 'parent', 'private_reply_conversation', 'user_likes' :param params: :return: """ if fields: fields = ",".join(fields) parameters = {"access_token": self.key, "after": after, "order": order, "fields": fields, "filter": filter} parameters = self.merge_params(parameters, params) return self.api_call('%s/comments' % post_id, parameters)
[ ":", "param", "post_id", ":", ":", "param", "after", ":", ":", "param", "order", ":", "Can", "be", "ranked", "chronological", "reverse_chronological", ":", "param", "filter", ":", "Can", "be", "stream", "toplevel", ":", "param", "fields", ":", "Can", "be", "id", "application", "attachment", "can_comment", "can_remove", "can_hide", "can_like", "can_reply_privately", "comments", "comment_count", "created_time", "from", "likes", "like_count", "live_broadcast_timestamp", "message", "message_tags", "object", "parent", "private_reply_conversation", "user_likes", ":", "param", "params", ":", ":", "return", ":" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/apis.py#L471-L498
[ "def", "post_comments", "(", "self", ",", "post_id", ",", "after", "=", "''", ",", "order", "=", "\"chronological\"", ",", "filter", "=", "\"stream\"", ",", "fields", "=", "None", ",", "*", "*", "params", ")", ":", "if", "fields", ":", "fields", "=", "\",\"", ".", "join", "(", "fields", ")", "parameters", "=", "{", "\"access_token\"", ":", "self", ".", "key", ",", "\"after\"", ":", "after", ",", "\"order\"", ":", "order", ",", "\"fields\"", ":", "fields", ",", "\"filter\"", ":", "filter", "}", "parameters", "=", "self", ".", "merge_params", "(", "parameters", ",", "params", ")", "return", "self", ".", "api_call", "(", "'%s/comments'", "%", "post_id", ",", "parameters", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
valid
flatten
Turn a nested dictionary into a flattened dictionary :param dictionary: The dictionary to flatten :param parent_key: The string to prepend to dictionary's keys :param separator: The string used to separate flattened keys :return: A flattened dictionary
socialreaper/tools.py
def flatten(dictionary, parent_key=False, separator='.'): """ Turn a nested dictionary into a flattened dictionary :param dictionary: The dictionary to flatten :param parent_key: The string to prepend to dictionary's keys :param separator: The string used to separate flattened keys :return: A flattened dictionary """ items = [] for key, value in dictionary.items(): new_key = str(parent_key) + separator + key if parent_key else key if isinstance(value, collections.MutableMapping): items.extend(flatten(value, new_key, separator).items()) elif isinstance(value, list): for k, v in enumerate(value): items.extend(flatten({str(k): v}, new_key).items()) else: items.append((new_key, value)) return dict(items)
def flatten(dictionary, parent_key=False, separator='.'): """ Turn a nested dictionary into a flattened dictionary :param dictionary: The dictionary to flatten :param parent_key: The string to prepend to dictionary's keys :param separator: The string used to separate flattened keys :return: A flattened dictionary """ items = [] for key, value in dictionary.items(): new_key = str(parent_key) + separator + key if parent_key else key if isinstance(value, collections.MutableMapping): items.extend(flatten(value, new_key, separator).items()) elif isinstance(value, list): for k, v in enumerate(value): items.extend(flatten({str(k): v}, new_key).items()) else: items.append((new_key, value)) return dict(items)
[ "Turn", "a", "nested", "dictionary", "into", "a", "flattened", "dictionary", ":", "param", "dictionary", ":", "The", "dictionary", "to", "flatten", ":", "param", "parent_key", ":", "The", "string", "to", "prepend", "to", "dictionary", "s", "keys", ":", "param", "separator", ":", "The", "string", "used", "to", "separate", "flattened", "keys", ":", "return", ":", "A", "flattened", "dictionary" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L8-L28
[ "def", "flatten", "(", "dictionary", ",", "parent_key", "=", "False", ",", "separator", "=", "'.'", ")", ":", "items", "=", "[", "]", "for", "key", ",", "value", "in", "dictionary", ".", "items", "(", ")", ":", "new_key", "=", "str", "(", "parent_key", ")", "+", "separator", "+", "key", "if", "parent_key", "else", "key", "if", "isinstance", "(", "value", ",", "collections", ".", "MutableMapping", ")", ":", "items", ".", "extend", "(", "flatten", "(", "value", ",", "new_key", ",", "separator", ")", ".", "items", "(", ")", ")", "elif", "isinstance", "(", "value", ",", "list", ")", ":", "for", "k", ",", "v", "in", "enumerate", "(", "value", ")", ":", "items", ".", "extend", "(", "flatten", "(", "{", "str", "(", "k", ")", ":", "v", "}", ",", "new_key", ")", ".", "items", "(", ")", ")", "else", ":", "items", ".", "append", "(", "(", "new_key", ",", "value", ")", ")", "return", "dict", "(", "items", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
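A small worked example of flatten with invented values: nested dicts get dotted keys and list items use their index as the key segment.

from socialreaper.tools import flatten

flatten({"id": 7, "user": {"name": "ada"}, "tags": ["a", "b"]})
# {'id': 7, 'user.name': 'ada', 'tags.0': 'a', 'tags.1': 'b'}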
valid
fill_gaps
Fill gaps in a list of dictionaries. Add empty keys to dictionaries in the list that don't contain other entries' keys :param list_dicts: A list of dictionaries :return: A list of field names, a list of dictionaries with identical keys
socialreaper/tools.py
def fill_gaps(list_dicts): """ Fill gaps in a list of dictionaries. Add empty keys to dictionaries in the list that don't contain other entries' keys :param list_dicts: A list of dictionaries :return: A list of field names, a list of dictionaries with identical keys """ field_names = [] # != set bc. preserving order is better for output for datum in list_dicts: for key in datum.keys(): if key not in field_names: field_names.append(key) for datum in list_dicts: for key in field_names: if key not in datum: datum[key] = '' return list(field_names), list_dicts
def fill_gaps(list_dicts): """ Fill gaps in a list of dictionaries. Add empty keys to dictionaries in the list that don't contain other entries' keys :param list_dicts: A list of dictionaries :return: A list of field names, a list of dictionaries with identical keys """ field_names = [] # != set bc. preserving order is better for output for datum in list_dicts: for key in datum.keys(): if key not in field_names: field_names.append(key) for datum in list_dicts: for key in field_names: if key not in datum: datum[key] = '' return list(field_names), list_dicts
[ "Fill", "gaps", "in", "a", "list", "of", "dictionaries", ".", "Add", "empty", "keys", "to", "dictionaries", "in", "the", "list", "that", "don", "t", "contain", "other", "entries", "keys", ":", "param", "list_dicts", ":", "A", "list", "of", "dictionaries", ":", "return", ":", "A", "list", "of", "field", "names", "a", "list", "of", "dictionaries", "with", "identical", "keys" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L31-L49
[ "def", "fill_gaps", "(", "list_dicts", ")", ":", "field_names", "=", "[", "]", "# != set bc. preserving order is better for output\r", "for", "datum", "in", "list_dicts", ":", "for", "key", "in", "datum", ".", "keys", "(", ")", ":", "if", "key", "not", "in", "field_names", ":", "field_names", ".", "append", "(", "key", ")", "for", "datum", "in", "list_dicts", ":", "for", "key", "in", "field_names", ":", "if", "key", "not", "in", "datum", ":", "datum", "[", "key", "]", "=", "''", "return", "list", "(", "field_names", ")", ",", "list_dicts" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
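fill_gaps pads every dict with the union of keys seen so far, which is what lets csv.DictWriter handle rows with different fields. A sketch with toy data:

from socialreaper.tools import fill_gaps

fields, rows = fill_gaps([{"id": 1, "msg": "hi"}, {"id": 2, "likes": 3}])
# fields == ['id', 'msg', 'likes']
# rows   == [{'id': 1, 'msg': 'hi', 'likes': ''}, {'id': 2, 'likes': 3, 'msg': ''}]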
valid
to_csv
DEPRECATED Write a list of dicts to a csv file :param data: List of dicts :param field_names: The list of column names :param filename: The name of the file :param overwrite: Overwrite the file if it exists :param write_headers: Write the headers to the csv file :param append: Write new rows if the file exists :param flat: Flatten the dictionary before saving :param primary_fields: The first columns of the csv file :param sort_fields: Sort the field names alphabetically :return: None
socialreaper/tools.py
def to_csv(data, field_names=None, filename='data.csv', overwrite=True, write_headers=True, append=False, flat=True, primary_fields=None, sort_fields=True): """ DEPRECATED Write a list of dicts to a csv file :param data: List of dicts :param field_names: The list column names :param filename: The name of the file :param overwrite: Overwrite the file if exists :param write_headers: Write the headers to the csv file :param append: Write new rows if the file exists :param flat: Flatten the dictionary before saving :param primary_fields: The first columns of the csv file :param sort_fields: Sort the field names alphabetically :return: None """ # Don't overwrite if not specified if not overwrite and path.isfile(filename): raise FileExistsError('The file already exists') # Replace file if append not specified write_type = 'w' if not append else 'a' # Flatten if flat is specified, or there are no predefined field names if flat or not field_names: data = [flatten(datum) for datum in data] # Fill in gaps between dicts with empty string if not field_names: field_names, data = fill_gaps(data) # Sort fields if specified if sort_fields: field_names.sort() # If there are primary fields, move the field names to the front and sort # based on first field if primary_fields: for key in primary_fields[::-1]: field_names.insert(0, field_names.pop(field_names.index(key))) data = sorted(data, key=lambda k: k[field_names[0]], reverse=True) # Write the file with open(filename, write_type, encoding='utf-8') as f: writer = csv.DictWriter(f, fieldnames=field_names, lineterminator='\n') if not append or write_headers: writer.writeheader() # Write rows containing fields in field names for datum in data: for key in list(datum.keys()): if key not in field_names: del datum[key] elif type(datum[key]) is str: datum[key] = datum[key].strip() datum[key] = str(datum[key]) writer.writerow(datum)
def to_csv(data, field_names=None, filename='data.csv', overwrite=True, write_headers=True, append=False, flat=True, primary_fields=None, sort_fields=True): """ DEPRECATED Write a list of dicts to a csv file :param data: List of dicts :param field_names: The list column names :param filename: The name of the file :param overwrite: Overwrite the file if exists :param write_headers: Write the headers to the csv file :param append: Write new rows if the file exists :param flat: Flatten the dictionary before saving :param primary_fields: The first columns of the csv file :param sort_fields: Sort the field names alphabetically :return: None """ # Don't overwrite if not specified if not overwrite and path.isfile(filename): raise FileExistsError('The file already exists') # Replace file if append not specified write_type = 'w' if not append else 'a' # Flatten if flat is specified, or there are no predefined field names if flat or not field_names: data = [flatten(datum) for datum in data] # Fill in gaps between dicts with empty string if not field_names: field_names, data = fill_gaps(data) # Sort fields if specified if sort_fields: field_names.sort() # If there are primary fields, move the field names to the front and sort # based on first field if primary_fields: for key in primary_fields[::-1]: field_names.insert(0, field_names.pop(field_names.index(key))) data = sorted(data, key=lambda k: k[field_names[0]], reverse=True) # Write the file with open(filename, write_type, encoding='utf-8') as f: writer = csv.DictWriter(f, fieldnames=field_names, lineterminator='\n') if not append or write_headers: writer.writeheader() # Write rows containing fields in field names for datum in data: for key in list(datum.keys()): if key not in field_names: del datum[key] elif type(datum[key]) is str: datum[key] = datum[key].strip() datum[key] = str(datum[key]) writer.writerow(datum)
[ "DEPRECATED", "Write", "a", "list", "of", "dicts", "to", "a", "csv", "file", ":", "param", "data", ":", "List", "of", "dicts", ":", "param", "field_names", ":", "The", "list", "column", "names", ":", "param", "filename", ":", "The", "name", "of", "the", "file", ":", "param", "overwrite", ":", "Overwrite", "the", "file", "if", "exists", ":", "param", "write_headers", ":", "Write", "the", "headers", "to", "the", "csv", "file", ":", "param", "append", ":", "Write", "new", "rows", "if", "the", "file", "exists", ":", "param", "flat", ":", "Flatten", "the", "dictionary", "before", "saving", ":", "param", "primary_fields", ":", "The", "first", "columns", "of", "the", "csv", "file", ":", "param", "sort_fields", ":", "Sort", "the", "field", "names", "alphabetically", ":", "return", ":", "None" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L132-L194
[ "def", "to_csv", "(", "data", ",", "field_names", "=", "None", ",", "filename", "=", "'data.csv'", ",", "overwrite", "=", "True", ",", "write_headers", "=", "True", ",", "append", "=", "False", ",", "flat", "=", "True", ",", "primary_fields", "=", "None", ",", "sort_fields", "=", "True", ")", ":", "# Don't overwrite if not specified\r", "if", "not", "overwrite", "and", "path", ".", "isfile", "(", "filename", ")", ":", "raise", "FileExistsError", "(", "'The file already exists'", ")", "# Replace file if append not specified\r", "write_type", "=", "'w'", "if", "not", "append", "else", "'a'", "# Flatten if flat is specified, or there are no predefined field names\r", "if", "flat", "or", "not", "field_names", ":", "data", "=", "[", "flatten", "(", "datum", ")", "for", "datum", "in", "data", "]", "# Fill in gaps between dicts with empty string\r", "if", "not", "field_names", ":", "field_names", ",", "data", "=", "fill_gaps", "(", "data", ")", "# Sort fields if specified\r", "if", "sort_fields", ":", "field_names", ".", "sort", "(", ")", "# If there are primary fields, move the field names to the front and sort\r", "# based on first field\r", "if", "primary_fields", ":", "for", "key", "in", "primary_fields", "[", ":", ":", "-", "1", "]", ":", "field_names", ".", "insert", "(", "0", ",", "field_names", ".", "pop", "(", "field_names", ".", "index", "(", "key", ")", ")", ")", "data", "=", "sorted", "(", "data", ",", "key", "=", "lambda", "k", ":", "k", "[", "field_names", "[", "0", "]", "]", ",", "reverse", "=", "True", ")", "# Write the file\r", "with", "open", "(", "filename", ",", "write_type", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "writer", "=", "csv", ".", "DictWriter", "(", "f", ",", "fieldnames", "=", "field_names", ",", "lineterminator", "=", "'\\n'", ")", "if", "not", "append", "or", "write_headers", ":", "writer", ".", "writeheader", "(", ")", "# Write rows containing fields in field names\r", "for", "datum", "in", "data", ":", "for", "key", "in", "list", "(", "datum", ".", "keys", "(", ")", ")", ":", "if", "key", "not", "in", "field_names", ":", "del", "datum", "[", "key", "]", "elif", "type", "(", "datum", "[", "key", "]", ")", "is", "str", ":", "datum", "[", "key", "]", "=", "datum", "[", "key", "]", ".", "strip", "(", ")", "datum", "[", "key", "]", "=", "str", "(", "datum", "[", "key", "]", ")", "writer", ".", "writerow", "(", "datum", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
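Even though it is marked DEPRECATED, a usage sketch of to_csv with invented data shows the intended call pattern; the filename, records, and field choices are assumptions:

from socialreaper.tools import to_csv

posts = [
    {"id": "1", "message": "hello", "from": {"name": "ada"}},
    {"id": "2", "shares": 4},
]
# Flattens each record, fills missing columns with '', puts "id" first,
# and writes posts.csv with a header row.
to_csv(posts, filename="posts.csv", primary_fields=["id"])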
valid
to_json
Write an object to a json file :param data: The object :param filename: The name of the file :param indent: The indentation of the file :return: None
socialreaper/tools.py
def to_json(data, filename='data.json', indent=4):
    """
    Write an object to a json file

    :param data: The object
    :param filename: The name of the file
    :param indent: The indentation of the file
    :return: None
    """
    with open(filename, 'w') as f:
        f.write(json.dumps(data, indent=indent))
def to_json(data, filename='data.json', indent=4):
    """
    Write an object to a json file

    :param data: The object
    :param filename: The name of the file
    :param indent: The indentation of the file
    :return: None
    """
    with open(filename, 'w') as f:
        f.write(json.dumps(data, indent=indent))
[ "Write", "an", "object", "to", "a", "json", "file", ":", "param", "data", ":", "The", "object", ":", "param", "filename", ":", "The", "name", "of", "the", "file", ":", "param", "indent", ":", "The", "indentation", "of", "the", "file", ":", "return", ":", "None" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L197-L208
[ "def", "to_json", "(", "data", ",", "filename", "=", "'data.json'", ",", "indent", "=", "4", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "data", ",", "indent", "=", "indent", ")", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
valid
save_file
Download and save a file at path :param filename: The name of the file :param source: The location of the resource online :param folder: The directory the file will be saved in :return: None
socialreaper/tools.py
def save_file(filename, source, folder="Downloads"): """ Download and save a file at path :param filename: The name of the file :param source: The location of the resource online :param folder: The directory the file will be saved in :return: None """ r = requests.get(source, stream=True) if r.status_code == 200: if not path.isdir(folder): makedirs(folder, exist_ok=True) with open("%s/%s" % (folder, filename), 'wb') as f: for chunk in r: f.write(chunk)
def save_file(filename, source, folder="Downloads"): """ Download and save a file at path :param filename: The name of the file :param source: The location of the resource online :param folder: The directory the file will be saved in :return: None """ r = requests.get(source, stream=True) if r.status_code == 200: if not path.isdir(folder): makedirs(folder, exist_ok=True) with open("%s/%s" % (folder, filename), 'wb') as f: for chunk in r: f.write(chunk)
[ "Download", "and", "save", "a", "file", "at", "path", ":", "param", "filename", ":", "The", "name", "of", "the", "file", ":", "param", "source", ":", "The", "location", "of", "the", "resource", "online", ":", "param", "folder", ":", "The", "directory", "the", "file", "will", "be", "saved", "in", ":", "return", ":", "None" ]
ScriptSmith/socialreaper
python
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L211-L227
[ "def", "save_file", "(", "filename", ",", "source", ",", "folder", "=", "\"Downloads\"", ")", ":", "r", "=", "requests", ".", "get", "(", "source", ",", "stream", "=", "True", ")", "if", "r", ".", "status_code", "==", "200", ":", "if", "not", "path", ".", "isdir", "(", "folder", ")", ":", "makedirs", "(", "folder", ",", "exist_ok", "=", "True", ")", "with", "open", "(", "\"%s/%s\"", "%", "(", "folder", ",", "filename", ")", ",", "'wb'", ")", "as", "f", ":", "for", "chunk", "in", "r", ":", "f", ".", "write", "(", "chunk", ")" ]
87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da
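save_file streams the response to disk in chunks and creates the target folder on demand; a sketch with a placeholder URL (not a real resource). Note that it silently does nothing unless the server answers 200:

from socialreaper.tools import save_file

save_file("photo.jpg", "https://example.com/media/photo.jpg", folder="Downloads/images")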
valid
convert_frames_to_video
Try to convert a tar file containing a sequence of frames saved by the meshcat viewer into a single video file. This relies on having `ffmpeg` installed on your system.
src/meshcat/animation.py
def convert_frames_to_video(tar_file_path, output_path="output.mp4", framerate=60, overwrite=False): """ Try to convert a tar file containing a sequence of frames saved by the meshcat viewer into a single video file. This relies on having `ffmpeg` installed on your system. """ output_path = os.path.abspath(output_path) if os.path.isfile(output_path) and not overwrite: raise ValueError("The output path {:s} already exists. To overwrite that file, you can pass overwrite=True to this function.".format(output_path)) with tempfile.TemporaryDirectory() as tmp_dir: with tarfile.open(tar_file_path) as tar: tar.extractall(tmp_dir) args = ["ffmpeg", "-r", str(framerate), "-i", r"%07d.png", "-vcodec", "libx264", "-preset", "slow", "-crf", "18"] if overwrite: args.append("-y") args.append(output_path) try: subprocess.check_call(args, cwd=tmp_dir) except subprocess.CalledProcessError as e: print(""" Could not call `ffmpeg` to convert your frames into a video. If you want to convert the frames manually, you can extract the .tar archive into a directory, cd to that directory, and run: ffmpeg -r 60 -i %07d.png \\\n\t -vcodec libx264 \\\n\t -preset slow \\\n\t -crf 18 \\\n\t output.mp4 """) raise print("Saved output as {:s}".format(output_path)) return output_path
def convert_frames_to_video(tar_file_path, output_path="output.mp4", framerate=60, overwrite=False): """ Try to convert a tar file containing a sequence of frames saved by the meshcat viewer into a single video file. This relies on having `ffmpeg` installed on your system. """ output_path = os.path.abspath(output_path) if os.path.isfile(output_path) and not overwrite: raise ValueError("The output path {:s} already exists. To overwrite that file, you can pass overwrite=True to this function.".format(output_path)) with tempfile.TemporaryDirectory() as tmp_dir: with tarfile.open(tar_file_path) as tar: tar.extractall(tmp_dir) args = ["ffmpeg", "-r", str(framerate), "-i", r"%07d.png", "-vcodec", "libx264", "-preset", "slow", "-crf", "18"] if overwrite: args.append("-y") args.append(output_path) try: subprocess.check_call(args, cwd=tmp_dir) except subprocess.CalledProcessError as e: print(""" Could not call `ffmpeg` to convert your frames into a video. If you want to convert the frames manually, you can extract the .tar archive into a directory, cd to that directory, and run: ffmpeg -r 60 -i %07d.png \\\n\t -vcodec libx264 \\\n\t -preset slow \\\n\t -crf 18 \\\n\t output.mp4 """) raise print("Saved output as {:s}".format(output_path)) return output_path
[ "Try", "to", "convert", "a", "tar", "file", "containing", "a", "sequence", "of", "frames", "saved", "by", "the", "meshcat", "viewer", "into", "a", "single", "video", "file", "." ]
rdeits/meshcat-python
python
https://github.com/rdeits/meshcat-python/blob/aa3865143120f5ace8e62aab71d825e33674d277/src/meshcat/animation.py#L132-L165
[ "def", "convert_frames_to_video", "(", "tar_file_path", ",", "output_path", "=", "\"output.mp4\"", ",", "framerate", "=", "60", ",", "overwrite", "=", "False", ")", ":", "output_path", "=", "os", ".", "path", ".", "abspath", "(", "output_path", ")", "if", "os", ".", "path", ".", "isfile", "(", "output_path", ")", "and", "not", "overwrite", ":", "raise", "ValueError", "(", "\"The output path {:s} already exists. To overwrite that file, you can pass overwrite=True to this function.\"", ".", "format", "(", "output_path", ")", ")", "with", "tempfile", ".", "TemporaryDirectory", "(", ")", "as", "tmp_dir", ":", "with", "tarfile", ".", "open", "(", "tar_file_path", ")", "as", "tar", ":", "tar", ".", "extractall", "(", "tmp_dir", ")", "args", "=", "[", "\"ffmpeg\"", ",", "\"-r\"", ",", "str", "(", "framerate", ")", ",", "\"-i\"", ",", "r\"%07d.png\"", ",", "\"-vcodec\"", ",", "\"libx264\"", ",", "\"-preset\"", ",", "\"slow\"", ",", "\"-crf\"", ",", "\"18\"", "]", "if", "overwrite", ":", "args", ".", "append", "(", "\"-y\"", ")", "args", ".", "append", "(", "output_path", ")", "try", ":", "subprocess", ".", "check_call", "(", "args", ",", "cwd", "=", "tmp_dir", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "print", "(", "\"\"\"\nCould not call `ffmpeg` to convert your frames into a video.\nIf you want to convert the frames manually, you can extract the\n.tar archive into a directory, cd to that directory, and run:\nffmpeg -r 60 -i %07d.png \\\\\\n\\t -vcodec libx264 \\\\\\n\\t -preset slow \\\\\\n\\t -crf 18 \\\\\\n\\t output.mp4\n \"\"\"", ")", "raise", "print", "(", "\"Saved output as {:s}\"", ".", "format", "(", "output_path", ")", ")", "return", "output_path" ]
aa3865143120f5ace8e62aab71d825e33674d277
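A sketch of converting a frame archive exported by the meshcat viewer; the tar filename is an assumption and ffmpeg must be on the PATH:

from meshcat.animation import convert_frames_to_video

convert_frames_to_video("meshcat_frames.tar", output_path="animation.mp4",
                        framerate=60, overwrite=True)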
valid
FileMetadata.toJSON
Get a json dict of the attributes of this object.
python/kappy/kappa_common.py
def toJSON(self):
    """Get a json dict of the attributes of this object."""
    return {"id": self.id,
            "compile": self.compile,
            "position": self.position,
            "version": self.version}
def toJSON(self):
    """Get a json dict of the attributes of this object."""
    return {"id": self.id,
            "compile": self.compile,
            "position": self.position,
            "version": self.version}
[ "Get", "a", "json", "dict", "of", "the", "attributes", "of", "this", "object", "." ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L72-L77
[ "def", "toJSON", "(", "self", ")", ":", "return", "{", "\"id\"", ":", "self", ".", "id", ",", "\"compile\"", ":", "self", ".", "compile", ",", "\"position\"", ":", "self", ".", "position", ",", "\"version\"", ":", "self", ".", "version", "}" ]
12a01c616a47e3046323103625795fb2fca8273a
valid
File.from_string
Convenience method to create a file from a string. This file object's metadata will have the id 'inlined_input'. Inputs ------ content -- the content of the file (a string). position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default 'inlined_input') the file_id that will be used by kappa.
python/kappy/kappa_common.py
def from_string(cls, content, position=1, file_id=None): """ Convenience method to create a file from a string. This file object's metadata will have the id 'inlined_input'. Inputs ------ content -- the content of the file (a string). position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default 'inlined_input') the file_id that will be used by kappa. """ if file_id is None: file_id = 'inlined_input' return cls(FileMetadata(file_id, position), content)
def from_string(cls, content, position=1, file_id=None): """ Convenience method to create a file from a string. This file object's metadata will have the id 'inlined_input'. Inputs ------ content -- the content of the file (a string). position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default 'inlined_input') the file_id that will be used by kappa. """ if file_id is None: file_id = 'inlined_input' return cls(FileMetadata(file_id, position), content)
[ "Convenience", "method", "to", "create", "a", "file", "from", "a", "string", "." ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L113-L129
[ "def", "from_string", "(", "cls", ",", "content", ",", "position", "=", "1", ",", "file_id", "=", "None", ")", ":", "if", "file_id", "is", "None", ":", "file_id", "=", "'inlined_input'", "return", "cls", "(", "FileMetadata", "(", "file_id", ",", "position", ")", ",", "content", ")" ]
12a01c616a47e3046323103625795fb2fca8273a
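A sketch of building kappa File objects from a string and from a path; the rule text and filename are illustrative only, and the noted defaults come from the classmethod signatures above:

from kappy.kappa_common import File

kappa_file = File.from_string("%agent: A(x)\n%agent: B(y)")   # id defaults to 'inlined_input'
on_disk    = File.from_file("model.ka", position=2)           # id defaults to the file path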
valid
File.from_file
Convenience method to create a kappa file object from a file on disk Inputs ------ fpath -- path to the file on disk position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default = fpath) the file_id that will be used by kappa.
python/kappy/kappa_common.py
def from_file(cls, fpath, position=1, file_id=None): """ Convience method to create a kappa file object from a file on disk Inputs ------ fpath -- path to the file on disk position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default = fpath) the file_id that will be used by kappa. """ if file_id is None: file_id = fpath with open(fpath) as f: code = f.read() file_content = str(code) file_metadata = FileMetadata(file_id, position) return cls(file_metadata, file_content)
def from_file(cls, fpath, position=1, file_id=None): """ Convience method to create a kappa file object from a file on disk Inputs ------ fpath -- path to the file on disk position -- (default 1) rank among all files of the model while parsing see FileMetadata file_id -- (default = fpath) the file_id that will be used by kappa. """ if file_id is None: file_id = fpath with open(fpath) as f: code = f.read() file_content = str(code) file_metadata = FileMetadata(file_id, position) return cls(file_metadata, file_content)
[ "Convience", "method", "to", "create", "a", "kappa", "file", "object", "from", "a", "file", "on", "disk" ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L132-L149
[ "def", "from_file", "(", "cls", ",", "fpath", ",", "position", "=", "1", ",", "file_id", "=", "None", ")", ":", "if", "file_id", "is", "None", ":", "file_id", "=", "fpath", "with", "open", "(", "fpath", ")", "as", "f", ":", "code", "=", "f", ".", "read", "(", ")", "file_content", "=", "str", "(", "code", ")", "file_metadata", "=", "FileMetadata", "(", "file_id", ",", "position", ")", "return", "cls", "(", "file_metadata", ",", "file_content", ")" ]
12a01c616a47e3046323103625795fb2fca8273a
valid
KappaApi._fix_docs
Make api method docs inherited. Specifically, inspect.getdoc will return values inherited from this abc for standardized api methods.
python/kappy/kappa_common.py
def _fix_docs(this_abc, child_class): """Make api method docs inheritted. Specifically, insepect.getdoc will return values inheritted from this abc for standardized api methods. """ # After python 3.5, this is basically handled automatically if sys.version_info >= (3, 5): return child_class if not issubclass(child_class, this_abc): raise KappaError('Cannot fix docs of class that is not decendent.') # This method is modified from solution given in # https://stackoverflow.com/a/8101598/8863865 for name, child_func in vars(child_class).items(): if callable(child_func) and not child_func.__doc__: if name in this_abc.__abstractmethods__: parent_func = getattr(this_abc, name) child_func.__doc__ = parent_func.__doc__ return child_class
def _fix_docs(this_abc, child_class): """Make api method docs inheritted. Specifically, insepect.getdoc will return values inheritted from this abc for standardized api methods. """ # After python 3.5, this is basically handled automatically if sys.version_info >= (3, 5): return child_class if not issubclass(child_class, this_abc): raise KappaError('Cannot fix docs of class that is not decendent.') # This method is modified from solution given in # https://stackoverflow.com/a/8101598/8863865 for name, child_func in vars(child_class).items(): if callable(child_func) and not child_func.__doc__: if name in this_abc.__abstractmethods__: parent_func = getattr(this_abc, name) child_func.__doc__ = parent_func.__doc__ return child_class
[ "Make", "api", "method", "docs", "inheritted", "." ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L244-L264
[ "def", "_fix_docs", "(", "this_abc", ",", "child_class", ")", ":", "# After python 3.5, this is basically handled automatically", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "5", ")", ":", "return", "child_class", "if", "not", "issubclass", "(", "child_class", ",", "this_abc", ")", ":", "raise", "KappaError", "(", "'Cannot fix docs of class that is not decendent.'", ")", "# This method is modified from solution given in", "# https://stackoverflow.com/a/8101598/8863865", "for", "name", ",", "child_func", "in", "vars", "(", "child_class", ")", ".", "items", "(", ")", ":", "if", "callable", "(", "child_func", ")", "and", "not", "child_func", ".", "__doc__", ":", "if", "name", "in", "this_abc", ".", "__abstractmethods__", ":", "parent_func", "=", "getattr", "(", "this_abc", ",", "name", ")", "child_func", ".", "__doc__", "=", "parent_func", ".", "__doc__", "return", "child_class" ]
12a01c616a47e3046323103625795fb2fca8273a
valid
KappaApi.add_model_string
Add a kappa model given in a string to the project.
python/kappy/kappa_common.py
def add_model_string(self, model_str, position=1, file_id=None):
    """Add a kappa model given in a string to the project."""
    if file_id is None:
        file_id = self.make_unique_id('inlined_input')
    ret_data = self.file_create(File.from_string(model_str, position,
                                                 file_id))
    return ret_data
def add_model_string(self, model_str, position=1, file_id=None):
    """Add a kappa model given in a string to the project."""
    if file_id is None:
        file_id = self.make_unique_id('inlined_input')
    ret_data = self.file_create(File.from_string(model_str, position,
                                                 file_id))
    return ret_data
[ "Add", "a", "kappa", "model", "given", "in", "a", "string", "to", "the", "project", "." ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L270-L276
[ "def", "add_model_string", "(", "self", ",", "model_str", ",", "position", "=", "1", ",", "file_id", "=", "None", ")", ":", "if", "file_id", "is", "None", ":", "file_id", "=", "self", ".", "make_unique_id", "(", "'inlined_input'", ")", "ret_data", "=", "self", ".", "file_create", "(", "File", ".", "from_string", "(", "model_str", ",", "position", ",", "file_id", ")", ")", "return", "ret_data" ]
12a01c616a47e3046323103625795fb2fca8273a
valid
KappaApi.add_model_file
Add a kappa model from a file at given path to the project.
python/kappy/kappa_common.py
def add_model_file(self, model_fpath, position=1, file_id=None):
    """Add a kappa model from a file at given path to the project."""
    if file_id is None:
        file_id = self.make_unique_id('file_input')
    ret_data = self.file_create(File.from_file(model_fpath, position,
                                               file_id))
    return ret_data
def add_model_file(self, model_fpath, position=1, file_id=None):
    """Add a kappa model from a file at given path to the project."""
    if file_id is None:
        file_id = self.make_unique_id('file_input')
    ret_data = self.file_create(File.from_file(model_fpath, position,
                                               file_id))
    return ret_data
[ "Add", "a", "kappa", "model", "from", "a", "file", "at", "given", "path", "to", "the", "project", "." ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L278-L284
[ "def", "add_model_file", "(", "self", ",", "model_fpath", ",", "position", "=", "1", ",", "file_id", "=", "None", ")", ":", "if", "file_id", "is", "None", ":", "file_id", "=", "self", ".", "make_unique_id", "(", "'file_input'", ")", "ret_data", "=", "self", ".", "file_create", "(", "File", ".", "from_file", "(", "model_fpath", ",", "position", ",", "file_id", ")", ")", "return", "ret_data" ]
12a01c616a47e3046323103625795fb2fca8273a
valid
KappaApi.set_default_sim_param
Set the default simulation parameters. You can pass one of two things in as input: - a kappa_common.SimulationParameter instance - the arguments and keyword arguments to create such an instance. The parameters you specify will be used by default in simulations run by this client.
python/kappy/kappa_common.py
def set_default_sim_param(self, *args, **kwargs): """Set the simulation default simulation parameters. You can pass one of two things in as input: - a kappa_common.SimulationParameter instance - the arguments and keyword argument to create such an instance. The parameters you specify will be used by default in simulations run by this client. """ if len(args) is 1 and isinstance(args[0], SimulationParameter): self.__default_param = args[0] else: self.__default_param = SimulationParameter(*args, **kwargs) return
def set_default_sim_param(self, *args, **kwargs): """Set the simulation default simulation parameters. You can pass one of two things in as input: - a kappa_common.SimulationParameter instance - the arguments and keyword argument to create such an instance. The parameters you specify will be used by default in simulations run by this client. """ if len(args) is 1 and isinstance(args[0], SimulationParameter): self.__default_param = args[0] else: self.__default_param = SimulationParameter(*args, **kwargs) return
[ "Set", "the", "simulation", "default", "simulation", "parameters", "." ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L286-L300
[ "def", "set_default_sim_param", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "is", "1", "and", "isinstance", "(", "args", "[", "0", "]", ",", "SimulationParameter", ")", ":", "self", ".", "__default_param", "=", "args", "[", "0", "]", "else", ":", "self", ".", "__default_param", "=", "SimulationParameter", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return" ]
12a01c616a47e3046323103625795fb2fca8273a
valid
KappaApi.get_is_sim_running
Check if the current simulation is running.
python/kappy/kappa_common.py
def get_is_sim_running(self):
    """Check if the current simulation is running."""
    sim_info = self.simulation_info()
    try:
        progress_info = sim_info['simulation_info_progress']
        ret = progress_info['simulation_progress_is_running']
    except KeyError:  # Simulation has not been created.
        ret = False
    return ret
def get_is_sim_running(self):
    """Check if the current simulation is running."""
    sim_info = self.simulation_info()
    try:
        progress_info = sim_info['simulation_info_progress']
        ret = progress_info['simulation_progress_is_running']
    except KeyError:  # Simulation has not been created.
        ret = False
    return ret
[ "Check", "if", "the", "current", "simulation", "is", "running", "." ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L308-L316
[ "def", "get_is_sim_running", "(", "self", ")", ":", "sim_info", "=", "self", ".", "simulation_info", "(", ")", "try", ":", "progress_info", "=", "sim_info", "[", "'simulation_info_progress'", "]", "ret", "=", "progress_info", "[", "'simulation_progress_is_running'", "]", "except", "KeyError", ":", "# Simulation has not been created.", "ret", "=", "False", "return", "ret" ]
12a01c616a47e3046323103625795fb2fca8273a
valid
KappaApi.wait_for_simulation_stop
Block until the simulation is done or the timeout in seconds is exceeded. If the simulation stops before the timeout, the simulation info is returned.
python/kappy/kappa_common.py
def wait_for_simulation_stop(self, timeout=None): """Block until the simulation is done or timeout seconds exceeded. If the simulation stops before timeout, siminfo is returned. """ start = datetime.now() while self.get_is_sim_running(): sleep(0.5) if timeout is not None: if (datetime.now() - start).seconds >= timeout: ret = None break else: ret = self.simulation_info() return ret
def wait_for_simulation_stop(self, timeout=None): """Block until the simulation is done or timeout seconds exceeded. If the simulation stops before timeout, siminfo is returned. """ start = datetime.now() while self.get_is_sim_running(): sleep(0.5) if timeout is not None: if (datetime.now() - start).seconds >= timeout: ret = None break else: ret = self.simulation_info() return ret
[ "Block", "until", "the", "simulation", "is", "done", "or", "timeout", "seconds", "exceeded", "." ]
Kappa-Dev/KaSim
python
https://github.com/Kappa-Dev/KaSim/blob/12a01c616a47e3046323103625795fb2fca8273a/python/kappy/kappa_common.py#L318-L332
[ "def", "wait_for_simulation_stop", "(", "self", ",", "timeout", "=", "None", ")", ":", "start", "=", "datetime", ".", "now", "(", ")", "while", "self", ".", "get_is_sim_running", "(", ")", ":", "sleep", "(", "0.5", ")", "if", "timeout", "is", "not", "None", ":", "if", "(", "datetime", ".", "now", "(", ")", "-", "start", ")", ".", "seconds", ">=", "timeout", ":", "ret", "=", "None", "break", "else", ":", "ret", "=", "self", ".", "simulation_info", "(", ")", "return", "ret" ]
12a01c616a47e3046323103625795fb2fca8273a
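A sketch of combining the polling helpers, assuming client is a concrete KappaApi implementation that already has a model loaded and a simulation started (how the client is constructed is outside these records):

info = client.wait_for_simulation_stop(timeout=120)
if info is None:
    print("still running after 120 s")
else:
    print("finished; running:", client.get_is_sim_running())   # False once the run has stopped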
valid
available_devices
Display available input and output audio devices along with their port indices. :return: Dictionary whose keys are the device index, the number of inputs and outputs, and their names. :rtype: dict
sk_dsp_comm/pyaudio_helper.py
def available_devices(): """ Display available input and output audio devices along with their port indices. :return: Dictionary whose keys are the device index, the number of inputs and outputs, and their names. :rtype: dict """ devices = {} pA = pyaudio.PyAudio() device_string = str() for k in range(pA.get_device_count()): dev = pA.get_device_info_by_index(k) devices[k] = {'name': dev['name'], 'inputs': dev['maxInputChannels'], 'outputs': dev['maxOutputChannels']} device_string += 'Index %d device name = %s, inputs = %d, outputs = %d\n' % \ (k,dev['name'],dev['maxInputChannels'],dev['maxOutputChannels']) logger.debug(device_string) return devices
def available_devices(): """ Display available input and output audio devices along with their port indices. :return: Dictionary whose keys are the device index, the number of inputs and outputs, and their names. :rtype: dict """ devices = {} pA = pyaudio.PyAudio() device_string = str() for k in range(pA.get_device_count()): dev = pA.get_device_info_by_index(k) devices[k] = {'name': dev['name'], 'inputs': dev['maxInputChannels'], 'outputs': dev['maxOutputChannels']} device_string += 'Index %d device name = %s, inputs = %d, outputs = %d\n' % \ (k,dev['name'],dev['maxInputChannels'],dev['maxOutputChannels']) logger.debug(device_string) return devices
[ "Display", "available", "input", "and", "output", "audio", "devices", "along", "with", "their", "port", "indices", ".", ":", "return", ":", "Dictionary", "whose", "keys", "are", "the", "device", "index", "the", "number", "of", "inputs", "and", "outputs", "and", "their", "names", ".", ":", "rtype", ":", "dict" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L432-L449
[ "def", "available_devices", "(", ")", ":", "devices", "=", "{", "}", "pA", "=", "pyaudio", ".", "PyAudio", "(", ")", "device_string", "=", "str", "(", ")", "for", "k", "in", "range", "(", "pA", ".", "get_device_count", "(", ")", ")", ":", "dev", "=", "pA", ".", "get_device_info_by_index", "(", "k", ")", "devices", "[", "k", "]", "=", "{", "'name'", ":", "dev", "[", "'name'", "]", ",", "'inputs'", ":", "dev", "[", "'maxInputChannels'", "]", ",", "'outputs'", ":", "dev", "[", "'maxOutputChannels'", "]", "}", "device_string", "+=", "'Index %d device name = %s, inputs = %d, outputs = %d\\n'", "%", "(", "k", ",", "dev", "[", "'name'", "]", ",", "dev", "[", "'maxInputChannels'", "]", ",", "dev", "[", "'maxOutputChannels'", "]", ")", "logger", ".", "debug", "(", "device_string", ")", "return", "devices" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
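A short usage sketch for available_devices(); the import path follows the sk_dsp_comm/pyaudio_helper.py path field, and it assumes PyAudio plus at least one audio device on the host.

import sk_dsp_comm.pyaudio_helper as pah

devices = pah.available_devices()          # dict keyed by device index
for idx, dev in devices.items():
    print(idx, dev['name'], 'inputs:', dev['inputs'], 'outputs:', dev['outputs'])
# indices usable as capture devices report at least one input channel
input_candidates = [idx for idx, dev in devices.items() if dev['inputs'] > 0]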
DSP_io_stream.in_out_check
Checks the input and output to see if they are valid
sk_dsp_comm/pyaudio_helper.py
def in_out_check(self): """ Checks the input and output to see if they are valid """ devices = available_devices() if not self.in_idx in devices: raise OSError("Input device is unavailable") in_check = devices[self.in_idx] if not self.out_idx in devices: raise OSError("Output device is unavailable") out_check = devices[self.out_idx] if((in_check['inputs'] == 0) and (out_check['outputs']==0)): raise ValueError('Invalid input and output devices') elif(in_check['inputs'] == 0): raise ValueError('Selected input device has no inputs') elif(out_check['outputs'] == 0): raise ValueError('Selected output device has no outputs') return True
def in_out_check(self): """ Checks the input and output to see if they are valid """ devices = available_devices() if not self.in_idx in devices: raise OSError("Input device is unavailable") in_check = devices[self.in_idx] if not self.out_idx in devices: raise OSError("Output device is unavailable") out_check = devices[self.out_idx] if((in_check['inputs'] == 0) and (out_check['outputs']==0)): raise ValueError('Invalid input and output devices') elif(in_check['inputs'] == 0): raise ValueError('Selected input device has no inputs') elif(out_check['outputs'] == 0): raise ValueError('Selected output device has no outputs') return True
[ "Checks", "the", "input", "and", "output", "to", "see", "if", "they", "are", "valid" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L94-L112
[ "def", "in_out_check", "(", "self", ")", ":", "devices", "=", "available_devices", "(", ")", "if", "not", "self", ".", "in_idx", "in", "devices", ":", "raise", "OSError", "(", "\"Input device is unavailable\"", ")", "in_check", "=", "devices", "[", "self", ".", "in_idx", "]", "if", "not", "self", ".", "out_idx", "in", "devices", ":", "raise", "OSError", "(", "\"Output device is unavailable\"", ")", "out_check", "=", "devices", "[", "self", ".", "out_idx", "]", "if", "(", "(", "in_check", "[", "'inputs'", "]", "==", "0", ")", "and", "(", "out_check", "[", "'outputs'", "]", "==", "0", ")", ")", ":", "raise", "StandardError", "(", "'Invalid input and output devices'", ")", "elif", "(", "in_check", "[", "'inputs'", "]", "==", "0", ")", ":", "raise", "ValueError", "(", "'Selected input device has no inputs'", ")", "elif", "(", "out_check", "[", "'outputs'", "]", "==", "0", ")", ":", "raise", "ValueError", "(", "'Selected output device has no outputs'", ")", "return", "True" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
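A hedged sketch for in_out_check(): it is called on an already-constructed DSP_io_stream instance, named io here purely for illustration (the constructor is not part of this record).

try:
    io.in_out_check()                      # returns True when both devices are usable
except (OSError, ValueError) as err:
    # OSError: device index not present; ValueError: device lacks the needed channels
    print("audio device problem:", err)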
DSP_io_stream.interactive_stream
Stream audio with start and stop radio buttons Interactive stream is designed for streaming audio through this object using a callback function. This stream is threaded, so it can be used with ipywidgets. Click on the "Start Streaming" button to start streaming and click on "Stop Streaming" button to stop streaming. Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, the "Stop Streaming" radio button or Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo.
sk_dsp_comm/pyaudio_helper.py
def interactive_stream(self,Tsec = 2, numChan = 1): """ Stream audio with start and stop radio buttons Interactive stream is designed for streaming audio through this object using a callback function. This stream is threaded, so it can be used with ipywidgets. Click on the "Start Streaming" button to start streaming and click on "Stop Streaming" button to stop streaming. Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, the "Stop Streaming" radio button or Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo. """ self.Tsec = Tsec self.numChan = numChan self.interactiveFG = 1 self.play = interactive(self.interaction,Stream = ToggleButtons( options=['Start Streaming', 'Stop Streaming'], description = ' ', value = 'Stop Streaming') ) display(self.play)
def interactive_stream(self,Tsec = 2, numChan = 1): """ Stream audio with start and stop radio buttons Interactive stream is designed for streaming audio through this object using a callback function. This stream is threaded, so it can be used with ipywidgets. Click on the "Start Streaming" button to start streaming and click on "Stop Streaming" button to stop streaming. Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, the "Stop Streaming" radio button or Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo. """ self.Tsec = Tsec self.numChan = numChan self.interactiveFG = 1 self.play = interactive(self.interaction,Stream = ToggleButtons( options=['Start Streaming', 'Stop Streaming'], description = ' ', value = 'Stop Streaming') ) display(self.play)
[ "Stream", "audio", "with", "start", "and", "stop", "radio", "buttons", "Interactive", "stream", "is", "designed", "for", "streaming", "audio", "through", "this", "object", "using", "a", "callback", "function", ".", "This", "stream", "is", "threaded", "so", "it", "can", "be", "used", "with", "ipywidgets", ".", "Click", "on", "the", "Start", "Streaming", "button", "to", "start", "streaming", "and", "click", "on", "Stop", "Streaming", "button", "to", "stop", "streaming", ".", "Parameters", "----------", "Tsec", ":", "stream", "time", "in", "seconds", "if", "Tsec", ">", "0", ".", "If", "Tsec", "=", "0", "then", "stream", "goes", "to", "infinite", "mode", ".", "When", "in", "infinite", "mode", "the", "Stop", "Streaming", "radio", "button", "or", "Tsec", ".", "stop", "()", "can", "be", "used", "to", "stop", "the", "stream", ".", "numChan", ":", "number", "of", "channels", ".", "Use", "1", "for", "mono", "and", "2", "for", "stereo", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L122-L149
[ "def", "interactive_stream", "(", "self", ",", "Tsec", "=", "2", ",", "numChan", "=", "1", ")", ":", "self", ".", "Tsec", "=", "Tsec", "self", ".", "numChan", "=", "numChan", "self", ".", "interactiveFG", "=", "1", "self", ".", "play", "=", "interactive", "(", "self", ".", "interaction", ",", "Stream", "=", "ToggleButtons", "(", "options", "=", "[", "'Start Streaming'", ",", "'Stop Streaming'", "]", ",", "description", "=", "' '", ",", "value", "=", "'Stop Streaming'", ")", ")", "display", "(", "self", ".", "play", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
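A hedged end-to-end sketch for interactive_stream(). The DSP_io_stream constructor arguments and the callback signature below are assumptions inferred from the attributes the class code references (stream_callback handed to PyAudio, in_idx, out_idx, fs, paInt16); they are not spelled out in this record. interactive_stream() displays ipywidgets toggle buttons, so it is meant to run inside a Jupyter notebook.

import numpy as np
import pyaudio
import sk_dsp_comm.pyaudio_helper as pah

def loopback(in_data, frame_count, time_info, status):
    # stream() opens the PyAudio stream with paInt16, so the incoming byte
    # buffer is reinterpreted as 16-bit integer samples
    x = np.frombuffer(in_data, dtype=np.int16)
    y = x                                   # no processing: straight loopback
    return y.tobytes(), pyaudio.paContinue

# device indices and sampling rate are illustrative values only
io = pah.DSP_io_stream(loopback, in_idx=0, out_idx=1, fs=48000)
io.interactive_stream(Tsec=0, numChan=1)    # Tsec=0: run until "Stop Streaming" is clicked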
DSP_io_stream.thread_stream
Stream audio in a thread using callback. The stream is threaded, so widgets can be used simultaneously during stream. Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo.
sk_dsp_comm/pyaudio_helper.py
def thread_stream(self,Tsec = 2,numChan = 1): """ Stream audio in a thread using callback. The stream is threaded, so widgets can be used simultaneously during stream. Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo. """ def stream_thread(time,channel): self.stream(Tsec=time,numChan = channel) # Thread the streaming function t = Thread(target=stream_thread, args=(Tsec,numChan,)) # Start the stream t.start()
def thread_stream(self,Tsec = 2,numChan = 1): """ Stream audio in a thread using callback. The stream is threaded, so widgets can be used simultaneously during stream. Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo. """ def stream_thread(time,channel): self.stream(Tsec=time,numChan = channel) # Thread the streaming function t = Thread(target=stream_thread, args=(Tsec,numChan,)) # Start the stream t.start()
[ "Stream", "audio", "in", "a", "thread", "using", "callback", ".", "The", "stream", "is", "threaded", "so", "widgets", "can", "be", "used", "simultaneously", "during", "stream", ".", "Parameters", "----------", "Tsec", ":", "stream", "time", "in", "seconds", "if", "Tsec", ">", "0", ".", "If", "Tsec", "=", "0", "then", "stream", "goes", "to", "infinite", "mode", ".", "When", "in", "infinite", "mode", "Tsec", ".", "stop", "()", "can", "be", "used", "to", "stop", "the", "stream", ".", "numChan", ":", "number", "of", "channels", ".", "Use", "1", "for", "mono", "and", "2", "for", "stereo", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L151-L172
[ "def", "thread_stream", "(", "self", ",", "Tsec", "=", "2", ",", "numChan", "=", "1", ")", ":", "def", "stream_thread", "(", "time", ",", "channel", ")", ":", "self", ".", "stream", "(", "Tsec", "=", "time", ",", "numChan", "=", "channel", ")", "# Thread the streaming function\r", "t", "=", "Thread", "(", "target", "=", "stream_thread", ",", "args", "=", "(", "Tsec", ",", "numChan", ",", ")", ")", "# Start the stream\r", "t", ".", "start", "(", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
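A short follow-up reusing the io object from the loopback sketch above: thread_stream() returns immediately because the audio runs in a background thread, so other notebook cells and widgets stay responsive.

io.thread_stream(Tsec=10, numChan=1)        # ten seconds of mono streaming, non-blocking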
DSP_io_stream.stream
Stream audio using callback Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo.
sk_dsp_comm/pyaudio_helper.py
def stream(self,Tsec = 2,numChan = 1): """ Stream audio using callback Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo. """ self.Tsec = Tsec self.numChan = numChan self.N_samples = int(self.fs*Tsec) self.data_capture = [] self.data_capture_left = [] self.data_capture_right = [] self.capture_sample_count = 0 self.DSP_tic = [] self.DSP_toc = [] self.start_time = time.time() self.stop_stream = False # open stream using callback (3) stream = self.p.open(format=pyaudio.paInt16, channels=numChan, rate=self.fs, input=True, output=True, input_device_index = self.in_idx, output_device_index = self.out_idx, frames_per_buffer = self.frame_length, stream_callback=self.stream_callback) # start the stream (4) stream.start_stream() # infinite mode if(Tsec == 0): while stream.is_active(): if self.stop_stream: stream.stop_stream() time.sleep(self.sleep_time) else: # wait for stream to finish (5) while stream.is_active(): if self.capture_sample_count >= self.N_samples: stream.stop_stream() if self.stop_stream: stream.stop_stream() time.sleep(self.sleep_time) # stop stream (6) stream.stop_stream() stream.close() # close PyAudio (7) self.p.terminate() self.stream_data = True # print('Audio input/output streaming session complete!') if(self.interactiveFG): # Move radio button back to 'Stop Streaming' self.play.children[0].value = 'Stop Streaming' else: if(self.print_when_done == 1): print('Completed')
def stream(self,Tsec = 2,numChan = 1): """ Stream audio using callback Parameters ---------- Tsec : stream time in seconds if Tsec > 0. If Tsec = 0, then stream goes to infinite mode. When in infinite mode, Tsec.stop() can be used to stop the stream. numChan : number of channels. Use 1 for mono and 2 for stereo. """ self.Tsec = Tsec self.numChan = numChan self.N_samples = int(self.fs*Tsec) self.data_capture = [] self.data_capture_left = [] self.data_capture_right = [] self.capture_sample_count = 0 self.DSP_tic = [] self.DSP_toc = [] self.start_time = time.time() self.stop_stream = False # open stream using callback (3) stream = self.p.open(format=pyaudio.paInt16, channels=numChan, rate=self.fs, input=True, output=True, input_device_index = self.in_idx, output_device_index = self.out_idx, frames_per_buffer = self.frame_length, stream_callback=self.stream_callback) # start the stream (4) stream.start_stream() # infinite mode if(Tsec == 0): while stream.is_active(): if self.stop_stream: stream.stop_stream() time.sleep(self.sleep_time) else: # wait for stream to finish (5) while stream.is_active(): if self.capture_sample_count >= self.N_samples: stream.stop_stream() if self.stop_stream: stream.stop_stream() time.sleep(self.sleep_time) # stop stream (6) stream.stop_stream() stream.close() # close PyAudio (7) self.p.terminate() self.stream_data = True # print('Audio input/output streaming session complete!') if(self.interactiveFG): # Move radio button back to 'Stop Streaming' self.play.children[0].value = 'Stop Streaming' else: if(self.print_when_done == 1): print('Completed')
[ "Stream", "audio", "using", "callback", "Parameters", "----------", "Tsec", ":", "stream", "time", "in", "seconds", "if", "Tsec", ">", "0", ".", "If", "Tsec", "=", "0", "then", "stream", "goes", "to", "infinite", "mode", ".", "When", "in", "infinite", "mode", "Tsec", ".", "stop", "()", "can", "be", "used", "to", "stop", "the", "stream", ".", "numChan", ":", "number", "of", "channels", ".", "Use", "1", "for", "mono", "and", "2", "for", "stereo", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L174-L241
[ "def", "stream", "(", "self", ",", "Tsec", "=", "2", ",", "numChan", "=", "1", ")", ":", "self", ".", "Tsec", "=", "Tsec", "self", ".", "numChan", "=", "numChan", "self", ".", "N_samples", "=", "int", "(", "self", ".", "fs", "*", "Tsec", ")", "self", ".", "data_capture", "=", "[", "]", "self", ".", "data_capture_left", "=", "[", "]", "self", ".", "data_capture_right", "=", "[", "]", "self", ".", "capture_sample_count", "=", "0", "self", ".", "DSP_tic", "=", "[", "]", "self", ".", "DSP_toc", "=", "[", "]", "self", ".", "start_time", "=", "time", ".", "time", "(", ")", "self", ".", "stop_stream", "=", "False", "# open stream using callback (3)\r", "stream", "=", "self", ".", "p", ".", "open", "(", "format", "=", "pyaudio", ".", "paInt16", ",", "channels", "=", "numChan", ",", "rate", "=", "self", ".", "fs", ",", "input", "=", "True", ",", "output", "=", "True", ",", "input_device_index", "=", "self", ".", "in_idx", ",", "output_device_index", "=", "self", ".", "out_idx", ",", "frames_per_buffer", "=", "self", ".", "frame_length", ",", "stream_callback", "=", "self", ".", "stream_callback", ")", "# start the stream (4)\r", "stream", ".", "start_stream", "(", ")", "# infinite mode\r", "if", "(", "Tsec", "==", "0", ")", ":", "while", "stream", ".", "is_active", "(", ")", ":", "if", "self", ".", "stop_stream", ":", "stream", ".", "stop_stream", "(", ")", "time", ".", "sleep", "(", "self", ".", "sleep_time", ")", "else", ":", "# wait for stream to finish (5)\r", "while", "stream", ".", "is_active", "(", ")", ":", "if", "self", ".", "capture_sample_count", ">=", "self", ".", "N_samples", ":", "stream", ".", "stop_stream", "(", ")", "if", "self", ".", "stop_stream", ":", "stream", ".", "stop_stream", "(", ")", "time", ".", "sleep", "(", "self", ".", "sleep_time", ")", "# stop stream (6)\r", "stream", ".", "stop_stream", "(", ")", "stream", ".", "close", "(", ")", "# close PyAudio (7)\r", "self", ".", "p", ".", "terminate", "(", ")", "self", ".", "stream_data", "=", "True", "# print('Audio input/output streaming session complete!')\r", "if", "(", "self", ".", "interactiveFG", ")", ":", "# Move radio button back to 'Stop Streaming'\r", "self", ".", "play", ".", "children", "[", "0", "]", ".", "value", "=", "'Stop Streaming'", "else", ":", "if", "(", "self", ".", "print_when_done", "==", "1", ")", ":", "print", "(", "'Completed'", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
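The blocking counterpart, again reusing the io object sketched above: stream() only returns once roughly Tsec seconds have been processed (or, with Tsec=0, once the stream is stopped externally).

io.stream(Tsec=5, numChan=1)                # blocks for about five seconds of mono audio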
DSP_io_stream.DSP_capture_add_samples
Append new samples to the data_capture array and increment the sample counter If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0 then new values are not appended to the data_capture array.
sk_dsp_comm/pyaudio_helper.py
def DSP_capture_add_samples(self,new_data): """ Append new samples to the data_capture array and increment the sample counter If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0 then new values are not appended to the data_capture array. """ self.capture_sample_count += len(new_data) if self.Tcapture > 0: self.data_capture = np.hstack((self.data_capture,new_data)) if (self.Tcapture > 0) and (len(self.data_capture) > self.Ncapture): self.data_capture = self.data_capture[-self.Ncapture:]
def DSP_capture_add_samples(self,new_data): """ Append new samples to the data_capture array and increment the sample counter If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0 then new values are not appended to the data_capture array. """ self.capture_sample_count += len(new_data) if self.Tcapture > 0: self.data_capture = np.hstack((self.data_capture,new_data)) if (self.Tcapture > 0) and (len(self.data_capture) > self.Ncapture): self.data_capture = self.data_capture[-self.Ncapture:]
[ "Append", "new", "samples", "to", "the", "data_capture", "array", "and", "increment", "the", "sample", "counter", "If", "length", "reaches", "Tcapture", "then", "the", "newest", "samples", "will", "be", "kept", ".", "If", "Tcapture", "=", "0", "then", "new", "values", "are", "not", "appended", "to", "the", "data_capture", "array", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L250-L261
[ "def", "DSP_capture_add_samples", "(", "self", ",", "new_data", ")", ":", "self", ".", "capture_sample_count", "+=", "len", "(", "new_data", ")", "if", "self", ".", "Tcapture", ">", "0", ":", "self", ".", "data_capture", "=", "np", ".", "hstack", "(", "(", "self", ".", "data_capture", ",", "new_data", ")", ")", "if", "(", "self", ".", "Tcapture", ">", "0", ")", "and", "(", "len", "(", "self", ".", "data_capture", ")", ">", "self", ".", "Ncapture", ")", ":", "self", ".", "data_capture", "=", "self", ".", "data_capture", "[", "-", "self", ".", "Ncapture", ":", "]" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
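Continuing the hedged loopback sketch (same imports and constructor assumptions), the callback can archive what it processes by handing each frame to DSP_capture_add_samples(). Per the code above, samples are kept only while Tcapture > 0; the Tcapture constructor keyword used here is an assumption inferred from the attribute of the same name.

def capture_cb(in_data, frame_count, time_info, status):
    x = np.frombuffer(in_data, dtype=np.int16)
    io.DSP_capture_add_samples(x)           # retained only while Tcapture > 0
    return x.tobytes(), pyaudio.paContinue

io = pah.DSP_io_stream(capture_cb, in_idx=0, out_idx=1, fs=48000, Tcapture=2)
io.stream(Tsec=4, numChan=1)
captured = np.asarray(io.data_capture)      # roughly the newest Tcapture seconds of samples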
DSP_io_stream.DSP_capture_add_samples_stereo
Append new samples to the data_capture_left array and the data_capture_right array and increment the sample counter. If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0 then new values are not appended to the data_capture array.
sk_dsp_comm/pyaudio_helper.py
def DSP_capture_add_samples_stereo(self,new_data_left,new_data_right): """ Append new samples to the data_capture_left array and the data_capture_right array and increment the sample counter. If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0 then new values are not appended to the data_capture array. """ self.capture_sample_count = self.capture_sample_count + len(new_data_left) + len(new_data_right) if self.Tcapture > 0: self.data_capture_left = np.hstack((self.data_capture_left,new_data_left)) self.data_capture_right = np.hstack((self.data_capture_right,new_data_right)) if (len(self.data_capture_left) > self.Ncapture): self.data_capture_left = self.data_capture_left[-self.Ncapture:] if (len(self.data_capture_right) > self.Ncapture): self.data_capture_right = self.data_capture_right[-self.Ncapture:]
def DSP_capture_add_samples_stereo(self,new_data_left,new_data_right): """ Append new samples to the data_capture_left array and the data_capture_right array and increment the sample counter. If length reaches Tcapture, then the newest samples will be kept. If Tcapture = 0 then new values are not appended to the data_capture array. """ self.capture_sample_count = self.capture_sample_count + len(new_data_left) + len(new_data_right) if self.Tcapture > 0: self.data_capture_left = np.hstack((self.data_capture_left,new_data_left)) self.data_capture_right = np.hstack((self.data_capture_right,new_data_right)) if (len(self.data_capture_left) > self.Ncapture): self.data_capture_left = self.data_capture_left[-self.Ncapture:] if (len(self.data_capture_right) > self.Ncapture): self.data_capture_right = self.data_capture_right[-self.Ncapture:]
[ "Append", "new", "samples", "to", "the", "data_capture_left", "array", "and", "the", "data_capture_right", "array", "and", "increment", "the", "sample", "counter", ".", "If", "length", "reaches", "Tcapture", "then", "the", "newest", "samples", "will", "be", "kept", ".", "If", "Tcapture", "=", "0", "then", "new", "values", "are", "not", "appended", "to", "the", "data_capture", "array", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L263-L278
[ "def", "DSP_capture_add_samples_stereo", "(", "self", ",", "new_data_left", ",", "new_data_right", ")", ":", "self", ".", "capture_sample_count", "=", "self", ".", "capture_sample_count", "+", "len", "(", "new_data_left", ")", "+", "len", "(", "new_data_right", ")", "if", "self", ".", "Tcapture", ">", "0", ":", "self", ".", "data_capture_left", "=", "np", ".", "hstack", "(", "(", "self", ".", "data_capture_left", ",", "new_data_left", ")", ")", "self", ".", "data_capture_right", "=", "np", ".", "hstack", "(", "(", "self", ".", "data_capture_right", ",", "new_data_right", ")", ")", "if", "(", "len", "(", "self", ".", "data_capture_left", ")", ">", "self", ".", "Ncapture", ")", ":", "self", ".", "data_capture_left", "=", "self", ".", "data_capture_left", "[", "-", "self", ".", "Ncapture", ":", "]", "if", "(", "len", "(", "self", ".", "data_capture_right", ")", ">", "self", ".", "Ncapture", ")", ":", "self", ".", "data_capture_right", "=", "self", ".", "data_capture_right", "[", "-", "self", ".", "Ncapture", ":", "]" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
DSP_io_stream.DSP_callback_tic
Add new tic time to the DSP_tic list. Will not be called if Tcapture = 0.
sk_dsp_comm/pyaudio_helper.py
def DSP_callback_tic(self): """ Add new tic time to the DSP_tic list. Will not be called if Tcapture = 0. """ if self.Tcapture > 0: self.DSP_tic.append(time.time()-self.start_time)
def DSP_callback_tic(self): """ Add new tic time to the DSP_tic list. Will not be called if Tcapture = 0. """ if self.Tcapture > 0: self.DSP_tic.append(time.time()-self.start_time)
[ "Add", "new", "tic", "time", "to", "the", "DSP_tic", "list", ".", "Will", "not", "be", "called", "if", "Tcapture", "=", "0", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L281-L288
[ "def", "DSP_callback_tic", "(", "self", ")", ":", "if", "self", ".", "Tcapture", ">", "0", ":", "self", ".", "DSP_tic", ".", "append", "(", "time", ".", "time", "(", ")", "-", "self", ".", "start_time", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
DSP_io_stream.DSP_callback_toc
Add new toc time to the DSP_toc list. Will not be called if Tcapture = 0.
sk_dsp_comm/pyaudio_helper.py
def DSP_callback_toc(self): """ Add new toc time to the DSP_toc list. Will not be called if Tcapture = 0. """ if self.Tcapture > 0: self.DSP_toc.append(time.time()-self.start_time)
def DSP_callback_toc(self): """ Add new toc time to the DSP_toc list. Will not be called if Tcapture = 0. """ if self.Tcapture > 0: self.DSP_toc.append(time.time()-self.start_time)
[ "Add", "new", "toc", "time", "to", "the", "DSP_toc", "list", ".", "Will", "not", "be", "called", "if", "Tcapture", "=", "0", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L291-L298
[ "def", "DSP_callback_toc", "(", "self", ")", ":", "if", "self", ".", "Tcapture", ">", "0", ":", "self", ".", "DSP_toc", ".", "append", "(", "time", ".", "time", "(", ")", "-", "self", ".", "start_time", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
DSP_io_stream.stream_stats
Display basic statistics of callback execution: ideal period between callbacks, average measured period between callbacks, and average time spent in the callback.
sk_dsp_comm/pyaudio_helper.py
def stream_stats(self): """ Display basic statistics of callback execution: ideal period between callbacks, average measured period between callbacks, and average time spent in the callback. """ Tp = self.frame_length/float(self.fs)*1000 print('Delay (latency) in Entering the Callback the First Time = %6.2f (ms)' \ % (self.DSP_tic[0]*1000,)) print('Ideal Callback period = %1.2f (ms)' % Tp) Tmp_mean = np.mean(np.diff(np.array(self.DSP_tic))[1:]*1000) print('Average Callback Period = %1.2f (ms)' % Tmp_mean) Tprocess_mean = np.mean(np.array(self.DSP_toc)-np.array(self.DSP_tic))*1000 print('Average Callback process time = %1.2f (ms)' % Tprocess_mean)
def stream_stats(self): """ Display basic statistics of callback execution: ideal period between callbacks, average measured period between callbacks, and average time spent in the callback. """ Tp = self.frame_length/float(self.fs)*1000 print('Delay (latency) in Entering the Callback the First Time = %6.2f (ms)' \ % (self.DSP_tic[0]*1000,)) print('Ideal Callback period = %1.2f (ms)' % Tp) Tmp_mean = np.mean(np.diff(np.array(self.DSP_tic))[1:]*1000) print('Average Callback Period = %1.2f (ms)' % Tmp_mean) Tprocess_mean = np.mean(np.array(self.DSP_toc)-np.array(self.DSP_tic))*1000 print('Average Callback process time = %1.2f (ms)' % Tprocess_mean)
[ "Display", "basic", "statistics", "of", "callback", "execution", ":", "ideal", "period", "between", "callbacks", "average", "measured", "period", "between", "callbacks", "and", "average", "time", "spent", "in", "the", "callback", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L301-L315
[ "def", "stream_stats", "(", "self", ")", ":", "Tp", "=", "self", ".", "frame_length", "/", "float", "(", "self", ".", "fs", ")", "*", "1000", "print", "(", "'Delay (latency) in Entering the Callback the First Time = %6.2f (ms)'", "%", "(", "self", ".", "DSP_tic", "[", "0", "]", "*", "1000", ",", ")", ")", "print", "(", "'Ideal Callback period = %1.2f (ms)'", "%", "Tp", ")", "Tmp_mean", "=", "np", ".", "mean", "(", "np", ".", "diff", "(", "np", ".", "array", "(", "self", ".", "DSP_tic", ")", ")", "[", "1", ":", "]", "*", "1000", ")", "print", "(", "'Average Callback Period = %1.2f (ms)'", "%", "Tmp_mean", ")", "Tprocess_mean", "=", "np", ".", "mean", "(", "np", ".", "array", "(", "self", ".", "DSP_toc", ")", "-", "np", ".", "array", "(", "self", ".", "DSP_tic", ")", ")", "*", "1000", "print", "(", "'Average Callback process time = %1.2f (ms)'", "%", "Tprocess_mean", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
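A hedged instrumentation sketch tying DSP_callback_tic(), DSP_callback_toc() and stream_stats() together: bracket the work inside the callback with tic/toc (both are no-ops unless Tcapture > 0) and summarize the timing after the run. Constructor arguments remain illustrative assumptions.

def timed_cb(in_data, frame_count, time_info, status):
    io.DSP_callback_tic()                   # record entry time into DSP_tic
    x = np.frombuffer(in_data, dtype=np.int16)
    y = x                                   # real processing would go here
    io.DSP_callback_toc()                   # record exit time into DSP_toc
    return y.tobytes(), pyaudio.paContinue

io = pah.DSP_io_stream(timed_cb, in_idx=0, out_idx=1, fs=48000, Tcapture=2)
io.stream(Tsec=4, numChan=1)
io.stream_stats()                           # latency, callback period, processing time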
DSP_io_stream.cb_active_plot
Plot timing information of time spent in the callback. This is similar to what a logic analyzer provides when probing an interrupt. cb_active_plot( start_ms,stop_ms,line_color='b')
sk_dsp_comm/pyaudio_helper.py
def cb_active_plot(self,start_ms,stop_ms,line_color='b'): """ Plot timing information of time spent in the callback. This is similar to what a logic analyzer provides when probing an interrupt. cb_active_plot( start_ms,stop_ms,line_color='b') """ # Find bounding k values that contain the [start_ms,stop_ms] k_min_idx = np.nonzero(np.ravel(np.array(self.DSP_tic)*1000 < start_ms))[0] if len(k_min_idx) < 1: k_min = 0 else: k_min = k_min_idx[-1] k_max_idx = np.nonzero(np.ravel(np.array(self.DSP_tic)*1000 > stop_ms))[0] if len(k_min_idx) < 1: k_max= len(self.DSP_tic) else: k_max = k_max_idx[0] for k in range(k_min,k_max): if k == 0: plt.plot([0,self.DSP_tic[k]*1000,self.DSP_tic[k]*1000, self.DSP_toc[k]*1000,self.DSP_toc[k]*1000], [0,0,1,1,0],'b') else: plt.plot([self.DSP_toc[k-1]*1000,self.DSP_tic[k]*1000,self.DSP_tic[k]*1000, self.DSP_toc[k]*1000,self.DSP_toc[k]*1000],[0,0,1,1,0],'b') plt.plot([self.DSP_toc[k_max-1]*1000,stop_ms],[0,0],'b') plt.xlim([start_ms,stop_ms]) plt.title(r'Time Spent in the callback') plt.ylabel(r'Timing') plt.xlabel(r'Time (ms)') plt.grid();
def cb_active_plot(self,start_ms,stop_ms,line_color='b'): """ Plot timing information of time spent in the callback. This is similar to what a logic analyzer provides when probing an interrupt. cb_active_plot( start_ms,stop_ms,line_color='b') """ # Find bounding k values that contain the [start_ms,stop_ms] k_min_idx = np.nonzero(np.ravel(np.array(self.DSP_tic)*1000 < start_ms))[0] if len(k_min_idx) < 1: k_min = 0 else: k_min = k_min_idx[-1] k_max_idx = np.nonzero(np.ravel(np.array(self.DSP_tic)*1000 > stop_ms))[0] if len(k_min_idx) < 1: k_max= len(self.DSP_tic) else: k_max = k_max_idx[0] for k in range(k_min,k_max): if k == 0: plt.plot([0,self.DSP_tic[k]*1000,self.DSP_tic[k]*1000, self.DSP_toc[k]*1000,self.DSP_toc[k]*1000], [0,0,1,1,0],'b') else: plt.plot([self.DSP_toc[k-1]*1000,self.DSP_tic[k]*1000,self.DSP_tic[k]*1000, self.DSP_toc[k]*1000,self.DSP_toc[k]*1000],[0,0,1,1,0],'b') plt.plot([self.DSP_toc[k_max-1]*1000,stop_ms],[0,0],'b') plt.xlim([start_ms,stop_ms]) plt.title(r'Time Spent in the callback') plt.ylabel(r'Timing') plt.xlabel(r'Time (ms)') plt.grid();
[ "Plot", "timing", "information", "of", "time", "spent", "in", "the", "callback", ".", "This", "is", "similar", "to", "what", "a", "logic", "analyzer", "provides", "when", "probing", "an", "interrupt", ".", "cb_active_plot", "(", "start_ms", "stop_ms", "line_color", "=", "b", ")" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L318-L351
[ "def", "cb_active_plot", "(", "self", ",", "start_ms", ",", "stop_ms", ",", "line_color", "=", "'b'", ")", ":", "# Find bounding k values that contain the [start_ms,stop_ms]\r", "k_min_idx", "=", "np", ".", "nonzero", "(", "np", ".", "ravel", "(", "np", ".", "array", "(", "self", ".", "DSP_tic", ")", "*", "1000", "<", "start_ms", ")", ")", "[", "0", "]", "if", "len", "(", "k_min_idx", ")", "<", "1", ":", "k_min", "=", "0", "else", ":", "k_min", "=", "k_min_idx", "[", "-", "1", "]", "k_max_idx", "=", "np", ".", "nonzero", "(", "np", ".", "ravel", "(", "np", ".", "array", "(", "self", ".", "DSP_tic", ")", "*", "1000", ">", "stop_ms", ")", ")", "[", "0", "]", "if", "len", "(", "k_min_idx", ")", "<", "1", ":", "k_max", "=", "len", "(", "self", ".", "DSP_tic", ")", "else", ":", "k_max", "=", "k_max_idx", "[", "0", "]", "for", "k", "in", "range", "(", "k_min", ",", "k_max", ")", ":", "if", "k", "==", "0", ":", "plt", ".", "plot", "(", "[", "0", ",", "self", ".", "DSP_tic", "[", "k", "]", "*", "1000", ",", "self", ".", "DSP_tic", "[", "k", "]", "*", "1000", ",", "self", ".", "DSP_toc", "[", "k", "]", "*", "1000", ",", "self", ".", "DSP_toc", "[", "k", "]", "*", "1000", "]", ",", "[", "0", ",", "0", ",", "1", ",", "1", ",", "0", "]", ",", "'b'", ")", "else", ":", "plt", ".", "plot", "(", "[", "self", ".", "DSP_toc", "[", "k", "-", "1", "]", "*", "1000", ",", "self", ".", "DSP_tic", "[", "k", "]", "*", "1000", ",", "self", ".", "DSP_tic", "[", "k", "]", "*", "1000", ",", "self", ".", "DSP_toc", "[", "k", "]", "*", "1000", ",", "self", ".", "DSP_toc", "[", "k", "]", "*", "1000", "]", ",", "[", "0", ",", "0", ",", "1", ",", "1", ",", "0", "]", ",", "'b'", ")", "plt", ".", "plot", "(", "[", "self", ".", "DSP_toc", "[", "k_max", "-", "1", "]", "*", "1000", ",", "stop_ms", "]", ",", "[", "0", ",", "0", "]", ",", "'b'", ")", "plt", ".", "xlim", "(", "[", "start_ms", ",", "stop_ms", "]", ")", "plt", ".", "title", "(", "r'Time Spent in the callback'", ")", "plt", ".", "ylabel", "(", "r'Timing'", ")", "plt", ".", "xlabel", "(", "r'Time (ms)'", ")", "plt", ".", "grid", "(", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
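A one-line follow-up to the timing sketch above: visualize when the callback was active during the first 100 ms of the run (start and stop arguments are in milliseconds).

io.cb_active_plot(0, 100)                   # needs a matplotlib display, e.g. %matplotlib inline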
DSP_io_stream.get_LR
Splits incoming packed stereo data into separate left and right channels and returns an array of left samples and an array of right samples Parameters ---------- in_data : input data from the streaming object in the callback function. Returns ------- left_in : array of incoming left channel samples right_in : array of incoming right channel samples
sk_dsp_comm/pyaudio_helper.py
def get_LR(self,in_data): """ Splits incoming packed stereo data into separate left and right channels and returns an array of left samples and an array of right samples Parameters ---------- in_data : input data from the streaming object in the callback function. Returns ------- left_in : array of incoming left channel samples right_in : array of incoming right channel samples """ for i in range(0,self.frame_length*2): if i % 2: self.right_in[(int)(i/2)] = in_data[i] else: self.left_in[(int)(i/2)] = in_data[i] return self.left_in, self.right_in
def get_LR(self,in_data): """ Splits incoming packed stereo data into separate left and right channels and returns an array of left samples and an array of right samples Parameters ---------- in_data : input data from the streaming object in the callback function. Returns ------- left_in : array of incoming left channel samples right_in : array of incoming right channel samples """ for i in range(0,self.frame_length*2): if i % 2: self.right_in[(int)(i/2)] = in_data[i] else: self.left_in[(int)(i/2)] = in_data[i] return self.left_in, self.right_in
[ "Splits", "incoming", "packed", "stereo", "data", "into", "separate", "left", "and", "right", "channels", "and", "returns", "an", "array", "of", "left", "samples", "and", "an", "array", "of", "right", "samples", "Parameters", "----------", "in_data", ":", "input", "data", "from", "the", "streaming", "object", "in", "the", "callback", "function", ".", "Returns", "-------", "left_in", ":", "array", "of", "incoming", "left", "channel", "samples", "right_in", ":", "array", "of", "incoming", "right", "channel", "samples" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L353-L373
[ "def", "get_LR", "(", "self", ",", "in_data", ")", ":", "for", "i", "in", "range", "(", "0", ",", "self", ".", "frame_length", "*", "2", ")", ":", "if", "i", "%", "2", ":", "self", ".", "right_in", "[", "(", "int", ")", "(", "i", "/", "2", ")", "]", "=", "in_data", "[", "i", "]", "else", ":", "self", ".", "left_in", "[", "(", "int", ")", "(", "i", "/", "2", ")", "]", "=", "in_data", "[", "i", "]", "return", "self", ".", "left_in", ",", "self", ".", "right_in" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
DSP_io_stream.pack_LR
Packs separate left and right channel data into one array to output and returns the output. Parameters ---------- left_out : left channel array of samples going to output right_out : right channel array of samples going to output Returns ------- out : packed left and right channel array of samples
sk_dsp_comm/pyaudio_helper.py
def pack_LR(self,left_out,right_out): """ Packs separate left and right channel data into one array to output and returns the output. Parameters ---------- left_out : left channel array of samples going to output right_out : right channel array of samples going to output Returns ------- out : packed left and right channel array of samples """ for i in range(0,self.frame_length*2): if i % 2: self.out[i] = right_out[(int)(i/2)] else: self.out[i] = left_out[(int)(i/2)] return self.out
def pack_LR(self,left_out,right_out): """ Packs separate left and right channel data into one array to output and returns the output. Parameters ---------- left_out : left channel array of samples going to output right_out : right channel array of samples going to output Returns ------- out : packed left and right channel array of samples """ for i in range(0,self.frame_length*2): if i % 2: self.out[i] = right_out[(int)(i/2)] else: self.out[i] = left_out[(int)(i/2)] return self.out
[ "Packs", "separate", "left", "and", "right", "channel", "data", "into", "one", "array", "to", "output", "and", "returns", "the", "output", ".", "Parameters", "----------", "left_out", ":", "left", "channel", "array", "of", "samples", "going", "to", "output", "right_out", ":", "right", "channel", "array", "of", "samples", "going", "to", "output", "Returns", "-------", "out", ":", "packed", "left", "and", "right", "channel", "array", "of", "samples" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/pyaudio_helper.py#L375-L394
[ "def", "pack_LR", "(", "self", ",", "left_out", ",", "right_out", ")", ":", "for", "i", "in", "range", "(", "0", ",", "self", ".", "frame_length", "*", "2", ")", ":", "if", "i", "%", "2", ":", "self", ".", "out", "[", "i", "]", "=", "right_out", "[", "(", "int", ")", "(", "i", "/", "2", ")", "]", "else", ":", "self", ".", "out", "[", "i", "]", "=", "left_out", "[", "(", "int", ")", "(", "i", "/", "2", ")", "]", "return", "self", ".", "out" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
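A hedged stereo sketch combining get_LR(), DSP_capture_add_samples_stereo() and pack_LR(): de-interleave the incoming frame, archive both channels, and re-interleave with left and right exchanged. The constructor arguments are again illustrative assumptions, and the stream must be opened with numChan=2.

def swap_channels(in_data, frame_count, time_info, status):
    frame = np.frombuffer(in_data, dtype=np.int16)
    left, right = io2.get_LR(frame)                    # split into L/R sample arrays
    io2.DSP_capture_add_samples_stereo(left, right)    # kept only while Tcapture > 0
    y = io2.pack_LR(right, left)                       # re-interleave with channels swapped
    return y.tobytes(), pyaudio.paContinue

io2 = pah.DSP_io_stream(swap_channels, in_idx=0, out_idx=1, fs=48000, Tcapture=2)
io2.stream(Tsec=5, numChan=2)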
IIR_lpf
Design an IIR lowpass filter using scipy.signal.iirdesign. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Parameters ---------- f_pass : Passband critical frequency in Hz f_stop : Stopband critical frequency in Hz Ripple_pass : Filter gain in dB at f_pass Atten_stop : Filter attenuation in dB at f_stop fs : Sampling rate in Hz ftype : Analog prototype from 'butter' 'cheby1', 'cheby2', 'ellip', and 'bessel' Returns ------- b : ndarray of the numerator coefficients a : ndarray of the denominator coefficients sos : 2D ndarray of second-order section coefficients Notes ----- Additionally a text string telling the user the filter order is written to the console, e.g., IIR cheby1 order = 8. Examples -------- >>> fs = 48000 >>> f_pass = 5000 >>> f_stop = 8000 >>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter') >>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1') >>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2') >>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip') Mark Wickert October 2016
sk_dsp_comm/iir_design_helper.py
def IIR_lpf(f_pass, f_stop, Ripple_pass, Atten_stop, fs = 1.00, ftype = 'butter'): """ Design an IIR lowpass filter using scipy.signal.iirdesign. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Parameters ---------- f_pass : Passband critical frequency in Hz f_stop : Stopband critical frequency in Hz Ripple_pass : Filter gain in dB at f_pass Atten_stop : Filter attenuation in dB at f_stop fs : Sampling rate in Hz ftype : Analog prototype from 'butter' 'cheby1', 'cheby2', 'ellip', and 'bessel' Returns ------- b : ndarray of the numerator coefficients a : ndarray of the denominator coefficients sos : 2D ndarray of second-order section coefficients Notes ----- Additionally a text string telling the user the filter order is written to the console, e.g., IIR cheby1 order = 8. Examples -------- >>> fs = 48000 >>> f_pass = 5000 >>> f_stop = 8000 >>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter') >>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1') >>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2') >>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip') Mark Wickert October 2016 """ b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs, Ripple_pass, Atten_stop, ftype = ftype, output='ba') sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs, Ripple_pass, Atten_stop, ftype = ftype, output='sos') tag = 'IIR ' + ftype + ' order' print('%s = %d.' % (tag,len(a)-1)) return b, a, sos
def IIR_lpf(f_pass, f_stop, Ripple_pass, Atten_stop, fs = 1.00, ftype = 'butter'): """ Design an IIR lowpass filter using scipy.signal.iirdesign. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Parameters ---------- f_pass : Passband critical frequency in Hz f_stop : Stopband critical frequency in Hz Ripple_pass : Filter gain in dB at f_pass Atten_stop : Filter attenuation in dB at f_stop fs : Sampling rate in Hz ftype : Analog prototype from 'butter' 'cheby1', 'cheby2', 'ellip', and 'bessel' Returns ------- b : ndarray of the numerator coefficients a : ndarray of the denominator coefficients sos : 2D ndarray of second-order section coefficients Notes ----- Additionally a text string telling the user the filter order is written to the console, e.g., IIR cheby1 order = 8. Examples -------- >>> fs = 48000 >>> f_pass = 5000 >>> f_stop = 8000 >>> b_but,a_but,sos_but = IIR_lpf(f_pass,f_stop,0.5,60,fs,'butter') >>> b_cheb1,a_cheb1,sos_cheb1 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby1') >>> b_cheb2,a_cheb2,sos_cheb2 = IIR_lpf(f_pass,f_stop,0.5,60,fs,'cheby2') >>> b_elli,a_elli,sos_elli = IIR_lpf(f_pass,f_stop,0.5,60,fs,'ellip') Mark Wickert October 2016 """ b,a = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs, Ripple_pass, Atten_stop, ftype = ftype, output='ba') sos = signal.iirdesign(2*float(f_pass)/fs, 2*float(f_stop)/fs, Ripple_pass, Atten_stop, ftype = ftype, output='sos') tag = 'IIR ' + ftype + ' order' print('%s = %d.' % (tag,len(a)-1)) return b, a, sos
[ "Design", "an", "IIR", "lowpass", "filter", "using", "scipy", ".", "signal", ".", "iirdesign", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_pass", "Hz", "f_stop", "Hz", "and", "the", "desired", "stopband", "attenuation", "d_stop", "in", "dB", "all", "relative", "to", "a", "sampling", "rate", "of", "fs", "Hz", ".", "Parameters", "----------", "f_pass", ":", "Passband", "critical", "frequency", "in", "Hz", "f_stop", ":", "Stopband", "critical", "frequency", "in", "Hz", "Ripple_pass", ":", "Filter", "gain", "in", "dB", "at", "f_pass", "Atten_stop", ":", "Filter", "attenuation", "in", "dB", "at", "f_stop", "fs", ":", "Sampling", "rate", "in", "Hz", "ftype", ":", "Analog", "prototype", "from", "butter", "cheby1", "cheby2", "ellip", "and", "bessel", "Returns", "-------", "b", ":", "ndarray", "of", "the", "numerator", "coefficients", "a", ":", "ndarray", "of", "the", "denominator", "coefficients", "sos", ":", "2D", "ndarray", "of", "second", "-", "order", "section", "coefficients", "Notes", "-----", "Additionally", "a", "text", "string", "telling", "the", "user", "the", "filter", "order", "is", "written", "to", "the", "console", "e", ".", "g", ".", "IIR", "cheby1", "order", "=", "8", ".", "Examples", "--------", ">>>", "fs", "=", "48000", ">>>", "f_pass", "=", "5000", ">>>", "f_stop", "=", "8000", ">>>", "b_but", "a_but", "sos_but", "=", "IIR_lpf", "(", "f_pass", "f_stop", "0", ".", "5", "60", "fs", "butter", ")", ">>>", "b_cheb1", "a_cheb1", "sos_cheb1", "=", "IIR_lpf", "(", "f_pass", "f_stop", "0", ".", "5", "60", "fs", "cheby1", ")", ">>>", "b_cheb2", "a_cheb2", "sos_cheb2", "=", "IIR_lpf", "(", "f_pass", "f_stop", "0", ".", "5", "60", "fs", "cheby2", ")", ">>>", "b_elli", "a_elli", "sos_elli", "=", "IIR_lpf", "(", "f_pass", "f_stop", "0", ".", "5", "60", "fs", "ellip", ")", "Mark", "Wickert", "October", "2016" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/iir_design_helper.py#L39-L90
[ "def", "IIR_lpf", "(", "f_pass", ",", "f_stop", ",", "Ripple_pass", ",", "Atten_stop", ",", "fs", "=", "1.00", ",", "ftype", "=", "'butter'", ")", ":", "b", ",", "a", "=", "signal", ".", "iirdesign", "(", "2", "*", "float", "(", "f_pass", ")", "/", "fs", ",", "2", "*", "float", "(", "f_stop", ")", "/", "fs", ",", "Ripple_pass", ",", "Atten_stop", ",", "ftype", "=", "ftype", ",", "output", "=", "'ba'", ")", "sos", "=", "signal", ".", "iirdesign", "(", "2", "*", "float", "(", "f_pass", ")", "/", "fs", ",", "2", "*", "float", "(", "f_stop", ")", "/", "fs", ",", "Ripple_pass", ",", "Atten_stop", ",", "ftype", "=", "ftype", ",", "output", "=", "'sos'", ")", "tag", "=", "'IIR '", "+", "ftype", "+", "' order'", "print", "(", "'%s = %d.'", "%", "(", "tag", ",", "len", "(", "a", ")", "-", "1", ")", ")", "return", "b", ",", "a", ",", "sos" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
IIR_bsf
Design an IIR bandstop filter using scipy.signal.iirdesign. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Mark Wickert October 2016
sk_dsp_comm/iir_design_helper.py
def IIR_bsf(f_pass1, f_stop1, f_stop2, f_pass2, Ripple_pass, Atten_stop, fs = 1.00, ftype = 'butter'): """ Design an IIR bandstop filter using scipy.signal.iirdesign. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Mark Wickert October 2016 """ b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs], [2*float(f_stop1)/fs, 2*float(f_stop2)/fs], Ripple_pass, Atten_stop, ftype = ftype, output='ba') sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs], [2*float(f_stop1)/fs, 2*float(f_stop2)/fs], Ripple_pass, Atten_stop, ftype =ftype, output='sos') tag = 'IIR ' + ftype + ' order' print('%s = %d.' % (tag,len(a)-1)) return b, a, sos
def IIR_bsf(f_pass1, f_stop1, f_stop2, f_pass2, Ripple_pass, Atten_stop, fs = 1.00, ftype = 'butter'): """ Design an IIR bandstop filter using scipy.signal.iirdesign. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Mark Wickert October 2016 """ b,a = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs], [2*float(f_stop1)/fs, 2*float(f_stop2)/fs], Ripple_pass, Atten_stop, ftype = ftype, output='ba') sos = signal.iirdesign([2*float(f_pass1)/fs, 2*float(f_pass2)/fs], [2*float(f_stop1)/fs, 2*float(f_stop2)/fs], Ripple_pass, Atten_stop, ftype =ftype, output='sos') tag = 'IIR ' + ftype + ' order' print('%s = %d.' % (tag,len(a)-1)) return b, a, sos
[ "Design", "an", "IIR", "bandstop", "filter", "using", "scipy", ".", "signal", ".", "iirdesign", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_pass", "Hz", "f_stop", "Hz", "and", "the", "desired", "stopband", "attenuation", "d_stop", "in", "dB", "all", "relative", "to", "a", "sampling", "rate", "of", "fs", "Hz", ".", "Mark", "Wickert", "October", "2016" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/iir_design_helper.py#L190-L211
[ "def", "IIR_bsf", "(", "f_pass1", ",", "f_stop1", ",", "f_stop2", ",", "f_pass2", ",", "Ripple_pass", ",", "Atten_stop", ",", "fs", "=", "1.00", ",", "ftype", "=", "'butter'", ")", ":", "b", ",", "a", "=", "signal", ".", "iirdesign", "(", "[", "2", "*", "float", "(", "f_pass1", ")", "/", "fs", ",", "2", "*", "float", "(", "f_pass2", ")", "/", "fs", "]", ",", "[", "2", "*", "float", "(", "f_stop1", ")", "/", "fs", ",", "2", "*", "float", "(", "f_stop2", ")", "/", "fs", "]", ",", "Ripple_pass", ",", "Atten_stop", ",", "ftype", "=", "ftype", ",", "output", "=", "'ba'", ")", "sos", "=", "signal", ".", "iirdesign", "(", "[", "2", "*", "float", "(", "f_pass1", ")", "/", "fs", ",", "2", "*", "float", "(", "f_pass2", ")", "/", "fs", "]", ",", "[", "2", "*", "float", "(", "f_stop1", ")", "/", "fs", ",", "2", "*", "float", "(", "f_stop2", ")", "/", "fs", "]", ",", "Ripple_pass", ",", "Atten_stop", ",", "ftype", "=", "ftype", ",", "output", "=", "'sos'", ")", "tag", "=", "'IIR '", "+", "ftype", "+", "' order'", "print", "(", "'%s = %d.'", "%", "(", "tag", ",", "len", "(", "a", ")", "-", "1", ")", ")", "return", "b", ",", "a", ",", "sos" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
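A hedged design-and-plot sketch mirroring the IIR_lpf docstring examples: design the same bandstop specification with two analog prototypes, then overlay their magnitude responses with freqz_resp_list (defined in the same module, next record), which accepts lists of coefficient arrays. The band edges are illustrative values only.

import sk_dsp_comm.iir_design_helper as iir_d

fs = 48000
# stop roughly 3-5 kHz with 60 dB attenuation and 0.5 dB passband ripple
b_but, a_but, sos_but = iir_d.IIR_bsf(2000, 3000, 5000, 6500, 0.5, 60, fs, 'butter')
b_ell, a_ell, sos_ell = iir_d.IIR_bsf(2000, 3000, 5000, 6500, 0.5, 60, fs, 'ellip')
iir_d.freqz_resp_list([b_but, b_ell], [a_but, a_ell], mode='dB', fs=fs)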
freqz_resp_list
A method for displaying digital filter frequency response magnitude, phase, and group delay. A plot is produced using matplotlib freq_resp(self,mode = 'dB',Npts = 1024) A method for displaying the filter frequency response magnitude, phase, and group delay. A plot is produced using matplotlib freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4)) b = ndarray of numerator coefficients a = ndarray of denominator coefficients mode = display mode: 'dB' magnitude, 'phase' in radians, or 'groupdelay_s' in samples and 'groupdelay_t' in sec, all versus frequency in Hz Npts = number of points to plot; default is 1024 fsize = figure size; default is (6,4) inches Mark Wickert, January 2015
sk_dsp_comm/iir_design_helper.py
def freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)): """ A method for displaying digital filter frequency response magnitude, phase, and group delay. A plot is produced using matplotlib freq_resp(self,mode = 'dB',Npts = 1024) A method for displaying the filter frequency response magnitude, phase, and group delay. A plot is produced using matplotlib freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4)) b = ndarray of numerator coefficients a = ndarray of denominator coefficents mode = display mode: 'dB' magnitude, 'phase' in radians, or 'groupdelay_s' in samples and 'groupdelay_t' in sec, all versus frequency in Hz Npts = number of points to plot; default is 1024 fsize = figure size; defult is (6,4) inches Mark Wickert, January 2015 """ if type(b) == list: # We have a list of filters N_filt = len(b) f = np.arange(0,Npts)/(2.0*Npts) for n in range(N_filt): w,H = signal.freqz(b[n],a[n],2*np.pi*f) if n == 0: plt.figure(figsize=fsize) if mode.lower() == 'db': plt.plot(f*fs,20*np.log10(np.abs(H))) if n == N_filt-1: plt.xlabel('Frequency (Hz)') plt.ylabel('Gain (dB)') plt.title('Frequency Response - Magnitude') elif mode.lower() == 'phase': plt.plot(f*fs,np.angle(H)) if n == N_filt-1: plt.xlabel('Frequency (Hz)') plt.ylabel('Phase (rad)') plt.title('Frequency Response - Phase') elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'): """ Notes ----- Since this calculation involves finding the derivative of the phase response, care must be taken at phase wrapping points and when the phase jumps by +/-pi, which occurs when the amplitude response changes sign. Since the amplitude response is zero when the sign changes, the jumps do not alter the group delay results. """ theta = np.unwrap(np.angle(H)) # Since theta for an FIR filter is likely to have many pi phase # jumps too, we unwrap a second time 2*theta and divide by 2 theta2 = np.unwrap(2*theta)/2. theta_dif = np.diff(theta2) f_diff = np.diff(f) Tg = -np.diff(theta2)/np.diff(w) # For gain almost zero set groupdelay = 0 idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0] Tg[idx] = np.zeros(len(idx)) max_Tg = np.max(Tg) #print(max_Tg) if mode.lower() == 'groupdelay_t': max_Tg /= fs plt.plot(f[:-1]*fs,Tg/fs) plt.ylim([0,1.2*max_Tg]) else: plt.plot(f[:-1]*fs,Tg) plt.ylim([0,1.2*max_Tg]) if n == N_filt-1: plt.xlabel('Frequency (Hz)') if mode.lower() == 'groupdelay_t': plt.ylabel('Group Delay (s)') else: plt.ylabel('Group Delay (samples)') plt.title('Frequency Response - Group Delay') else: s1 = 'Error, mode must be "dB", "phase, ' s2 = '"groupdelay_s", or "groupdelay_t"' print(s1 + s2)
def freqz_resp_list(b,a=np.array([1]),mode = 'dB',fs=1.0,Npts = 1024,fsize=(6,4)): """ A method for displaying digital filter frequency response magnitude, phase, and group delay. A plot is produced using matplotlib freq_resp(self,mode = 'dB',Npts = 1024) A method for displaying the filter frequency response magnitude, phase, and group delay. A plot is produced using matplotlib freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4)) b = ndarray of numerator coefficients a = ndarray of denominator coefficents mode = display mode: 'dB' magnitude, 'phase' in radians, or 'groupdelay_s' in samples and 'groupdelay_t' in sec, all versus frequency in Hz Npts = number of points to plot; default is 1024 fsize = figure size; defult is (6,4) inches Mark Wickert, January 2015 """ if type(b) == list: # We have a list of filters N_filt = len(b) f = np.arange(0,Npts)/(2.0*Npts) for n in range(N_filt): w,H = signal.freqz(b[n],a[n],2*np.pi*f) if n == 0: plt.figure(figsize=fsize) if mode.lower() == 'db': plt.plot(f*fs,20*np.log10(np.abs(H))) if n == N_filt-1: plt.xlabel('Frequency (Hz)') plt.ylabel('Gain (dB)') plt.title('Frequency Response - Magnitude') elif mode.lower() == 'phase': plt.plot(f*fs,np.angle(H)) if n == N_filt-1: plt.xlabel('Frequency (Hz)') plt.ylabel('Phase (rad)') plt.title('Frequency Response - Phase') elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'): """ Notes ----- Since this calculation involves finding the derivative of the phase response, care must be taken at phase wrapping points and when the phase jumps by +/-pi, which occurs when the amplitude response changes sign. Since the amplitude response is zero when the sign changes, the jumps do not alter the group delay results. """ theta = np.unwrap(np.angle(H)) # Since theta for an FIR filter is likely to have many pi phase # jumps too, we unwrap a second time 2*theta and divide by 2 theta2 = np.unwrap(2*theta)/2. theta_dif = np.diff(theta2) f_diff = np.diff(f) Tg = -np.diff(theta2)/np.diff(w) # For gain almost zero set groupdelay = 0 idx = np.nonzero(np.ravel(20*np.log10(H[:-1]) < -400))[0] Tg[idx] = np.zeros(len(idx)) max_Tg = np.max(Tg) #print(max_Tg) if mode.lower() == 'groupdelay_t': max_Tg /= fs plt.plot(f[:-1]*fs,Tg/fs) plt.ylim([0,1.2*max_Tg]) else: plt.plot(f[:-1]*fs,Tg) plt.ylim([0,1.2*max_Tg]) if n == N_filt-1: plt.xlabel('Frequency (Hz)') if mode.lower() == 'groupdelay_t': plt.ylabel('Group Delay (s)') else: plt.ylabel('Group Delay (samples)') plt.title('Frequency Response - Group Delay') else: s1 = 'Error, mode must be "dB", "phase, ' s2 = '"groupdelay_s", or "groupdelay_t"' print(s1 + s2)
[ "A", "method", "for", "displaying", "digital", "filter", "frequency", "response", "magnitude", "phase", "and", "group", "delay", ".", "A", "plot", "is", "produced", "using", "matplotlib", "freq_resp", "(", "self", "mode", "=", "dB", "Npts", "=", "1024", ")", "A", "method", "for", "displaying", "the", "filter", "frequency", "response", "magnitude", "phase", "and", "group", "delay", ".", "A", "plot", "is", "produced", "using", "matplotlib", "freqz_resp", "(", "b", "a", "=", "[", "1", "]", "mode", "=", "dB", "Npts", "=", "1024", "fsize", "=", "(", "6", "4", "))", "b", "=", "ndarray", "of", "numerator", "coefficients", "a", "=", "ndarray", "of", "denominator", "coefficents", "mode", "=", "display", "mode", ":", "dB", "magnitude", "phase", "in", "radians", "or", "groupdelay_s", "in", "samples", "and", "groupdelay_t", "in", "sec", "all", "versus", "frequency", "in", "Hz", "Npts", "=", "number", "of", "points", "to", "plot", ";", "default", "is", "1024", "fsize", "=", "figure", "size", ";", "defult", "is", "(", "6", "4", ")", "inches", "Mark", "Wickert", "January", "2015" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/iir_design_helper.py#L213-L298
[ "def", "freqz_resp_list", "(", "b", ",", "a", "=", "np", ".", "array", "(", "[", "1", "]", ")", ",", "mode", "=", "'dB'", ",", "fs", "=", "1.0", ",", "Npts", "=", "1024", ",", "fsize", "=", "(", "6", ",", "4", ")", ")", ":", "if", "type", "(", "b", ")", "==", "list", ":", "# We have a list of filters\r", "N_filt", "=", "len", "(", "b", ")", "f", "=", "np", ".", "arange", "(", "0", ",", "Npts", ")", "/", "(", "2.0", "*", "Npts", ")", "for", "n", "in", "range", "(", "N_filt", ")", ":", "w", ",", "H", "=", "signal", ".", "freqz", "(", "b", "[", "n", "]", ",", "a", "[", "n", "]", ",", "2", "*", "np", ".", "pi", "*", "f", ")", "if", "n", "==", "0", ":", "plt", ".", "figure", "(", "figsize", "=", "fsize", ")", "if", "mode", ".", "lower", "(", ")", "==", "'db'", ":", "plt", ".", "plot", "(", "f", "*", "fs", ",", "20", "*", "np", ".", "log10", "(", "np", ".", "abs", "(", "H", ")", ")", ")", "if", "n", "==", "N_filt", "-", "1", ":", "plt", ".", "xlabel", "(", "'Frequency (Hz)'", ")", "plt", ".", "ylabel", "(", "'Gain (dB)'", ")", "plt", ".", "title", "(", "'Frequency Response - Magnitude'", ")", "elif", "mode", ".", "lower", "(", ")", "==", "'phase'", ":", "plt", ".", "plot", "(", "f", "*", "fs", ",", "np", ".", "angle", "(", "H", ")", ")", "if", "n", "==", "N_filt", "-", "1", ":", "plt", ".", "xlabel", "(", "'Frequency (Hz)'", ")", "plt", ".", "ylabel", "(", "'Phase (rad)'", ")", "plt", ".", "title", "(", "'Frequency Response - Phase'", ")", "elif", "(", "mode", ".", "lower", "(", ")", "==", "'groupdelay_s'", ")", "or", "(", "mode", ".", "lower", "(", ")", "==", "'groupdelay_t'", ")", ":", "\"\"\"\r\n Notes\r\n -----\r\n\r\n Since this calculation involves finding the derivative of the\r\n phase response, care must be taken at phase wrapping points \r\n and when the phase jumps by +/-pi, which occurs when the \r\n amplitude response changes sign. 
Since the amplitude response\r\n is zero when the sign changes, the jumps do not alter the group \r\n delay results.\r\n \"\"\"", "theta", "=", "np", ".", "unwrap", "(", "np", ".", "angle", "(", "H", ")", ")", "# Since theta for an FIR filter is likely to have many pi phase\r", "# jumps too, we unwrap a second time 2*theta and divide by 2\r", "theta2", "=", "np", ".", "unwrap", "(", "2", "*", "theta", ")", "/", "2.", "theta_dif", "=", "np", ".", "diff", "(", "theta2", ")", "f_diff", "=", "np", ".", "diff", "(", "f", ")", "Tg", "=", "-", "np", ".", "diff", "(", "theta2", ")", "/", "np", ".", "diff", "(", "w", ")", "# For gain almost zero set groupdelay = 0\r", "idx", "=", "np", ".", "nonzero", "(", "np", ".", "ravel", "(", "20", "*", "np", ".", "log10", "(", "H", "[", ":", "-", "1", "]", ")", "<", "-", "400", ")", ")", "[", "0", "]", "Tg", "[", "idx", "]", "=", "np", ".", "zeros", "(", "len", "(", "idx", ")", ")", "max_Tg", "=", "np", ".", "max", "(", "Tg", ")", "#print(max_Tg)\r", "if", "mode", ".", "lower", "(", ")", "==", "'groupdelay_t'", ":", "max_Tg", "/=", "fs", "plt", ".", "plot", "(", "f", "[", ":", "-", "1", "]", "*", "fs", ",", "Tg", "/", "fs", ")", "plt", ".", "ylim", "(", "[", "0", ",", "1.2", "*", "max_Tg", "]", ")", "else", ":", "plt", ".", "plot", "(", "f", "[", ":", "-", "1", "]", "*", "fs", ",", "Tg", ")", "plt", ".", "ylim", "(", "[", "0", ",", "1.2", "*", "max_Tg", "]", ")", "if", "n", "==", "N_filt", "-", "1", ":", "plt", ".", "xlabel", "(", "'Frequency (Hz)'", ")", "if", "mode", ".", "lower", "(", ")", "==", "'groupdelay_t'", ":", "plt", ".", "ylabel", "(", "'Group Delay (s)'", ")", "else", ":", "plt", ".", "ylabel", "(", "'Group Delay (samples)'", ")", "plt", ".", "title", "(", "'Frequency Response - Group Delay'", ")", "else", ":", "s1", "=", "'Error, mode must be \"dB\", \"phase, '", "s2", "=", "'\"groupdelay_s\", or \"groupdelay_t\"'", "print", "(", "s1", "+", "s2", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
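For orientation, a minimal usage sketch for the freqz_resp_list record above. The import path follows the record's URL (sk_dsp_comm.iir_design_helper); the Butterworth prototypes, sample rate, and variable names are illustrative assumptions, not part of the record:

from scipy import signal
import matplotlib.pyplot as plt
import sk_dsp_comm.iir_design_helper as iir_d

fs = 8000.0
b1, a1 = signal.butter(4, 2 * 1000.0 / fs)   # 4th-order lowpass, 1 kHz cutoff
b2, a2 = signal.butter(8, 2 * 1000.0 / fs)   # 8th-order lowpass, same cutoff
# A list of numerator and denominator arrays overlays all responses on one figure
iir_d.freqz_resp_list([b1, b2], [a1, a2], mode='dB', fs=fs)
plt.show()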
valid
freqz_cas
Cascade frequency response Mark Wickert October 2016
sk_dsp_comm/iir_design_helper.py
def freqz_cas(sos,w): """ Cascade frequency response Mark Wickert October 2016 """ Ns,Mcol = sos.shape w,Hcas = signal.freqz(sos[0,:3],sos[0,3:],w) for k in range(1,Ns): w,Htemp = signal.freqz(sos[k,:3],sos[k,3:],w) Hcas *= Htemp return w, Hcas
def freqz_cas(sos,w): """ Cascade frequency response Mark Wickert October 2016 """ Ns,Mcol = sos.shape w,Hcas = signal.freqz(sos[0,:3],sos[0,3:],w) for k in range(1,Ns): w,Htemp = signal.freqz(sos[k,:3],sos[k,3:],w) Hcas *= Htemp return w, Hcas
[ "Cascade", "frequency", "response", "Mark", "Wickert", "October", "2016" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/iir_design_helper.py#L301-L312
[ "def", "freqz_cas", "(", "sos", ",", "w", ")", ":", "Ns", ",", "Mcol", "=", "sos", ".", "shape", "w", ",", "Hcas", "=", "signal", ".", "freqz", "(", "sos", "[", "0", ",", ":", "3", "]", ",", "sos", "[", "0", ",", "3", ":", "]", ",", "w", ")", "for", "k", "in", "range", "(", "1", ",", "Ns", ")", ":", "w", ",", "Htemp", "=", "signal", ".", "freqz", "(", "sos", "[", "k", ",", ":", "3", "]", ",", "sos", "[", "k", ",", "3", ":", "]", ",", "w", ")", "Hcas", "*=", "Htemp", "return", "w", ",", "Hcas" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
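A brief sketch of how freqz_cas might be driven with a SciPy-designed second-order-section matrix; the [b0, b1, b2, a0, a1, a2] row layout matches what the function indexes, while the specific design and frequency grid are assumptions:

import numpy as np
from scipy import signal
import sk_dsp_comm.iir_design_helper as iir_d

fs = 8000.0
sos = signal.butter(6, 2 * 1000.0 / fs, output='sos')   # three cascaded biquad sections
w = 2 * np.pi * np.arange(1024) / 2048.0                # radian frequencies on [0, pi)
w, H_cas = iir_d.freqz_cas(sos, w)                      # product of the per-section responses
print(20 * np.log10(np.abs(H_cas[0])))                  # DC gain in dB, roughly 0 for this lowpass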
valid
unique_cpx_roots
The average of the root values is used when multiplicity is greater than one. Mark Wickert October 2016
sk_dsp_comm/iir_design_helper.py
def unique_cpx_roots(rlist,tol = 0.001): """ The average of the root values is used when multiplicity is greater than one. Mark Wickert October 2016 """ uniq = [rlist[0]] mult = [1] for k in range(1,len(rlist)): N_uniq = len(uniq) for m in range(N_uniq): if abs(rlist[k]-uniq[m]) <= tol: mult[m] += 1 uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m]) break uniq = np.hstack((uniq,rlist[k])) mult = np.hstack((mult,[1])) return np.array(uniq), np.array(mult)
def unique_cpx_roots(rlist,tol = 0.001): """ The average of the root values is used when multiplicity is greater than one. Mark Wickert October 2016 """ uniq = [rlist[0]] mult = [1] for k in range(1,len(rlist)): N_uniq = len(uniq) for m in range(N_uniq): if abs(rlist[k]-uniq[m]) <= tol: mult[m] += 1 uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m]) break uniq = np.hstack((uniq,rlist[k])) mult = np.hstack((mult,[1])) return np.array(uniq), np.array(mult)
[ "The", "average", "of", "the", "root", "values", "is", "used", "when", "multiplicity", "is", "greater", "than", "one", ".", "Mark", "Wickert", "October", "2016" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/iir_design_helper.py#L403-L422
[ "def", "unique_cpx_roots", "(", "rlist", ",", "tol", "=", "0.001", ")", ":", "uniq", "=", "[", "rlist", "[", "0", "]", "]", "mult", "=", "[", "1", "]", "for", "k", "in", "range", "(", "1", ",", "len", "(", "rlist", ")", ")", ":", "N_uniq", "=", "len", "(", "uniq", ")", "for", "m", "in", "range", "(", "N_uniq", ")", ":", "if", "abs", "(", "rlist", "[", "k", "]", "-", "uniq", "[", "m", "]", ")", "<=", "tol", ":", "mult", "[", "m", "]", "+=", "1", "uniq", "[", "m", "]", "=", "(", "uniq", "[", "m", "]", "*", "(", "mult", "[", "m", "]", "-", "1", ")", "+", "rlist", "[", "k", "]", ")", "/", "float", "(", "mult", "[", "m", "]", ")", "break", "uniq", "=", "np", ".", "hstack", "(", "(", "uniq", ",", "rlist", "[", "k", "]", ")", ")", "mult", "=", "np", ".", "hstack", "(", "(", "mult", ",", "[", "1", "]", ")", ")", "return", "np", ".", "array", "(", "uniq", ")", ",", "np", ".", "array", "(", "mult", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
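A small, hedged example of exercising unique_cpx_roots on a hand-built root list; the values are illustrative, and the exact grouping and averaging depend on the tolerance argument described in the docstring:

import numpy as np
import sk_dsp_comm.iir_design_helper as iir_d

# Two nearly coincident roots and one distinct root; tol controls the grouping
rlist = np.array([0.5 + 0.5j, 0.5 + 0.5000004j, -0.25 + 0.0j])
uniq, mult = iir_d.unique_cpx_roots(rlist, tol=0.001)
print(uniq)   # grouped root values (averaged where the multiplicity exceeds one)
print(mult)   # the corresponding multiplicity counts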
valid
sos_zplane
Create an z-plane pole-zero plot. Create an z-plane pole-zero plot using the numerator and denominator z-domain system function coefficient ndarrays b and a respectively. Assume descending powers of z. Parameters ---------- sos : ndarray of the sos coefficients auto_scale : bool (default True) size : plot radius maximum when scale = False Returns ------- (M,N) : tuple of zero and pole counts + plot window Notes ----- This function tries to identify repeated poles and zeros and will place the multiplicity number above and to the right of the pole or zero. The difficulty is setting the tolerance for this detection. Currently it is set at 1e-3 via the function signal.unique_roots. Examples -------- >>> # Here the plot is generated using auto_scale >>> sos_zplane(sos) >>> # Here the plot is generated using manual scaling >>> sos_zplane(sos,False,1.5)
sk_dsp_comm/iir_design_helper.py
def sos_zplane(sos,auto_scale=True,size=2,tol = 0.001): """ Create an z-plane pole-zero plot. Create an z-plane pole-zero plot using the numerator and denominator z-domain system function coefficient ndarrays b and a respectively. Assume descending powers of z. Parameters ---------- sos : ndarray of the sos coefficients auto_scale : bool (default True) size : plot radius maximum when scale = False Returns ------- (M,N) : tuple of zero and pole counts + plot window Notes ----- This function tries to identify repeated poles and zeros and will place the multiplicity number above and to the right of the pole or zero. The difficulty is setting the tolerance for this detection. Currently it is set at 1e-3 via the function signal.unique_roots. Examples -------- >>> # Here the plot is generated using auto_scale >>> sos_zplane(sos) >>> # Here the plot is generated using manual scaling >>> sos_zplane(sos,False,1.5) """ Ns,Mcol = sos.shape # Extract roots from sos num and den removing z = 0 # roots due to first-order sections N_roots = [] for k in range(Ns): N_roots_tmp = np.roots(sos[k,:3]) if N_roots_tmp[1] == 0.: N_roots = np.hstack((N_roots,N_roots_tmp[0])) else: N_roots = np.hstack((N_roots,N_roots_tmp)) D_roots = [] for k in range(Ns): D_roots_tmp = np.roots(sos[k,3:]) if D_roots_tmp[1] == 0.: D_roots = np.hstack((D_roots,D_roots_tmp[0])) else: D_roots = np.hstack((D_roots,D_roots_tmp)) # Plot labels if multiplicity greater than 1 x_scale = 1.5*size y_scale = 1.5*size x_off = 0.02 y_off = 0.01 M = len(N_roots) N = len(D_roots) if auto_scale: if M > 0 and N > 0: size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1 elif M > 0: size = max(np.max(np.abs(N_roots)),1.0)+.1 elif N > 0: size = max(1.0,np.max(np.abs(D_roots)))+.1 else: size = 1.1 plt.figure(figsize=(5,5)) plt.axis('equal') r = np.linspace(0,2*np.pi,200) plt.plot(np.cos(r),np.sin(r),'r--') plt.plot([-size,size],[0,0],'k-.') plt.plot([0,0],[-size,size],'k-.') if M > 0: #N_roots = np.roots(b) N_uniq, N_mult=unique_cpx_roots(N_roots,tol=tol) plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8) idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0] for k in range(len(idx_N_mult)): x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]), ha='center',va='bottom',fontsize=10) if N > 0: #D_roots = np.roots(a) D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol) plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8) idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0] for k in range(len(idx_D_mult)): x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]), ha='center',va='bottom',fontsize=10) if M - N < 0: plt.plot(0.0,0.0,'bo',mfc='None',ms=8) elif M - N > 0: plt.plot(0.0,0.0,'kx',ms=8) if abs(M - N) > 1: plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)), ha='center',va='bottom',fontsize=10) plt.xlabel('Real Part') plt.ylabel('Imaginary Part') plt.title('Pole-Zero Plot') #plt.grid() plt.axis([-size,size,-size,size]) return M,N
def sos_zplane(sos,auto_scale=True,size=2,tol = 0.001): """ Create an z-plane pole-zero plot. Create an z-plane pole-zero plot using the numerator and denominator z-domain system function coefficient ndarrays b and a respectively. Assume descending powers of z. Parameters ---------- sos : ndarray of the sos coefficients auto_scale : bool (default True) size : plot radius maximum when scale = False Returns ------- (M,N) : tuple of zero and pole counts + plot window Notes ----- This function tries to identify repeated poles and zeros and will place the multiplicity number above and to the right of the pole or zero. The difficulty is setting the tolerance for this detection. Currently it is set at 1e-3 via the function signal.unique_roots. Examples -------- >>> # Here the plot is generated using auto_scale >>> sos_zplane(sos) >>> # Here the plot is generated using manual scaling >>> sos_zplane(sos,False,1.5) """ Ns,Mcol = sos.shape # Extract roots from sos num and den removing z = 0 # roots due to first-order sections N_roots = [] for k in range(Ns): N_roots_tmp = np.roots(sos[k,:3]) if N_roots_tmp[1] == 0.: N_roots = np.hstack((N_roots,N_roots_tmp[0])) else: N_roots = np.hstack((N_roots,N_roots_tmp)) D_roots = [] for k in range(Ns): D_roots_tmp = np.roots(sos[k,3:]) if D_roots_tmp[1] == 0.: D_roots = np.hstack((D_roots,D_roots_tmp[0])) else: D_roots = np.hstack((D_roots,D_roots_tmp)) # Plot labels if multiplicity greater than 1 x_scale = 1.5*size y_scale = 1.5*size x_off = 0.02 y_off = 0.01 M = len(N_roots) N = len(D_roots) if auto_scale: if M > 0 and N > 0: size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1 elif M > 0: size = max(np.max(np.abs(N_roots)),1.0)+.1 elif N > 0: size = max(1.0,np.max(np.abs(D_roots)))+.1 else: size = 1.1 plt.figure(figsize=(5,5)) plt.axis('equal') r = np.linspace(0,2*np.pi,200) plt.plot(np.cos(r),np.sin(r),'r--') plt.plot([-size,size],[0,0],'k-.') plt.plot([0,0],[-size,size],'k-.') if M > 0: #N_roots = np.roots(b) N_uniq, N_mult=unique_cpx_roots(N_roots,tol=tol) plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8) idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0] for k in range(len(idx_N_mult)): x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]), ha='center',va='bottom',fontsize=10) if N > 0: #D_roots = np.roots(a) D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol) plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8) idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0] for k in range(len(idx_D_mult)): x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]), ha='center',va='bottom',fontsize=10) if M - N < 0: plt.plot(0.0,0.0,'bo',mfc='None',ms=8) elif M - N > 0: plt.plot(0.0,0.0,'kx',ms=8) if abs(M - N) > 1: plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)), ha='center',va='bottom',fontsize=10) plt.xlabel('Real Part') plt.ylabel('Imaginary Part') plt.title('Pole-Zero Plot') #plt.grid() plt.axis([-size,size,-size,size]) return M,N
[ "Create", "an", "z", "-", "plane", "pole", "-", "zero", "plot", ".", "Create", "an", "z", "-", "plane", "pole", "-", "zero", "plot", "using", "the", "numerator", "and", "denominator", "z", "-", "domain", "system", "function", "coefficient", "ndarrays", "b", "and", "a", "respectively", ".", "Assume", "descending", "powers", "of", "z", ".", "Parameters", "----------", "sos", ":", "ndarray", "of", "the", "sos", "coefficients", "auto_scale", ":", "bool", "(", "default", "True", ")", "size", ":", "plot", "radius", "maximum", "when", "scale", "=", "False", "Returns", "-------", "(", "M", "N", ")", ":", "tuple", "of", "zero", "and", "pole", "counts", "+", "plot", "window", "Notes", "-----", "This", "function", "tries", "to", "identify", "repeated", "poles", "and", "zeros", "and", "will", "place", "the", "multiplicity", "number", "above", "and", "to", "the", "right", "of", "the", "pole", "or", "zero", ".", "The", "difficulty", "is", "setting", "the", "tolerance", "for", "this", "detection", ".", "Currently", "it", "is", "set", "at", "1e", "-", "3", "via", "the", "function", "signal", ".", "unique_roots", ".", "Examples", "--------", ">>>", "#", "Here", "the", "plot", "is", "generated", "using", "auto_scale", ">>>", "sos_zplane", "(", "sos", ")", ">>>", "#", "Here", "the", "plot", "is", "generated", "using", "manual", "scaling", ">>>", "sos_zplane", "(", "sos", "False", "1", ".", "5", ")" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/iir_design_helper.py#L433-L536
[ "def", "sos_zplane", "(", "sos", ",", "auto_scale", "=", "True", ",", "size", "=", "2", ",", "tol", "=", "0.001", ")", ":", "Ns", ",", "Mcol", "=", "sos", ".", "shape", "# Extract roots from sos num and den removing z = 0\r", "# roots due to first-order sections\r", "N_roots", "=", "[", "]", "for", "k", "in", "range", "(", "Ns", ")", ":", "N_roots_tmp", "=", "np", ".", "roots", "(", "sos", "[", "k", ",", ":", "3", "]", ")", "if", "N_roots_tmp", "[", "1", "]", "==", "0.", ":", "N_roots", "=", "np", ".", "hstack", "(", "(", "N_roots", ",", "N_roots_tmp", "[", "0", "]", ")", ")", "else", ":", "N_roots", "=", "np", ".", "hstack", "(", "(", "N_roots", ",", "N_roots_tmp", ")", ")", "D_roots", "=", "[", "]", "for", "k", "in", "range", "(", "Ns", ")", ":", "D_roots_tmp", "=", "np", ".", "roots", "(", "sos", "[", "k", ",", "3", ":", "]", ")", "if", "D_roots_tmp", "[", "1", "]", "==", "0.", ":", "D_roots", "=", "np", ".", "hstack", "(", "(", "D_roots", ",", "D_roots_tmp", "[", "0", "]", ")", ")", "else", ":", "D_roots", "=", "np", ".", "hstack", "(", "(", "D_roots", ",", "D_roots_tmp", ")", ")", "# Plot labels if multiplicity greater than 1\r", "x_scale", "=", "1.5", "*", "size", "y_scale", "=", "1.5", "*", "size", "x_off", "=", "0.02", "y_off", "=", "0.01", "M", "=", "len", "(", "N_roots", ")", "N", "=", "len", "(", "D_roots", ")", "if", "auto_scale", ":", "if", "M", ">", "0", "and", "N", ">", "0", ":", "size", "=", "max", "(", "np", ".", "max", "(", "np", ".", "abs", "(", "N_roots", ")", ")", ",", "np", ".", "max", "(", "np", ".", "abs", "(", "D_roots", ")", ")", ")", "+", ".1", "elif", "M", ">", "0", ":", "size", "=", "max", "(", "np", ".", "max", "(", "np", ".", "abs", "(", "N_roots", ")", ")", ",", "1.0", ")", "+", ".1", "elif", "N", ">", "0", ":", "size", "=", "max", "(", "1.0", ",", "np", ".", "max", "(", "np", ".", "abs", "(", "D_roots", ")", ")", ")", "+", ".1", "else", ":", "size", "=", "1.1", "plt", ".", "figure", "(", "figsize", "=", "(", "5", ",", "5", ")", ")", "plt", ".", "axis", "(", "'equal'", ")", "r", "=", "np", ".", "linspace", "(", "0", ",", "2", "*", "np", ".", "pi", ",", "200", ")", "plt", ".", "plot", "(", "np", ".", "cos", "(", "r", ")", ",", "np", ".", "sin", "(", "r", ")", ",", "'r--'", ")", "plt", ".", "plot", "(", "[", "-", "size", ",", "size", "]", ",", "[", "0", ",", "0", "]", ",", "'k-.'", ")", "plt", ".", "plot", "(", "[", "0", ",", "0", "]", ",", "[", "-", "size", ",", "size", "]", ",", "'k-.'", ")", "if", "M", ">", "0", ":", "#N_roots = np.roots(b)\r", "N_uniq", ",", "N_mult", "=", "unique_cpx_roots", "(", "N_roots", ",", "tol", "=", "tol", ")", "plt", ".", "plot", "(", "np", ".", "real", "(", "N_uniq", ")", ",", "np", ".", "imag", "(", "N_uniq", ")", ",", "'ko'", ",", "mfc", "=", "'None'", ",", "ms", "=", "8", ")", "idx_N_mult", "=", "np", ".", "nonzero", "(", "np", ".", "ravel", "(", "N_mult", ">", "1", ")", ")", "[", "0", "]", "for", "k", "in", "range", "(", "len", "(", "idx_N_mult", ")", ")", ":", "x_loc", "=", "np", ".", "real", "(", "N_uniq", "[", "idx_N_mult", "[", "k", "]", "]", ")", "+", "x_off", "*", "x_scale", "y_loc", "=", "np", ".", "imag", "(", "N_uniq", "[", "idx_N_mult", "[", "k", "]", "]", ")", "+", "y_off", "*", "y_scale", "plt", ".", "text", "(", "x_loc", ",", "y_loc", ",", "str", "(", "N_mult", "[", "idx_N_mult", "[", "k", "]", "]", ")", ",", "ha", "=", "'center'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ")", "if", "N", ">", "0", ":", "#D_roots = np.roots(a)\r", "D_uniq", ",", "D_mult", "=", "unique_cpx_roots", "(", "D_roots", 
",", "tol", "=", "tol", ")", "plt", ".", "plot", "(", "np", ".", "real", "(", "D_uniq", ")", ",", "np", ".", "imag", "(", "D_uniq", ")", ",", "'kx'", ",", "ms", "=", "8", ")", "idx_D_mult", "=", "np", ".", "nonzero", "(", "np", ".", "ravel", "(", "D_mult", ">", "1", ")", ")", "[", "0", "]", "for", "k", "in", "range", "(", "len", "(", "idx_D_mult", ")", ")", ":", "x_loc", "=", "np", ".", "real", "(", "D_uniq", "[", "idx_D_mult", "[", "k", "]", "]", ")", "+", "x_off", "*", "x_scale", "y_loc", "=", "np", ".", "imag", "(", "D_uniq", "[", "idx_D_mult", "[", "k", "]", "]", ")", "+", "y_off", "*", "y_scale", "plt", ".", "text", "(", "x_loc", ",", "y_loc", ",", "str", "(", "D_mult", "[", "idx_D_mult", "[", "k", "]", "]", ")", ",", "ha", "=", "'center'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ")", "if", "M", "-", "N", "<", "0", ":", "plt", ".", "plot", "(", "0.0", ",", "0.0", ",", "'bo'", ",", "mfc", "=", "'None'", ",", "ms", "=", "8", ")", "elif", "M", "-", "N", ">", "0", ":", "plt", ".", "plot", "(", "0.0", ",", "0.0", ",", "'kx'", ",", "ms", "=", "8", ")", "if", "abs", "(", "M", "-", "N", ")", ">", "1", ":", "plt", ".", "text", "(", "x_off", "*", "x_scale", ",", "y_off", "*", "y_scale", ",", "str", "(", "abs", "(", "M", "-", "N", ")", ")", ",", "ha", "=", "'center'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ")", "plt", ".", "xlabel", "(", "'Real Part'", ")", "plt", ".", "ylabel", "(", "'Imaginary Part'", ")", "plt", ".", "title", "(", "'Pole-Zero Plot'", ")", "#plt.grid()\r", "plt", ".", "axis", "(", "[", "-", "size", ",", "size", ",", "-", "size", ",", "size", "]", ")", "return", "M", ",", "N" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
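A minimal sketch for sos_zplane, assuming a Butterworth cascade from scipy.signal.butter(..., output='sos') and the module import path taken from the record:

from scipy import signal
import matplotlib.pyplot as plt
import sk_dsp_comm.iir_design_helper as iir_d

fs = 8000.0
sos = signal.butter(6, 2 * 1500.0 / fs, output='sos')   # 6th-order Butterworth lowpass
M, N = iir_d.sos_zplane(sos, auto_scale=True)           # draws the pole-zero plot
print(M, N)                                             # zero and pole counts it found
plt.show()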
valid
firwin_bpf
Design a windowed FIR bandpass filter in terms of passband critical frequencies f1 < f2 in Hz relative to sampling rate fs in Hz. The number of taps must be provided. Mark Wickert October 2016
sk_dsp_comm/fir_design_helper.py
def firwin_bpf(N_taps, f1, f2, fs = 1.0, pass_zero=False): """ Design a windowed FIR bandpass filter in terms of passband critical frequencies f1 < f2 in Hz relative to sampling rate fs in Hz. The number of taps must be provided. Mark Wickert October 2016 """ return signal.firwin(N_taps,2*(f1,f2)/fs,pass_zero=pass_zero)
def firwin_bpf(N_taps, f1, f2, fs = 1.0, pass_zero=False): """ Design a windowed FIR bandpass filter in terms of passband critical frequencies f1 < f2 in Hz relative to sampling rate fs in Hz. The number of taps must be provided. Mark Wickert October 2016 """ return signal.firwin(N_taps,2*(f1,f2)/fs,pass_zero=pass_zero)
[ "Design", "a", "windowed", "FIR", "bandpass", "filter", "in", "terms", "of", "passband", "critical", "frequencies", "f1", "<", "f2", "in", "Hz", "relative", "to", "sampling", "rate", "fs", "in", "Hz", ".", "The", "number", "of", "taps", "must", "be", "provided", ".", "Mark", "Wickert", "October", "2016" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L48-L56
[ "def", "firwin_bpf", "(", "N_taps", ",", "f1", ",", "f2", ",", "fs", "=", "1.0", ",", "pass_zero", "=", "False", ")", ":", "return", "signal", ".", "firwin", "(", "N_taps", ",", "2", "*", "(", "f1", ",", "f2", ")", "/", "fs", ",", "pass_zero", "=", "pass_zero", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
valid
firwin_kaiser_lpf
Design an FIR lowpass filter using the sinc() kernel and a Kaiser window. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Note: the passband ripple cannot be set independent of the stopband attenuation. Mark Wickert October 2016
sk_dsp_comm/fir_design_helper.py
def firwin_kaiser_lpf(f_pass, f_stop, d_stop, fs = 1.0, N_bump=0): """ Design an FIR lowpass filter using the sinc() kernel and a Kaiser window. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Note: the passband ripple cannot be set independent of the stopband attenuation. Mark Wickert October 2016 """ wc = 2*np.pi*(f_pass + f_stop)/2/fs delta_w = 2*np.pi*(f_stop - f_pass)/fs # Find the filter order M = np.ceil((d_stop - 8)/(2.285*delta_w)) # Adjust filter order up or down as needed M += N_bump N_taps = M + 1 # Obtain the Kaiser window beta = signal.kaiser_beta(d_stop) w_k = signal.kaiser(N_taps,beta) n = np.arange(N_taps) b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k b_k /= np.sum(b_k) print('Kaiser Win filter taps = %d.' % N_taps) return b_k
def firwin_kaiser_lpf(f_pass, f_stop, d_stop, fs = 1.0, N_bump=0): """ Design an FIR lowpass filter using the sinc() kernel and a Kaiser window. The filter order is determined based on f_pass Hz, f_stop Hz, and the desired stopband attenuation d_stop in dB, all relative to a sampling rate of fs Hz. Note: the passband ripple cannot be set independent of the stopband attenuation. Mark Wickert October 2016 """ wc = 2*np.pi*(f_pass + f_stop)/2/fs delta_w = 2*np.pi*(f_stop - f_pass)/fs # Find the filter order M = np.ceil((d_stop - 8)/(2.285*delta_w)) # Adjust filter order up or down as needed M += N_bump N_taps = M + 1 # Obtain the Kaiser window beta = signal.kaiser_beta(d_stop) w_k = signal.kaiser(N_taps,beta) n = np.arange(N_taps) b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k b_k /= np.sum(b_k) print('Kaiser Win filter taps = %d.' % N_taps) return b_k
[ "Design", "an", "FIR", "lowpass", "filter", "using", "the", "sinc", "()", "kernel", "and", "a", "Kaiser", "window", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_pass", "Hz", "f_stop", "Hz", "and", "the", "desired", "stopband", "attenuation", "d_stop", "in", "dB", "all", "relative", "to", "a", "sampling", "rate", "of", "fs", "Hz", ".", "Note", ":", "the", "passband", "ripple", "cannot", "be", "set", "independent", "of", "the", "stopband", "attenuation", ".", "Mark", "Wickert", "October", "2016" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L59-L84
[ "def", "firwin_kaiser_lpf", "(", "f_pass", ",", "f_stop", ",", "d_stop", ",", "fs", "=", "1.0", ",", "N_bump", "=", "0", ")", ":", "wc", "=", "2", "*", "np", ".", "pi", "*", "(", "f_pass", "+", "f_stop", ")", "/", "2", "/", "fs", "delta_w", "=", "2", "*", "np", ".", "pi", "*", "(", "f_stop", "-", "f_pass", ")", "/", "fs", "# Find the filter order\r", "M", "=", "np", ".", "ceil", "(", "(", "d_stop", "-", "8", ")", "/", "(", "2.285", "*", "delta_w", ")", ")", "# Adjust filter order up or down as needed\r", "M", "+=", "N_bump", "N_taps", "=", "M", "+", "1", "# Obtain the Kaiser window\r", "beta", "=", "signal", ".", "kaiser_beta", "(", "d_stop", ")", "w_k", "=", "signal", ".", "kaiser", "(", "N_taps", ",", "beta", ")", "n", "=", "np", ".", "arange", "(", "N_taps", ")", "b_k", "=", "wc", "/", "np", ".", "pi", "*", "np", ".", "sinc", "(", "wc", "/", "np", ".", "pi", "*", "(", "n", "-", "M", "/", "2", ")", ")", "*", "w_k", "b_k", "/=", "np", ".", "sum", "(", "b_k", ")", "print", "(", "'Kaiser Win filter taps = %d.'", "%", "N_taps", ")", "return", "b_k" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
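A usage sketch for firwin_kaiser_lpf with assumed design targets; it presumes a SciPy release contemporary with the pinned commit, where scipy.signal still exposes kaiser and kaiser_beta:

import sk_dsp_comm.fir_design_helper as fir_d

fs = 8000.0
# 400 Hz passband edge, 700 Hz stopband edge, 50 dB stopband attenuation
b_k = fir_d.firwin_kaiser_lpf(f_pass=400.0, f_stop=700.0, d_stop=50.0, fs=fs)
print(len(b_k))   # the tap count the design settles on (also printed by the function)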
valid
firwin_kaiser_bpf
Design an FIR bandpass filter using the sinc() kernel and a Kaiser window. The filter order is determined based on f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the desired stopband attenuation d_stop in dB for both stopbands, all relative to a sampling rate of fs Hz. Note: the passband ripple cannot be set independent of the stopband attenuation. Mark Wickert October 2016
sk_dsp_comm/fir_design_helper.py
def firwin_kaiser_bpf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop, fs = 1.0, N_bump=0): """ Design an FIR bandpass filter using the sinc() kernel and a Kaiser window. The filter order is determined based on f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the desired stopband attenuation d_stop in dB for both stopbands, all relative to a sampling rate of fs Hz. Note: the passband ripple cannot be set independent of the stopband attenuation. Mark Wickert October 2016 """ # Design BPF starting from simple LPF equivalent # The upper and lower stopbands are assumed to have # the same attenuation level. The LPF equivalent critical # frequencies: f_pass = (f_pass2 - f_pass1)/2 f_stop = (f_stop2 - f_stop1)/2 # Continue to design equivalent LPF wc = 2*np.pi*(f_pass + f_stop)/2/fs delta_w = 2*np.pi*(f_stop - f_pass)/fs # Find the filter order M = np.ceil((d_stop - 8)/(2.285*delta_w)) # Adjust filter order up or down as needed M += N_bump N_taps = M + 1 # Obtain the Kaiser window beta = signal.kaiser_beta(d_stop) w_k = signal.kaiser(N_taps,beta) n = np.arange(N_taps) b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k b_k /= np.sum(b_k) # Transform LPF to BPF f0 = (f_pass2 + f_pass1)/2 w0 = 2*np.pi*f0/fs n = np.arange(len(b_k)) b_k_bp = 2*b_k*np.cos(w0*(n-M/2)) print('Kaiser Win filter taps = %d.' % N_taps) return b_k_bp
def firwin_kaiser_bpf(f_stop1, f_pass1, f_pass2, f_stop2, d_stop, fs = 1.0, N_bump=0): """ Design an FIR bandpass filter using the sinc() kernel and a Kaiser window. The filter order is determined based on f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the desired stopband attenuation d_stop in dB for both stopbands, all relative to a sampling rate of fs Hz. Note: the passband ripple cannot be set independent of the stopband attenuation. Mark Wickert October 2016 """ # Design BPF starting from simple LPF equivalent # The upper and lower stopbands are assumed to have # the same attenuation level. The LPF equivalent critical # frequencies: f_pass = (f_pass2 - f_pass1)/2 f_stop = (f_stop2 - f_stop1)/2 # Continue to design equivalent LPF wc = 2*np.pi*(f_pass + f_stop)/2/fs delta_w = 2*np.pi*(f_stop - f_pass)/fs # Find the filter order M = np.ceil((d_stop - 8)/(2.285*delta_w)) # Adjust filter order up or down as needed M += N_bump N_taps = M + 1 # Obtain the Kaiser window beta = signal.kaiser_beta(d_stop) w_k = signal.kaiser(N_taps,beta) n = np.arange(N_taps) b_k = wc/np.pi*np.sinc(wc/np.pi*(n-M/2)) * w_k b_k /= np.sum(b_k) # Transform LPF to BPF f0 = (f_pass2 + f_pass1)/2 w0 = 2*np.pi*f0/fs n = np.arange(len(b_k)) b_k_bp = 2*b_k*np.cos(w0*(n-M/2)) print('Kaiser Win filter taps = %d.' % N_taps) return b_k_bp
[ "Design", "an", "FIR", "bandpass", "filter", "using", "the", "sinc", "()", "kernel", "and", "a", "Kaiser", "window", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_stop1", "Hz", "f_pass1", "Hz", "f_pass2", "Hz", "f_stop2", "Hz", "and", "the", "desired", "stopband", "attenuation", "d_stop", "in", "dB", "for", "both", "stopbands", "all", "relative", "to", "a", "sampling", "rate", "of", "fs", "Hz", ".", "Note", ":", "the", "passband", "ripple", "cannot", "be", "set", "independent", "of", "the", "stopband", "attenuation", ".", "Mark", "Wickert", "October", "2016" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L122-L161
[ "def", "firwin_kaiser_bpf", "(", "f_stop1", ",", "f_pass1", ",", "f_pass2", ",", "f_stop2", ",", "d_stop", ",", "fs", "=", "1.0", ",", "N_bump", "=", "0", ")", ":", "# Design BPF starting from simple LPF equivalent\r", "# The upper and lower stopbands are assumed to have \r", "# the same attenuation level. The LPF equivalent critical\r", "# frequencies:\r", "f_pass", "=", "(", "f_pass2", "-", "f_pass1", ")", "/", "2", "f_stop", "=", "(", "f_stop2", "-", "f_stop1", ")", "/", "2", "# Continue to design equivalent LPF\r", "wc", "=", "2", "*", "np", ".", "pi", "*", "(", "f_pass", "+", "f_stop", ")", "/", "2", "/", "fs", "delta_w", "=", "2", "*", "np", ".", "pi", "*", "(", "f_stop", "-", "f_pass", ")", "/", "fs", "# Find the filter order\r", "M", "=", "np", ".", "ceil", "(", "(", "d_stop", "-", "8", ")", "/", "(", "2.285", "*", "delta_w", ")", ")", "# Adjust filter order up or down as needed\r", "M", "+=", "N_bump", "N_taps", "=", "M", "+", "1", "# Obtain the Kaiser window\r", "beta", "=", "signal", ".", "kaiser_beta", "(", "d_stop", ")", "w_k", "=", "signal", ".", "kaiser", "(", "N_taps", ",", "beta", ")", "n", "=", "np", ".", "arange", "(", "N_taps", ")", "b_k", "=", "wc", "/", "np", ".", "pi", "*", "np", ".", "sinc", "(", "wc", "/", "np", ".", "pi", "*", "(", "n", "-", "M", "/", "2", ")", ")", "*", "w_k", "b_k", "/=", "np", ".", "sum", "(", "b_k", ")", "# Transform LPF to BPF\r", "f0", "=", "(", "f_pass2", "+", "f_pass1", ")", "/", "2", "w0", "=", "2", "*", "np", ".", "pi", "*", "f0", "/", "fs", "n", "=", "np", ".", "arange", "(", "len", "(", "b_k", ")", ")", "b_k_bp", "=", "2", "*", "b_k", "*", "np", ".", "cos", "(", "w0", "*", "(", "n", "-", "M", "/", "2", ")", ")", "print", "(", "'Kaiser Win filter taps = %d.'", "%", "N_taps", ")", "return", "b_k_bp" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
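A corresponding sketch for the bandpass variant, again with illustrative band edges and the same SciPy-version caveat as the lowpass example:

import sk_dsp_comm.fir_design_helper as fir_d

fs = 8000.0
# Stopbands below 500 Hz and above 2200 Hz, passband 700-2000 Hz, 60 dB attenuation
b_bp = fir_d.firwin_kaiser_bpf(f_stop1=500.0, f_pass1=700.0, f_pass2=2000.0,
                               f_stop2=2200.0, d_stop=60.0, fs=fs)
print(len(b_bp))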
valid
lowpass_order
Optimal FIR (equal ripple) Lowpass Order Determination Text reference: Ifeachor, Digital Signal Processing a Practical Approach, second edition, Prentice Hall, 2002. Journal paper reference: Herriman et al., Practical Design Rules for Optimum Finite Imulse Response Digitl Filters, Bell Syst. Tech. J., vol 52, pp. 769-799, July-Aug., 1973.IEEE, 1973.
sk_dsp_comm/fir_design_helper.py
def lowpass_order(f_pass, f_stop, dpass_dB, dstop_dB, fsamp = 1): """ Optimal FIR (equal ripple) Lowpass Order Determination Text reference: Ifeachor, Digital Signal Processing a Practical Approach, second edition, Prentice Hall, 2002. Journal paper reference: Herriman et al., Practical Design Rules for Optimum Finite Imulse Response Digitl Filters, Bell Syst. Tech. J., vol 52, pp. 769-799, July-Aug., 1973.IEEE, 1973. """ dpass = 1 - 10**(-dpass_dB/20) dstop = 10**(-dstop_dB/20) Df = (f_stop - f_pass)/fsamp a1 = 5.309e-3 a2 = 7.114e-2 a3 = -4.761e-1 a4 = -2.66e-3 a5 = -5.941e-1 a6 = -4.278e-1 Dinf = np.log10(dstop)*(a1*np.log10(dpass)**2 + a2*np.log10(dpass) + a3) \ + (a4*np.log10(dpass)**2 + a5*np.log10(dpass) + a6) f = 11.01217 + 0.51244*(np.log10(dpass) - np.log10(dstop)) N = Dinf/Df - f*Df + 1 ff = 2*np.array([0, f_pass, f_stop, fsamp/2])/fsamp aa = np.array([1, 1, 0, 0]) wts = np.array([1.0, dpass/dstop]) return int(N), ff, aa, wts
def lowpass_order(f_pass, f_stop, dpass_dB, dstop_dB, fsamp = 1): """ Optimal FIR (equal ripple) Lowpass Order Determination Text reference: Ifeachor, Digital Signal Processing a Practical Approach, second edition, Prentice Hall, 2002. Journal paper reference: Herriman et al., Practical Design Rules for Optimum Finite Imulse Response Digitl Filters, Bell Syst. Tech. J., vol 52, pp. 769-799, July-Aug., 1973.IEEE, 1973. """ dpass = 1 - 10**(-dpass_dB/20) dstop = 10**(-dstop_dB/20) Df = (f_stop - f_pass)/fsamp a1 = 5.309e-3 a2 = 7.114e-2 a3 = -4.761e-1 a4 = -2.66e-3 a5 = -5.941e-1 a6 = -4.278e-1 Dinf = np.log10(dstop)*(a1*np.log10(dpass)**2 + a2*np.log10(dpass) + a3) \ + (a4*np.log10(dpass)**2 + a5*np.log10(dpass) + a6) f = 11.01217 + 0.51244*(np.log10(dpass) - np.log10(dstop)) N = Dinf/Df - f*Df + 1 ff = 2*np.array([0, f_pass, f_stop, fsamp/2])/fsamp aa = np.array([1, 1, 0, 0]) wts = np.array([1.0, dpass/dstop]) return int(N), ff, aa, wts
[ "Optimal", "FIR", "(", "equal", "ripple", ")", "Lowpass", "Order", "Determination", "Text", "reference", ":", "Ifeachor", "Digital", "Signal", "Processing", "a", "Practical", "Approach", "second", "edition", "Prentice", "Hall", "2002", ".", "Journal", "paper", "reference", ":", "Herriman", "et", "al", ".", "Practical", "Design", "Rules", "for", "Optimum", "Finite", "Imulse", "Response", "Digitl", "Filters", "Bell", "Syst", ".", "Tech", ".", "J", ".", "vol", "52", "pp", ".", "769", "-", "799", "July", "-", "Aug", ".", "1973", ".", "IEEE", "1973", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L214-L241
[ "def", "lowpass_order", "(", "f_pass", ",", "f_stop", ",", "dpass_dB", ",", "dstop_dB", ",", "fsamp", "=", "1", ")", ":", "dpass", "=", "1", "-", "10", "**", "(", "-", "dpass_dB", "/", "20", ")", "dstop", "=", "10", "**", "(", "-", "dstop_dB", "/", "20", ")", "Df", "=", "(", "f_stop", "-", "f_pass", ")", "/", "fsamp", "a1", "=", "5.309e-3", "a2", "=", "7.114e-2", "a3", "=", "-", "4.761e-1", "a4", "=", "-", "2.66e-3", "a5", "=", "-", "5.941e-1", "a6", "=", "-", "4.278e-1", "Dinf", "=", "np", ".", "log10", "(", "dstop", ")", "*", "(", "a1", "*", "np", ".", "log10", "(", "dpass", ")", "**", "2", "+", "a2", "*", "np", ".", "log10", "(", "dpass", ")", "+", "a3", ")", "+", "(", "a4", "*", "np", ".", "log10", "(", "dpass", ")", "**", "2", "+", "a5", "*", "np", ".", "log10", "(", "dpass", ")", "+", "a6", ")", "f", "=", "11.01217", "+", "0.51244", "*", "(", "np", ".", "log10", "(", "dpass", ")", "-", "np", ".", "log10", "(", "dstop", ")", ")", "N", "=", "Dinf", "/", "Df", "-", "f", "*", "Df", "+", "1", "ff", "=", "2", "*", "np", ".", "array", "(", "[", "0", ",", "f_pass", ",", "f_stop", ",", "fsamp", "/", "2", "]", ")", "/", "fsamp", "aa", "=", "np", ".", "array", "(", "[", "1", ",", "1", ",", "0", ",", "0", "]", ")", "wts", "=", "np", ".", "array", "(", "[", "1.0", ",", "dpass", "/", "dstop", "]", ")", "return", "int", "(", "N", ")", ",", "ff", ",", "aa", ",", "wts" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
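Because this entry only returns design metadata, a short sketch helps show what the four return values look like; the band edges come back scaled by 2/fs so that 1.0 corresponds to fs/2, and the numeric targets below are assumptions:

import sk_dsp_comm.fir_design_helper as fir_d

fs = 8000.0
# Passband edge 1000 Hz, stopband edge 1500 Hz, 0.5 dB ripple, 60 dB attenuation
N, ff, aa, wts = fir_d.lowpass_order(1000.0, 1500.0, 0.5, 60.0, fsamp=fs)
print(N)     # estimated equal-ripple order
print(ff)    # band edges scaled by 2/fs, i.e. [0, 0.25, 0.375, 1.0] here
print(wts)   # relative passband/stopband weights intended for remez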
valid
bandpass_order
Optimal FIR (equal ripple) Bandpass Order Determination Text reference: Ifeachor, Digital Signal Processing a Practical Approach, second edition, Prentice Hall, 2002. Journal paper reference: F. Mintzer & B. Liu, Practical Design Rules for Optimum FIR Bandpass Digital Filters, IEEE Transactions on Acoustics and Speech, pp. 204-206, April,1979.
sk_dsp_comm/fir_design_helper.py
def bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2, dpass_dB, dstop_dB, fsamp = 1): """ Optimal FIR (equal ripple) Bandpass Order Determination Text reference: Ifeachor, Digital Signal Processing a Practical Approach, second edition, Prentice Hall, 2002. Journal paper reference: F. Mintzer & B. Liu, Practical Design Rules for Optimum FIR Bandpass Digital Filters, IEEE Transactions on Acoustics and Speech, pp. 204-206, April,1979. """ dpass = 1 - 10**(-dpass_dB/20) dstop = 10**(-dstop_dB/20) Df1 = (f_pass1 - f_stop1)/fsamp Df2 = (f_stop2 - f_pass2)/fsamp b1 = 0.01201 b2 = 0.09664 b3 = -0.51325 b4 = 0.00203 b5 = -0.5705 b6 = -0.44314 Df = min(Df1, Df2) Cinf = np.log10(dstop)*(b1*np.log10(dpass)**2 + b2*np.log10(dpass) + b3) \ + (b4*np.log10(dpass)**2 + b5*np.log10(dpass) + b6) g = -14.6*np.log10(dpass/dstop) - 16.9 N = Cinf/Df + g*Df + 1 ff = 2*np.array([0, f_stop1, f_pass1, f_pass2, f_stop2, fsamp/2])/fsamp aa = np.array([0, 0, 1, 1, 0, 0]) wts = np.array([dpass/dstop, 1, dpass/dstop]) return int(N), ff, aa, wts
def bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2, dpass_dB, dstop_dB, fsamp = 1): """ Optimal FIR (equal ripple) Bandpass Order Determination Text reference: Ifeachor, Digital Signal Processing a Practical Approach, second edition, Prentice Hall, 2002. Journal paper reference: F. Mintzer & B. Liu, Practical Design Rules for Optimum FIR Bandpass Digital Filters, IEEE Transactions on Acoustics and Speech, pp. 204-206, April,1979. """ dpass = 1 - 10**(-dpass_dB/20) dstop = 10**(-dstop_dB/20) Df1 = (f_pass1 - f_stop1)/fsamp Df2 = (f_stop2 - f_pass2)/fsamp b1 = 0.01201 b2 = 0.09664 b3 = -0.51325 b4 = 0.00203 b5 = -0.5705 b6 = -0.44314 Df = min(Df1, Df2) Cinf = np.log10(dstop)*(b1*np.log10(dpass)**2 + b2*np.log10(dpass) + b3) \ + (b4*np.log10(dpass)**2 + b5*np.log10(dpass) + b6) g = -14.6*np.log10(dpass/dstop) - 16.9 N = Cinf/Df + g*Df + 1 ff = 2*np.array([0, f_stop1, f_pass1, f_pass2, f_stop2, fsamp/2])/fsamp aa = np.array([0, 0, 1, 1, 0, 0]) wts = np.array([dpass/dstop, 1, dpass/dstop]) return int(N), ff, aa, wts
[ "Optimal", "FIR", "(", "equal", "ripple", ")", "Bandpass", "Order", "Determination", "Text", "reference", ":", "Ifeachor", "Digital", "Signal", "Processing", "a", "Practical", "Approach", "second", "edition", "Prentice", "Hall", "2002", ".", "Journal", "paper", "reference", ":", "F", ".", "Mintzer", "&", "B", ".", "Liu", "Practical", "Design", "Rules", "for", "Optimum", "FIR", "Bandpass", "Digital", "Filters", "IEEE", "Transactions", "on", "Acoustics", "and", "Speech", "pp", ".", "204", "-", "206", "April", "1979", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L244-L273
[ "def", "bandpass_order", "(", "f_stop1", ",", "f_pass1", ",", "f_pass2", ",", "f_stop2", ",", "dpass_dB", ",", "dstop_dB", ",", "fsamp", "=", "1", ")", ":", "dpass", "=", "1", "-", "10", "**", "(", "-", "dpass_dB", "/", "20", ")", "dstop", "=", "10", "**", "(", "-", "dstop_dB", "/", "20", ")", "Df1", "=", "(", "f_pass1", "-", "f_stop1", ")", "/", "fsamp", "Df2", "=", "(", "f_stop2", "-", "f_pass2", ")", "/", "fsamp", "b1", "=", "0.01201", "b2", "=", "0.09664", "b3", "=", "-", "0.51325", "b4", "=", "0.00203", "b5", "=", "-", "0.5705", "b6", "=", "-", "0.44314", "Df", "=", "min", "(", "Df1", ",", "Df2", ")", "Cinf", "=", "np", ".", "log10", "(", "dstop", ")", "*", "(", "b1", "*", "np", ".", "log10", "(", "dpass", ")", "**", "2", "+", "b2", "*", "np", ".", "log10", "(", "dpass", ")", "+", "b3", ")", "+", "(", "b4", "*", "np", ".", "log10", "(", "dpass", ")", "**", "2", "+", "b5", "*", "np", ".", "log10", "(", "dpass", ")", "+", "b6", ")", "g", "=", "-", "14.6", "*", "np", ".", "log10", "(", "dpass", "/", "dstop", ")", "-", "16.9", "N", "=", "Cinf", "/", "Df", "+", "g", "*", "Df", "+", "1", "ff", "=", "2", "*", "np", ".", "array", "(", "[", "0", ",", "f_stop1", ",", "f_pass1", ",", "f_pass2", ",", "f_stop2", ",", "fsamp", "/", "2", "]", ")", "/", "fsamp", "aa", "=", "np", ".", "array", "(", "[", "0", ",", "0", ",", "1", ",", "1", ",", "0", ",", "0", "]", ")", "wts", "=", "np", ".", "array", "(", "[", "dpass", "/", "dstop", ",", "1", ",", "dpass", "/", "dstop", "]", ")", "return", "int", "(", "N", ")", ",", "ff", ",", "aa", ",", "wts" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
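The bandpass estimator is used the same way; here is a sketch with assumed edges around a 1000-2000 Hz passband:

import sk_dsp_comm.fir_design_helper as fir_d

fs = 8000.0
# 1000-2000 Hz passband with 100 Hz transition bands on each side
N, ff, aa, wts = fir_d.bandpass_order(900.0, 1000.0, 2000.0, 2100.0, 0.5, 60.0, fsamp=fs)
print(N, ff, aa, wts)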
valid
fir_remez_lpf
Design an FIR lowpass filter using remez with order determination. The filter order is determined based on f_pass Hz, fstop Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018
sk_dsp_comm/fir_design_helper.py
def fir_remez_lpf(f_pass, f_stop, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR lowpass filter using remez with order determination. The filter order is determined based on f_pass Hz, fstop Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ n, ff, aa, wts = lowpass_order(f_pass, f_stop, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2) print('Remez filter taps = %d.' % N_taps) return b
def fir_remez_lpf(f_pass, f_stop, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR lowpass filter using remez with order determination. The filter order is determined based on f_pass Hz, fstop Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ n, ff, aa, wts = lowpass_order(f_pass, f_stop, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2) print('Remez filter taps = %d.' % N_taps) return b
[ "Design", "an", "FIR", "lowpass", "filter", "using", "remez", "with", "order", "determination", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_pass", "Hz", "fstop", "Hz", "and", "the", "desired", "passband", "ripple", "d_pass", "dB", "and", "stopband", "attenuation", "d_stop", "dB", "all", "relative", "to", "a", "sampling", "rate", "of", "fs", "Hz", ".", "Mark", "Wickert", "October", "2016", "updated", "October", "2018" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L308-L324
[ "def", "fir_remez_lpf", "(", "f_pass", ",", "f_stop", ",", "d_pass", ",", "d_stop", ",", "fs", "=", "1.0", ",", "N_bump", "=", "5", ")", ":", "n", ",", "ff", ",", "aa", ",", "wts", "=", "lowpass_order", "(", "f_pass", ",", "f_stop", ",", "d_pass", ",", "d_stop", ",", "fsamp", "=", "fs", ")", "# Bump up the order by N_bump to bring down the final d_pass & d_stop\r", "N_taps", "=", "n", "N_taps", "+=", "N_bump", "b", "=", "signal", ".", "remez", "(", "N_taps", ",", "ff", ",", "aa", "[", "0", ":", ":", "2", "]", ",", "wts", ",", "Hz", "=", "2", ")", "print", "(", "'Remez filter taps = %d.'", "%", "N_taps", ")", "return", "b" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
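A usage sketch for fir_remez_lpf with assumed specifications; it presumes a SciPy release in which signal.remez still accepts the Hz keyword, as at the pinned commit:

import sk_dsp_comm.fir_design_helper as fir_d

fs = 8000.0
# Same lowpass targets as the order-estimation sketch above
b = fir_d.fir_remez_lpf(f_pass=1000.0, f_stop=1500.0, d_pass=0.5, d_stop=60.0, fs=fs)
print(len(b))   # estimated order plus the default N_bump of 5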
valid
fir_remez_hpf
Design an FIR highpass filter using remez with order determination. The filter order is determined based on f_pass Hz, fstop Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018
sk_dsp_comm/fir_design_helper.py
def fir_remez_hpf(f_stop, f_pass, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR highpass filter using remez with order determination. The filter order is determined based on f_pass Hz, fstop Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ # Transform HPF critical frequencies to lowpass equivalent f_pass_eq = fs/2. - f_pass f_stop_eq = fs/2. - f_stop # Design LPF equivalent n, ff, aa, wts = lowpass_order(f_pass_eq, f_stop_eq, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2) # Transform LPF equivalent to HPF n = np.arange(len(b)) b *= (-1)**n print('Remez filter taps = %d.' % N_taps) return b
def fir_remez_hpf(f_stop, f_pass, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR highpass filter using remez with order determination. The filter order is determined based on f_pass Hz, fstop Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ # Transform HPF critical frequencies to lowpass equivalent f_pass_eq = fs/2. - f_pass f_stop_eq = fs/2. - f_stop # Design LPF equivalent n, ff, aa, wts = lowpass_order(f_pass_eq, f_stop_eq, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2) # Transform LPF equivalent to HPF n = np.arange(len(b)) b *= (-1)**n print('Remez filter taps = %d.' % N_taps) return b
[ "Design", "an", "FIR", "highpass", "filter", "using", "remez", "with", "order", "determination", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_pass", "Hz", "fstop", "Hz", "and", "the", "desired", "passband", "ripple", "d_pass", "dB", "and", "stopband", "attenuation", "d_stop", "dB", "all", "relative", "to", "a", "sampling", "rate", "of", "fs", "Hz", ".", "Mark", "Wickert", "October", "2016", "updated", "October", "2018" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L327-L350
[ "def", "fir_remez_hpf", "(", "f_stop", ",", "f_pass", ",", "d_pass", ",", "d_stop", ",", "fs", "=", "1.0", ",", "N_bump", "=", "5", ")", ":", "# Transform HPF critical frequencies to lowpass equivalent\r", "f_pass_eq", "=", "fs", "/", "2.", "-", "f_pass", "f_stop_eq", "=", "fs", "/", "2.", "-", "f_stop", "# Design LPF equivalent\r", "n", ",", "ff", ",", "aa", ",", "wts", "=", "lowpass_order", "(", "f_pass_eq", ",", "f_stop_eq", ",", "d_pass", ",", "d_stop", ",", "fsamp", "=", "fs", ")", "# Bump up the order by N_bump to bring down the final d_pass & d_stop\r", "N_taps", "=", "n", "N_taps", "+=", "N_bump", "b", "=", "signal", ".", "remez", "(", "N_taps", ",", "ff", ",", "aa", "[", "0", ":", ":", "2", "]", ",", "wts", ",", "Hz", "=", "2", ")", "# Transform LPF equivalent to HPF\r", "n", "=", "np", ".", "arange", "(", "len", "(", "b", ")", ")", "b", "*=", "(", "-", "1", ")", "**", "n", "print", "(", "'Remez filter taps = %d.'", "%", "N_taps", ")", "return", "b" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
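The highpass wrapper takes the stopband edge first; a sketch with assumed edges and the same SciPy-version caveat:

import sk_dsp_comm.fir_design_helper as fir_d

fs = 8000.0
# Stopband below 1000 Hz, passband above 1500 Hz
b_hp = fir_d.fir_remez_hpf(f_stop=1000.0, f_pass=1500.0, d_pass=0.5, d_stop=60.0, fs=fs)
print(len(b_hp))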
valid
fir_remez_bpf
Design an FIR bandpass filter using remez with order determination. The filter order is determined based on f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018
sk_dsp_comm/fir_design_helper.py
def fir_remez_bpf(f_stop1, f_pass1, f_pass2, f_stop2, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR bandpass filter using remez with order determination. The filter order is determined based on f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ n, ff, aa, wts = bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2) print('Remez filter taps = %d.' % N_taps) return b
def fir_remez_bpf(f_stop1, f_pass1, f_pass2, f_stop2, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR bandpass filter using remez with order determination. The filter order is determined based on f_stop1 Hz, f_pass1 Hz, f_pass2 Hz, f_stop2 Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ n, ff, aa, wts = bandpass_order(f_stop1, f_pass1, f_pass2, f_stop2, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts,Hz=2) print('Remez filter taps = %d.' % N_taps) return b
[ "Design", "an", "FIR", "bandpass", "filter", "using", "remez", "with", "order", "determination", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_stop1", "Hz", "f_pass1", "Hz", "f_pass2", "Hz", "f_stop2", "Hz", "and", "the", "desired", "passband", "ripple", "d_pass", "dB", "and", "stopband", "attenuation", "d_stop", "dB", "all", "relative", "to", "a", "sampling", "rate", "of", "fs", "Hz", ".", "Mark", "Wickert", "October", "2016", "updated", "October", "2018" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L353-L371
[ "def", "fir_remez_bpf", "(", "f_stop1", ",", "f_pass1", ",", "f_pass2", ",", "f_stop2", ",", "d_pass", ",", "d_stop", ",", "fs", "=", "1.0", ",", "N_bump", "=", "5", ")", ":", "n", ",", "ff", ",", "aa", ",", "wts", "=", "bandpass_order", "(", "f_stop1", ",", "f_pass1", ",", "f_pass2", ",", "f_stop2", ",", "d_pass", ",", "d_stop", ",", "fsamp", "=", "fs", ")", "# Bump up the order by N_bump to bring down the final d_pass & d_stop\r", "N_taps", "=", "n", "N_taps", "+=", "N_bump", "b", "=", "signal", ".", "remez", "(", "N_taps", ",", "ff", ",", "aa", "[", "0", ":", ":", "2", "]", ",", "wts", ",", "Hz", "=", "2", ")", "print", "(", "'Remez filter taps = %d.'", "%", "N_taps", ")", "return", "b" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
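A sketch for the bandpass wrapper, reusing the assumed edges from the bandpass_order example above:

import sk_dsp_comm.fir_design_helper as fir_d

fs = 8000.0
# 1000-2000 Hz passband, 100 Hz transition bands, 0.5 dB ripple, 60 dB attenuation
b_bp = fir_d.fir_remez_bpf(900.0, 1000.0, 2000.0, 2100.0, 0.5, 60.0, fs=fs)
print(len(b_bp))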
valid
fir_remez_bsf
Design an FIR bandstop filter using remez with order determination. The filter order is determined based on f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018
sk_dsp_comm/fir_design_helper.py
def fir_remez_bsf(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR bandstop filter using remez with order determination. The filter order is determined based on f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ n, ff, aa, wts = bandstop_order(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop # Initially make sure the number of taps is even so N_bump needs to be odd if np.mod(n,2) != 0: n += 1 N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts, Hz=2, maxiter = 25, grid_density = 16) print('N_bump must be odd to maintain odd filter length') print('Remez filter taps = %d.' % N_taps) return b
def fir_remez_bsf(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop, fs = 1.0, N_bump=5): """ Design an FIR bandstop filter using remez with order determination. The filter order is determined based on f_pass1 Hz, f_stop1 Hz, f_stop2 Hz, f_pass2 Hz, and the desired passband ripple d_pass dB and stopband attenuation d_stop dB all relative to a sampling rate of fs Hz. Mark Wickert October 2016, updated October 2018 """ n, ff, aa, wts = bandstop_order(f_pass1, f_stop1, f_stop2, f_pass2, d_pass, d_stop, fsamp=fs) # Bump up the order by N_bump to bring down the final d_pass & d_stop # Initially make sure the number of taps is even so N_bump needs to be odd if np.mod(n,2) != 0: n += 1 N_taps = n N_taps += N_bump b = signal.remez(N_taps, ff, aa[0::2], wts, Hz=2, maxiter = 25, grid_density = 16) print('N_bump must be odd to maintain odd filter length') print('Remez filter taps = %d.' % N_taps) return b
[ "Design", "an", "FIR", "bandstop", "filter", "using", "remez", "with", "order", "determination", ".", "The", "filter", "order", "is", "determined", "based", "on", "f_pass1", "Hz", "f_stop1", "Hz", "f_stop2", "Hz", "f_pass2", "Hz", "and", "the", "desired", "passband", "ripple", "d_pass", "dB", "and", "stopband", "attenuation", "d_stop", "dB", "all", "relative", "to", "a", "sampling", "rate", "of", "fs", "Hz", ".", "Mark", "Wickert", "October", "2016", "updated", "October", "2018" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fir_design_helper.py#L373-L396
[ "def", "fir_remez_bsf", "(", "f_pass1", ",", "f_stop1", ",", "f_stop2", ",", "f_pass2", ",", "d_pass", ",", "d_stop", ",", "fs", "=", "1.0", ",", "N_bump", "=", "5", ")", ":", "n", ",", "ff", ",", "aa", ",", "wts", "=", "bandstop_order", "(", "f_pass1", ",", "f_stop1", ",", "f_stop2", ",", "f_pass2", ",", "d_pass", ",", "d_stop", ",", "fsamp", "=", "fs", ")", "# Bump up the order by N_bump to bring down the final d_pass & d_stop\r", "# Initially make sure the number of taps is even so N_bump needs to be odd\r", "if", "np", ".", "mod", "(", "n", ",", "2", ")", "!=", "0", ":", "n", "+=", "1", "N_taps", "=", "n", "N_taps", "+=", "N_bump", "b", "=", "signal", ".", "remez", "(", "N_taps", ",", "ff", ",", "aa", "[", "0", ":", ":", "2", "]", ",", "wts", ",", "Hz", "=", "2", ",", "maxiter", "=", "25", ",", "grid_density", "=", "16", ")", "print", "(", "'N_bump must be odd to maintain odd filter length'", ")", "print", "(", "'Remez filter taps = %d.'", "%", "N_taps", ")", "return", "b" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
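A cautious sketch for the bandstop wrapper; it relies on the companion bandstop_order helper from the same module (not shown in this excerpt), and the band edges are assumptions:

import sk_dsp_comm.fir_design_helper as fir_d

fs = 8000.0
# Pass below 900 Hz and above 2100 Hz, reject 1000-2000 Hz
b_bs = fir_d.fir_remez_bsf(900.0, 1000.0, 2000.0, 2100.0, 0.5, 60.0, fs=fs)
print(len(b_bs))   # with the default N_bump the tap count comes out odd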
valid
CIC
A functional form implementation of a cascade of integrator comb (CIC) filters. Parameters ---------- M : Effective number of taps per section (typically the decimation factor). K : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth). Returns ------- b : FIR filter coefficients for a simple direct form implementation using the filter() function. Notes ----- Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring real coefficient multiplies via filter(). Mark Wickert July 2013
sk_dsp_comm/sigsys.py
def CIC(M, K): """ A functional form implementation of a cascade of integrator comb (CIC) filters. Parameters ---------- M : Effective number of taps per section (typically the decimation factor). K : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth). Returns ------- b : FIR filter coefficients for a simple direct form implementation using the filter() function. Notes ----- Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring real coefficient multiplies via filter(). Mark Wickert July 2013 """ if K == 1: b = np.ones(M) else: h = np.ones(M) b = h for i in range(1, K): b = signal.convolve(b, h) # cascade by convolving impulse responses # Make filter have unity gain at DC return b / np.sum(b)
def CIC(M, K): """ A functional form implementation of a cascade of integrator comb (CIC) filters. Parameters ---------- M : Effective number of taps per section (typically the decimation factor). K : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth). Returns ------- b : FIR filter coefficients for a simple direct form implementation using the filter() function. Notes ----- Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring real coefficient multiplies via filter(). Mark Wickert July 2013 """ if K == 1: b = np.ones(M) else: h = np.ones(M) b = h for i in range(1, K): b = signal.convolve(b, h) # cascade by convolving impulse responses # Make filter have unity gain at DC return b / np.sum(b)
[ "A", "functional", "form", "implementation", "of", "a", "cascade", "of", "integrator", "comb", "(", "CIC", ")", "filters", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L61-L92
[ "def", "CIC", "(", "M", ",", "K", ")", ":", "if", "K", "==", "1", ":", "b", "=", "np", ".", "ones", "(", "M", ")", "else", ":", "h", "=", "np", ".", "ones", "(", "M", ")", "b", "=", "h", "for", "i", "in", "range", "(", "1", ",", "K", ")", ":", "b", "=", "signal", ".", "convolve", "(", "b", ",", "h", ")", "# cascade by convolving impulse responses", "# Make filter have unity gain at DC", "return", "b", "/", "np", ".", "sum", "(", "b", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
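A short sketch of how the CIC FIR equivalent might be applied; the import follows the sigsys.py path field and the decimation factor is illustrative.

import numpy as np
from scipy import signal
from sk_dsp_comm import sigsys as ss

b_cic = ss.CIC(8, 3)                          # M = 8 taps per section, K = 3 sections
n = np.arange(10000)
x = np.cos(2*np.pi*0.01*n) + 0.1*np.random.randn(len(n))
y = signal.lfilter(b_cic, 1, x)               # unity-gain-at-DC lowpass
y_dec = y[::8]                                # then decimate by M, as in a DDC

Since the returned coefficients are normalized to unit DC gain, the decimated output keeps the same scale as the input.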
valid
ten_band_eq_filt
Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB. The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate is assumed to be 44.1 kHz. Parameters ---------- x : ndarray of the input signal samples GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB] Q : Quality factor vector for each of the NB peaking filters Returns ------- y : ndarray of output signal samples Examples -------- >>> # Test with white noise >>> w = randn(100000) >>> y = ten_band_eq_filt(x,GdB) >>> psd(y,2**10,44.1)
sk_dsp_comm/sigsys.py
def ten_band_eq_filt(x,GdB,Q=3.5): """ Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB. The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate is assumed to be 44.1 kHz. Parameters ---------- x : ndarray of the input signal samples GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB] Q : Quality factor vector for each of the NB peaking filters Returns ------- y : ndarray of output signal samples Examples -------- >>> # Test with white noise >>> w = randn(100000) >>> y = ten_band_eq_filt(x,GdB) >>> psd(y,2**10,44.1) """ fs = 44100.0 # Hz NB = len(GdB) if not NB == 10: raise ValueError("GdB length not equal to ten") Fc = 31.25*2**np.arange(NB) B = np.zeros((NB,3)) A = np.zeros((NB,3)) # Create matrix of cascade coefficients for k in range(NB): [b,a] = peaking(GdB[k],Fc[k],Q) B[k,:] = b A[k,:] = a # Pass signal x through the cascade of ten filters y = np.zeros(len(x)) for k in range(NB): if k == 0: y = signal.lfilter(B[k,:],A[k,:],x) else: y = signal.lfilter(B[k,:],A[k,:],y) return y
def ten_band_eq_filt(x,GdB,Q=3.5): """ Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB. The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate is assumed to be 44.1 kHz. Parameters ---------- x : ndarray of the input signal samples GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB] Q : Quality factor vector for each of the NB peaking filters Returns ------- y : ndarray of output signal samples Examples -------- >>> # Test with white noise >>> w = randn(100000) >>> y = ten_band_eq_filt(x,GdB) >>> psd(y,2**10,44.1) """ fs = 44100.0 # Hz NB = len(GdB) if not NB == 10: raise ValueError("GdB length not equal to ten") Fc = 31.25*2**np.arange(NB) B = np.zeros((NB,3)) A = np.zeros((NB,3)) # Create matrix of cascade coefficients for k in range(NB): [b,a] = peaking(GdB[k],Fc[k],Q) B[k,:] = b A[k,:] = a # Pass signal x through the cascade of ten filters y = np.zeros(len(x)) for k in range(NB): if k == 0: y = signal.lfilter(B[k,:],A[k,:],x) else: y = signal.lfilter(B[k,:],A[k,:],y) return y
[ "Filter", "the", "input", "signal", "x", "with", "a", "ten", "-", "band", "equalizer", "having", "octave", "gain", "values", "in", "ndarray", "GdB", ".", "The", "signal", "x", "is", "filtered", "using", "octave", "-", "spaced", "peaking", "filters", "starting", "at", "31", ".", "25", "Hz", "and", "stopping", "at", "16", "kHz", ".", "The", "Q", "of", "each", "filter", "is", "3", ".", "5", "but", "can", "be", "changed", ".", "The", "sampling", "rate", "is", "assumed", "to", "be", "44", ".", "1", "kHz", ".", "Parameters", "----------", "x", ":", "ndarray", "of", "the", "input", "signal", "samples", "GdB", ":", "ndarray", "containing", "ten", "octave", "band", "gain", "values", "[", "G0dB", "...", "G9dB", "]", "Q", ":", "Quality", "factor", "vector", "for", "each", "of", "the", "NB", "peaking", "filters", "Returns", "-------", "y", ":", "ndarray", "of", "output", "signal", "samples", "Examples", "--------", ">>>", "#", "Test", "with", "white", "noise", ">>>", "w", "=", "randn", "(", "100000", ")", ">>>", "y", "=", "ten_band_eq_filt", "(", "x", "GdB", ")", ">>>", "psd", "(", "y", "2", "**", "10", "44", ".", "1", ")" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L94-L139
[ "def", "ten_band_eq_filt", "(", "x", ",", "GdB", ",", "Q", "=", "3.5", ")", ":", "fs", "=", "44100.0", "# Hz", "NB", "=", "len", "(", "GdB", ")", "if", "not", "NB", "==", "10", ":", "raise", "ValueError", "(", "\"GdB length not equal to ten\"", ")", "Fc", "=", "31.25", "*", "2", "**", "np", ".", "arange", "(", "NB", ")", "B", "=", "np", ".", "zeros", "(", "(", "NB", ",", "3", ")", ")", "A", "=", "np", ".", "zeros", "(", "(", "NB", ",", "3", ")", ")", "# Create matrix of cascade coefficients", "for", "k", "in", "range", "(", "NB", ")", ":", "[", "b", ",", "a", "]", "=", "peaking", "(", "GdB", "[", "k", "]", ",", "Fc", "[", "k", "]", ",", "Q", ")", "B", "[", "k", ",", ":", "]", "=", "b", "A", "[", "k", ",", ":", "]", "=", "a", "# Pass signal x through the cascade of ten filters", "y", "=", "np", ".", "zeros", "(", "len", "(", "x", ")", ")", "for", "k", "in", "range", "(", "NB", ")", ":", "if", "k", "==", "0", ":", "y", "=", "signal", ".", "lfilter", "(", "B", "[", "k", ",", ":", "]", ",", "A", "[", "k", ",", ":", "]", ",", "x", ")", "else", ":", "y", "=", "signal", ".", "lfilter", "(", "B", "[", "k", ",", ":", "]", ",", "A", "[", "k", ",", ":", "]", ",", "y", ")", "return", "y" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
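The docstring example above generates noise into w but filters x; the sketch below runs the same idea with consistent names (gain values are illustrative, fs is assumed to be 44.1 kHz as the docstring states).

import numpy as np
from sk_dsp_comm import sigsys as ss

GdB = np.array([0, 10.0, 0, 0, -1, 0, 5, 0, -4, 0])   # ten octave-band gains in dB
x = np.random.randn(100000)                           # white-noise test signal
y = ss.ten_band_eq_filt(x, GdB)                       # Q defaults to 3.5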
valid
ten_band_eq_resp
Create a frequency response magnitude plot in dB of a ten band equalizer using a semilogplot (semilogx()) type plot Parameters ---------- GdB : Gain vector for 10 peaking filters [G0,...,G9] Q : Quality factor for each peaking filter (default 3.5) Returns ------- Nothing : two plots are created Examples -------- >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0]) >>> plt.show()
sk_dsp_comm/sigsys.py
def ten_band_eq_resp(GdB,Q=3.5): """ Create a frequency response magnitude plot in dB of a ten band equalizer using a semilogplot (semilogx()) type plot Parameters ---------- GdB : Gain vector for 10 peaking filters [G0,...,G9] Q : Quality factor for each peaking filter (default 3.5) Returns ------- Nothing : two plots are created Examples -------- >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0]) >>> plt.show() """ fs = 44100.0 # Hz NB = len(GdB) if not NB == 10: raise ValueError("GdB length not equal to ten") Fc = 31.25*2**np.arange(NB) B = np.zeros((NB,3)); A = np.zeros((NB,3)); # Create matrix of cascade coefficients for k in range(NB): b,a = peaking(GdB[k],Fc[k],Q,fs) B[k,:] = b A[k,:] = a # Create the cascade frequency response F = np.logspace(1,np.log10(20e3),1000) H = np.ones(len(F))*np.complex(1.0,0.0) for k in range(NB): w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs) H *= Htemp plt.figure(figsize=(6,4)) plt.subplot(211) plt.semilogx(F,20*np.log10(abs(H))) plt.axis([10, fs/2, -12, 12]) plt.grid() plt.title('Ten-Band Equalizer Frequency Response') plt.xlabel('Frequency (Hz)') plt.ylabel('Gain (dB)') plt.subplot(212) plt.stem(np.arange(NB),GdB,'b','bs') #plt.bar(np.arange(NB)-.1,GdB,0.2) plt.axis([0, NB-1, -12, 12]) plt.xlabel('Equalizer Band Number') plt.ylabel('Gain Set (dB)') plt.grid()
def ten_band_eq_resp(GdB,Q=3.5): """ Create a frequency response magnitude plot in dB of a ten band equalizer using a semilogplot (semilogx()) type plot Parameters ---------- GdB : Gain vector for 10 peaking filters [G0,...,G9] Q : Quality factor for each peaking filter (default 3.5) Returns ------- Nothing : two plots are created Examples -------- >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0]) >>> plt.show() """ fs = 44100.0 # Hz NB = len(GdB) if not NB == 10: raise ValueError("GdB length not equal to ten") Fc = 31.25*2**np.arange(NB) B = np.zeros((NB,3)); A = np.zeros((NB,3)); # Create matrix of cascade coefficients for k in range(NB): b,a = peaking(GdB[k],Fc[k],Q,fs) B[k,:] = b A[k,:] = a # Create the cascade frequency response F = np.logspace(1,np.log10(20e3),1000) H = np.ones(len(F))*np.complex(1.0,0.0) for k in range(NB): w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs) H *= Htemp plt.figure(figsize=(6,4)) plt.subplot(211) plt.semilogx(F,20*np.log10(abs(H))) plt.axis([10, fs/2, -12, 12]) plt.grid() plt.title('Ten-Band Equalizer Frequency Response') plt.xlabel('Frequency (Hz)') plt.ylabel('Gain (dB)') plt.subplot(212) plt.stem(np.arange(NB),GdB,'b','bs') #plt.bar(np.arange(NB)-.1,GdB,0.2) plt.axis([0, NB-1, -12, 12]) plt.xlabel('Equalizer Band Number') plt.ylabel('Gain Set (dB)') plt.grid()
[ "Create", "a", "frequency", "response", "magnitude", "plot", "in", "dB", "of", "a", "ten", "band", "equalizer", "using", "a", "semilogplot", "(", "semilogx", "()", ")", "type", "plot", "Parameters", "----------", "GdB", ":", "Gain", "vector", "for", "10", "peaking", "filters", "[", "G0", "...", "G9", "]", "Q", ":", "Quality", "factor", "for", "each", "peaking", "filter", "(", "default", "3", ".", "5", ")", "Returns", "-------", "Nothing", ":", "two", "plots", "are", "created", "Examples", "--------", ">>>", "import", "matplotlib", ".", "pyplot", "as", "plt", ">>>", "from", "sk_dsp_comm", "import", "sigsys", "as", "ss", ">>>", "ss", ".", "ten_band_eq_resp", "(", "[", "0", "10", ".", "0", "0", "0", "-", "1", "0", "5", "0", "-", "4", "0", "]", ")", ">>>", "plt", ".", "show", "()" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L142-L197
[ "def", "ten_band_eq_resp", "(", "GdB", ",", "Q", "=", "3.5", ")", ":", "fs", "=", "44100.0", "# Hz", "NB", "=", "len", "(", "GdB", ")", "if", "not", "NB", "==", "10", ":", "raise", "ValueError", "(", "\"GdB length not equal to ten\"", ")", "Fc", "=", "31.25", "*", "2", "**", "np", ".", "arange", "(", "NB", ")", "B", "=", "np", ".", "zeros", "(", "(", "NB", ",", "3", ")", ")", "A", "=", "np", ".", "zeros", "(", "(", "NB", ",", "3", ")", ")", "# Create matrix of cascade coefficients", "for", "k", "in", "range", "(", "NB", ")", ":", "b", ",", "a", "=", "peaking", "(", "GdB", "[", "k", "]", ",", "Fc", "[", "k", "]", ",", "Q", ",", "fs", ")", "B", "[", "k", ",", ":", "]", "=", "b", "A", "[", "k", ",", ":", "]", "=", "a", "# Create the cascade frequency response", "F", "=", "np", ".", "logspace", "(", "1", ",", "np", ".", "log10", "(", "20e3", ")", ",", "1000", ")", "H", "=", "np", ".", "ones", "(", "len", "(", "F", ")", ")", "*", "np", ".", "complex", "(", "1.0", ",", "0.0", ")", "for", "k", "in", "range", "(", "NB", ")", ":", "w", ",", "Htemp", "=", "signal", ".", "freqz", "(", "B", "[", "k", ",", ":", "]", ",", "A", "[", "k", ",", ":", "]", ",", "2", "*", "np", ".", "pi", "*", "F", "/", "fs", ")", "H", "*=", "Htemp", "plt", ".", "figure", "(", "figsize", "=", "(", "6", ",", "4", ")", ")", "plt", ".", "subplot", "(", "211", ")", "plt", ".", "semilogx", "(", "F", ",", "20", "*", "np", ".", "log10", "(", "abs", "(", "H", ")", ")", ")", "plt", ".", "axis", "(", "[", "10", ",", "fs", "/", "2", ",", "-", "12", ",", "12", "]", ")", "plt", ".", "grid", "(", ")", "plt", ".", "title", "(", "'Ten-Band Equalizer Frequency Response'", ")", "plt", ".", "xlabel", "(", "'Frequency (Hz)'", ")", "plt", ".", "ylabel", "(", "'Gain (dB)'", ")", "plt", ".", "subplot", "(", "212", ")", "plt", ".", "stem", "(", "np", ".", "arange", "(", "NB", ")", ",", "GdB", ",", "'b'", ",", "'bs'", ")", "#plt.bar(np.arange(NB)-.1,GdB,0.2)", "plt", ".", "axis", "(", "[", "0", ",", "NB", "-", "1", ",", "-", "12", ",", "12", "]", ")", "plt", ".", "xlabel", "(", "'Equalizer Band Number'", ")", "plt", ".", "ylabel", "(", "'Gain Set (dB)'", ")", "plt", ".", "grid", "(", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
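A sketch mirroring the docstring example, with a different, purely illustrative gain profile.

import matplotlib.pyplot as plt
from sk_dsp_comm import sigsys as ss

ss.ten_band_eq_resp([6, 4, 2, 0, -2, -2, 0, 2, 4, 6], Q=3.5)   # mild "V" shaped EQ setting
plt.show()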
valid
peaking
A second-order peaking filter having GdB gain at fc and approximately 0 dB otherwise. The filter coefficients returned correspond to a biquadratic system function containing five parameters. Parameters ---------- GdB : Lowpass gain in dB fc : Center frequency in Hz Q : Filter Q which is inversely proportional to bandwidth fs : Sampling frequency in Hz Returns ------- b : ndarray containing the numerator filter coefficients a : ndarray containing the denominator filter coefficients Examples -------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from sk_dsp_comm.sigsys import peaking >>> from scipy import signal >>> b,a = peaking(2.0,500) >>> f = np.logspace(1,5,400) >>> w,H = signal.freqz(b,a,2*np.pi*f/44100) >>> plt.semilogx(f,20*np.log10(abs(H))) >>> plt.ylabel("Power Spectral Density (dB)") >>> plt.xlabel("Frequency (Hz)") >>> plt.show() >>> b,a = peaking(-5.0,500,4) >>> w,H = signal.freqz(b,a,2*np.pi*f/44100) >>> plt.semilogx(f,20*np.log10(abs(H))) >>> plt.ylabel("Power Spectral Density (dB)") >>> plt.xlabel("Frequency (Hz)")
sk_dsp_comm/sigsys.py
def peaking(GdB, fc, Q=3.5, fs=44100.): """ A second-order peaking filter having GdB gain at fc and approximately and 0 dB otherwise. The filter coefficients returns correspond to a biquadratic system function containing five parameters. Parameters ---------- GdB : Lowpass gain in dB fc : Center frequency in Hz Q : Filter Q which is inversely proportional to bandwidth fs : Sampling frquency in Hz Returns ------- b : ndarray containing the numerator filter coefficients a : ndarray containing the denominator filter coefficients Examples -------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from sk_dsp_comm.sigsys import peaking >>> from scipy import signal >>> b,a = peaking(2.0,500) >>> f = np.logspace(1,5,400) >>> w,H = signal.freqz(b,a,2*np.pi*f/44100) >>> plt.semilogx(f,20*np.log10(abs(H))) >>> plt.ylabel("Power Spectral Density (dB)") >>> plt.xlabel("Frequency (Hz)") >>> plt.show() >>> b,a = peaking(-5.0,500,4) >>> w,H = signal.freqz(b,a,2*np.pi*f/44100) >>> plt.semilogx(f,20*np.log10(abs(H))) >>> plt.ylabel("Power Spectral Density (dB)") >>> plt.xlabel("Frequency (Hz)") """ mu = 10**(GdB/20.) kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q)) Cpk = (1 + kq *mu)/(1 + kq) b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu) b2 = (1 - kq*mu)/(1 + kq*mu) a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq) a2 = (1 - kq)/(1 + kq) b = Cpk*np.array([1, b1, b2]) a = np.array([1, a1, a2]) return b,a
def peaking(GdB, fc, Q=3.5, fs=44100.): """ A second-order peaking filter having GdB gain at fc and approximately and 0 dB otherwise. The filter coefficients returns correspond to a biquadratic system function containing five parameters. Parameters ---------- GdB : Lowpass gain in dB fc : Center frequency in Hz Q : Filter Q which is inversely proportional to bandwidth fs : Sampling frquency in Hz Returns ------- b : ndarray containing the numerator filter coefficients a : ndarray containing the denominator filter coefficients Examples -------- >>> import matplotlib.pyplot as plt >>> import numpy as np >>> from sk_dsp_comm.sigsys import peaking >>> from scipy import signal >>> b,a = peaking(2.0,500) >>> f = np.logspace(1,5,400) >>> w,H = signal.freqz(b,a,2*np.pi*f/44100) >>> plt.semilogx(f,20*np.log10(abs(H))) >>> plt.ylabel("Power Spectral Density (dB)") >>> plt.xlabel("Frequency (Hz)") >>> plt.show() >>> b,a = peaking(-5.0,500,4) >>> w,H = signal.freqz(b,a,2*np.pi*f/44100) >>> plt.semilogx(f,20*np.log10(abs(H))) >>> plt.ylabel("Power Spectral Density (dB)") >>> plt.xlabel("Frequency (Hz)") """ mu = 10**(GdB/20.) kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q)) Cpk = (1 + kq *mu)/(1 + kq) b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu) b2 = (1 - kq*mu)/(1 + kq*mu) a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq) a2 = (1 - kq)/(1 + kq) b = Cpk*np.array([1, b1, b2]) a = np.array([1, a1, a2]) return b,a
[ "A", "second", "-", "order", "peaking", "filter", "having", "GdB", "gain", "at", "fc", "and", "approximately", "and", "0", "dB", "otherwise", ".", "The", "filter", "coefficients", "returns", "correspond", "to", "a", "biquadratic", "system", "function", "containing", "five", "parameters", ".", "Parameters", "----------", "GdB", ":", "Lowpass", "gain", "in", "dB", "fc", ":", "Center", "frequency", "in", "Hz", "Q", ":", "Filter", "Q", "which", "is", "inversely", "proportional", "to", "bandwidth", "fs", ":", "Sampling", "frquency", "in", "Hz", "Returns", "-------", "b", ":", "ndarray", "containing", "the", "numerator", "filter", "coefficients", "a", ":", "ndarray", "containing", "the", "denominator", "filter", "coefficients", "Examples", "--------", ">>>", "import", "matplotlib", ".", "pyplot", "as", "plt", ">>>", "import", "numpy", "as", "np", ">>>", "from", "sk_dsp_comm", ".", "sigsys", "import", "peaking", ">>>", "from", "scipy", "import", "signal", ">>>", "b", "a", "=", "peaking", "(", "2", ".", "0", "500", ")", ">>>", "f", "=", "np", ".", "logspace", "(", "1", "5", "400", ")", ">>>", "w", "H", "=", "signal", ".", "freqz", "(", "b", "a", "2", "*", "np", ".", "pi", "*", "f", "/", "44100", ")", ">>>", "plt", ".", "semilogx", "(", "f", "20", "*", "np", ".", "log10", "(", "abs", "(", "H", ")))", ">>>", "plt", ".", "ylabel", "(", "Power", "Spectral", "Density", "(", "dB", ")", ")", ">>>", "plt", ".", "xlabel", "(", "Frequency", "(", "Hz", ")", ")", ">>>", "plt", ".", "show", "()" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L200-L249
[ "def", "peaking", "(", "GdB", ",", "fc", ",", "Q", "=", "3.5", ",", "fs", "=", "44100.", ")", ":", "mu", "=", "10", "**", "(", "GdB", "/", "20.", ")", "kq", "=", "4", "/", "(", "1", "+", "mu", ")", "*", "np", ".", "tan", "(", "2", "*", "np", ".", "pi", "*", "fc", "/", "fs", "/", "(", "2", "*", "Q", ")", ")", "Cpk", "=", "(", "1", "+", "kq", "*", "mu", ")", "/", "(", "1", "+", "kq", ")", "b1", "=", "-", "2", "*", "np", ".", "cos", "(", "2", "*", "np", ".", "pi", "*", "fc", "/", "fs", ")", "/", "(", "1", "+", "kq", "*", "mu", ")", "b2", "=", "(", "1", "-", "kq", "*", "mu", ")", "/", "(", "1", "+", "kq", "*", "mu", ")", "a1", "=", "-", "2", "*", "np", ".", "cos", "(", "2", "*", "np", ".", "pi", "*", "fc", "/", "fs", ")", "/", "(", "1", "+", "kq", ")", "a2", "=", "(", "1", "-", "kq", ")", "/", "(", "1", "+", "kq", ")", "b", "=", "Cpk", "*", "np", ".", "array", "(", "[", "1", ",", "b1", ",", "b2", "]", ")", "a", "=", "np", ".", "array", "(", "[", "1", ",", "a1", ",", "a2", "]", ")", "return", "b", ",", "a" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
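A quick numerical check of the peaking biquad: evaluating the response at the center frequency should give a value close to the requested GdB. The gain, fc, and fs values are illustrative.

import numpy as np
from scipy import signal
from sk_dsp_comm import sigsys as ss

fs = 44100.0
fc = 1000.0
b, a = ss.peaking(6.0, fc, Q=3.5, fs=fs)
w, H = signal.freqz(b, a, worN=[2*np.pi*fc/fs])    # evaluate only at fc
print('Gain at fc: %.2f dB' % (20*np.log10(np.abs(H[0]))))   # expect roughly 6 dB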
valid
ex6_2
Generate a triangle pulse as described in Example 6-2 of Chapter 6. You need to supply an index array n that covers at least [-2, 5]. The function returns the hard-coded signal of the example. Parameters ---------- n : time index ndarray covering at least -2 to +5. Returns ------- x : ndarray of signal samples in x Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> n = np.arange(-5,8) >>> x = ss.ex6_2(n) >>> plt.stem(n,x) # creates a stem plot of x vs n
sk_dsp_comm/sigsys.py
def ex6_2(n): """ Generate a triangle pulse as described in Example 6-2 of Chapter 6. You need to supply an index array n that covers at least [-2, 5]. The function returns the hard-coded signal of the example. Parameters ---------- n : time index ndarray covering at least -2 to +5. Returns ------- x : ndarray of signal samples in x Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> n = np.arange(-5,8) >>> x = ss.ex6_2(n) >>> plt.stem(n,x) # creates a stem plot of x vs n """ x = np.zeros(len(n)) for k, nn in enumerate(n): if nn >= -2 and nn <= 5: x[k] = 8 - nn return x
def ex6_2(n): """ Generate a triangle pulse as described in Example 6-2 of Chapter 6. You need to supply an index array n that covers at least [-2, 5]. The function returns the hard-coded signal of the example. Parameters ---------- n : time index ndarray covering at least -2 to +5. Returns ------- x : ndarray of signal samples in x Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> n = np.arange(-5,8) >>> x = ss.ex6_2(n) >>> plt.stem(n,x) # creates a stem plot of x vs n """ x = np.zeros(len(n)) for k, nn in enumerate(n): if nn >= -2 and nn <= 5: x[k] = 8 - nn return x
[ "Generate", "a", "triangle", "pulse", "as", "described", "in", "Example", "6", "-", "2", "of", "Chapter", "6", ".", "You", "need", "to", "supply", "an", "index", "array", "n", "that", "covers", "at", "least", "[", "-", "2", "5", "]", ".", "The", "function", "returns", "the", "hard", "-", "coded", "signal", "of", "the", "example", ".", "Parameters", "----------", "n", ":", "time", "index", "ndarray", "covering", "at", "least", "-", "2", "to", "+", "5", ".", "Returns", "-------", "x", ":", "ndarray", "of", "signal", "samples", "in", "x", "Examples", "--------", ">>>", "import", "numpy", "as", "np", ">>>", "import", "matplotlib", ".", "pyplot", "as", "plt", ">>>", "from", "sk_dsp_comm", "import", "sigsys", "as", "ss", ">>>", "n", "=", "np", ".", "arange", "(", "-", "5", "8", ")", ">>>", "x", "=", "ss", ".", "ex6_2", "(", "n", ")", ">>>", "plt", ".", "stem", "(", "n", "x", ")", "#", "creates", "a", "stem", "plot", "of", "x", "vs", "n" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L252-L281
[ "def", "ex6_2", "(", "n", ")", ":", "x", "=", "np", ".", "zeros", "(", "len", "(", "n", ")", ")", "for", "k", ",", "nn", "in", "enumerate", "(", "n", ")", ":", "if", "nn", ">=", "-", "2", "and", "nn", "<=", "5", ":", "x", "[", "k", "]", "=", "8", "-", "nn", "return", "x" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
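The record's own example, restated as a runnable sketch.

import numpy as np
import matplotlib.pyplot as plt
from sk_dsp_comm import sigsys as ss

n = np.arange(-5, 8)
x = ss.ex6_2(n)          # 8 - n on -2 <= n <= 5, zero elsewhere
plt.stem(n, x)
plt.show()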
valid
position_CD
CD sled position control case study of Chapter 18. The function returns the closed-loop and open-loop system function for a CD/DVD sled position control system. The loop amplifier gain is the only variable that may be changed. The returned system function can however be changed. Parameters ---------- Ka : loop amplifier gain, start with 50. out_type : 'open_loop' for open loop system function out_type : 'fb_approx' for closed-loop approximation out_type : 'fb_exact' for closed-loop exact Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Notes ----- With the exception of the loop amplifier gain, all other parameters are hard-coded from Case Study example. Examples -------- >>> b,a = position_CD(Ka,'fb_approx') >>> b,a = position_CD(Ka,'fb_exact')
sk_dsp_comm/sigsys.py
def position_CD(Ka,out_type = 'fb_exact'): """ CD sled position control case study of Chapter 18. The function returns the closed-loop and open-loop system function for a CD/DVD sled position control system. The loop amplifier gain is the only variable that may be changed. The returned system function can however be changed. Parameters ---------- Ka : loop amplifier gain, start with 50. out_type : 'open_loop' for open loop system function out_type : 'fb_approx' for closed-loop approximation out_type : 'fb_exact' for closed-loop exact Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Notes ----- With the exception of the loop amplifier gain, all other parameters are hard-coded from Case Study example. Examples -------- >>> b,a = position_CD(Ka,'fb_approx') >>> b,a = position_CD(Ka,'fb_exact') """ rs = 10/(2*np.pi) # Load b and a ndarrays with the coefficients if out_type.lower() == 'open_loop': b = np.array([Ka*4000*rs]) a = np.array([1,1275,31250,0]) elif out_type.lower() == 'fb_approx': b = np.array([3.2*Ka*rs]) a = np.array([1, 25, 3.2*Ka*rs]) elif out_type.lower() == 'fb_exact': b = np.array([4000*Ka*rs]) a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs]) else: raise ValueError('out_type must be: open_loop, fb_approx, or fc_exact') return b, a
def position_CD(Ka,out_type = 'fb_exact'): """ CD sled position control case study of Chapter 18. The function returns the closed-loop and open-loop system function for a CD/DVD sled position control system. The loop amplifier gain is the only variable that may be changed. The returned system function can however be changed. Parameters ---------- Ka : loop amplifier gain, start with 50. out_type : 'open_loop' for open loop system function out_type : 'fb_approx' for closed-loop approximation out_type : 'fb_exact' for closed-loop exact Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Notes ----- With the exception of the loop amplifier gain, all other parameters are hard-coded from Case Study example. Examples -------- >>> b,a = position_CD(Ka,'fb_approx') >>> b,a = position_CD(Ka,'fb_exact') """ rs = 10/(2*np.pi) # Load b and a ndarrays with the coefficients if out_type.lower() == 'open_loop': b = np.array([Ka*4000*rs]) a = np.array([1,1275,31250,0]) elif out_type.lower() == 'fb_approx': b = np.array([3.2*Ka*rs]) a = np.array([1, 25, 3.2*Ka*rs]) elif out_type.lower() == 'fb_exact': b = np.array([4000*Ka*rs]) a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs]) else: raise ValueError('out_type must be: open_loop, fb_approx, or fc_exact') return b, a
[ "CD", "sled", "position", "control", "case", "study", "of", "Chapter", "18", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L284-L329
[ "def", "position_CD", "(", "Ka", ",", "out_type", "=", "'fb_exact'", ")", ":", "rs", "=", "10", "/", "(", "2", "*", "np", ".", "pi", ")", "# Load b and a ndarrays with the coefficients", "if", "out_type", ".", "lower", "(", ")", "==", "'open_loop'", ":", "b", "=", "np", ".", "array", "(", "[", "Ka", "*", "4000", "*", "rs", "]", ")", "a", "=", "np", ".", "array", "(", "[", "1", ",", "1275", ",", "31250", ",", "0", "]", ")", "elif", "out_type", ".", "lower", "(", ")", "==", "'fb_approx'", ":", "b", "=", "np", ".", "array", "(", "[", "3.2", "*", "Ka", "*", "rs", "]", ")", "a", "=", "np", ".", "array", "(", "[", "1", ",", "25", ",", "3.2", "*", "Ka", "*", "rs", "]", ")", "elif", "out_type", ".", "lower", "(", ")", "==", "'fb_exact'", ":", "b", "=", "np", ".", "array", "(", "[", "4000", "*", "Ka", "*", "rs", "]", ")", "a", "=", "np", ".", "array", "(", "[", "1", ",", "1250", "+", "25", ",", "25", "*", "1250", ",", "4000", "*", "Ka", "*", "rs", "]", ")", "else", ":", "raise", "ValueError", "(", "'out_type must be: open_loop, fb_approx, or fc_exact'", ")", "return", "b", ",", "a" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
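A sketch comparing the approximate and exact closed-loop step responses for the suggested loop gain Ka = 50, using scipy.signal.step; the time grid is illustrative.

import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from sk_dsp_comm import sigsys as ss

Ka = 50
b1, a1 = ss.position_CD(Ka, 'fb_approx')
b2, a2 = ss.position_CD(Ka, 'fb_exact')
t = np.linspace(0, 1.5, 500)
t1, y1 = signal.step((b1, a1), T=t)
t2, y2 = signal.step((b2, a2), T=t)
plt.plot(t1, y1, label='fb_approx')
plt.plot(t2, y2, label='fb_exact')
plt.legend()
plt.show()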
valid
cruise_control
Cruise control with PI controller and hill disturbance. This function returns various system function configurations for the cruise control Case Study example found in the supplementary article. The plant model is obtained by linearizing the equations of motion and the controller contains a proportional and integral gain term set via the closed-loop parameters natural frequency wn (rad/s) and damping zeta. Parameters ---------- wn : closed-loop natural frequency in rad/s, nominally 0.1 zeta : closed-loop damping factor, nominally 1.0 T : vehicle time constant, nominally 10 s vcruise : cruise velocity set point, nominally 75 mph vmax : maximum vehicle velocity, nominally 120 mph tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function 'H' : closed-loop system function V(s)/R(s) 'HE' : closed-loop system function E(s)/R(s) 'HVW' : closed-loop system function V(s)/W(s) 'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Examples -------- >>> # return the closed-loop system function output/input velocity >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H') >>> # return the closed-loop system function loop error/hill disturbance >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
sk_dsp_comm/sigsys.py
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'): """ Cruise control with PI controller and hill disturbance. This function returns various system function configurations for a the cruise control Case Study example found in the supplementary article. The plant model is obtained by the linearizing the equations of motion and the controller contains a proportional and integral gain term set via the closed-loop parameters natuarl frequency wn (rad/s) and damping zeta. Parameters ---------- wn : closed-loop natural frequency in rad/s, nominally 0.1 zeta : closed-loop damping factor, nominally 1.0 T : vehicle time constant, nominally 10 s vcruise : cruise velocity set point, nominally 75 mph vmax : maximum vehicle velocity, nominally 120 mph tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function 'H' : closed-loop system function V(s)/R(s) 'HE' : closed-loop system function E(s)/R(s) 'HVW' : closed-loop system function V(s)/W(s) 'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Examples -------- >>> # return the closed-loop system function output/input velocity >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H') >>> # return the closed-loop system function loop error/hill disturbance >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED') """ tau = T/2.*vmax/vcruise g = 9.8 g *= 3*60**2/5280. # m/s to mph conversion Kp = T*(2*zeta*wn-1/tau)/vmax Ki = T*wn**2./vmax K = Kp*vmax/T print('wn = ', np.sqrt(K/(Kp/Ki))) print('zeta = ', (K + 1/tau)/(2*wn)) a = np.array([1, 2*zeta*wn, wn**2]) if tf_mode == 'H': b = np.array([K, wn**2]) elif tf_mode == 'HE': b = np.array([1, 2*zeta*wn-K, 0.]) elif tf_mode == 'HVW': b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)]) b *= Kp elif tf_mode == 'HED': b = np.array([g, 0]) else: raise ValueError('tf_mode must be: H, HE, HVU, or HED') return b, a
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'): """ Cruise control with PI controller and hill disturbance. This function returns various system function configurations for a the cruise control Case Study example found in the supplementary article. The plant model is obtained by the linearizing the equations of motion and the controller contains a proportional and integral gain term set via the closed-loop parameters natuarl frequency wn (rad/s) and damping zeta. Parameters ---------- wn : closed-loop natural frequency in rad/s, nominally 0.1 zeta : closed-loop damping factor, nominally 1.0 T : vehicle time constant, nominally 10 s vcruise : cruise velocity set point, nominally 75 mph vmax : maximum vehicle velocity, nominally 120 mph tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function 'H' : closed-loop system function V(s)/R(s) 'HE' : closed-loop system function E(s)/R(s) 'HVW' : closed-loop system function V(s)/W(s) 'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input Returns ------- b : numerator coefficient ndarray a : denominator coefficient ndarray Examples -------- >>> # return the closed-loop system function output/input velocity >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H') >>> # return the closed-loop system function loop error/hill disturbance >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED') """ tau = T/2.*vmax/vcruise g = 9.8 g *= 3*60**2/5280. # m/s to mph conversion Kp = T*(2*zeta*wn-1/tau)/vmax Ki = T*wn**2./vmax K = Kp*vmax/T print('wn = ', np.sqrt(K/(Kp/Ki))) print('zeta = ', (K + 1/tau)/(2*wn)) a = np.array([1, 2*zeta*wn, wn**2]) if tf_mode == 'H': b = np.array([K, wn**2]) elif tf_mode == 'HE': b = np.array([1, 2*zeta*wn-K, 0.]) elif tf_mode == 'HVW': b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)]) b *= Kp elif tf_mode == 'HED': b = np.array([g, 0]) else: raise ValueError('tf_mode must be: H, HE, HVU, or HED') return b, a
[ "Cruise", "control", "with", "PI", "controller", "and", "hill", "disturbance", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L332-L388
[ "def", "cruise_control", "(", "wn", ",", "zeta", ",", "T", ",", "vcruise", ",", "vmax", ",", "tf_mode", "=", "'H'", ")", ":", "tau", "=", "T", "/", "2.", "*", "vmax", "/", "vcruise", "g", "=", "9.8", "g", "*=", "3", "*", "60", "**", "2", "/", "5280.", "# m/s to mph conversion", "Kp", "=", "T", "*", "(", "2", "*", "zeta", "*", "wn", "-", "1", "/", "tau", ")", "/", "vmax", "Ki", "=", "T", "*", "wn", "**", "2.", "/", "vmax", "K", "=", "Kp", "*", "vmax", "/", "T", "print", "(", "'wn = '", ",", "np", ".", "sqrt", "(", "K", "/", "(", "Kp", "/", "Ki", ")", ")", ")", "print", "(", "'zeta = '", ",", "(", "K", "+", "1", "/", "tau", ")", "/", "(", "2", "*", "wn", ")", ")", "a", "=", "np", ".", "array", "(", "[", "1", ",", "2", "*", "zeta", "*", "wn", ",", "wn", "**", "2", "]", ")", "if", "tf_mode", "==", "'H'", ":", "b", "=", "np", ".", "array", "(", "[", "K", ",", "wn", "**", "2", "]", ")", "elif", "tf_mode", "==", "'HE'", ":", "b", "=", "np", ".", "array", "(", "[", "1", ",", "2", "*", "zeta", "*", "wn", "-", "K", ",", "0.", "]", ")", "elif", "tf_mode", "==", "'HVW'", ":", "b", "=", "np", ".", "array", "(", "[", "1", ",", "wn", "**", "2", "/", "K", "+", "1", "/", "tau", ",", "wn", "**", "2", "/", "(", "K", "*", "tau", ")", "]", ")", "b", "*=", "Kp", "elif", "tf_mode", "==", "'HED'", ":", "b", "=", "np", ".", "array", "(", "[", "g", ",", "0", "]", ")", "else", ":", "raise", "ValueError", "(", "'tf_mode must be: H, HE, HVU, or HED'", ")", "return", "b", ",", "a" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
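A sketch of the hill-disturbance case using the nominal values quoted in the docstring; scipy.signal.step then gives the speed-error transient for a unit grade step (the function also prints its realized wn and zeta).

from scipy import signal
import matplotlib.pyplot as plt
from sk_dsp_comm import sigsys as ss

b, a = ss.cruise_control(0.1, 1.0, 10, 75, 120, tf_mode='HED')
t, e = signal.step((b, a))        # speed error in mph vs. time for a unit hill disturbance
plt.plot(t, e)
plt.xlabel('Time (s)')
plt.ylabel('Speed error (mph)')
plt.show()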
valid
splane
Create an s-plane pole-zero plot. As input the function uses the numerator and denominator s-domain system function coefficient ndarrays b and a respectively. Assumed to be stored in descending powers of s. Parameters ---------- b : numerator coefficient ndarray. a : denominator coefficient ndarray. auto_scale : True size : [xmin,xmax,ymin,ymax] plot scaling when scale = False Returns ------- (M,N) : tuple of zero and pole counts + plot window Notes ----- This function tries to identify repeated poles and zeros and will place the multiplicity number above and to the right of the pole or zero. The difficulty is setting the tolerance for this detection. Currently it is set at 1e-3 via the function signal.unique_roots. Examples -------- >>> # Here the plot is generated using auto_scale >>> splane(b,a) >>> # Here the plot is generated using manual scaling >>> splane(b,a,False,[-10,1,-10,10])
sk_dsp_comm/sigsys.py
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]): """ Create an s-plane pole-zero plot. As input the function uses the numerator and denominator s-domain system function coefficient ndarrays b and a respectively. Assumed to be stored in descending powers of s. Parameters ---------- b : numerator coefficient ndarray. a : denominator coefficient ndarray. auto_scale : True size : [xmin,xmax,ymin,ymax] plot scaling when scale = False Returns ------- (M,N) : tuple of zero and pole counts + plot window Notes ----- This function tries to identify repeated poles and zeros and will place the multiplicity number above and to the right of the pole or zero. The difficulty is setting the tolerance for this detection. Currently it is set at 1e-3 via the function signal.unique_roots. Examples -------- >>> # Here the plot is generated using auto_scale >>> splane(b,a) >>> # Here the plot is generated using manual scaling >>> splane(b,a,False,[-10,1,-10,10]) """ M = len(b) - 1 N = len(a) - 1 plt.figure(figsize=(5,5)) #plt.axis('equal') N_roots = np.array([0.0]) if M > 0: N_roots = np.roots(b) D_roots = np.array([0.0]) if N > 0: D_roots = np.roots(a) if auto_scale: size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5 size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5 size[1] = max(size[1],0.5) size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5 size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5 plt.plot([size[0],size[1]],[0,0],'k--') plt.plot([0,0],[size[2],size[3]],'r--') # Plot labels if multiplicity greater than 1 x_scale = size[1]-size[0] y_scale = size[3]-size[2] x_off = 0.03 y_off = 0.01 if M > 0: #N_roots = np.roots(b) N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg') plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8) idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0] for k in range(len(idx_N_mult)): x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10) if N > 0: #D_roots = np.roots(a) D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg') plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8) idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0] for k in range(len(idx_D_mult)): x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10) plt.xlabel('Real Part') plt.ylabel('Imaginary Part') plt.title('Pole-Zero Plot') #plt.grid() plt.axis(np.array(size)) return M,N
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]): """ Create an s-plane pole-zero plot. As input the function uses the numerator and denominator s-domain system function coefficient ndarrays b and a respectively. Assumed to be stored in descending powers of s. Parameters ---------- b : numerator coefficient ndarray. a : denominator coefficient ndarray. auto_scale : True size : [xmin,xmax,ymin,ymax] plot scaling when scale = False Returns ------- (M,N) : tuple of zero and pole counts + plot window Notes ----- This function tries to identify repeated poles and zeros and will place the multiplicity number above and to the right of the pole or zero. The difficulty is setting the tolerance for this detection. Currently it is set at 1e-3 via the function signal.unique_roots. Examples -------- >>> # Here the plot is generated using auto_scale >>> splane(b,a) >>> # Here the plot is generated using manual scaling >>> splane(b,a,False,[-10,1,-10,10]) """ M = len(b) - 1 N = len(a) - 1 plt.figure(figsize=(5,5)) #plt.axis('equal') N_roots = np.array([0.0]) if M > 0: N_roots = np.roots(b) D_roots = np.array([0.0]) if N > 0: D_roots = np.roots(a) if auto_scale: size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5 size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5 size[1] = max(size[1],0.5) size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5 size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5 plt.plot([size[0],size[1]],[0,0],'k--') plt.plot([0,0],[size[2],size[3]],'r--') # Plot labels if multiplicity greater than 1 x_scale = size[1]-size[0] y_scale = size[3]-size[2] x_off = 0.03 y_off = 0.01 if M > 0: #N_roots = np.roots(b) N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg') plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8) idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0] for k in range(len(idx_N_mult)): x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10) if N > 0: #D_roots = np.roots(a) D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg') plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8) idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0] for k in range(len(idx_D_mult)): x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10) plt.xlabel('Real Part') plt.ylabel('Imaginary Part') plt.title('Pole-Zero Plot') #plt.grid() plt.axis(np.array(size)) return M,N
[ "Create", "an", "s", "-", "plane", "pole", "-", "zero", "plot", ".", "As", "input", "the", "function", "uses", "the", "numerator", "and", "denominator", "s", "-", "domain", "system", "function", "coefficient", "ndarrays", "b", "and", "a", "respectively", ".", "Assumed", "to", "be", "stored", "in", "descending", "powers", "of", "s", ".", "Parameters", "----------", "b", ":", "numerator", "coefficient", "ndarray", ".", "a", ":", "denominator", "coefficient", "ndarray", ".", "auto_scale", ":", "True", "size", ":", "[", "xmin", "xmax", "ymin", "ymax", "]", "plot", "scaling", "when", "scale", "=", "False", "Returns", "-------", "(", "M", "N", ")", ":", "tuple", "of", "zero", "and", "pole", "counts", "+", "plot", "window", "Notes", "-----", "This", "function", "tries", "to", "identify", "repeated", "poles", "and", "zeros", "and", "will", "place", "the", "multiplicity", "number", "above", "and", "to", "the", "right", "of", "the", "pole", "or", "zero", ".", "The", "difficulty", "is", "setting", "the", "tolerance", "for", "this", "detection", ".", "Currently", "it", "is", "set", "at", "1e", "-", "3", "via", "the", "function", "signal", ".", "unique_roots", ".", "Examples", "--------", ">>>", "#", "Here", "the", "plot", "is", "generated", "using", "auto_scale", ">>>", "splane", "(", "b", "a", ")", ">>>", "#", "Here", "the", "plot", "is", "generated", "using", "manual", "scaling", ">>>", "splane", "(", "b", "a", "False", "[", "-", "10", "1", "-", "10", "10", "]", ")" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L391-L471
[ "def", "splane", "(", "b", ",", "a", ",", "auto_scale", "=", "True", ",", "size", "=", "[", "-", "1", ",", "1", ",", "-", "1", ",", "1", "]", ")", ":", "M", "=", "len", "(", "b", ")", "-", "1", "N", "=", "len", "(", "a", ")", "-", "1", "plt", ".", "figure", "(", "figsize", "=", "(", "5", ",", "5", ")", ")", "#plt.axis('equal')", "N_roots", "=", "np", ".", "array", "(", "[", "0.0", "]", ")", "if", "M", ">", "0", ":", "N_roots", "=", "np", ".", "roots", "(", "b", ")", "D_roots", "=", "np", ".", "array", "(", "[", "0.0", "]", ")", "if", "N", ">", "0", ":", "D_roots", "=", "np", ".", "roots", "(", "a", ")", "if", "auto_scale", ":", "size", "[", "0", "]", "=", "min", "(", "np", ".", "min", "(", "np", ".", "real", "(", "N_roots", ")", ")", ",", "np", ".", "min", "(", "np", ".", "real", "(", "D_roots", ")", ")", ")", "-", "0.5", "size", "[", "1", "]", "=", "max", "(", "np", ".", "max", "(", "np", ".", "real", "(", "N_roots", ")", ")", ",", "np", ".", "max", "(", "np", ".", "real", "(", "D_roots", ")", ")", ")", "+", "0.5", "size", "[", "1", "]", "=", "max", "(", "size", "[", "1", "]", ",", "0.5", ")", "size", "[", "2", "]", "=", "min", "(", "np", ".", "min", "(", "np", ".", "imag", "(", "N_roots", ")", ")", ",", "np", ".", "min", "(", "np", ".", "imag", "(", "D_roots", ")", ")", ")", "-", "0.5", "size", "[", "3", "]", "=", "max", "(", "np", ".", "max", "(", "np", ".", "imag", "(", "N_roots", ")", ")", ",", "np", ".", "max", "(", "np", ".", "imag", "(", "D_roots", ")", ")", ")", "+", "0.5", "plt", ".", "plot", "(", "[", "size", "[", "0", "]", ",", "size", "[", "1", "]", "]", ",", "[", "0", ",", "0", "]", ",", "'k--'", ")", "plt", ".", "plot", "(", "[", "0", ",", "0", "]", ",", "[", "size", "[", "2", "]", ",", "size", "[", "3", "]", "]", ",", "'r--'", ")", "# Plot labels if multiplicity greater than 1", "x_scale", "=", "size", "[", "1", "]", "-", "size", "[", "0", "]", "y_scale", "=", "size", "[", "3", "]", "-", "size", "[", "2", "]", "x_off", "=", "0.03", "y_off", "=", "0.01", "if", "M", ">", "0", ":", "#N_roots = np.roots(b)", "N_uniq", ",", "N_mult", "=", "signal", ".", "unique_roots", "(", "N_roots", ",", "tol", "=", "1e-3", ",", "rtype", "=", "'avg'", ")", "plt", ".", "plot", "(", "np", ".", "real", "(", "N_uniq", ")", ",", "np", ".", "imag", "(", "N_uniq", ")", ",", "'ko'", ",", "mfc", "=", "'None'", ",", "ms", "=", "8", ")", "idx_N_mult", "=", "np", ".", "nonzero", "(", "np", ".", "ravel", "(", "N_mult", ">", "1", ")", ")", "[", "0", "]", "for", "k", "in", "range", "(", "len", "(", "idx_N_mult", ")", ")", ":", "x_loc", "=", "np", ".", "real", "(", "N_uniq", "[", "idx_N_mult", "[", "k", "]", "]", ")", "+", "x_off", "*", "x_scale", "y_loc", "=", "np", ".", "imag", "(", "N_uniq", "[", "idx_N_mult", "[", "k", "]", "]", ")", "+", "y_off", "*", "y_scale", "plt", ".", "text", "(", "x_loc", ",", "y_loc", ",", "str", "(", "N_mult", "[", "idx_N_mult", "[", "k", "]", "]", ")", ",", "ha", "=", "'center'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ")", "if", "N", ">", "0", ":", "#D_roots = np.roots(a)", "D_uniq", ",", "D_mult", "=", "signal", ".", "unique_roots", "(", "D_roots", ",", "tol", "=", "1e-3", ",", "rtype", "=", "'avg'", ")", "plt", ".", "plot", "(", "np", ".", "real", "(", "D_uniq", ")", ",", "np", ".", "imag", "(", "D_uniq", ")", ",", "'kx'", ",", "ms", "=", "8", ")", "idx_D_mult", "=", "np", ".", "nonzero", "(", "np", ".", "ravel", "(", "D_mult", ">", "1", ")", ")", "[", "0", "]", "for", "k", "in", "range", "(", "len", "(", "idx_D_mult", ")", ")", ":", "x_loc", "=", "np", 
".", "real", "(", "D_uniq", "[", "idx_D_mult", "[", "k", "]", "]", ")", "+", "x_off", "*", "x_scale", "y_loc", "=", "np", ".", "imag", "(", "D_uniq", "[", "idx_D_mult", "[", "k", "]", "]", ")", "+", "y_off", "*", "y_scale", "plt", ".", "text", "(", "x_loc", ",", "y_loc", ",", "str", "(", "D_mult", "[", "idx_D_mult", "[", "k", "]", "]", ")", ",", "ha", "=", "'center'", ",", "va", "=", "'bottom'", ",", "fontsize", "=", "10", ")", "plt", ".", "xlabel", "(", "'Real Part'", ")", "plt", ".", "ylabel", "(", "'Imaginary Part'", ")", "plt", ".", "title", "(", "'Pole-Zero Plot'", ")", "#plt.grid()", "plt", ".", "axis", "(", "np", ".", "array", "(", "size", ")", ")", "return", "M", ",", "N" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
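A pole-zero sketch for an arbitrary second-order H(s); the coefficients are illustrative and listed in descending powers of s as the docstring requires.

import matplotlib.pyplot as plt
from sk_dsp_comm import sigsys as ss

b = [1, 1]               # numerator: s + 1
a = [1, 2, 10]           # denominator: s^2 + 2s + 10
M, N = ss.splane(b, a)   # auto-scaled plot; returns the zero and pole counts
plt.show()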
valid
OS_filter
Overlap and save transform domain FIR filtering. This function implements the classical overlap and save method of transform domain filtering using a length P FIR filter. Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x = cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OS_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OS_filter(x,h,N,1)
sk_dsp_comm/sigsys.py
def OS_filter(x,h,N,mode=0): """ Overlap and save transform domain FIR filtering. This function implements the classical overlap and save method of transform domain filtering using a length P FIR filter. Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x = cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OS_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OS_filter(x,h,N,1) """ P = len(h) # zero pad start of x so first frame can recover first true samples of x x = np.hstack((np.zeros(P-1),x)) L = N - P + 1 Nx = len(x) Nframe = int(np.ceil(Nx/float(L))) # zero pad end of x to full number of frames needed x = np.hstack((x,np.zeros(Nframe*L-Nx))) y = np.zeros(int(Nframe*N)) # create an instrumentation matrix to observe the overlap and save behavior y_mat = np.zeros((Nframe,int(Nframe*N))) H = fft.fft(h,N) # begin the filtering operation for k in range(Nframe): xk = x[k*L:k*L+N] Xk = fft.fft(xk,N) Yk = H*Xk yk = np.real(fft.ifft(Yk)) # imag part should be zero y[k*L+P-1:k*L+N] = yk[P-1:] y_mat[k,k*L:k*L+N] = yk if mode == 1: return y[P-1:Nx], y_mat[:,P-1:Nx] else: return y[P-1:Nx]
def OS_filter(x,h,N,mode=0): """ Overlap and save transform domain FIR filtering. This function implements the classical overlap and save method of transform domain filtering using a length P FIR filter. Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x = cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OS_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OS_filter(x,h,N,1) """ P = len(h) # zero pad start of x so first frame can recover first true samples of x x = np.hstack((np.zeros(P-1),x)) L = N - P + 1 Nx = len(x) Nframe = int(np.ceil(Nx/float(L))) # zero pad end of x to full number of frames needed x = np.hstack((x,np.zeros(Nframe*L-Nx))) y = np.zeros(int(Nframe*N)) # create an instrumentation matrix to observe the overlap and save behavior y_mat = np.zeros((Nframe,int(Nframe*N))) H = fft.fft(h,N) # begin the filtering operation for k in range(Nframe): xk = x[k*L:k*L+N] Xk = fft.fft(xk,N) Yk = H*Xk yk = np.real(fft.ifft(Yk)) # imag part should be zero y[k*L+P-1:k*L+N] = yk[P-1:] y_mat[k,k*L:k*L+N] = yk if mode == 1: return y[P-1:Nx], y_mat[:,P-1:Nx] else: return y[P-1:Nx]
[ "Overlap", "and", "save", "transform", "domain", "FIR", "filtering", ".", "This", "function", "implements", "the", "classical", "overlap", "and", "save", "method", "of", "transform", "domain", "filtering", "using", "a", "length", "P", "FIR", "filter", ".", "Parameters", "----------", "x", ":", "input", "signal", "to", "be", "filtered", "as", "an", "ndarray", "h", ":", "FIR", "filter", "coefficients", "as", "an", "ndarray", "of", "length", "P", "N", ":", "FFT", "size", ">", "P", "typically", "a", "power", "of", "two", "mode", ":", "0", "or", "1", "when", "1", "returns", "a", "diagnostic", "matrix", "Returns", "-------", "y", ":", "the", "filtered", "output", "as", "an", "ndarray", "y_mat", ":", "an", "ndarray", "whose", "rows", "are", "the", "individual", "overlap", "outputs", ".", "Notes", "-----", "y_mat", "is", "used", "for", "diagnostics", "and", "to", "gain", "understanding", "of", "the", "algorithm", ".", "Examples", "--------", ">>>", "n", "=", "arange", "(", "0", "100", ")", ">>>", "x", "=", "cos", "(", "2", "*", "pi", "*", "0", ".", "05", "*", "n", ")", ">>>", "b", "=", "ones", "(", "10", ")", ">>>", "y", "=", "OS_filter", "(", "x", "h", "N", ")", ">>>", "#", "set", "mode", "=", "1", ">>>", "y", "y_mat", "=", "OS_filter", "(", "x", "h", "N", "1", ")" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L474-L531
[ "def", "OS_filter", "(", "x", ",", "h", ",", "N", ",", "mode", "=", "0", ")", ":", "P", "=", "len", "(", "h", ")", "# zero pad start of x so first frame can recover first true samples of x", "x", "=", "np", ".", "hstack", "(", "(", "np", ".", "zeros", "(", "P", "-", "1", ")", ",", "x", ")", ")", "L", "=", "N", "-", "P", "+", "1", "Nx", "=", "len", "(", "x", ")", "Nframe", "=", "int", "(", "np", ".", "ceil", "(", "Nx", "/", "float", "(", "L", ")", ")", ")", "# zero pad end of x to full number of frames needed", "x", "=", "np", ".", "hstack", "(", "(", "x", ",", "np", ".", "zeros", "(", "Nframe", "*", "L", "-", "Nx", ")", ")", ")", "y", "=", "np", ".", "zeros", "(", "int", "(", "Nframe", "*", "N", ")", ")", "# create an instrumentation matrix to observe the overlap and save behavior", "y_mat", "=", "np", ".", "zeros", "(", "(", "Nframe", ",", "int", "(", "Nframe", "*", "N", ")", ")", ")", "H", "=", "fft", ".", "fft", "(", "h", ",", "N", ")", "# begin the filtering operation", "for", "k", "in", "range", "(", "Nframe", ")", ":", "xk", "=", "x", "[", "k", "*", "L", ":", "k", "*", "L", "+", "N", "]", "Xk", "=", "fft", ".", "fft", "(", "xk", ",", "N", ")", "Yk", "=", "H", "*", "Xk", "yk", "=", "np", ".", "real", "(", "fft", ".", "ifft", "(", "Yk", ")", ")", "# imag part should be zero", "y", "[", "k", "*", "L", "+", "P", "-", "1", ":", "k", "*", "L", "+", "N", "]", "=", "yk", "[", "P", "-", "1", ":", "]", "y_mat", "[", "k", ",", "k", "*", "L", ":", "k", "*", "L", "+", "N", "]", "=", "yk", "if", "mode", "==", "1", ":", "return", "y", "[", "P", "-", "1", ":", "Nx", "]", ",", "y_mat", "[", ":", ",", "P", "-", "1", ":", "Nx", "]", "else", ":", "return", "y", "[", "P", "-", "1", ":", "Nx", "]" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
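A sketch that runs the docstring example end to end and checks the overlap-and-save output against direct lfilter filtering; the FFT size N = 32 is an illustrative choice larger than the filter length P = 10.

import numpy as np
from scipy import signal
from sk_dsp_comm import sigsys as ss

n = np.arange(0, 100)
x = np.cos(2*np.pi*0.05*n)
h = np.ones(10)/10                      # 10-tap moving average
y_os = ss.OS_filter(x, h, 32)           # FFT size N = 32 > P
y_ref = signal.lfilter(h, 1, x)
print('max |difference|:', np.max(np.abs(y_os - y_ref)))   # expect roundoff level, ~1e-15

The comparison against lfilter is the simplest sanity check that the block processing introduces no edge artifacts.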
valid
lp_samp
Lowpass sampling theorem plotting function. Display the spectrum of a sampled signal after setting the bandwidth, sampling frequency, maximum display frequency, and spectral shape. Parameters ---------- fb : spectrum lowpass bandwidth in Hz fs : sampling frequency in Hz fmax : plot over [-fmax,fmax] shape : 'tri' or 'line' N : number of translates, N positive and N negative fsize : the size of the figure window, default (6,4) Returns ------- Nothing : A plot window opens containing the spectrum plot Examples -------- >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm.sigsys import lp_samp No aliasing as bandwidth 10 Hz < 25/2; fs > fb. >>> lp_samp(10,25,50,10) >>> plt.show() Now aliasing as bandwidth 15 Hz > 25/2; fs < fb. >>> lp_samp(15,25,50,10)
sk_dsp_comm/sigsys.py
def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)): """ Lowpass sampling theorem plotting function. Display the spectrum of a sampled signal after setting the bandwidth, sampling frequency, maximum display frequency, and spectral shape. Parameters ---------- fb : spectrum lowpass bandwidth in Hz fs : sampling frequency in Hz fmax : plot over [-fmax,fmax] shape : 'tri' or 'line' N : number of translates, N positive and N negative fsize : the size of the figure window, default (6,4) Returns ------- Nothing : A plot window opens containing the spectrum plot Examples -------- >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm.sigsys import lp_samp No aliasing as bandwidth 10 Hz < 25/2; fs > fb. >>> lp_samp(10,25,50,10) >>> plt.show() Now aliasing as bandwidth 15 Hz > 25/2; fs < fb. >>> lp_samp(15,25,50,10) """ plt.figure(figsize=fsize) # define the plot interval f = np.arange(-fmax,fmax+fmax/200.,fmax/200.) A = 1.0 line_ampl = A/2.*np.array([0, 1]) # plot the lowpass spectrum in black shapes = ['tri', 'line'] if shape.lower() not in shapes: raise ValueError('shape must be tri or line') if shape.lower() == 'tri': plt.plot(f,lp_tri(f,fb)) # overlay positive and negative frequency translates for n in range(N): plt.plot(f, lp_tri(f - (n + 1) * fs, fb), '--r') plt.plot(f, lp_tri(f + (n + 1) * fs, fb), '--g') elif shape.lower() == 'line': plt.plot([fb, fb],line_ampl,'b', linewidth=2) plt.plot([-fb, -fb],line_ampl,'b', linewidth=2) # overlay positive and negative frequency translates for n in range(N): plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2) plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2) plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2) plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2) plt.ylabel('Spectrum Magnitude') plt.xlabel('Frequency in Hz') plt.axis([-fmax,fmax,0,1]) plt.grid()
def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)): """ Lowpass sampling theorem plotting function. Display the spectrum of a sampled signal after setting the bandwidth, sampling frequency, maximum display frequency, and spectral shape. Parameters ---------- fb : spectrum lowpass bandwidth in Hz fs : sampling frequency in Hz fmax : plot over [-fmax,fmax] shape : 'tri' or 'line' N : number of translates, N positive and N negative fsize : the size of the figure window, default (6,4) Returns ------- Nothing : A plot window opens containing the spectrum plot Examples -------- >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm.sigsys import lp_samp No aliasing as bandwidth 10 Hz < 25/2; fs > fb. >>> lp_samp(10,25,50,10) >>> plt.show() Now aliasing as bandwidth 15 Hz > 25/2; fs < fb. >>> lp_samp(15,25,50,10) """ plt.figure(figsize=fsize) # define the plot interval f = np.arange(-fmax,fmax+fmax/200.,fmax/200.) A = 1.0 line_ampl = A/2.*np.array([0, 1]) # plot the lowpass spectrum in black shapes = ['tri', 'line'] if shape.lower() not in shapes: raise ValueError('shape must be tri or line') if shape.lower() == 'tri': plt.plot(f,lp_tri(f,fb)) # overlay positive and negative frequency translates for n in range(N): plt.plot(f, lp_tri(f - (n + 1) * fs, fb), '--r') plt.plot(f, lp_tri(f + (n + 1) * fs, fb), '--g') elif shape.lower() == 'line': plt.plot([fb, fb],line_ampl,'b', linewidth=2) plt.plot([-fb, -fb],line_ampl,'b', linewidth=2) # overlay positive and negative frequency translates for n in range(N): plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2) plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2) plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2) plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2) plt.ylabel('Spectrum Magnitude') plt.xlabel('Frequency in Hz') plt.axis([-fmax,fmax,0,1]) plt.grid()
[ "Lowpass", "sampling", "theorem", "plotting", "function", ".", "Display", "the", "spectrum", "of", "a", "sampled", "signal", "after", "setting", "the", "bandwidth", "sampling", "frequency", "maximum", "display", "frequency", "and", "spectral", "shape", ".", "Parameters", "----------", "fb", ":", "spectrum", "lowpass", "bandwidth", "in", "Hz", "fs", ":", "sampling", "frequency", "in", "Hz", "fmax", ":", "plot", "over", "[", "-", "fmax", "fmax", "]", "shape", ":", "tri", "or", "line", "N", ":", "number", "of", "translates", "N", "positive", "and", "N", "negative", "fsize", ":", "the", "size", "of", "the", "figure", "window", "default", "(", "6", "4", ")", "Returns", "-------", "Nothing", ":", "A", "plot", "window", "opens", "containing", "the", "spectrum", "plot", "Examples", "--------", ">>>", "import", "matplotlib", ".", "pyplot", "as", "plt", ">>>", "from", "sk_dsp_comm", ".", "sigsys", "import", "lp_samp" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L592-L654
[ "def", "lp_samp", "(", "fb", ",", "fs", ",", "fmax", ",", "N", ",", "shape", "=", "'tri'", ",", "fsize", "=", "(", "6", ",", "4", ")", ")", ":", "plt", ".", "figure", "(", "figsize", "=", "fsize", ")", "# define the plot interval", "f", "=", "np", ".", "arange", "(", "-", "fmax", ",", "fmax", "+", "fmax", "/", "200.", ",", "fmax", "/", "200.", ")", "A", "=", "1.0", "line_ampl", "=", "A", "/", "2.", "*", "np", ".", "array", "(", "[", "0", ",", "1", "]", ")", "# plot the lowpass spectrum in black", "shapes", "=", "[", "'tri'", ",", "'line'", "]", "if", "shape", ".", "lower", "(", ")", "not", "in", "shapes", ":", "raise", "ValueError", "(", "'shape must be tri or line'", ")", "if", "shape", ".", "lower", "(", ")", "==", "'tri'", ":", "plt", ".", "plot", "(", "f", ",", "lp_tri", "(", "f", ",", "fb", ")", ")", "# overlay positive and negative frequency translates", "for", "n", "in", "range", "(", "N", ")", ":", "plt", ".", "plot", "(", "f", ",", "lp_tri", "(", "f", "-", "(", "n", "+", "1", ")", "*", "fs", ",", "fb", ")", ",", "'--r'", ")", "plt", ".", "plot", "(", "f", ",", "lp_tri", "(", "f", "+", "(", "n", "+", "1", ")", "*", "fs", ",", "fb", ")", ",", "'--g'", ")", "elif", "shape", ".", "lower", "(", ")", "==", "'line'", ":", "plt", ".", "plot", "(", "[", "fb", ",", "fb", "]", ",", "line_ampl", ",", "'b'", ",", "linewidth", "=", "2", ")", "plt", ".", "plot", "(", "[", "-", "fb", ",", "-", "fb", "]", ",", "line_ampl", ",", "'b'", ",", "linewidth", "=", "2", ")", "# overlay positive and negative frequency translates", "for", "n", "in", "range", "(", "N", ")", ":", "plt", ".", "plot", "(", "[", "fb", "+", "(", "n", "+", "1", ")", "*", "fs", ",", "fb", "+", "(", "n", "+", "1", ")", "*", "fs", "]", ",", "line_ampl", ",", "'--r'", ",", "linewidth", "=", "2", ")", "plt", ".", "plot", "(", "[", "-", "fb", "+", "(", "n", "+", "1", ")", "*", "fs", ",", "-", "fb", "+", "(", "n", "+", "1", ")", "*", "fs", "]", ",", "line_ampl", ",", "'--r'", ",", "linewidth", "=", "2", ")", "plt", ".", "plot", "(", "[", "fb", "-", "(", "n", "+", "1", ")", "*", "fs", ",", "fb", "-", "(", "n", "+", "1", ")", "*", "fs", "]", ",", "line_ampl", ",", "'--g'", ",", "linewidth", "=", "2", ")", "plt", ".", "plot", "(", "[", "-", "fb", "-", "(", "n", "+", "1", ")", "*", "fs", ",", "-", "fb", "-", "(", "n", "+", "1", ")", "*", "fs", "]", ",", "line_ampl", ",", "'--g'", ",", "linewidth", "=", "2", ")", "plt", ".", "ylabel", "(", "'Spectrum Magnitude'", ")", "plt", ".", "xlabel", "(", "'Frequency in Hz'", ")", "plt", ".", "axis", "(", "[", "-", "fmax", ",", "fmax", ",", "0", ",", "1", "]", ")", "plt", ".", "grid", "(", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
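The two doctest calls in the lp_samp docstring can be run as a short script; this sketch only restates them with explicit plt.show() calls, assuming matplotlib is installed.

import matplotlib.pyplot as plt
from sk_dsp_comm.sigsys import lp_samp

# fb = 10 Hz < fs/2 = 12.5 Hz: the translates do not overlap (no aliasing)
lp_samp(10, 25, 50, 10)
plt.show()
# fb = 15 Hz > fs/2 = 12.5 Hz: the translates overlap (aliasing)
lp_samp(15, 25, 50, 10)
plt.show()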
valid
lp_tri
Triangle spectral shape function used by :func:`lp_samp`. Parameters ---------- f : ndarray containing frequency samples fb : the bandwidth as a float constant Returns ------- x : ndarray of spectrum samples for a single triangle shape Notes ----- This is a support function for the lowpass spectrum plotting function :func:`lp_samp`. Examples -------- >>> x = lp_tri(f, fb)
sk_dsp_comm/sigsys.py
def lp_tri(f, fb): """ Triangle spectral shape function used by :func:`lp_samp`. Parameters ---------- f : ndarray containing frequency samples fb : the bandwidth as a float constant Returns ------- x : ndarray of spectrum samples for a single triangle shape Notes ----- This is a support function for the lowpass spectrum plotting function :func:`lp_samp`. Examples -------- >>> x = lp_tri(f, fb) """ x = np.zeros(len(f)) for k in range(len(f)): if abs(f[k]) <= fb: x[k] = 1 - abs(f[k])/float(fb) return x
def lp_tri(f, fb): """ Triangle spectral shape function used by :func:`lp_samp`. Parameters ---------- f : ndarray containing frequency samples fb : the bandwidth as a float constant Returns ------- x : ndarray of spectrum samples for a single triangle shape Notes ----- This is a support function for the lowpass spectrum plotting function :func:`lp_samp`. Examples -------- >>> x = lp_tri(f, fb) """ x = np.zeros(len(f)) for k in range(len(f)): if abs(f[k]) <= fb: x[k] = 1 - abs(f[k])/float(fb) return x
[ "Triangle", "spectral", "shape", "function", "used", "by", ":", "func", ":", "lp_samp", "." ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L657-L684
[ "def", "lp_tri", "(", "f", ",", "fb", ")", ":", "x", "=", "np", ".", "zeros", "(", "len", "(", "f", ")", ")", "for", "k", "in", "range", "(", "len", "(", "f", ")", ")", ":", "if", "abs", "(", "f", "[", "k", "]", ")", "<=", "fb", ":", "x", "[", "k", "]", "=", "1", "-", "abs", "(", "f", "[", "k", "]", ")", "/", "float", "(", "fb", ")", "return", "x" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
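A minimal standalone use of lp_tri outside of lp_samp; the frequency grid and 10 Hz bandwidth below are arbitrary choices.

import numpy as np
import matplotlib.pyplot as plt
from sk_dsp_comm.sigsys import lp_tri

f = np.arange(-20, 20, 0.1)     # frequency axis in Hz
X = lp_tri(f, 10)               # unit-height triangle with 10 Hz bandwidth
plt.plot(f, X)
plt.xlabel('Frequency in Hz')
plt.ylabel('Spectrum Magnitude')
plt.show()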
valid
sinusoidAWGN
Add white Gaussian noise to a single real sinusoid. Input a single sinusoid to this function and it returns a noisy sinusoid at a specific SNR value in dB. Sinusoid power is calculated using np.var. Parameters ---------- x : Input signal as ndarray consisting of a single sinusoid SNRdB : SNR in dB for output sinusoid Returns ------- y : Noisy sinusoid return vector Examples -------- >>> # set the SNR to 10 dB >>> n = arange(0,10000) >>> x = cos(2*pi*0.04*n) >>> y = sinusoidAWGN(x,10.0)
sk_dsp_comm/sigsys.py
def sinusoidAWGN(x,SNRdB): """ Add white Gaussian noise to a single real sinusoid. Input a single sinusoid to this function and it returns a noisy sinusoid at a specific SNR value in dB. Sinusoid power is calculated using np.var. Parameters ---------- x : Input signal as ndarray consisting of a single sinusoid SNRdB : SNR in dB for output sinusoid Returns ------- y : Noisy sinusoid return vector Examples -------- >>> # set the SNR to 10 dB >>> n = arange(0,10000) >>> x = cos(2*pi*0.04*n) >>> y = sinusoidAWGN(x,10.0) """ # Estimate signal power x_pwr = np.var(x) # Create noise vector noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x)); return x + noise
def sinusoidAWGN(x,SNRdB): """ Add white Gaussian noise to a single real sinusoid. Input a single sinusoid to this function and it returns a noisy sinusoid at a specific SNR value in dB. Sinusoid power is calculated using np.var. Parameters ---------- x : Input signal as ndarray consisting of a single sinusoid SNRdB : SNR in dB for output sinusoid Returns ------- y : Noisy sinusoid return vector Examples -------- >>> # set the SNR to 10 dB >>> n = arange(0,10000) >>> x = cos(2*pi*0.04*n) >>> y = sinusoidAWGN(x,10.0) """ # Estimate signal power x_pwr = np.var(x) # Create noise vector noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x)); return x + noise
[ "Add", "white", "Gaussian", "noise", "to", "a", "single", "real", "sinusoid", ".", "Input", "a", "single", "sinusoid", "to", "this", "function", "and", "it", "returns", "a", "noisy", "sinusoid", "at", "a", "specific", "SNR", "value", "in", "dB", ".", "Sinusoid", "power", "is", "calculated", "using", "np", ".", "var", ".", "Parameters", "----------", "x", ":", "Input", "signal", "as", "ndarray", "consisting", "of", "a", "single", "sinusoid", "SNRdB", ":", "SNR", "in", "dB", "for", "output", "sinusoid", "Returns", "-------", "y", ":", "Noisy", "sinusoid", "return", "vector" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L687-L716
[ "def", "sinusoidAWGN", "(", "x", ",", "SNRdB", ")", ":", "# Estimate signal power", "x_pwr", "=", "np", ".", "var", "(", "x", ")", "# Create noise vector", "noise", "=", "np", ".", "sqrt", "(", "x_pwr", "/", "10", "**", "(", "SNRdB", "/", "10.", ")", ")", "*", "np", ".", "random", ".", "randn", "(", "len", "(", "x", ")", ")", "return", "x", "+", "noise" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
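The docstring example for sinusoidAWGN can be extended with a quick sanity check on the realized SNR; the 0.04 cycles/sample tone comes from the docstring, while the SNR-estimate lines are an added assumption about how one might verify the result.

import numpy as np
from sk_dsp_comm.sigsys import sinusoidAWGN

n = np.arange(0, 10000)
x = np.cos(2*np.pi*0.04*n)                       # unit-amplitude sinusoid
y = sinusoidAWGN(x, 10.0)                        # add noise for a 10 dB SNR
snr_est = 10*np.log10(np.var(x)/np.var(y - x))   # should come out close to 10 dB
print('Estimated SNR: %.2f dB' % snr_est)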
valid
discrim
function disdata = discrim(x) where x is an angle modulated signal in complex baseband form. Mark Wickert
sk_dsp_comm/rtlsdr_helper.py
def discrim(x): """ function disdata = discrim(x) where x is an angle modulated signal in complex baseband form. Mark Wickert """ X=np.real(x) # X is the real part of the received signal Y=np.imag(x) # Y is the imaginary part of the received signal b=np.array([1, -1]) # filter coefficients for discrete derivative a=np.array([1, 0]) # filter coefficients for discrete derivative derY=signal.lfilter(b,a,Y) # derivative of Y, derX=signal.lfilter(b,a,X) # " X, disdata=(X*derY-Y*derX)/(X**2+Y**2) return disdata
def discrim(x): """ function disdata = discrim(x) where x is an angle modulated signal in complex baseband form. Mark Wickert """ X=np.real(x) # X is the real part of the received signal Y=np.imag(x) # Y is the imaginary part of the received signal b=np.array([1, -1]) # filter coefficients for discrete derivative a=np.array([1, 0]) # filter coefficients for discrete derivative derY=signal.lfilter(b,a,Y) # derivative of Y, derX=signal.lfilter(b,a,X) # " X, disdata=(X*derY-Y*derX)/(X**2+Y**2) return disdata
[ "function", "disdata", "=", "discrim", "(", "x", ")", "where", "x", "is", "an", "angle", "modulated", "signal", "in", "complex", "baseband", "form", ".", "Mark", "Wickert" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/rtlsdr_helper.py#L55-L69
[ "def", "discrim", "(", "x", ")", ":", "X", "=", "np", ".", "real", "(", "x", ")", "# X is the real part of the received signal\r", "Y", "=", "np", ".", "imag", "(", "x", ")", "# Y is the imaginary part of the received signal\r", "b", "=", "np", ".", "array", "(", "[", "1", ",", "-", "1", "]", ")", "# filter coefficients for discrete derivative\r", "a", "=", "np", ".", "array", "(", "[", "1", ",", "0", "]", ")", "# filter coefficients for discrete derivative\r", "derY", "=", "signal", ".", "lfilter", "(", "b", ",", "a", ",", "Y", ")", "# derivative of Y, \r", "derX", "=", "signal", ".", "lfilter", "(", "b", ",", "a", ",", "X", ")", "# \" X,\r", "disdata", "=", "(", "X", "*", "derY", "-", "Y", "*", "derX", ")", "/", "(", "X", "**", "2", "+", "Y", "**", "2", ")", "return", "disdata" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
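A sketch of discrim on a synthetic FM tone, assuming sk_dsp_comm.rtlsdr_helper imports cleanly on the target machine (it pulls in its own dependencies); the message tone, deviation, and sample rate are arbitrary.

import numpy as np
from sk_dsp_comm.rtlsdr_helper import discrim

fs = 48000
n = np.arange(fs)                          # one second of samples
m = np.cos(2*np.pi*1e3*n/fs)               # 1 kHz message
phi = 2*np.pi*2e3/fs*np.cumsum(m)          # FM phase for a 2 kHz deviation
x = np.exp(1j*phi)                         # complex baseband FM signal
z = discrim(x)                             # output tracks the instantaneous frequency, i.e. m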
valid
mono_FM
Decimate complex baseband input by 10 Design 1st decimation lowpass filter (f_c = 200 KHz)
sk_dsp_comm/rtlsdr_helper.py
def mono_FM(x,fs=2.4e6,file_name='test.wav'): """ Decimate complex baseband input by 10 Design 1st decimation lowpass filter (f_c = 200 KHz) """ b = signal.firwin(64,2*200e3/float(fs)) # Filter and decimate (should be polyphase) y = signal.lfilter(b,1,x) z = ss.downsample(y,10) # Apply complex baseband discriminator z_bb = discrim(z) # Design 2nd decimation lowpass filter (fc = 12 KHz) bb = signal.firwin(64,2*12e3/(float(fs)/10)) # Filter and decimate zz_bb = signal.lfilter(bb,1,z_bb) # Decimate by 5 z_out = ss.downsample(zz_bb,5) # Save to wave file ss.to_wav(file_name, 48000, z_out/2) print('Done!') return z_bb, z_out
def mono_FM(x,fs=2.4e6,file_name='test.wav'): """ Decimate complex baseband input by 10 Design 1st decimation lowpass filter (f_c = 200 KHz) """ b = signal.firwin(64,2*200e3/float(fs)) # Filter and decimate (should be polyphase) y = signal.lfilter(b,1,x) z = ss.downsample(y,10) # Apply complex baseband discriminator z_bb = discrim(z) # Design 2nd decimation lowpass filter (fc = 12 KHz) bb = signal.firwin(64,2*12e3/(float(fs)/10)) # Filter and decimate zz_bb = signal.lfilter(bb,1,z_bb) # Decimate by 5 z_out = ss.downsample(zz_bb,5) # Save to wave file ss.to_wav(file_name, 48000, z_out/2) print('Done!') return z_bb, z_out
[ "Decimate", "complex", "baseband", "input", "by", "10", "Design", "1st", "decimation", "lowpass", "filter", "(", "f_c", "=", "200", "KHz", ")" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/rtlsdr_helper.py#L72-L92
[ "def", "mono_FM", "(", "x", ",", "fs", "=", "2.4e6", ",", "file_name", "=", "'test.wav'", ")", ":", "b", "=", "signal", ".", "firwin", "(", "64", ",", "2", "*", "200e3", "/", "float", "(", "fs", ")", ")", "# Filter and decimate (should be polyphase)\r", "y", "=", "signal", ".", "lfilter", "(", "b", ",", "1", ",", "x", ")", "z", "=", "ss", ".", "downsample", "(", "y", ",", "10", ")", "# Apply complex baseband discriminator\r", "z_bb", "=", "discrim", "(", "z", ")", "# Design 2nd decimation lowpass filter (fc = 12 KHz)\r", "bb", "=", "signal", ".", "firwin", "(", "64", ",", "2", "*", "12e3", "/", "(", "float", "(", "fs", ")", "/", "10", ")", ")", "# Filter and decimate\r", "zz_bb", "=", "signal", ".", "lfilter", "(", "bb", ",", "1", ",", "z_bb", ")", "# Decimate by 5\r", "z_out", "=", "ss", ".", "downsample", "(", "zz_bb", ",", "5", ")", "# Save to wave file\r", "ss", ".", "to_wav", "(", "file_name", ",", "48000", ",", "z_out", "/", "2", ")", "print", "(", "'Done!'", ")", "return", "z_bb", ",", "z_out" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
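A sketch of driving mono_FM with a synthetic capture; in real use the input would be complex baseband samples from an RTL-SDR at 2.4 Msps, so the test-tone construction here is only a stand-in.

import numpy as np
from sk_dsp_comm.rtlsdr_helper import mono_FM

fs = 2.4e6
n = np.arange(int(0.1*fs))                 # 100 ms of samples
m = np.cos(2*np.pi*1e3*n/fs)               # 1 kHz test tone as the message
phi = 2*np.pi*75e3/fs*np.cumsum(m)         # 75 kHz deviation, as in broadcast FM
x = np.exp(1j*phi)                         # complex baseband FM signal
z_bb, z_out = mono_FM(x, fs, file_name='test_tone.wav')   # z_out is the 48 ksps audio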
valid
stereo_FM
Stereo demod from complex baseband at sampling rate fs. Assume fs is 2400 ksps Mark Wickert July 2017
sk_dsp_comm/rtlsdr_helper.py
def stereo_FM(x,fs=2.4e6,file_name='test.wav'): """ Stereo demod from complex baseband at sampling rate fs. Assume fs is 2400 ksps Mark Wickert July 2017 """ N1 = 10 b = signal.firwin(64,2*200e3/float(fs)) # Filter and decimate (should be polyphase) y = signal.lfilter(b,1,x) z = ss.downsample(y,N1) # Apply complex baseband discriminator z_bb = discrim(z) # Work with the (3) stereo multiplex signals: # Begin by designing a lowpass filter for L+R and DSP demoded (L-R) # (fc = 12 KHz) b12 = signal.firwin(128,2*12e3/(float(fs)/N1)) # The L + R term is at baseband, we just lowpass filter to remove # other terms above 12 kHz. y_lpr = signal.lfilter(b12,1,z_bb) b19 = signal.firwin(128,2*1e3*np.array([19-5,19+5])/(float(fs)/N1), pass_zero=False); z_bb19 = signal.lfilter(b19,1,z_bb) # Lock PLL to 19 kHz pilot # A type 2 loop with bandwidth Bn = 10 Hz and damping zeta = 0.707 # The VCO quiescent frequency is set to 19000 Hz. theta, phi_error = pilot_PLL(z_bb19,19000,fs/N1,2,10,0.707) # Coherently demodulate the L - R subcarrier at 38 kHz. # theta is the PLL output phase at 19 kHz, so to double multiply # by 2 and wrap with cos() or sin(). # First bandpass filter b38 = signal.firwin(128,2*1e3*np.array([38-5,38+5])/(float(fs)/N1), pass_zero=False); x_lmr = signal.lfilter(b38,1,z_bb) # Coherently demodulate using the PLL output phase x_lmr = 2*np.sqrt(2)*np.cos(2*theta)*x_lmr # Lowpass at 12 kHz to recover the desired DSB demod term y_lmr = signal.lfilter(b12,1,x_lmr) # Matrix the y_lmr and y_lpr for form right and left channels: y_left = y_lpr + y_lmr y_right = y_lpr - y_lmr # Decimate by N2 (nominally 5) N2 = 5 fs2 = float(fs)/(N1*N2) # (nominally 48 ksps) y_left_DN2 = ss.downsample(y_left,N2) y_right_DN2 = ss.downsample(y_right,N2) # Deemphasize with 75 us time constant to 'undo' the preemphasis # applied at the transmitter in broadcast FM. # A 1-pole digital lowpass works well here. a_de = np.exp(-2.1*1e3*2*np.pi/fs2) z_left = signal.lfilter([1-a_de],[1, -a_de],y_left_DN2) z_right = signal.lfilter([1-a_de],[1, -a_de],y_right_DN2) # Place left and righ channels as side-by-side columns in a 2D array z_out = np.hstack((np.array([z_left]).T,(np.array([z_right]).T))) ss.to_wav(file_name, 48000, z_out/2) print('Done!') #return z_bb, z_out return z_bb, theta, y_lpr, y_lmr, z_out
def stereo_FM(x,fs=2.4e6,file_name='test.wav'): """ Stereo demod from complex baseband at sampling rate fs. Assume fs is 2400 ksps Mark Wickert July 2017 """ N1 = 10 b = signal.firwin(64,2*200e3/float(fs)) # Filter and decimate (should be polyphase) y = signal.lfilter(b,1,x) z = ss.downsample(y,N1) # Apply complex baseband discriminator z_bb = discrim(z) # Work with the (3) stereo multiplex signals: # Begin by designing a lowpass filter for L+R and DSP demoded (L-R) # (fc = 12 KHz) b12 = signal.firwin(128,2*12e3/(float(fs)/N1)) # The L + R term is at baseband, we just lowpass filter to remove # other terms above 12 kHz. y_lpr = signal.lfilter(b12,1,z_bb) b19 = signal.firwin(128,2*1e3*np.array([19-5,19+5])/(float(fs)/N1), pass_zero=False); z_bb19 = signal.lfilter(b19,1,z_bb) # Lock PLL to 19 kHz pilot # A type 2 loop with bandwidth Bn = 10 Hz and damping zeta = 0.707 # The VCO quiescent frequency is set to 19000 Hz. theta, phi_error = pilot_PLL(z_bb19,19000,fs/N1,2,10,0.707) # Coherently demodulate the L - R subcarrier at 38 kHz. # theta is the PLL output phase at 19 kHz, so to double multiply # by 2 and wrap with cos() or sin(). # First bandpass filter b38 = signal.firwin(128,2*1e3*np.array([38-5,38+5])/(float(fs)/N1), pass_zero=False); x_lmr = signal.lfilter(b38,1,z_bb) # Coherently demodulate using the PLL output phase x_lmr = 2*np.sqrt(2)*np.cos(2*theta)*x_lmr # Lowpass at 12 kHz to recover the desired DSB demod term y_lmr = signal.lfilter(b12,1,x_lmr) # Matrix the y_lmr and y_lpr for form right and left channels: y_left = y_lpr + y_lmr y_right = y_lpr - y_lmr # Decimate by N2 (nominally 5) N2 = 5 fs2 = float(fs)/(N1*N2) # (nominally 48 ksps) y_left_DN2 = ss.downsample(y_left,N2) y_right_DN2 = ss.downsample(y_right,N2) # Deemphasize with 75 us time constant to 'undo' the preemphasis # applied at the transmitter in broadcast FM. # A 1-pole digital lowpass works well here. a_de = np.exp(-2.1*1e3*2*np.pi/fs2) z_left = signal.lfilter([1-a_de],[1, -a_de],y_left_DN2) z_right = signal.lfilter([1-a_de],[1, -a_de],y_right_DN2) # Place left and righ channels as side-by-side columns in a 2D array z_out = np.hstack((np.array([z_left]).T,(np.array([z_right]).T))) ss.to_wav(file_name, 48000, z_out/2) print('Done!') #return z_bb, z_out return z_bb, theta, y_lpr, y_lmr, z_out
[ "Stereo", "demod", "from", "complex", "baseband", "at", "sampling", "rate", "fs", ".", "Assume", "fs", "is", "2400", "ksps", "Mark", "Wickert", "July", "2017" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/rtlsdr_helper.py#L95-L155
[ "def", "stereo_FM", "(", "x", ",", "fs", "=", "2.4e6", ",", "file_name", "=", "'test.wav'", ")", ":", "N1", "=", "10", "b", "=", "signal", ".", "firwin", "(", "64", ",", "2", "*", "200e3", "/", "float", "(", "fs", ")", ")", "# Filter and decimate (should be polyphase)\r", "y", "=", "signal", ".", "lfilter", "(", "b", ",", "1", ",", "x", ")", "z", "=", "ss", ".", "downsample", "(", "y", ",", "N1", ")", "# Apply complex baseband discriminator\r", "z_bb", "=", "discrim", "(", "z", ")", "# Work with the (3) stereo multiplex signals:\r", "# Begin by designing a lowpass filter for L+R and DSP demoded (L-R)\r", "# (fc = 12 KHz)\r", "b12", "=", "signal", ".", "firwin", "(", "128", ",", "2", "*", "12e3", "/", "(", "float", "(", "fs", ")", "/", "N1", ")", ")", "# The L + R term is at baseband, we just lowpass filter to remove \r", "# other terms above 12 kHz.\r", "y_lpr", "=", "signal", ".", "lfilter", "(", "b12", ",", "1", ",", "z_bb", ")", "b19", "=", "signal", ".", "firwin", "(", "128", ",", "2", "*", "1e3", "*", "np", ".", "array", "(", "[", "19", "-", "5", ",", "19", "+", "5", "]", ")", "/", "(", "float", "(", "fs", ")", "/", "N1", ")", ",", "pass_zero", "=", "False", ")", "z_bb19", "=", "signal", ".", "lfilter", "(", "b19", ",", "1", ",", "z_bb", ")", "# Lock PLL to 19 kHz pilot\r", "# A type 2 loop with bandwidth Bn = 10 Hz and damping zeta = 0.707 \r", "# The VCO quiescent frequency is set to 19000 Hz.\r", "theta", ",", "phi_error", "=", "pilot_PLL", "(", "z_bb19", ",", "19000", ",", "fs", "/", "N1", ",", "2", ",", "10", ",", "0.707", ")", "# Coherently demodulate the L - R subcarrier at 38 kHz.\r", "# theta is the PLL output phase at 19 kHz, so to double multiply \r", "# by 2 and wrap with cos() or sin().\r", "# First bandpass filter\r", "b38", "=", "signal", ".", "firwin", "(", "128", ",", "2", "*", "1e3", "*", "np", ".", "array", "(", "[", "38", "-", "5", ",", "38", "+", "5", "]", ")", "/", "(", "float", "(", "fs", ")", "/", "N1", ")", ",", "pass_zero", "=", "False", ")", "x_lmr", "=", "signal", ".", "lfilter", "(", "b38", ",", "1", ",", "z_bb", ")", "# Coherently demodulate using the PLL output phase\r", "x_lmr", "=", "2", "*", "np", ".", "sqrt", "(", "2", ")", "*", "np", ".", "cos", "(", "2", "*", "theta", ")", "*", "x_lmr", "# Lowpass at 12 kHz to recover the desired DSB demod term\r", "y_lmr", "=", "signal", ".", "lfilter", "(", "b12", ",", "1", ",", "x_lmr", ")", "# Matrix the y_lmr and y_lpr for form right and left channels:\r", "y_left", "=", "y_lpr", "+", "y_lmr", "y_right", "=", "y_lpr", "-", "y_lmr", "# Decimate by N2 (nominally 5)\r", "N2", "=", "5", "fs2", "=", "float", "(", "fs", ")", "/", "(", "N1", "*", "N2", ")", "# (nominally 48 ksps)\r", "y_left_DN2", "=", "ss", ".", "downsample", "(", "y_left", ",", "N2", ")", "y_right_DN2", "=", "ss", ".", "downsample", "(", "y_right", ",", "N2", ")", "# Deemphasize with 75 us time constant to 'undo' the preemphasis \r", "# applied at the transmitter in broadcast FM.\r", "# A 1-pole digital lowpass works well here.\r", "a_de", "=", "np", ".", "exp", "(", "-", "2.1", "*", "1e3", "*", "2", "*", "np", ".", "pi", "/", "fs2", ")", "z_left", "=", "signal", ".", "lfilter", "(", "[", "1", "-", "a_de", "]", ",", "[", "1", ",", "-", "a_de", "]", ",", "y_left_DN2", ")", "z_right", "=", "signal", ".", "lfilter", "(", "[", "1", "-", "a_de", "]", ",", "[", "1", ",", "-", "a_de", "]", ",", "y_right_DN2", ")", "# Place left and righ channels as side-by-side columns in a 2D array\r", "z_out", "=", "np", ".", "hstack", "(", "(", "np", ".", "array", 
"(", "[", "z_left", "]", ")", ".", "T", ",", "(", "np", ".", "array", "(", "[", "z_right", "]", ")", ".", "T", ")", ")", ")", "ss", ".", "to_wav", "(", "file_name", ",", "48000", ",", "z_out", "/", "2", ")", "print", "(", "'Done!'", ")", "#return z_bb, z_out\r", "return", "z_bb", ",", "theta", ",", "y_lpr", ",", "y_lmr", ",", "z_out" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
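stereo_FM expects a real broadcast-FM capture containing the 19 kHz pilot and 38 kHz subcarrier; the placeholder input in this sketch only exercises the processing chain, so the resulting stereo separation carries no meaning.

import numpy as np
from sk_dsp_comm.rtlsdr_helper import stereo_FM

fs = 2.4e6
n = np.arange(int(0.1*fs))
phi = 2*np.pi*75e3/fs*np.cumsum(np.cos(2*np.pi*1e3*n/fs))
x = np.exp(1j*phi)                         # placeholder FM signal (no pilot or L-R content)
z_bb, theta, y_lpr, y_lmr, z_out = stereo_FM(x, fs, file_name='stereo.wav')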
valid
pilot_PLL
theta, phi_error = pilot_PLL(xr,fq,fs,loop_type,Bn,zeta) Mark Wickert, April 2014
sk_dsp_comm/rtlsdr_helper.py
def pilot_PLL(xr,fq,fs,loop_type,Bn,zeta): """ theta, phi_error = pilot_PLL(xr,fq,fs,loop_type,Bn,zeta) Mark Wickert, April 2014 """ T = 1/float(fs) # Set the VCO gain in Hz/V Kv = 1.0 # Design a lowpass filter to remove the double freq term Norder = 5 b_lp,a_lp = signal.butter(Norder,2*(fq/2.)/float(fs)) fstate = np.zeros(Norder) # LPF state vector Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v if loop_type == 1: # First-order loop parameters fn = Bn Kt = 2*np.pi*fn # loop natural frequency in rad/s elif loop_type == 2: # Second-order loop parameters fn = 1/(2*np.pi)*2*Bn/(zeta + 1/(4*zeta)) # given Bn in Hz Kt = 4*np.pi*zeta*fn # loop natural frequency in rad/s a = np.pi*fn/zeta else: print('Loop type must be 1 or 2') # Initialize integration approximation filters filt_in_last = 0 filt_out_last = 0 vco_in_last = 0 vco_out = 0 vco_out_last = 0 # Initialize working and final output vectors n = np.arange(0,len(xr)) theta = np.zeros(len(xr)) ev = np.zeros(len(xr)) phi_error = np.zeros(len(xr)) # Normalize total power in an attemp to make the 19kHz sinusoid # component have amplitude ~1. #xr = xr/(2/3*std(xr)); # Begin the simulation loop for kk in range(len(n)): # Sinusoidal phase detector (simple multiplier) phi_error[kk] = 2*xr[kk]*np.sin(vco_out) # LPF to remove double frequency term phi_error[kk],fstate = signal.lfilter(b_lp,a_lp,np.array([phi_error[kk]]),zi=fstate) pd_out = phi_error[kk] #pd_out = 0 # Loop gain gain_out = Kt/Kv*pd_out # apply VCO gain at VCO # Loop filter if loop_type == 2: filt_in = a*gain_out filt_out = filt_out_last + T/2.*(filt_in + filt_in_last) filt_in_last = filt_in filt_out_last = filt_out filt_out = filt_out + gain_out else: filt_out = gain_out # VCO vco_in = filt_out + fq/(Kv/(2*np.pi)) # bias to quiescent freq. vco_out = vco_out_last + T/2.*(vco_in + vco_in_last) vco_in_last = vco_in vco_out_last = vco_out vco_out = Kv*vco_out # apply Kv # Measured loop signals ev[kk] = filt_out theta[kk] = np.mod(vco_out,2*np.pi); # The vco phase mod 2pi return theta,phi_error
def pilot_PLL(xr,fq,fs,loop_type,Bn,zeta): """ theta, phi_error = pilot_PLL(xr,fq,fs,loop_type,Bn,zeta) Mark Wickert, April 2014 """ T = 1/float(fs) # Set the VCO gain in Hz/V Kv = 1.0 # Design a lowpass filter to remove the double freq term Norder = 5 b_lp,a_lp = signal.butter(Norder,2*(fq/2.)/float(fs)) fstate = np.zeros(Norder) # LPF state vector Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v if loop_type == 1: # First-order loop parameters fn = Bn Kt = 2*np.pi*fn # loop natural frequency in rad/s elif loop_type == 2: # Second-order loop parameters fn = 1/(2*np.pi)*2*Bn/(zeta + 1/(4*zeta)) # given Bn in Hz Kt = 4*np.pi*zeta*fn # loop natural frequency in rad/s a = np.pi*fn/zeta else: print('Loop type must be 1 or 2') # Initialize integration approximation filters filt_in_last = 0 filt_out_last = 0 vco_in_last = 0 vco_out = 0 vco_out_last = 0 # Initialize working and final output vectors n = np.arange(0,len(xr)) theta = np.zeros(len(xr)) ev = np.zeros(len(xr)) phi_error = np.zeros(len(xr)) # Normalize total power in an attemp to make the 19kHz sinusoid # component have amplitude ~1. #xr = xr/(2/3*std(xr)); # Begin the simulation loop for kk in range(len(n)): # Sinusoidal phase detector (simple multiplier) phi_error[kk] = 2*xr[kk]*np.sin(vco_out) # LPF to remove double frequency term phi_error[kk],fstate = signal.lfilter(b_lp,a_lp,np.array([phi_error[kk]]),zi=fstate) pd_out = phi_error[kk] #pd_out = 0 # Loop gain gain_out = Kt/Kv*pd_out # apply VCO gain at VCO # Loop filter if loop_type == 2: filt_in = a*gain_out filt_out = filt_out_last + T/2.*(filt_in + filt_in_last) filt_in_last = filt_in filt_out_last = filt_out filt_out = filt_out + gain_out else: filt_out = gain_out # VCO vco_in = filt_out + fq/(Kv/(2*np.pi)) # bias to quiescent freq. vco_out = vco_out_last + T/2.*(vco_in + vco_in_last) vco_in_last = vco_in vco_out_last = vco_out vco_out = Kv*vco_out # apply Kv # Measured loop signals ev[kk] = filt_out theta[kk] = np.mod(vco_out,2*np.pi); # The vco phase mod 2pi return theta,phi_error
[ "theta", "phi_error", "=", "pilot_PLL", "(", "xr", "fq", "fs", "loop_type", "Bn", "zeta", ")", "Mark", "Wickert", "April", "2014" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/rtlsdr_helper.py#L171-L242
[ "def", "pilot_PLL", "(", "xr", ",", "fq", ",", "fs", ",", "loop_type", ",", "Bn", ",", "zeta", ")", ":", "T", "=", "1", "/", "float", "(", "fs", ")", "# Set the VCO gain in Hz/V \r", "Kv", "=", "1.0", "# Design a lowpass filter to remove the double freq term\r", "Norder", "=", "5", "b_lp", ",", "a_lp", "=", "signal", ".", "butter", "(", "Norder", ",", "2", "*", "(", "fq", "/", "2.", ")", "/", "float", "(", "fs", ")", ")", "fstate", "=", "np", ".", "zeros", "(", "Norder", ")", "# LPF state vector\r", "Kv", "=", "2", "*", "np", ".", "pi", "*", "Kv", "# convert Kv in Hz/v to rad/s/v\r", "if", "loop_type", "==", "1", ":", "# First-order loop parameters\r", "fn", "=", "Bn", "Kt", "=", "2", "*", "np", ".", "pi", "*", "fn", "# loop natural frequency in rad/s\r", "elif", "loop_type", "==", "2", ":", "# Second-order loop parameters\r", "fn", "=", "1", "/", "(", "2", "*", "np", ".", "pi", ")", "*", "2", "*", "Bn", "/", "(", "zeta", "+", "1", "/", "(", "4", "*", "zeta", ")", ")", "# given Bn in Hz\r", "Kt", "=", "4", "*", "np", ".", "pi", "*", "zeta", "*", "fn", "# loop natural frequency in rad/s\r", "a", "=", "np", ".", "pi", "*", "fn", "/", "zeta", "else", ":", "print", "(", "'Loop type must be 1 or 2'", ")", "# Initialize integration approximation filters\r", "filt_in_last", "=", "0", "filt_out_last", "=", "0", "vco_in_last", "=", "0", "vco_out", "=", "0", "vco_out_last", "=", "0", "# Initialize working and final output vectors\r", "n", "=", "np", ".", "arange", "(", "0", ",", "len", "(", "xr", ")", ")", "theta", "=", "np", ".", "zeros", "(", "len", "(", "xr", ")", ")", "ev", "=", "np", ".", "zeros", "(", "len", "(", "xr", ")", ")", "phi_error", "=", "np", ".", "zeros", "(", "len", "(", "xr", ")", ")", "# Normalize total power in an attemp to make the 19kHz sinusoid\r", "# component have amplitude ~1.\r", "#xr = xr/(2/3*std(xr));\r", "# Begin the simulation loop\r", "for", "kk", "in", "range", "(", "len", "(", "n", ")", ")", ":", "# Sinusoidal phase detector (simple multiplier)\r", "phi_error", "[", "kk", "]", "=", "2", "*", "xr", "[", "kk", "]", "*", "np", ".", "sin", "(", "vco_out", ")", "# LPF to remove double frequency term\r", "phi_error", "[", "kk", "]", ",", "fstate", "=", "signal", ".", "lfilter", "(", "b_lp", ",", "a_lp", ",", "np", ".", "array", "(", "[", "phi_error", "[", "kk", "]", "]", ")", ",", "zi", "=", "fstate", ")", "pd_out", "=", "phi_error", "[", "kk", "]", "#pd_out = 0\r", "# Loop gain\r", "gain_out", "=", "Kt", "/", "Kv", "*", "pd_out", "# apply VCO gain at VCO\r", "# Loop filter\r", "if", "loop_type", "==", "2", ":", "filt_in", "=", "a", "*", "gain_out", "filt_out", "=", "filt_out_last", "+", "T", "/", "2.", "*", "(", "filt_in", "+", "filt_in_last", ")", "filt_in_last", "=", "filt_in", "filt_out_last", "=", "filt_out", "filt_out", "=", "filt_out", "+", "gain_out", "else", ":", "filt_out", "=", "gain_out", "# VCO\r", "vco_in", "=", "filt_out", "+", "fq", "/", "(", "Kv", "/", "(", "2", "*", "np", ".", "pi", ")", ")", "# bias to quiescent freq.\r", "vco_out", "=", "vco_out_last", "+", "T", "/", "2.", "*", "(", "vco_in", "+", "vco_in_last", ")", "vco_in_last", "=", "vco_in", "vco_out_last", "=", "vco_out", "vco_out", "=", "Kv", "*", "vco_out", "# apply Kv\r", "# Measured loop signals\r", "ev", "[", "kk", "]", "=", "filt_out", "theta", "[", "kk", "]", "=", "np", ".", "mod", "(", "vco_out", ",", "2", "*", "np", ".", "pi", ")", "# The vco phase mod 2pi\r", "return", "theta", ",", "phi_error" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
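A sketch of locking pilot_PLL to a clean 19 kHz pilot at 240 ksps, the rate seen inside stereo_FM after its decimate-by-10 stage; the pilot amplitude and phase offset are arbitrary.

import numpy as np
from sk_dsp_comm.rtlsdr_helper import pilot_PLL

fs = 240e3
n = np.arange(int(0.1*fs))
xr = np.cos(2*np.pi*19e3*n/fs + 0.5)                 # 19 kHz pilot with a phase offset
# Type 2 loop, Bn = 10 Hz, zeta = 0.707, quiescent VCO frequency 19 kHz
theta, phi_error = pilot_PLL(xr, 19000, fs, 2, 10, 0.707)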
valid
sccs_bit_sync
rx_symb_d,clk,track = sccs_bit_sync(y,Ns) ////////////////////////////////////////////////////// Symbol synchronization algorithm using SCCS ////////////////////////////////////////////////////// y = baseband NRZ data waveform Ns = nominal number of samples per symbol Reworked from ECE 5675 Project Translated from m-code version Mark Wickert April 2014
sk_dsp_comm/rtlsdr_helper.py
def sccs_bit_sync(y,Ns): """ rx_symb_d,clk,track = sccs_bit_sync(y,Ns) ////////////////////////////////////////////////////// Symbol synchronization algorithm using SCCS ////////////////////////////////////////////////////// y = baseband NRZ data waveform Ns = nominal number of samples per symbol Reworked from ECE 5675 Project Translated from m-code version Mark Wickert April 2014 """ # decimated symbol sequence for SEP rx_symb_d = np.zeros(int(np.fix(len(y)/Ns))) track = np.zeros(int(np.fix(len(y)/Ns))) bit_count = -1 y_abs = np.zeros(len(y)) clk = np.zeros(len(y)) k = Ns+1 #initial 1-of-Ns symbol synch clock phase # Sample-by-sample processing required for i in range(len(y)): #y_abs(i) = abs(round(real(y(i)))) if i >= Ns: # do not process first Ns samples # Collect timing decision unit (TDU) samples y_abs[i] = np.abs(np.sum(y[i-Ns+1:i+1])) # Update sampling instant and take a sample # For causality reason the early sample is 'i', # the on-time or prompt sample is 'i-1', and # the late sample is 'i-2'. if (k == 0): # Load the samples into the 3x1 TDU register w_hat. # w_hat[1] = late, w_hat[2] = on-time; w_hat[3] = early. w_hat = y_abs[i-2:i+1] bit_count += 1 if w_hat[1] != 0: if w_hat[0] < w_hat[2]: k = Ns-1 clk[i-2] = 1 rx_symb_d[bit_count] = y[i-2-int(np.round(Ns/2))-1] elif w_hat[0] > w_hat[2]: k = Ns+1 clk[i] = 1 rx_symb_d[bit_count] = y[i-int(np.round(Ns/2))-1] else: k = Ns clk[i-1] = 1 rx_symb_d[bit_count] = y[i-1-int(np.round(Ns/2))-1] else: k = Ns clk[i-1] = 1 rx_symb_d[bit_count] = y[i-1-int(np.round(Ns/2))] track[bit_count] = np.mod(i,Ns) k -= 1 # Trim the final output to bit_count rx_symb_d = rx_symb_d[:bit_count] return rx_symb_d, clk, track
def sccs_bit_sync(y,Ns): """ rx_symb_d,clk,track = sccs_bit_sync(y,Ns) ////////////////////////////////////////////////////// Symbol synchronization algorithm using SCCS ////////////////////////////////////////////////////// y = baseband NRZ data waveform Ns = nominal number of samples per symbol Reworked from ECE 5675 Project Translated from m-code version Mark Wickert April 2014 """ # decimated symbol sequence for SEP rx_symb_d = np.zeros(int(np.fix(len(y)/Ns))) track = np.zeros(int(np.fix(len(y)/Ns))) bit_count = -1 y_abs = np.zeros(len(y)) clk = np.zeros(len(y)) k = Ns+1 #initial 1-of-Ns symbol synch clock phase # Sample-by-sample processing required for i in range(len(y)): #y_abs(i) = abs(round(real(y(i)))) if i >= Ns: # do not process first Ns samples # Collect timing decision unit (TDU) samples y_abs[i] = np.abs(np.sum(y[i-Ns+1:i+1])) # Update sampling instant and take a sample # For causality reason the early sample is 'i', # the on-time or prompt sample is 'i-1', and # the late sample is 'i-2'. if (k == 0): # Load the samples into the 3x1 TDU register w_hat. # w_hat[1] = late, w_hat[2] = on-time; w_hat[3] = early. w_hat = y_abs[i-2:i+1] bit_count += 1 if w_hat[1] != 0: if w_hat[0] < w_hat[2]: k = Ns-1 clk[i-2] = 1 rx_symb_d[bit_count] = y[i-2-int(np.round(Ns/2))-1] elif w_hat[0] > w_hat[2]: k = Ns+1 clk[i] = 1 rx_symb_d[bit_count] = y[i-int(np.round(Ns/2))-1] else: k = Ns clk[i-1] = 1 rx_symb_d[bit_count] = y[i-1-int(np.round(Ns/2))-1] else: k = Ns clk[i-1] = 1 rx_symb_d[bit_count] = y[i-1-int(np.round(Ns/2))] track[bit_count] = np.mod(i,Ns) k -= 1 # Trim the final output to bit_count rx_symb_d = rx_symb_d[:bit_count] return rx_symb_d, clk, track
[ "rx_symb_d", "clk", "track", "=", "sccs_bit_sync", "(", "y", "Ns", ")", "//////////////////////////////////////////////////////", "Symbol", "synchronization", "algorithm", "using", "SCCS", "//////////////////////////////////////////////////////", "y", "=", "baseband", "NRZ", "data", "waveform", "Ns", "=", "nominal", "number", "of", "samples", "per", "symbol", "Reworked", "from", "ECE", "5675", "Project", "Translated", "from", "m", "-", "code", "version", "Mark", "Wickert", "April", "2014" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/rtlsdr_helper.py#L245-L302
[ "def", "sccs_bit_sync", "(", "y", ",", "Ns", ")", ":", "# decimated symbol sequence for SEP\r", "rx_symb_d", "=", "np", ".", "zeros", "(", "int", "(", "np", ".", "fix", "(", "len", "(", "y", ")", "/", "Ns", ")", ")", ")", "track", "=", "np", ".", "zeros", "(", "int", "(", "np", ".", "fix", "(", "len", "(", "y", ")", "/", "Ns", ")", ")", ")", "bit_count", "=", "-", "1", "y_abs", "=", "np", ".", "zeros", "(", "len", "(", "y", ")", ")", "clk", "=", "np", ".", "zeros", "(", "len", "(", "y", ")", ")", "k", "=", "Ns", "+", "1", "#initial 1-of-Ns symbol synch clock phase\r", "# Sample-by-sample processing required\r", "for", "i", "in", "range", "(", "len", "(", "y", ")", ")", ":", "#y_abs(i) = abs(round(real(y(i))))\r", "if", "i", ">=", "Ns", ":", "# do not process first Ns samples\r", "# Collect timing decision unit (TDU) samples\r", "y_abs", "[", "i", "]", "=", "np", ".", "abs", "(", "np", ".", "sum", "(", "y", "[", "i", "-", "Ns", "+", "1", ":", "i", "+", "1", "]", ")", ")", "# Update sampling instant and take a sample\r", "# For causality reason the early sample is 'i',\r", "# the on-time or prompt sample is 'i-1', and \r", "# the late sample is 'i-2'.\r", "if", "(", "k", "==", "0", ")", ":", "# Load the samples into the 3x1 TDU register w_hat.\r", "# w_hat[1] = late, w_hat[2] = on-time; w_hat[3] = early.\r", "w_hat", "=", "y_abs", "[", "i", "-", "2", ":", "i", "+", "1", "]", "bit_count", "+=", "1", "if", "w_hat", "[", "1", "]", "!=", "0", ":", "if", "w_hat", "[", "0", "]", "<", "w_hat", "[", "2", "]", ":", "k", "=", "Ns", "-", "1", "clk", "[", "i", "-", "2", "]", "=", "1", "rx_symb_d", "[", "bit_count", "]", "=", "y", "[", "i", "-", "2", "-", "int", "(", "np", ".", "round", "(", "Ns", "/", "2", ")", ")", "-", "1", "]", "elif", "w_hat", "[", "0", "]", ">", "w_hat", "[", "2", "]", ":", "k", "=", "Ns", "+", "1", "clk", "[", "i", "]", "=", "1", "rx_symb_d", "[", "bit_count", "]", "=", "y", "[", "i", "-", "int", "(", "np", ".", "round", "(", "Ns", "/", "2", ")", ")", "-", "1", "]", "else", ":", "k", "=", "Ns", "clk", "[", "i", "-", "1", "]", "=", "1", "rx_symb_d", "[", "bit_count", "]", "=", "y", "[", "i", "-", "1", "-", "int", "(", "np", ".", "round", "(", "Ns", "/", "2", ")", ")", "-", "1", "]", "else", ":", "k", "=", "Ns", "clk", "[", "i", "-", "1", "]", "=", "1", "rx_symb_d", "[", "bit_count", "]", "=", "y", "[", "i", "-", "1", "-", "int", "(", "np", ".", "round", "(", "Ns", "/", "2", ")", ")", "]", "track", "[", "bit_count", "]", "=", "np", ".", "mod", "(", "i", ",", "Ns", ")", "k", "-=", "1", "# Trim the final output to bit_count\r", "rx_symb_d", "=", "rx_symb_d", "[", ":", "bit_count", "]", "return", "rx_symb_d", ",", "clk", ",", "track" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
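A sketch of sccs_bit_sync on a synthetic rectangular NRZ waveform at 10 samples per bit; in the receiver the input would be the demodulated data waveform, so the random bits below are only a stand-in.

import numpy as np
from sk_dsp_comm.rtlsdr_helper import sccs_bit_sync

Ns = 10
bits = np.random.randint(0, 2, 1000)
y = np.repeat(2.0*bits - 1, Ns)                 # rectangular NRZ, Ns samples per bit
rx_symb_d, clk, track = sccs_bit_sync(y, Ns)    # decimated symbols, clock ticks, timing track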
valid
fsk_BEP
fsk_BEP(rx_data,m,flip) Estimate the BEP of the data bits recovered by the RTL-SDR Based FSK Receiver. The reference m-sequence generated in Python was found to produce sequences running in the opposite direction relative to the m-sequences generated by the mbed. To allow error detection the reference m-sequence is flipped. Mark Wickert April 2014
sk_dsp_comm/rtlsdr_helper.py
def fsk_BEP(rx_data,m,flip): """ fsk_BEP(rx_data,m,flip) Estimate the BEP of the data bits recovered by the RTL-SDR Based FSK Receiver. The reference m-sequence generated in Python was found to produce sequences running in the opposite direction relative to the m-sequences generated by the mbed. To allow error detection the reference m-sequence is flipped. Mark Wickert April 2014 """ Nbits = len(rx_data) c = dc.m_seq(m) if flip == 1: # Flip the sequence to compenstate for mbed code difference # First make it a 1xN array c.shape = (1,len(c)) c = np.fliplr(c).flatten() L = int(np.ceil(Nbits/float(len(c)))) tx_data = np.dot(c.reshape(len(c),1),np.ones((1,L))) tx_data = tx_data.T.reshape((1,len(c)*L)).flatten() tx_data = tx_data[:Nbits] # Convert to +1/-1 bits tx_data = 2*tx_data - 1 Bit_count,Bit_errors = dc.BPSK_BEP(rx_data,tx_data) print('len rx_data = %d, len tx_data = %d' % (len(rx_data),len(tx_data))) Pe = Bit_errors/float(Bit_count) print('/////////////////////////////////////') print('Bit Errors: %d' % Bit_errors) print('Bits Total: %d' % Bit_count) print(' BEP: %2.2e' % Pe) print('/////////////////////////////////////')
def fsk_BEP(rx_data,m,flip): """ fsk_BEP(rx_data,m,flip) Estimate the BEP of the data bits recovered by the RTL-SDR Based FSK Receiver. The reference m-sequence generated in Python was found to produce sequences running in the opposite direction relative to the m-sequences generated by the mbed. To allow error detection the reference m-sequence is flipped. Mark Wickert April 2014 """ Nbits = len(rx_data) c = dc.m_seq(m) if flip == 1: # Flip the sequence to compenstate for mbed code difference # First make it a 1xN array c.shape = (1,len(c)) c = np.fliplr(c).flatten() L = int(np.ceil(Nbits/float(len(c)))) tx_data = np.dot(c.reshape(len(c),1),np.ones((1,L))) tx_data = tx_data.T.reshape((1,len(c)*L)).flatten() tx_data = tx_data[:Nbits] # Convert to +1/-1 bits tx_data = 2*tx_data - 1 Bit_count,Bit_errors = dc.BPSK_BEP(rx_data,tx_data) print('len rx_data = %d, len tx_data = %d' % (len(rx_data),len(tx_data))) Pe = Bit_errors/float(Bit_count) print('/////////////////////////////////////') print('Bit Errors: %d' % Bit_errors) print('Bits Total: %d' % Bit_count) print(' BEP: %2.2e' % Pe) print('/////////////////////////////////////')
[ "fsk_BEP", "(", "rx_data", "m", "flip", ")", "Estimate", "the", "BEP", "of", "the", "data", "bits", "recovered", "by", "the", "RTL", "-", "SDR", "Based", "FSK", "Receiver", ".", "The", "reference", "m", "-", "sequence", "generated", "in", "Python", "was", "found", "to", "produce", "sequences", "running", "in", "the", "opposite", "direction", "relative", "to", "the", "m", "-", "sequences", "generated", "by", "the", "mbed", ".", "To", "allow", "error", "detection", "the", "reference", "m", "-", "sequence", "is", "flipped", ".", "Mark", "Wickert", "April", "2014" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/rtlsdr_helper.py#L305-L340
[ "def", "fsk_BEP", "(", "rx_data", ",", "m", ",", "flip", ")", ":", "Nbits", "=", "len", "(", "rx_data", ")", "c", "=", "dc", ".", "m_seq", "(", "m", ")", "if", "flip", "==", "1", ":", "# Flip the sequence to compenstate for mbed code difference\r", "# First make it a 1xN array\r", "c", ".", "shape", "=", "(", "1", ",", "len", "(", "c", ")", ")", "c", "=", "np", ".", "fliplr", "(", "c", ")", ".", "flatten", "(", ")", "L", "=", "int", "(", "np", ".", "ceil", "(", "Nbits", "/", "float", "(", "len", "(", "c", ")", ")", ")", ")", "tx_data", "=", "np", ".", "dot", "(", "c", ".", "reshape", "(", "len", "(", "c", ")", ",", "1", ")", ",", "np", ".", "ones", "(", "(", "1", ",", "L", ")", ")", ")", "tx_data", "=", "tx_data", ".", "T", ".", "reshape", "(", "(", "1", ",", "len", "(", "c", ")", "*", "L", ")", ")", ".", "flatten", "(", ")", "tx_data", "=", "tx_data", "[", ":", "Nbits", "]", "# Convert to +1/-1 bits\r", "tx_data", "=", "2", "*", "tx_data", "-", "1", "Bit_count", ",", "Bit_errors", "=", "dc", ".", "BPSK_BEP", "(", "rx_data", ",", "tx_data", ")", "print", "(", "'len rx_data = %d, len tx_data = %d'", "%", "(", "len", "(", "rx_data", ")", ",", "len", "(", "tx_data", ")", ")", ")", "Pe", "=", "Bit_errors", "/", "float", "(", "Bit_count", ")", "print", "(", "'/////////////////////////////////////'", ")", "print", "(", "'Bit Errors: %d'", "%", "Bit_errors", ")", "print", "(", "'Bits Total: %d'", "%", "Bit_count", ")", "print", "(", "' BEP: %2.2e'", "%", "Pe", ")", "print", "(", "'/////////////////////////////////////'", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
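A call-pattern sketch only: rx_data would normally be the +/-1 bits recovered by the FSK receiver (e.g. hard-limited sccs_bit_sync output); the random stand-in below just exercises the routine, so the printed BEP near 0.5 carries no meaning.

import numpy as np
from sk_dsp_comm.rtlsdr_helper import fsk_BEP

rx_data = 2.0*np.random.randint(0, 2, 5000) - 1   # stand-in for recovered +/-1 data bits
fsk_BEP(rx_data, 7, 1)                            # reference is a length 2**7 - 1 m-sequence, flipped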
valid
complex2wav
Save a complex signal vector to a wav file for compact binary storage of 16-bit signal samples. The wav left and right channels are used to save real (I) and imaginary (Q) values. The rate is just a convent way of documenting the original signal sample rate. complex2wav(filename,rate,x) Mark Wickert April 2014
sk_dsp_comm/rtlsdr_helper.py
def complex2wav(filename,rate,x): """ Save a complex signal vector to a wav file for compact binary storage of 16-bit signal samples. The wav left and right channels are used to save real (I) and imaginary (Q) values. The rate is just a convent way of documenting the original signal sample rate. complex2wav(filename,rate,x) Mark Wickert April 2014 """ x_wav = np.hstack((np.array([x.real]).T,np.array([x.imag]).T)) ss.to_wav(filename, rate, x_wav) print('Saved as binary wav file with (I,Q)<=>(L,R)')
def complex2wav(filename,rate,x): """ Save a complex signal vector to a wav file for compact binary storage of 16-bit signal samples. The wav left and right channels are used to save real (I) and imaginary (Q) values. The rate is just a convent way of documenting the original signal sample rate. complex2wav(filename,rate,x) Mark Wickert April 2014 """ x_wav = np.hstack((np.array([x.real]).T,np.array([x.imag]).T)) ss.to_wav(filename, rate, x_wav) print('Saved as binary wav file with (I,Q)<=>(L,R)')
[ "Save", "a", "complex", "signal", "vector", "to", "a", "wav", "file", "for", "compact", "binary", "storage", "of", "16", "-", "bit", "signal", "samples", ".", "The", "wav", "left", "and", "right", "channels", "are", "used", "to", "save", "real", "(", "I", ")", "and", "imaginary", "(", "Q", ")", "values", ".", "The", "rate", "is", "just", "a", "convent", "way", "of", "documenting", "the", "original", "signal", "sample", "rate", ".", "complex2wav", "(", "filename", "rate", "x", ")", "Mark", "Wickert", "April", "2014" ]
mwickert/scikit-dsp-comm
python
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/rtlsdr_helper.py#L349-L362
[ "def", "complex2wav", "(", "filename", ",", "rate", ",", "x", ")", ":", "x_wav", "=", "np", ".", "hstack", "(", "(", "np", ".", "array", "(", "[", "x", ".", "real", "]", ")", ".", "T", ",", "np", ".", "array", "(", "[", "x", ".", "imag", "]", ")", ".", "T", ")", ")", "ss", ".", "to_wav", "(", "filename", ",", "rate", ",", "x_wav", ")", "print", "(", "'Saved as binary wav file with (I,Q)<=>(L,R)'", ")" ]
5c1353412a4d81a8d7da169057564ecf940f8b5b
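A sketch of saving a short complex capture with complex2wav; the 100 kHz tone and 0.5 amplitude are arbitrary choices, kept inside the +/-1 range the wav writer expects.

import numpy as np
from sk_dsp_comm.rtlsdr_helper import complex2wav

fs = 2400000
n = np.arange(int(0.01*fs))
x = 0.5*np.exp(1j*2*np.pi*100e3*n/fs)      # 100 kHz complex exponential
complex2wav('capture_iq.wav', fs, x)       # left channel holds I, right channel holds Q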