partition
stringclasses 3
values | func_name
stringlengths 1
134
| docstring
stringlengths 1
46.9k
| path
stringlengths 4
223
| original_string
stringlengths 75
104k
| code
stringlengths 75
104k
| docstring_tokens
listlengths 1
1.97k
| repo
stringlengths 7
55
| language
stringclasses 1
value | url
stringlengths 87
315
| code_tokens
listlengths 19
28.4k
| sha
stringlengths 40
40
|
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
spectral_bandwidth
|
Compute p'th-order spectral bandwidth:
(sum_k S[k] * (freq[k] - centroid)**p)**(1/p)
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies,
or a matrix of center frequencies as constructed by
`librosa.core.ifgram`
centroid : None or np.ndarray [shape=(1, t)]
pre-computed centroid frequencies
norm : bool
Normalize per-frame spectral energy (sum to one)
p : float > 0
Power to raise deviation from spectral centroid.
Returns
-------
bandwidth : np.ndarray [shape=(1, t)]
frequency bandwidth for each frame
Examples
--------
From time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
>>> spec_bw
array([[ 3379.878, 1429.486, ..., 3235.214, 3080.148]])
From spectrogram input
>>> S, phase = librosa.magphase(librosa.stft(y=y))
>>> librosa.feature.spectral_bandwidth(S=S)
array([[ 3379.878, 1429.486, ..., 3235.214, 3080.148]])
Using variable bin center frequencies
>>> if_gram, D = librosa.ifgram(y)
>>> librosa.feature.spectral_bandwidth(S=np.abs(D), freq=if_gram)
array([[ 3380.011, 1429.11 , ..., 3235.22 , 3080.148]])
Plot the result
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> plt.semilogy(spec_bw.T, label='Spectral bandwidth')
>>> plt.ylabel('Hz')
>>> plt.xticks([])
>>> plt.xlim([0, spec_bw.shape[-1]])
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('log Power spectrogram')
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def spectral_bandwidth(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
win_length=None, window='hann', center=True, pad_mode='reflect',
freq=None, centroid=None, norm=True, p=2):
'''Compute p'th-order spectral bandwidth:
(sum_k S[k] * (freq[k] - centroid)**p)**(1/p)
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies,
or a matrix of center frequencies as constructed by
`librosa.core.ifgram`
centroid : None or np.ndarray [shape=(1, t)]
pre-computed centroid frequencies
norm : bool
Normalize per-frame spectral energy (sum to one)
p : float > 0
Power to raise deviation from spectral centroid.
Returns
-------
bandwidth : np.ndarray [shape=(1, t)]
frequency bandwidth for each frame
Examples
--------
From time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
>>> spec_bw
array([[ 3379.878, 1429.486, ..., 3235.214, 3080.148]])
From spectrogram input
>>> S, phase = librosa.magphase(librosa.stft(y=y))
>>> librosa.feature.spectral_bandwidth(S=S)
array([[ 3379.878, 1429.486, ..., 3235.214, 3080.148]])
Using variable bin center frequencies
>>> if_gram, D = librosa.ifgram(y)
>>> librosa.feature.spectral_bandwidth(S=np.abs(D), freq=if_gram)
array([[ 3380.011, 1429.11 , ..., 3235.22 , 3080.148]])
Plot the result
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> plt.semilogy(spec_bw.T, label='Spectral bandwidth')
>>> plt.ylabel('Hz')
>>> plt.xticks([])
>>> plt.xlim([0, spec_bw.shape[-1]])
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('log Power spectrogram')
>>> plt.tight_layout()
'''
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode)
if not np.isrealobj(S):
raise ParameterError('Spectral bandwidth is only defined '
'with real-valued input')
elif np.any(S < 0):
raise ParameterError('Spectral bandwidth is only defined '
'with non-negative energies')
if centroid is None:
centroid = spectral_centroid(y=y, sr=sr, S=S,
n_fft=n_fft,
hop_length=hop_length,
freq=freq)
# Compute the center frequencies of each bin
if freq is None:
freq = fft_frequencies(sr=sr, n_fft=n_fft)
if freq.ndim == 1:
deviation = np.abs(np.subtract.outer(freq, centroid[0]))
else:
deviation = np.abs(freq - centroid[0])
# Column-normalize S
if norm:
S = util.normalize(S, norm=1, axis=0)
return np.sum(S * deviation**p, axis=0, keepdims=True)**(1./p)
|
def spectral_bandwidth(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
win_length=None, window='hann', center=True, pad_mode='reflect',
freq=None, centroid=None, norm=True, p=2):
'''Compute p'th-order spectral bandwidth:
(sum_k S[k] * (freq[k] - centroid)**p)**(1/p)
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies,
or a matrix of center frequencies as constructed by
`librosa.core.ifgram`
centroid : None or np.ndarray [shape=(1, t)]
pre-computed centroid frequencies
norm : bool
Normalize per-frame spectral energy (sum to one)
p : float > 0
Power to raise deviation from spectral centroid.
Returns
-------
bandwidth : np.ndarray [shape=(1, t)]
frequency bandwidth for each frame
Examples
--------
From time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
>>> spec_bw
array([[ 3379.878, 1429.486, ..., 3235.214, 3080.148]])
From spectrogram input
>>> S, phase = librosa.magphase(librosa.stft(y=y))
>>> librosa.feature.spectral_bandwidth(S=S)
array([[ 3379.878, 1429.486, ..., 3235.214, 3080.148]])
Using variable bin center frequencies
>>> if_gram, D = librosa.ifgram(y)
>>> librosa.feature.spectral_bandwidth(S=np.abs(D), freq=if_gram)
array([[ 3380.011, 1429.11 , ..., 3235.22 , 3080.148]])
Plot the result
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> plt.semilogy(spec_bw.T, label='Spectral bandwidth')
>>> plt.ylabel('Hz')
>>> plt.xticks([])
>>> plt.xlim([0, spec_bw.shape[-1]])
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('log Power spectrogram')
>>> plt.tight_layout()
'''
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode)
if not np.isrealobj(S):
raise ParameterError('Spectral bandwidth is only defined '
'with real-valued input')
elif np.any(S < 0):
raise ParameterError('Spectral bandwidth is only defined '
'with non-negative energies')
if centroid is None:
centroid = spectral_centroid(y=y, sr=sr, S=S,
n_fft=n_fft,
hop_length=hop_length,
freq=freq)
# Compute the center frequencies of each bin
if freq is None:
freq = fft_frequencies(sr=sr, n_fft=n_fft)
if freq.ndim == 1:
deviation = np.abs(np.subtract.outer(freq, centroid[0]))
else:
deviation = np.abs(freq - centroid[0])
# Column-normalize S
if norm:
S = util.normalize(S, norm=1, axis=0)
return np.sum(S * deviation**p, axis=0, keepdims=True)**(1./p)
|
[
"Compute",
"p",
"th",
"-",
"order",
"spectral",
"bandwidth",
":"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L171-L311
|
[
"def",
"spectral_bandwidth",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"freq",
"=",
"None",
",",
"centroid",
"=",
"None",
",",
"norm",
"=",
"True",
",",
"p",
"=",
"2",
")",
":",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"if",
"not",
"np",
".",
"isrealobj",
"(",
"S",
")",
":",
"raise",
"ParameterError",
"(",
"'Spectral bandwidth is only defined '",
"'with real-valued input'",
")",
"elif",
"np",
".",
"any",
"(",
"S",
"<",
"0",
")",
":",
"raise",
"ParameterError",
"(",
"'Spectral bandwidth is only defined '",
"'with non-negative energies'",
")",
"if",
"centroid",
"is",
"None",
":",
"centroid",
"=",
"spectral_centroid",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"freq",
"=",
"freq",
")",
"# Compute the center frequencies of each bin",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"if",
"freq",
".",
"ndim",
"==",
"1",
":",
"deviation",
"=",
"np",
".",
"abs",
"(",
"np",
".",
"subtract",
".",
"outer",
"(",
"freq",
",",
"centroid",
"[",
"0",
"]",
")",
")",
"else",
":",
"deviation",
"=",
"np",
".",
"abs",
"(",
"freq",
"-",
"centroid",
"[",
"0",
"]",
")",
"# Column-normalize S",
"if",
"norm",
":",
"S",
"=",
"util",
".",
"normalize",
"(",
"S",
",",
"norm",
"=",
"1",
",",
"axis",
"=",
"0",
")",
"return",
"np",
".",
"sum",
"(",
"S",
"*",
"deviation",
"**",
"p",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
"**",
"(",
"1.",
"/",
"p",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
spectral_contrast
|
Compute spectral contrast [1]_
.. [1] Jiang, Dan-Ning, Lie Lu, Hong-Jiang Zhang, Jian-Hua Tao,
and Lian-Hong Cai.
"Music type classification by spectral contrast feature."
In Multimedia and Expo, 2002. ICME'02. Proceedings.
2002 IEEE International Conference on, vol. 1, pp. 113-116.
IEEE, 2002.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies.
fmin : float > 0
Frequency cutoff for the first bin `[0, fmin]`
Subsequent bins will cover `[fmin, 2*fmin]`, `[2*fmin, 4*fmin]`, etc.
n_bands : int > 1
number of frequency bands
quantile : float in (0, 1)
quantile for determining peaks and valleys
linear : bool
If `True`, return the linear difference of magnitudes:
`peaks - valleys`.
If `False`, return the logarithmic difference:
`log(peaks) - log(valleys)`.
Returns
-------
contrast : np.ndarray [shape=(n_bands + 1, t)]
each row of spectral contrast values corresponds to a given
octave-based frequency
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> contrast = librosa.feature.spectral_contrast(S=S, sr=sr)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(contrast, x_axis='time')
>>> plt.colorbar()
>>> plt.ylabel('Frequency bands')
>>> plt.title('Spectral contrast')
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def spectral_contrast(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
win_length=None, window='hann', center=True, pad_mode='reflect',
freq=None, fmin=200.0, n_bands=6, quantile=0.02,
linear=False):
'''Compute spectral contrast [1]_
.. [1] Jiang, Dan-Ning, Lie Lu, Hong-Jiang Zhang, Jian-Hua Tao,
and Lian-Hong Cai.
"Music type classification by spectral contrast feature."
In Multimedia and Expo, 2002. ICME'02. Proceedings.
2002 IEEE International Conference on, vol. 1, pp. 113-116.
IEEE, 2002.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies.
fmin : float > 0
Frequency cutoff for the first bin `[0, fmin]`
Subsequent bins will cover `[fmin, 2*fmin]`, `[2*fmin, 4*fmin]`, etc.
n_bands : int > 1
number of frequency bands
quantile : float in (0, 1)
quantile for determining peaks and valleys
linear : bool
If `True`, return the linear difference of magnitudes:
`peaks - valleys`.
If `False`, return the logarithmic difference:
`log(peaks) - log(valleys)`.
Returns
-------
contrast : np.ndarray [shape=(n_bands + 1, t)]
each row of spectral contrast values corresponds to a given
octave-based frequency
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> contrast = librosa.feature.spectral_contrast(S=S, sr=sr)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(contrast, x_axis='time')
>>> plt.colorbar()
>>> plt.ylabel('Frequency bands')
>>> plt.title('Spectral contrast')
>>> plt.tight_layout()
'''
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode)
# Compute the center frequencies of each bin
if freq is None:
freq = fft_frequencies(sr=sr, n_fft=n_fft)
freq = np.atleast_1d(freq)
if freq.ndim != 1 or len(freq) != S.shape[0]:
raise ParameterError('freq.shape mismatch: expected '
'({:d},)'.format(S.shape[0]))
if n_bands < 1 or not isinstance(n_bands, int):
raise ParameterError('n_bands must be a positive integer')
if not 0.0 < quantile < 1.0:
raise ParameterError('quantile must lie in the range (0, 1)')
if fmin <= 0:
raise ParameterError('fmin must be a positive number')
octa = np.zeros(n_bands + 2)
octa[1:] = fmin * (2.0**np.arange(0, n_bands + 1))
if np.any(octa[:-1] >= 0.5 * sr):
raise ParameterError('Frequency band exceeds Nyquist. '
'Reduce either fmin or n_bands.')
valley = np.zeros((n_bands + 1, S.shape[1]))
peak = np.zeros_like(valley)
for k, (f_low, f_high) in enumerate(zip(octa[:-1], octa[1:])):
current_band = np.logical_and(freq >= f_low, freq <= f_high)
idx = np.flatnonzero(current_band)
if k > 0:
current_band[idx[0] - 1] = True
if k == n_bands:
current_band[idx[-1] + 1:] = True
sub_band = S[current_band]
if k < n_bands:
sub_band = sub_band[:-1]
# Always take at least one bin from each side
idx = np.rint(quantile * np.sum(current_band))
idx = int(np.maximum(idx, 1))
sortedr = np.sort(sub_band, axis=0)
valley[k] = np.mean(sortedr[:idx], axis=0)
peak[k] = np.mean(sortedr[-idx:], axis=0)
if linear:
return peak - valley
else:
return power_to_db(peak) - power_to_db(valley)
|
def spectral_contrast(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
win_length=None, window='hann', center=True, pad_mode='reflect',
freq=None, fmin=200.0, n_bands=6, quantile=0.02,
linear=False):
'''Compute spectral contrast [1]_
.. [1] Jiang, Dan-Ning, Lie Lu, Hong-Jiang Zhang, Jian-Hua Tao,
and Lian-Hong Cai.
"Music type classification by spectral contrast feature."
In Multimedia and Expo, 2002. ICME'02. Proceedings.
2002 IEEE International Conference on, vol. 1, pp. 113-116.
IEEE, 2002.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies.
fmin : float > 0
Frequency cutoff for the first bin `[0, fmin]`
Subsequent bins will cover `[fmin, 2*fmin]`, `[2*fmin, 4*fmin]`, etc.
n_bands : int > 1
number of frequency bands
quantile : float in (0, 1)
quantile for determining peaks and valleys
linear : bool
If `True`, return the linear difference of magnitudes:
`peaks - valleys`.
If `False`, return the logarithmic difference:
`log(peaks) - log(valleys)`.
Returns
-------
contrast : np.ndarray [shape=(n_bands + 1, t)]
each row of spectral contrast values corresponds to a given
octave-based frequency
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> contrast = librosa.feature.spectral_contrast(S=S, sr=sr)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(contrast, x_axis='time')
>>> plt.colorbar()
>>> plt.ylabel('Frequency bands')
>>> plt.title('Spectral contrast')
>>> plt.tight_layout()
'''
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode)
# Compute the center frequencies of each bin
if freq is None:
freq = fft_frequencies(sr=sr, n_fft=n_fft)
freq = np.atleast_1d(freq)
if freq.ndim != 1 or len(freq) != S.shape[0]:
raise ParameterError('freq.shape mismatch: expected '
'({:d},)'.format(S.shape[0]))
if n_bands < 1 or not isinstance(n_bands, int):
raise ParameterError('n_bands must be a positive integer')
if not 0.0 < quantile < 1.0:
raise ParameterError('quantile must lie in the range (0, 1)')
if fmin <= 0:
raise ParameterError('fmin must be a positive number')
octa = np.zeros(n_bands + 2)
octa[1:] = fmin * (2.0**np.arange(0, n_bands + 1))
if np.any(octa[:-1] >= 0.5 * sr):
raise ParameterError('Frequency band exceeds Nyquist. '
'Reduce either fmin or n_bands.')
valley = np.zeros((n_bands + 1, S.shape[1]))
peak = np.zeros_like(valley)
for k, (f_low, f_high) in enumerate(zip(octa[:-1], octa[1:])):
current_band = np.logical_and(freq >= f_low, freq <= f_high)
idx = np.flatnonzero(current_band)
if k > 0:
current_band[idx[0] - 1] = True
if k == n_bands:
current_band[idx[-1] + 1:] = True
sub_band = S[current_band]
if k < n_bands:
sub_band = sub_band[:-1]
# Always take at least one bin from each side
idx = np.rint(quantile * np.sum(current_band))
idx = int(np.maximum(idx, 1))
sortedr = np.sort(sub_band, axis=0)
valley[k] = np.mean(sortedr[:idx], axis=0)
peak[k] = np.mean(sortedr[-idx:], axis=0)
if linear:
return peak - valley
else:
return power_to_db(peak) - power_to_db(valley)
|
[
"Compute",
"spectral",
"contrast",
"[",
"1",
"]",
"_"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L314-L481
|
[
"def",
"spectral_contrast",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"freq",
"=",
"None",
",",
"fmin",
"=",
"200.0",
",",
"n_bands",
"=",
"6",
",",
"quantile",
"=",
"0.02",
",",
"linear",
"=",
"False",
")",
":",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"# Compute the center frequencies of each bin",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"freq",
"=",
"np",
".",
"atleast_1d",
"(",
"freq",
")",
"if",
"freq",
".",
"ndim",
"!=",
"1",
"or",
"len",
"(",
"freq",
")",
"!=",
"S",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ParameterError",
"(",
"'freq.shape mismatch: expected '",
"'({:d},)'",
".",
"format",
"(",
"S",
".",
"shape",
"[",
"0",
"]",
")",
")",
"if",
"n_bands",
"<",
"1",
"or",
"not",
"isinstance",
"(",
"n_bands",
",",
"int",
")",
":",
"raise",
"ParameterError",
"(",
"'n_bands must be a positive integer'",
")",
"if",
"not",
"0.0",
"<",
"quantile",
"<",
"1.0",
":",
"raise",
"ParameterError",
"(",
"'quantile must lie in the range (0, 1)'",
")",
"if",
"fmin",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'fmin must be a positive number'",
")",
"octa",
"=",
"np",
".",
"zeros",
"(",
"n_bands",
"+",
"2",
")",
"octa",
"[",
"1",
":",
"]",
"=",
"fmin",
"*",
"(",
"2.0",
"**",
"np",
".",
"arange",
"(",
"0",
",",
"n_bands",
"+",
"1",
")",
")",
"if",
"np",
".",
"any",
"(",
"octa",
"[",
":",
"-",
"1",
"]",
">=",
"0.5",
"*",
"sr",
")",
":",
"raise",
"ParameterError",
"(",
"'Frequency band exceeds Nyquist. '",
"'Reduce either fmin or n_bands.'",
")",
"valley",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_bands",
"+",
"1",
",",
"S",
".",
"shape",
"[",
"1",
"]",
")",
")",
"peak",
"=",
"np",
".",
"zeros_like",
"(",
"valley",
")",
"for",
"k",
",",
"(",
"f_low",
",",
"f_high",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"octa",
"[",
":",
"-",
"1",
"]",
",",
"octa",
"[",
"1",
":",
"]",
")",
")",
":",
"current_band",
"=",
"np",
".",
"logical_and",
"(",
"freq",
">=",
"f_low",
",",
"freq",
"<=",
"f_high",
")",
"idx",
"=",
"np",
".",
"flatnonzero",
"(",
"current_band",
")",
"if",
"k",
">",
"0",
":",
"current_band",
"[",
"idx",
"[",
"0",
"]",
"-",
"1",
"]",
"=",
"True",
"if",
"k",
"==",
"n_bands",
":",
"current_band",
"[",
"idx",
"[",
"-",
"1",
"]",
"+",
"1",
":",
"]",
"=",
"True",
"sub_band",
"=",
"S",
"[",
"current_band",
"]",
"if",
"k",
"<",
"n_bands",
":",
"sub_band",
"=",
"sub_band",
"[",
":",
"-",
"1",
"]",
"# Always take at least one bin from each side",
"idx",
"=",
"np",
".",
"rint",
"(",
"quantile",
"*",
"np",
".",
"sum",
"(",
"current_band",
")",
")",
"idx",
"=",
"int",
"(",
"np",
".",
"maximum",
"(",
"idx",
",",
"1",
")",
")",
"sortedr",
"=",
"np",
".",
"sort",
"(",
"sub_band",
",",
"axis",
"=",
"0",
")",
"valley",
"[",
"k",
"]",
"=",
"np",
".",
"mean",
"(",
"sortedr",
"[",
":",
"idx",
"]",
",",
"axis",
"=",
"0",
")",
"peak",
"[",
"k",
"]",
"=",
"np",
".",
"mean",
"(",
"sortedr",
"[",
"-",
"idx",
":",
"]",
",",
"axis",
"=",
"0",
")",
"if",
"linear",
":",
"return",
"peak",
"-",
"valley",
"else",
":",
"return",
"power_to_db",
"(",
"peak",
")",
"-",
"power_to_db",
"(",
"valley",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
spectral_rolloff
|
Compute roll-off frequency.
The roll-off frequency is defined for each frame as the center frequency
for a spectrogram bin such that at least roll_percent (0.85 by default)
of the energy of the spectrum in this frame is contained in this bin and
the bins below. This can be used to, e.g., approximate the maximum (or
minimum) frequency by setting roll_percent to a value close to 1 (or 0).
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies,
.. note:: `freq` is assumed to be sorted in increasing order
roll_percent : float [0 < roll_percent < 1]
Roll-off percentage.
Returns
-------
rolloff : np.ndarray [shape=(1, t)]
roll-off frequency for each frame
Examples
--------
From time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> # Approximate maximum frequencies with roll_percent=0.85 (default)
>>> rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
>>> rolloff
array([[ 8376.416, 968.994, ..., 8925.513, 9108.545]])
>>> # Approximate minimum frequencies with roll_percent=0.1
>>> rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr, roll_percent=0.1)
>>> rolloff
array([[ 75.36621094, 64.59960938, 64.59960938, ..., 75.36621094,
75.36621094, 64.59960938]])
From spectrogram input
>>> S, phase = librosa.magphase(librosa.stft(y))
>>> librosa.feature.spectral_rolloff(S=S, sr=sr)
array([[ 8376.416, 968.994, ..., 8925.513, 9108.545]])
>>> # With a higher roll percentage:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.feature.spectral_rolloff(y=y, sr=sr, roll_percent=0.95)
array([[ 10012.939, 3003.882, ..., 10034.473, 10077.539]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> plt.semilogy(rolloff.T, label='Roll-off frequency')
>>> plt.ylabel('Hz')
>>> plt.xticks([])
>>> plt.xlim([0, rolloff.shape[-1]])
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('log Power spectrogram')
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def spectral_rolloff(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                     win_length=None, window='hann', center=True, pad_mode='reflect',
                     freq=None, roll_percent=0.85):
    '''Compute the roll-off frequency for each spectrogram frame.

    For every frame, the roll-off is the center frequency of the lowest
    bin such that the cumulative energy up to (and including) that bin
    accounts for at least ``roll_percent`` of the frame's total energy.
    A ``roll_percent`` near 1 (or 0) approximates the maximum (or
    minimum) frequency present in the signal.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        hop length for STFT; see `librosa.core.stft` for details
    win_length : int <= n_fft [scalar]
        length of the analysis window; defaults to ``n_fft``
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        window specification, as accepted by `filters.get_window`
    center : boolean
        if `True`, frame `t` is centered at `y[t * hop_length]`;
        otherwise frame `t` begins at `y[t * hop_length]`
    pad_mode : string
        padding mode applied at the signal edges when `center=True`
    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        center frequencies for spectrogram bins; FFT bin frequencies
        are used when `None`.

        .. note:: `freq` is assumed to be sorted in increasing order
    roll_percent : float [0 < roll_percent < 1]
        roll-off percentage

    Returns
    -------
    rolloff : np.ndarray [shape=(1, t)]
        roll-off frequency for each frame

    Raises
    ------
    ParameterError
        if ``roll_percent`` lies outside (0, 1), or if the spectrogram
        is complex-valued or contains negative entries
    '''
    if not 0.0 < roll_percent < 1.0:
        raise ParameterError('roll_percent must lie in the range (0, 1)')

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window,
                            center=center, pad_mode=pad_mode)

    # Roll-off is only meaningful for real, non-negative magnitudes.
    if not np.isrealobj(S):
        raise ParameterError('Spectral rolloff is only defined '
                             'with real-valued input')
    if np.any(S < 0):
        raise ParameterError('Spectral rolloff is only defined '
                             'with non-negative energies')

    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    # Promote a 1-d frequency vector to a column so it broadcasts over frames
    if freq.ndim == 1:
        freq = freq.reshape((-1, 1))

    # Cumulative energy per frame, from the bottom bin upward
    cumulative = np.cumsum(S, axis=0)
    target = roll_percent * cumulative[-1]

    # NaN-out every bin whose cumulative energy is still below the target,
    # then pick the lowest surviving center frequency per frame.
    survivors = np.where(cumulative < target, np.nan, 1)
    return np.nanmin(survivors * freq, axis=0, keepdims=True)
|
def spectral_rolloff(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                     win_length=None, window='hann', center=True, pad_mode='reflect',
                     freq=None, roll_percent=0.85):
    '''Compute per-frame roll-off frequency.

    The roll-off of a frame is the center frequency of the spectrogram
    bin below which at least ``roll_percent`` (default 0.85) of the
    frame's spectral energy is concentrated.  Setting ``roll_percent``
    close to 1 (or 0) approximates the maximum (or minimum) frequency.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        hop length for STFT; see `librosa.core.stft` for details
    win_length : int <= n_fft [scalar]
        analysis window length, zero-padded to ``n_fft``;
        defaults to ``n_fft`` when unspecified
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        window specification; see `filters.get_window`
    center : boolean
        center frames on `y[t * hop_length]` when `True`
    pad_mode : string
        edge-padding mode used when `center=True`
    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        bin center frequencies; FFT bin frequencies when `None`.

        .. note:: `freq` is assumed to be sorted in increasing order
    roll_percent : float [0 < roll_percent < 1]
        roll-off percentage

    Returns
    -------
    rolloff : np.ndarray [shape=(1, t)]
        roll-off frequency for each frame

    Raises
    ------
    ParameterError
        for an out-of-range ``roll_percent``, complex-valued input,
        or negative spectrogram entries
    '''
    if not 0.0 < roll_percent < 1.0:
        raise ParameterError('roll_percent must lie in the range (0, 1)')

    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window,
                            center=center, pad_mode=pad_mode)

    # Validate that we have real, non-negative magnitudes to accumulate.
    if not np.isrealobj(S):
        raise ParameterError('Spectral rolloff is only defined '
                             'with real-valued input')
    if np.any(S < 0):
        raise ParameterError('Spectral rolloff is only defined '
                             'with non-negative energies')

    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    # Reshape a flat frequency array into a column for broadcasting
    if freq.ndim == 1:
        freq = freq.reshape((-1, 1))

    energy_so_far = np.cumsum(S, axis=0)
    # Per-frame energy threshold: a fraction of the total (last cumsum row)
    per_frame_threshold = roll_percent * energy_so_far[-1]

    # Mark sub-threshold bins with NaN so nanmin skips them, leaving the
    # lowest frequency at or above the threshold in each frame.
    keep = np.where(energy_so_far < per_frame_threshold, np.nan, 1)
    return np.nanmin(keep * freq, axis=0, keepdims=True)
|
[
"Compute",
"roll",
"-",
"off",
"frequency",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L484-L623
|
[
"def",
"spectral_rolloff",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"freq",
"=",
"None",
",",
"roll_percent",
"=",
"0.85",
")",
":",
"if",
"not",
"0.0",
"<",
"roll_percent",
"<",
"1.0",
":",
"raise",
"ParameterError",
"(",
"'roll_percent must lie in the range (0, 1)'",
")",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"if",
"not",
"np",
".",
"isrealobj",
"(",
"S",
")",
":",
"raise",
"ParameterError",
"(",
"'Spectral rolloff is only defined '",
"'with real-valued input'",
")",
"elif",
"np",
".",
"any",
"(",
"S",
"<",
"0",
")",
":",
"raise",
"ParameterError",
"(",
"'Spectral rolloff is only defined '",
"'with non-negative energies'",
")",
"# Compute the center frequencies of each bin",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"# Make sure that frequency can be broadcast",
"if",
"freq",
".",
"ndim",
"==",
"1",
":",
"freq",
"=",
"freq",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"total_energy",
"=",
"np",
".",
"cumsum",
"(",
"S",
",",
"axis",
"=",
"0",
")",
"threshold",
"=",
"roll_percent",
"*",
"total_energy",
"[",
"-",
"1",
"]",
"ind",
"=",
"np",
".",
"where",
"(",
"total_energy",
"<",
"threshold",
",",
"np",
".",
"nan",
",",
"1",
")",
"return",
"np",
".",
"nanmin",
"(",
"ind",
"*",
"freq",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
spectral_flatness
|
Compute spectral flatness
Spectral flatness (or tonality coefficient) is a measure to
quantify how much noise-like a sound is, as opposed to being
tone-like [1]_. A high spectral flatness (closer to 1.0)
indicates the spectrum is similar to white noise.
It is often converted to decibel.
.. [1] Dubnov, Shlomo "Generalization of spectral flatness
measure for non-gaussian linear processes"
IEEE Signal Processing Letters, 2004, Vol. 11.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
S : np.ndarray [shape=(d, t)] or None
(optional) pre-computed spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
amin : float > 0 [scalar]
minimum threshold for `S` (=added noise floor for numerical stability)
power : float > 0 [scalar]
Exponent for the magnitude spectrogram.
e.g., 1 for energy, 2 for power, etc.
Power spectrogram is usually used for computing spectral flatness.
Returns
-------
flatness : np.ndarray [shape=(1, t)]
spectral flatness for each frame.
The returned value is in [0, 1] and often converted to dB scale.
Examples
--------
From time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> flatness = librosa.feature.spectral_flatness(y=y)
>>> flatness
array([[ 1.00000e+00, 5.82299e-03, 5.64624e-04, ..., 9.99063e-01,
1.00000e+00, 1.00000e+00]], dtype=float32)
From spectrogram input
>>> S, phase = librosa.magphase(librosa.stft(y))
>>> librosa.feature.spectral_flatness(S=S)
array([[ 1.00000e+00, 5.82299e-03, 5.64624e-04, ..., 9.99063e-01,
1.00000e+00, 1.00000e+00]], dtype=float32)
From power spectrogram input
>>> S, phase = librosa.magphase(librosa.stft(y))
>>> S_power = S ** 2
>>> librosa.feature.spectral_flatness(S=S_power, power=1.0)
array([[ 1.00000e+00, 5.82299e-03, 5.64624e-04, ..., 9.99063e-01,
1.00000e+00, 1.00000e+00]], dtype=float32)
|
librosa/feature/spectral.py
|
def spectral_flatness(y=None, S=None, n_fft=2048, hop_length=512,
                      win_length=None, window='hann', center=True, pad_mode='reflect',
                      amin=1e-10, power=2.0):
    '''Compute spectral flatness (tonality coefficient).

    Spectral flatness quantifies how noise-like a sound is, as opposed
    to tone-like [1]_.  It is the ratio of the geometric mean to the
    arithmetic mean of the (power) spectrum in each frame; values close
    to 1.0 indicate a spectrum similar to white noise.  The result is
    often converted to decibels.

    .. [1] Dubnov, Shlomo "Generalization of spectral flatness
           measure for non-gaussian linear processes"
           IEEE Signal Processing Letters, 2004, Vol. 11.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    S : np.ndarray [shape=(d, t)] or None
        (optional) pre-computed spectrogram magnitude
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        hop length for STFT; see `librosa.core.stft` for details
    win_length : int <= n_fft [scalar]
        analysis window length; defaults to ``n_fft``
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        window specification; see `filters.get_window`
    center : boolean
        center frames on `y[t * hop_length]` when `True`
    pad_mode : string
        edge-padding mode used when `center=True`
    amin : float > 0 [scalar]
        minimum threshold for `S` (noise floor for numerical stability)
    power : float > 0 [scalar]
        exponent applied to the magnitude spectrogram (1 for energy,
        2 for power); power spectrogram is the usual choice

    Returns
    -------
    flatness : np.ndarray [shape=(1, t)]
        spectral flatness per frame, in [0, 1]

    Raises
    ------
    ParameterError
        if ``amin`` is not strictly positive, or the spectrogram is
        complex-valued or has negative entries
    '''
    if amin <= 0:
        raise ParameterError('amin must be strictly positive')

    # Always fetch a magnitude (power=1) spectrogram; the exponent is
    # applied explicitly below.
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            power=1., win_length=win_length, window=window,
                            center=center, pad_mode=pad_mode)

    if not np.isrealobj(S):
        raise ParameterError('Spectral flatness is only defined '
                             'with real-valued input')
    if np.any(S < 0):
        raise ParameterError('Spectral flatness is only defined '
                             'with non-negative energies')

    # Raise to the requested power, clipped at the noise floor so the
    # logarithm below stays finite.
    floored = np.maximum(amin, S ** power)

    # Flatness = geometric mean / arithmetic mean, computed per frame
    geometric_mean = np.exp(np.log(floored).mean(axis=0, keepdims=True))
    arithmetic_mean = floored.mean(axis=0, keepdims=True)
    return geometric_mean / arithmetic_mean
|
def spectral_flatness(y=None, S=None, n_fft=2048, hop_length=512,
                      win_length=None, window='hann', center=True, pad_mode='reflect',
                      amin=1e-10, power=2.0):
    '''Measure how noise-like each spectrogram frame is.

    Spectral flatness (tonality coefficient) [1]_ is the per-frame
    ratio of the geometric mean to the arithmetic mean of the spectrum
    raised to ``power``.  A value near 1.0 means the frame resembles
    white noise; the result is commonly converted to dB.

    .. [1] Dubnov, Shlomo "Generalization of spectral flatness
           measure for non-gaussian linear processes"
           IEEE Signal Processing Letters, 2004, Vol. 11.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    S : np.ndarray [shape=(d, t)] or None
        (optional) pre-computed spectrogram magnitude
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        hop length for STFT; see `librosa.core.stft` for details
    win_length : int <= n_fft [scalar]
        analysis window length; defaults to ``n_fft`` if unspecified
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        window specification; see `filters.get_window`
    center : boolean
        if `True`, pad `y` so frame `t` is centered at `y[t * hop_length]`
    pad_mode : string
        padding mode at the signal edges when `center=True`
    amin : float > 0 [scalar]
        noise floor added to `S` for numerical stability
    power : float > 0 [scalar]
        spectrogram exponent (1 energy, 2 power; power is typical)

    Returns
    -------
    flatness : np.ndarray [shape=(1, t)]
        spectral flatness for each frame, in [0, 1]

    Raises
    ------
    ParameterError
        on non-positive ``amin``, complex-valued input, or negative
        spectral energies
    '''
    if amin <= 0:
        raise ParameterError('amin must be strictly positive')

    # Request a plain magnitude spectrogram; ``power`` is applied later.
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            power=1., win_length=win_length, window=window,
                            center=center, pad_mode=pad_mode)

    if not np.isrealobj(S):
        raise ParameterError('Spectral flatness is only defined '
                             'with real-valued input')
    if np.any(S < 0):
        raise ParameterError('Spectral flatness is only defined '
                             'with non-negative energies')

    # Exponentiate and clamp to the noise floor so log() is well-defined.
    spectrum = np.maximum(amin, S ** power)

    log_mean = np.mean(np.log(spectrum), axis=0, keepdims=True)
    gmean = np.exp(log_mean)                       # geometric mean
    amean = np.mean(spectrum, axis=0, keepdims=True)  # arithmetic mean
    return gmean / amean
|
[
"Compute",
"spectral",
"flatness"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L626-L737
|
[
"def",
"spectral_flatness",
"(",
"y",
"=",
"None",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"amin",
"=",
"1e-10",
",",
"power",
"=",
"2.0",
")",
":",
"if",
"amin",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'amin must be strictly positive'",
")",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"power",
"=",
"1.",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"if",
"not",
"np",
".",
"isrealobj",
"(",
"S",
")",
":",
"raise",
"ParameterError",
"(",
"'Spectral flatness is only defined '",
"'with real-valued input'",
")",
"elif",
"np",
".",
"any",
"(",
"S",
"<",
"0",
")",
":",
"raise",
"ParameterError",
"(",
"'Spectral flatness is only defined '",
"'with non-negative energies'",
")",
"S_thresh",
"=",
"np",
".",
"maximum",
"(",
"amin",
",",
"S",
"**",
"power",
")",
"gmean",
"=",
"np",
".",
"exp",
"(",
"np",
".",
"mean",
"(",
"np",
".",
"log",
"(",
"S_thresh",
")",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
")",
"amean",
"=",
"np",
".",
"mean",
"(",
"S_thresh",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
"return",
"gmean",
"/",
"amean"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
rms
|
Compute root-mean-square (RMS) value for each frame, either from the
audio samples `y` or from a spectrogram `S`.
Computing the RMS value from audio samples is faster as it doesn't require
a STFT calculation. However, using a spectrogram will give a more accurate
representation of energy over time because its frames can be windowed,
thus prefer using `S` if it's already available.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
(optional) audio time series. Required if `S` is not input.
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude. Required if `y` is not input.
frame_length : int > 0 [scalar]
length of analysis frame (in samples) for energy calculation
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
center : bool
If `True` and operating on time-domain input (`y`), pad the signal
by `frame_length//2` on either side.
If operating on spectrogram input, this has no effect.
pad_mode : str
Padding mode for centered analysis. See `np.pad` for valid
values.
Returns
-------
rms : np.ndarray [shape=(1, t)]
RMS value for each frame
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.feature.rms(y=y)
array([[ 0. , 0.056, ..., 0. , 0. ]], dtype=float32)
Or from spectrogram input
>>> S, phase = librosa.magphase(librosa.stft(y))
>>> rms = librosa.feature.rms(S=S)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> plt.semilogy(rms.T, label='RMS Energy')
>>> plt.xticks([])
>>> plt.xlim([0, rms.shape[-1]])
>>> plt.legend(loc='best')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('log Power spectrogram')
>>> plt.tight_layout()
Use a STFT window of constant ones and no frame centering to get consistent
results with the RMS computed from the audio samples `y`
>>> S = librosa.magphase(librosa.stft(y, window=np.ones, center=False))[0]
>>> librosa.feature.rms(S=S)
|
librosa/feature/spectral.py
|
def rms(y=None, S=None, frame_length=2048, hop_length=512,
        center=True, pad_mode='reflect'):
    '''Compute root-mean-square (RMS) value for each frame.

    The RMS can be computed either directly from the audio samples `y`
    (faster, no STFT needed) or from a spectrogram `S` (more accurate
    energy-over-time, since spectrogram frames can be windowed) —
    prefer `S` when it is already available.  Exactly one of `y` and
    `S` must be provided.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        (optional) audio time series; required if `S` is not input
    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude; required if `y` is not input
    frame_length : int > 0 [scalar]
        length of analysis frame (in samples) for energy calculation
    hop_length : int > 0 [scalar]
        hop length for STFT; see `librosa.core.stft` for details
    center : bool
        if `True` and operating on time-domain input (`y`), pad the
        signal by `frame_length//2` on either side; no effect for
        spectrogram input
    pad_mode : str
        padding mode for centered analysis; see `np.pad`

    Returns
    -------
    rms : np.ndarray [shape=(1, t)]
        RMS value for each frame

    Raises
    ------
    ValueError
        if both or neither of `y` and `S` are given
    '''
    if y is not None and S is not None:
        raise ValueError('Either `y` or `S` should be input.')

    if y is not None:
        # Time-domain path: frame the mono signal directly.
        y = to_mono(y)
        if center:
            y = np.pad(y, int(frame_length // 2), mode=pad_mode)
        frames = util.frame(y,
                            frame_length=frame_length,
                            hop_length=hop_length)
    elif S is not None:
        # Spectrogram path: reuse the provided magnitudes as-is.
        frames, _ = _spectrogram(y=y, S=S,
                                 n_fft=frame_length,
                                 hop_length=hop_length)
    else:
        raise ValueError('Either `y` or `S` must be input.')

    mean_power = np.mean(np.abs(frames) ** 2, axis=0, keepdims=True)
    return np.sqrt(mean_power)
|
def rms(y=None, S=None, frame_length=2048, hop_length=512,
        center=True, pad_mode='reflect'):
    '''Per-frame root-mean-square (RMS) energy.

    Accepts exactly one of: audio samples `y` (fast — no STFT), or a
    spectrogram magnitude `S` (more accurate energy over time, because
    its frames can be windowed; prefer it when already computed).

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        (optional) audio time series; required if `S` is not input
    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude; required if `y` is not input
    frame_length : int > 0 [scalar]
        analysis frame length in samples
    hop_length : int > 0 [scalar]
        hop length for STFT; see `librosa.core.stft` for details
    center : bool
        pad time-domain input by `frame_length//2` on each side when
        `True`; ignored for spectrogram input
    pad_mode : str
        `np.pad` mode used for centered analysis

    Returns
    -------
    rms : np.ndarray [shape=(1, t)]
        RMS value for each frame

    Raises
    ------
    ValueError
        when both or neither of `y` and `S` are supplied
    '''
    if y is not None and S is not None:
        raise ValueError('Either `y` or `S` should be input.')

    if y is not None:
        # Operate on the raw samples: mono-fold, optionally center-pad,
        # then slice into overlapping frames.
        y = to_mono(y)
        if center:
            y = np.pad(y, int(frame_length // 2), mode=pad_mode)
        x = util.frame(y,
                       frame_length=frame_length,
                       hop_length=hop_length)
    elif S is not None:
        # Operate on spectrogram magnitudes.
        x, _ = _spectrogram(y=y, S=S,
                            n_fft=frame_length,
                            hop_length=hop_length)
    else:
        raise ValueError('Either `y` or `S` must be input.')

    # RMS = sqrt of the mean squared magnitude along the frame axis
    return np.sqrt(np.mean(np.abs(x) ** 2, axis=0, keepdims=True))
|
[
"Compute",
"root",
"-",
"mean",
"-",
"square",
"(",
"RMS",
")",
"value",
"for",
"each",
"frame",
"either",
"from",
"the",
"audio",
"samples",
"y",
"or",
"from",
"a",
"spectrogram",
"S",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L740-L828
|
[
"def",
"rms",
"(",
"y",
"=",
"None",
",",
"S",
"=",
"None",
",",
"frame_length",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
")",
":",
"if",
"y",
"is",
"not",
"None",
"and",
"S",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'Either `y` or `S` should be input.'",
")",
"if",
"y",
"is",
"not",
"None",
":",
"y",
"=",
"to_mono",
"(",
"y",
")",
"if",
"center",
":",
"y",
"=",
"np",
".",
"pad",
"(",
"y",
",",
"int",
"(",
"frame_length",
"//",
"2",
")",
",",
"mode",
"=",
"pad_mode",
")",
"x",
"=",
"util",
".",
"frame",
"(",
"y",
",",
"frame_length",
"=",
"frame_length",
",",
"hop_length",
"=",
"hop_length",
")",
"elif",
"S",
"is",
"not",
"None",
":",
"x",
",",
"_",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"frame_length",
",",
"hop_length",
"=",
"hop_length",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Either `y` or `S` must be input.'",
")",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"mean",
"(",
"np",
".",
"abs",
"(",
"x",
")",
"**",
"2",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
poly_features
|
Get coefficients of fitting an nth-order polynomial to the columns
of a spectrogram.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
order : int > 0
order of the polynomial to fit
freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies,
or a matrix of center frequencies as constructed by
`librosa.core.ifgram`
Returns
-------
coefficients : np.ndarray [shape=(order+1, t)]
polynomial coefficients for each frame.
    `coefficients[0]` corresponds to the highest degree (`order`),
`coefficients[1]` corresponds to the next highest degree (`order-1`),
down to the constant term `coefficients[order]`.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
Fit a degree-0 polynomial (constant) to each frame
>>> p0 = librosa.feature.poly_features(S=S, order=0)
Fit a linear polynomial to each frame
>>> p1 = librosa.feature.poly_features(S=S, order=1)
Fit a quadratic to each frame
>>> p2 = librosa.feature.poly_features(S=S, order=2)
Plot the results for comparison
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 8))
>>> ax = plt.subplot(4,1,1)
>>> plt.plot(p2[2], label='order=2', alpha=0.8)
>>> plt.plot(p1[1], label='order=1', alpha=0.8)
>>> plt.plot(p0[0], label='order=0', alpha=0.8)
>>> plt.xticks([])
>>> plt.ylabel('Constant')
>>> plt.legend()
>>> plt.subplot(4,1,2, sharex=ax)
>>> plt.plot(p2[1], label='order=2', alpha=0.8)
>>> plt.plot(p1[0], label='order=1', alpha=0.8)
>>> plt.xticks([])
>>> plt.ylabel('Linear')
>>> plt.subplot(4,1,3, sharex=ax)
>>> plt.plot(p2[0], label='order=2', alpha=0.8)
>>> plt.xticks([])
>>> plt.ylabel('Quadratic')
>>> plt.subplot(4,1,4, sharex=ax)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log')
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def poly_features(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                  win_length=None, window='hann', center=True, pad_mode='reflect',
                  order=1, freq=None):
    '''Fit an nth-order polynomial to each column of a spectrogram.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        hop length for STFT. See `librosa.core.stft` for details.
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()` and zero-padded
        to match `n_fft`. If unspecified, defaults to ``win_length = n_fft``.
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        Window specification; see `filters.get_window`.
    center : boolean
        If `True`, frame `t` is centered at `y[t * hop_length]`;
        otherwise frame `t` begins at `y[t * hop_length]`.
    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal. By default, STFT uses reflection padding.
    order : int > 0
        order of the polynomial to fit
    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        Center frequencies for spectrogram bins.  If `None`, FFT bin
        center frequencies are used.  Otherwise, a single array of `d`
        center frequencies, or a matrix of per-frame center frequencies
        as constructed by `librosa.core.ifgram`.

    Returns
    -------
    coefficients : np.ndarray [shape=(order+1, t)]
        Polynomial coefficients for each frame, ordered from the
        highest degree (`coefficients[0]`, degree `order`) down to
        the constant term (`coefficients[order]`).
    '''
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)

    # Default to FFT bin center frequencies
    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    if freq.ndim == 1:
        # A single shared frequency axis: one vectorized fit covers
        # every frame at once.
        return np.polyfit(freq, S, order)

    # Per-frame frequency axes: fit each column independently,
    # then stack the per-frame coefficient vectors.
    fits = [np.polyfit(freq[:, idx], S[:, idx], order)
            for idx in range(S.shape[1])]
    return np.vstack(fits).T
|
def poly_features(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                  win_length=None, window='hann', center=True, pad_mode='reflect',
                  order=1, freq=None):
    '''Fit an nth-order polynomial to each column of a spectrogram.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        (optional) spectrogram magnitude
    n_fft : int > 0 [scalar]
        FFT window size
    hop_length : int > 0 [scalar]
        hop length for STFT. See `librosa.core.stft` for details.
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()` and zero-padded
        to match `n_fft`. If unspecified, defaults to ``win_length = n_fft``.
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        Window specification; see `filters.get_window`.
    center : boolean
        If `True`, frame `t` is centered at `y[t * hop_length]`;
        otherwise frame `t` begins at `y[t * hop_length]`.
    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal. By default, STFT uses reflection padding.
    order : int > 0
        order of the polynomial to fit
    freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
        Center frequencies for spectrogram bins.  If `None`, FFT bin
        center frequencies are used.  Otherwise, a single array of `d`
        center frequencies, or a matrix of per-frame center frequencies
        as constructed by `librosa.core.ifgram`.

    Returns
    -------
    coefficients : np.ndarray [shape=(order+1, t)]
        Polynomial coefficients for each frame, ordered from the
        highest degree (`coefficients[0]`, degree `order`) down to
        the constant term (`coefficients[order]`).
    '''
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)

    # Default to FFT bin center frequencies
    if freq is None:
        freq = fft_frequencies(sr=sr, n_fft=n_fft)

    if freq.ndim == 1:
        # A single shared frequency axis: one vectorized fit covers
        # every frame at once.
        return np.polyfit(freq, S, order)

    # Per-frame frequency axes: fit each column independently,
    # then stack the per-frame coefficient vectors.
    fits = [np.polyfit(freq[:, idx], S[:, idx], order)
            for idx in range(S.shape[1])]
    return np.vstack(fits).T
|
[
"Get",
"coefficients",
"of",
"fitting",
"an",
"nth",
"-",
"order",
"polynomial",
"to",
"the",
"columns",
"of",
"a",
"spectrogram",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L831-L958
|
[
"def",
"poly_features",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"order",
"=",
"1",
",",
"freq",
"=",
"None",
")",
":",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"# Compute the center frequencies of each bin",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"# If frequencies are constant over frames, then we only need to fit once",
"if",
"freq",
".",
"ndim",
"==",
"1",
":",
"coefficients",
"=",
"np",
".",
"polyfit",
"(",
"freq",
",",
"S",
",",
"order",
")",
"else",
":",
"# Else, fit each frame independently and stack the results",
"coefficients",
"=",
"np",
".",
"concatenate",
"(",
"[",
"[",
"np",
".",
"polyfit",
"(",
"freq",
"[",
":",
",",
"i",
"]",
",",
"S",
"[",
":",
",",
"i",
"]",
",",
"order",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"S",
".",
"shape",
"[",
"1",
"]",
")",
"]",
",",
"axis",
"=",
"0",
")",
".",
"T",
"return",
"coefficients"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
zero_crossing_rate
|
Compute the zero-crossing rate of an audio time series.
Parameters
----------
y : np.ndarray [shape=(n,)]
Audio time series
frame_length : int > 0
Length of the frame over which to compute zero crossing rates
hop_length : int > 0
Number of samples to advance for each frame
center : bool
If `True`, frames are centered by padding the edges of `y`.
This is similar to the padding in `librosa.core.stft`,
but uses edge-value copies instead of reflection.
kwargs : additional keyword arguments
See `librosa.core.zero_crossings`
.. note:: By default, the `pad` parameter is set to `False`, which
differs from the default specified by
`librosa.core.zero_crossings`.
Returns
-------
zcr : np.ndarray [shape=(1, t)]
`zcr[0, i]` is the fraction of zero crossings in the
`i` th frame
See Also
--------
librosa.core.zero_crossings
Compute zero-crossings in a time-series
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.feature.zero_crossing_rate(y)
array([[ 0.134, 0.139, ..., 0.387, 0.322]])
|
librosa/feature/spectral.py
|
def zero_crossing_rate(y, frame_length=2048, hop_length=512, center=True,
                       **kwargs):
    '''Compute the zero-crossing rate of an audio time series.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Audio time series
    frame_length : int > 0
        Length of the frame over which to compute zero crossing rates
    hop_length : int > 0
        Number of samples to advance for each frame
    center : bool
        If `True`, frames are centered by padding the edges of `y`
        with edge-value copies (similar to the padding in
        `librosa.core.stft`, but without reflection).
    kwargs : additional keyword arguments
        See `librosa.core.zero_crossings`

        .. note:: By default, the `pad` parameter is set to `False`,
            which differs from the default specified by
            `librosa.core.zero_crossings`.

    Returns
    -------
    zcr : np.ndarray [shape=(1, t)]
        `zcr[0, i]` is the fraction of zero crossings in the
        `i` th frame

    See Also
    --------
    librosa.core.zero_crossings
        Compute zero-crossings in a time-series
    '''
    util.valid_audio(y)

    if center:
        # Edge-pad so frame t is centered at y[t * hop_length]
        y = np.pad(y, int(frame_length // 2), mode='edge')

    frames = util.frame(y, frame_length, hop_length)

    # Detect crossings along the sample axis of each frame; unlike
    # zero_crossings' own default, do not pad within the frame.
    kwargs['axis'] = 0
    kwargs.setdefault('pad', False)

    return np.mean(zero_crossings(frames, **kwargs),
                   axis=0, keepdims=True)
|
def zero_crossing_rate(y, frame_length=2048, hop_length=512, center=True,
                       **kwargs):
    '''Compute the zero-crossing rate of an audio time series.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Audio time series
    frame_length : int > 0
        Length of the frame over which to compute zero crossing rates
    hop_length : int > 0
        Number of samples to advance for each frame
    center : bool
        If `True`, frames are centered by padding the edges of `y`
        with edge-value copies (similar to the padding in
        `librosa.core.stft`, but without reflection).
    kwargs : additional keyword arguments
        See `librosa.core.zero_crossings`

        .. note:: By default, the `pad` parameter is set to `False`,
            which differs from the default specified by
            `librosa.core.zero_crossings`.

    Returns
    -------
    zcr : np.ndarray [shape=(1, t)]
        `zcr[0, i]` is the fraction of zero crossings in the
        `i` th frame

    See Also
    --------
    librosa.core.zero_crossings
        Compute zero-crossings in a time-series
    '''
    util.valid_audio(y)

    if center:
        # Edge-pad so frame t is centered at y[t * hop_length]
        y = np.pad(y, int(frame_length // 2), mode='edge')

    frames = util.frame(y, frame_length, hop_length)

    # Detect crossings along the sample axis of each frame; unlike
    # zero_crossings' own default, do not pad within the frame.
    kwargs['axis'] = 0
    kwargs.setdefault('pad', False)

    return np.mean(zero_crossings(frames, **kwargs),
                   axis=0, keepdims=True)
|
[
"Compute",
"the",
"zero",
"-",
"crossing",
"rate",
"of",
"an",
"audio",
"time",
"series",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L961-L1019
|
[
"def",
"zero_crossing_rate",
"(",
"y",
",",
"frame_length",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"center",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"util",
".",
"valid_audio",
"(",
"y",
")",
"if",
"center",
":",
"y",
"=",
"np",
".",
"pad",
"(",
"y",
",",
"int",
"(",
"frame_length",
"//",
"2",
")",
",",
"mode",
"=",
"'edge'",
")",
"y_framed",
"=",
"util",
".",
"frame",
"(",
"y",
",",
"frame_length",
",",
"hop_length",
")",
"kwargs",
"[",
"'axis'",
"]",
"=",
"0",
"kwargs",
".",
"setdefault",
"(",
"'pad'",
",",
"False",
")",
"crossings",
"=",
"zero_crossings",
"(",
"y_framed",
",",
"*",
"*",
"kwargs",
")",
"return",
"np",
".",
"mean",
"(",
"crossings",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
chroma_stft
|
Compute a chromagram from a waveform or power spectrogram.
This implementation is derived from `chromagram_E` [1]_
.. [1] Ellis, Daniel P.W. "Chroma feature analysis and synthesis"
2007/04/21
http://labrosa.ee.columbia.edu/matlab/chroma-ansyn/
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
power spectrogram
norm : float or None
Column-wise normalization.
See `librosa.util.normalize` for details.
If `None`, no normalization is performed.
n_fft : int > 0 [scalar]
FFT window size if provided `y, sr` instead of `S`
hop_length : int > 0 [scalar]
hop length if provided `y, sr` instead of `S`
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
tuning : float in `[-0.5, 0.5)` [scalar] or None.
Deviation from A440 tuning in fractional bins (cents).
If `None`, it is automatically estimated.
kwargs : additional keyword arguments
Arguments to parameterize chroma filters.
See `librosa.filters.chroma` for details.
Returns
-------
chromagram : np.ndarray [shape=(n_chroma, t)]
Normalized energy for each chroma bin at each frame.
See Also
--------
librosa.filters.chroma
Chroma filter bank construction
librosa.util.normalize
Vector normalization
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.feature.chroma_stft(y=y, sr=sr)
array([[ 0.974, 0.881, ..., 0.925, 1. ],
[ 1. , 0.841, ..., 0.882, 0.878],
...,
[ 0.658, 0.985, ..., 0.878, 0.764],
[ 0.969, 0.92 , ..., 0.974, 0.915]])
Use an energy (magnitude) spectrum instead of power spectrogram
>>> S = np.abs(librosa.stft(y))
>>> chroma = librosa.feature.chroma_stft(S=S, sr=sr)
>>> chroma
array([[ 0.884, 0.91 , ..., 0.861, 0.858],
[ 0.963, 0.785, ..., 0.968, 0.896],
...,
[ 0.871, 1. , ..., 0.928, 0.829],
[ 1. , 0.982, ..., 0.93 , 0.878]])
Use a pre-computed power spectrogram with a larger frame
>>> S = np.abs(librosa.stft(y, n_fft=4096))**2
>>> chroma = librosa.feature.chroma_stft(S=S, sr=sr)
>>> chroma
array([[ 0.685, 0.477, ..., 0.961, 0.986],
[ 0.674, 0.452, ..., 0.952, 0.926],
...,
[ 0.844, 0.575, ..., 0.934, 0.869],
[ 0.793, 0.663, ..., 0.964, 0.972]])
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 4))
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Chromagram')
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def chroma_stft(y=None, sr=22050, S=None, norm=np.inf, n_fft=2048,
                hop_length=512, win_length=None, window='hann', center=True,
                pad_mode='reflect', tuning=None, **kwargs):
    """Compute a chromagram from a waveform or power spectrogram.

    This implementation is derived from `chromagram_E` [1]_

    .. [1] Ellis, Daniel P.W. "Chroma feature analysis and synthesis"
           2007/04/21
           http://labrosa.ee.columbia.edu/matlab/chroma-ansyn/

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        power spectrogram
    norm : float or None
        Column-wise normalization.  See `librosa.util.normalize`
        for details.  If `None`, no normalization is performed.
    n_fft : int > 0 [scalar]
        FFT window size if provided `y, sr` instead of `S`
    hop_length : int > 0 [scalar]
        hop length if provided `y, sr` instead of `S`
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()` and zero-padded
        to match `n_fft`. If unspecified, defaults to ``win_length = n_fft``.
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        Window specification; see `filters.get_window`.
    center : boolean
        If `True`, frame `t` is centered at `y[t * hop_length]`;
        otherwise frame `t` begins at `y[t * hop_length]`.
    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.
    tuning : float in `[-0.5, 0.5)` [scalar] or None.
        Deviation from A440 tuning in fractional bins (cents).
        If `None`, it is automatically estimated.
    kwargs : additional keyword arguments
        Arguments to parameterize chroma filters.
        See `librosa.filters.chroma` for details.

    Returns
    -------
    chromagram : np.ndarray [shape=(n_chroma, t)]
        Normalized energy for each chroma bin at each frame.

    See Also
    --------
    librosa.filters.chroma
        Chroma filter bank construction
    librosa.util.normalize
        Vector normalization
    """
    # Always work from a power spectrogram
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length, power=2,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)

    n_chroma = kwargs.get('n_chroma', 12)

    # Estimate the tuning deviation when none was supplied
    if tuning is None:
        tuning = estimate_tuning(S=S, sr=sr, bins_per_octave=n_chroma)

    # Shift the reference pitch by the tuning estimate, unless the
    # caller provided an explicit A440
    kwargs.setdefault('A440', 440.0 * 2.0**(float(tuning) / n_chroma))

    # Project the spectrogram onto the chroma filter bank
    chromafb = filters.chroma(sr, n_fft, **kwargs)
    raw_chroma = chromafb.dot(S)

    # Normalize each frame (column)
    return util.normalize(raw_chroma, norm=norm, axis=0)
|
def chroma_stft(y=None, sr=22050, S=None, norm=np.inf, n_fft=2048,
                hop_length=512, win_length=None, window='hann', center=True,
                pad_mode='reflect', tuning=None, **kwargs):
    """Compute a chromagram from a waveform or power spectrogram.

    This implementation is derived from `chromagram_E` [1]_

    .. [1] Ellis, Daniel P.W. "Chroma feature analysis and synthesis"
           2007/04/21
           http://labrosa.ee.columbia.edu/matlab/chroma-ansyn/

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        power spectrogram
    norm : float or None
        Column-wise normalization.  See `librosa.util.normalize`
        for details.  If `None`, no normalization is performed.
    n_fft : int > 0 [scalar]
        FFT window size if provided `y, sr` instead of `S`
    hop_length : int > 0 [scalar]
        hop length if provided `y, sr` instead of `S`
    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by `window()` and zero-padded
        to match `n_fft`. If unspecified, defaults to ``win_length = n_fft``.
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        Window specification; see `filters.get_window`.
    center : boolean
        If `True`, frame `t` is centered at `y[t * hop_length]`;
        otherwise frame `t` begins at `y[t * hop_length]`.
    pad_mode : string
        If `center=True`, the padding mode to use at the edges of the
        signal.  By default, STFT uses reflection padding.
    tuning : float in `[-0.5, 0.5)` [scalar] or None.
        Deviation from A440 tuning in fractional bins (cents).
        If `None`, it is automatically estimated.
    kwargs : additional keyword arguments
        Arguments to parameterize chroma filters.
        See `librosa.filters.chroma` for details.

    Returns
    -------
    chromagram : np.ndarray [shape=(n_chroma, t)]
        Normalized energy for each chroma bin at each frame.

    See Also
    --------
    librosa.filters.chroma
        Chroma filter bank construction
    librosa.util.normalize
        Vector normalization
    """
    # Always work from a power spectrogram
    S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length, power=2,
                            win_length=win_length, window=window, center=center,
                            pad_mode=pad_mode)

    n_chroma = kwargs.get('n_chroma', 12)

    # Estimate the tuning deviation when none was supplied
    if tuning is None:
        tuning = estimate_tuning(S=S, sr=sr, bins_per_octave=n_chroma)

    # Shift the reference pitch by the tuning estimate, unless the
    # caller provided an explicit A440
    kwargs.setdefault('A440', 440.0 * 2.0**(float(tuning) / n_chroma))

    # Project the spectrogram onto the chroma filter bank
    chromafb = filters.chroma(sr, n_fft, **kwargs)
    raw_chroma = chromafb.dot(S)

    # Normalize each frame (column)
    return util.normalize(raw_chroma, norm=norm, axis=0)
|
[
"Compute",
"a",
"chromagram",
"from",
"a",
"waveform",
"or",
"power",
"spectrogram",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L1023-L1162
|
[
"def",
"chroma_stft",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"norm",
"=",
"np",
".",
"inf",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"tuning",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"power",
"=",
"2",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"n_chroma",
"=",
"kwargs",
".",
"get",
"(",
"'n_chroma'",
",",
"12",
")",
"if",
"tuning",
"is",
"None",
":",
"tuning",
"=",
"estimate_tuning",
"(",
"S",
"=",
"S",
",",
"sr",
"=",
"sr",
",",
"bins_per_octave",
"=",
"n_chroma",
")",
"# Get the filter bank",
"if",
"'A440'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'A440'",
"]",
"=",
"440.0",
"*",
"2.0",
"**",
"(",
"float",
"(",
"tuning",
")",
"/",
"n_chroma",
")",
"chromafb",
"=",
"filters",
".",
"chroma",
"(",
"sr",
",",
"n_fft",
",",
"*",
"*",
"kwargs",
")",
"# Compute raw chroma",
"raw_chroma",
"=",
"np",
".",
"dot",
"(",
"chromafb",
",",
"S",
")",
"# Compute normalization factor for each frame",
"return",
"util",
".",
"normalize",
"(",
"raw_chroma",
",",
"norm",
"=",
"norm",
",",
"axis",
"=",
"0",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
chroma_cqt
|
r'''Constant-Q chromagram
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0
sampling rate of `y`
C : np.ndarray [shape=(d, t)] [Optional]
a pre-computed constant-Q spectrogram
hop_length : int > 0
number of samples between successive chroma frames
fmin : float > 0
minimum frequency to analyze in the CQT.
Default: 'C1' ~= 32.7 Hz
norm : int > 0, +-np.inf, or None
Column-wise normalization of the chromagram.
threshold : float
Pre-normalization energy threshold. Values below the
threshold are discarded, resulting in a sparse chromagram.
tuning : float
Deviation (in cents) from A440 tuning
n_chroma : int > 0
Number of chroma bins to produce
n_octaves : int > 0
Number of octaves to analyze above `fmin`
window : None or np.ndarray
Optional window parameter to `filters.cq_to_chroma`
bins_per_octave : int > 0
Number of bins per octave in the CQT.
Default: matches `n_chroma`
cqt_mode : ['full', 'hybrid']
Constant-Q transform mode
Returns
-------
chromagram : np.ndarray [shape=(n_chroma, t)]
The output chromagram
See Also
--------
librosa.util.normalize
librosa.core.cqt
librosa.core.hybrid_cqt
chroma_stft
Examples
--------
Compare a long-window STFT chromagram to the CQT chromagram
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=10, duration=15)
>>> chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr,
... n_chroma=12, n_fft=4096)
>>> chroma_cq = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(chroma_stft, y_axis='chroma')
>>> plt.title('chroma_stft')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(chroma_cq, y_axis='chroma', x_axis='time')
>>> plt.title('chroma_cqt')
>>> plt.colorbar()
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def chroma_cqt(y=None, sr=22050, C=None, hop_length=512, fmin=None,
               norm=np.inf, threshold=0.0, tuning=None, n_chroma=12,
               n_octaves=7, window=None, bins_per_octave=None, cqt_mode='full'):
    r'''Constant-Q chromagram

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    sr : number > 0
        sampling rate of `y`
    C : np.ndarray [shape=(d, t)] [Optional]
        a pre-computed constant-Q spectrogram
    hop_length : int > 0
        number of samples between successive chroma frames
    fmin : float > 0
        minimum frequency to analyze in the CQT.
        Default: 'C1' ~= 32.7 Hz
    norm : int > 0, +-np.inf, or None
        Column-wise normalization of the chromagram.
    threshold : float
        Pre-normalization energy threshold.  Values below the
        threshold are discarded, resulting in a sparse chromagram.
    tuning : float
        Deviation (in cents) from A440 tuning
    n_chroma : int > 0
        Number of chroma bins to produce
    n_octaves : int > 0
        Number of octaves to analyze above `fmin`
    window : None or np.ndarray
        Optional window parameter to `filters.cq_to_chroma`
    bins_per_octave : int > 0
        Number of bins per octave in the CQT.
        Default: matches `n_chroma`
    cqt_mode : ['full', 'hybrid']
        Constant-Q transform mode

    Returns
    -------
    chromagram : np.ndarray [shape=(n_chroma, t)]
        The output chromagram

    See Also
    --------
    librosa.util.normalize
    librosa.core.cqt
    librosa.core.hybrid_cqt
    chroma_stft
    '''
    # Dispatch table keeps an invalid cqt_mode raising KeyError
    cqt_func = {'full': cqt, 'hybrid': hybrid_cqt}

    if bins_per_octave is None:
        bins_per_octave = n_chroma

    # Compute the (magnitude) constant-Q transform when a pre-computed
    # one was not supplied
    if C is None:
        C = np.abs(cqt_func[cqt_mode](y, sr=sr,
                                      hop_length=hop_length,
                                      fmin=fmin,
                                      n_bins=n_octaves * bins_per_octave,
                                      bins_per_octave=bins_per_octave,
                                      tuning=tuning))

    # Fold the CQT bins onto chroma classes
    chroma_map = filters.cq_to_chroma(C.shape[0],
                                      bins_per_octave=bins_per_octave,
                                      n_chroma=n_chroma,
                                      fmin=fmin,
                                      window=window)
    chroma = chroma_map.dot(C)

    # Zero out low-energy entries to sparsify the result
    if threshold is not None:
        chroma[chroma < threshold] = 0.0

    # Column-wise normalization
    if norm is not None:
        chroma = util.normalize(chroma, norm=norm, axis=0)

    return chroma
|
def chroma_cqt(y=None, sr=22050, C=None, hop_length=512, fmin=None,
               norm=np.inf, threshold=0.0, tuning=None, n_chroma=12,
               n_octaves=7, window=None, bins_per_octave=None, cqt_mode='full'):
    r'''Constant-Q chromagram

    Computes a chromagram by folding the bins of a constant-Q transform
    onto pitch classes.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    sr : number > 0
        sampling rate of `y`
    C : np.ndarray [shape=(d, t)] [Optional]
        a pre-computed constant-Q spectrogram; if given, `y` is ignored
    hop_length : int > 0
        number of samples between successive chroma frames
    fmin : float > 0
        minimum frequency to analyze in the CQT.
        Default: 'C1' ~= 32.7 Hz
    norm : int > 0, +-np.inf, or None
        Column-wise normalization of the chromagram.
        `None` disables normalization.
    threshold : float
        Pre-normalization energy threshold.  Values below the
        threshold are zeroed out, producing a sparse chromagram.
    tuning : float
        Deviation (in cents) from A440 tuning
    n_chroma : int > 0
        Number of chroma bins to produce
    n_octaves : int > 0
        Number of octaves to analyze above `fmin`
    window : None or np.ndarray
        Optional window parameter to `filters.cq_to_chroma`
    bins_per_octave : int > 0
        Number of bins per octave in the CQT.
        Default: matches `n_chroma`
    cqt_mode : ['full', 'hybrid']
        Constant-Q transform mode

    Returns
    -------
    chromagram : np.ndarray [shape=(n_chroma, t)]
        The output chromagram

    See Also
    --------
    librosa.util.normalize
    librosa.core.cqt
    librosa.core.hybrid_cqt
    chroma_stft
    '''
    # Dispatch table: which CQT implementation to run for each mode.
    transforms = {'full': cqt, 'hybrid': hybrid_cqt}

    if bins_per_octave is None:
        bins_per_octave = n_chroma

    # Compute the constant-Q magnitude spectrogram unless one was supplied.
    if C is None:
        transform = transforms[cqt_mode]
        C = np.abs(transform(y, sr=sr,
                             hop_length=hop_length,
                             fmin=fmin,
                             n_bins=n_octaves * bins_per_octave,
                             bins_per_octave=bins_per_octave,
                             tuning=tuning))

    # Build the CQT-bin -> chroma-class folding matrix and apply it.
    fold = filters.cq_to_chroma(C.shape[0],
                                bins_per_octave=bins_per_octave,
                                n_chroma=n_chroma,
                                fmin=fmin,
                                window=window)
    chroma = fold.dot(C)

    # Sparsify: discard energy below the threshold.
    if threshold is not None:
        chroma[chroma < threshold] = 0.0

    # Column-wise normalization, if requested.
    if norm is not None:
        chroma = util.normalize(chroma, norm=norm, axis=0)

    return chroma
|
[
"r",
"Constant",
"-",
"Q",
"chromagram"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L1165-L1280
|
[
"def",
"chroma_cqt",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"C",
"=",
"None",
",",
"hop_length",
"=",
"512",
",",
"fmin",
"=",
"None",
",",
"norm",
"=",
"np",
".",
"inf",
",",
"threshold",
"=",
"0.0",
",",
"tuning",
"=",
"None",
",",
"n_chroma",
"=",
"12",
",",
"n_octaves",
"=",
"7",
",",
"window",
"=",
"None",
",",
"bins_per_octave",
"=",
"None",
",",
"cqt_mode",
"=",
"'full'",
")",
":",
"cqt_func",
"=",
"{",
"'full'",
":",
"cqt",
",",
"'hybrid'",
":",
"hybrid_cqt",
"}",
"if",
"bins_per_octave",
"is",
"None",
":",
"bins_per_octave",
"=",
"n_chroma",
"# Build the CQT if we don't have one already",
"if",
"C",
"is",
"None",
":",
"C",
"=",
"np",
".",
"abs",
"(",
"cqt_func",
"[",
"cqt_mode",
"]",
"(",
"y",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
",",
"fmin",
"=",
"fmin",
",",
"n_bins",
"=",
"n_octaves",
"*",
"bins_per_octave",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
")",
")",
"# Map to chroma",
"cq_to_chr",
"=",
"filters",
".",
"cq_to_chroma",
"(",
"C",
".",
"shape",
"[",
"0",
"]",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"n_chroma",
"=",
"n_chroma",
",",
"fmin",
"=",
"fmin",
",",
"window",
"=",
"window",
")",
"chroma",
"=",
"cq_to_chr",
".",
"dot",
"(",
"C",
")",
"if",
"threshold",
"is",
"not",
"None",
":",
"chroma",
"[",
"chroma",
"<",
"threshold",
"]",
"=",
"0.0",
"# Normalize",
"if",
"norm",
"is",
"not",
"None",
":",
"chroma",
"=",
"util",
".",
"normalize",
"(",
"chroma",
",",
"norm",
"=",
"norm",
",",
"axis",
"=",
"0",
")",
"return",
"chroma"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
chroma_cens
|
r'''Computes the chroma variant "Chroma Energy Normalized" (CENS), following [1]_.
To compute CENS features, following steps are taken after obtaining chroma vectors using `chroma_cqt`:
1. L-1 normalization of each chroma vector
2. Quantization of amplitude based on "log-like" amplitude thresholds
3. (optional) Smoothing with sliding window. Default window length = 41 frames
4. (not implemented) Downsampling
CENS features are robust to dynamics, timbre and articulation, thus these are commonly used in audio
matching and retrieval applications.
.. [1] Meinard Müller and Sebastian Ewert
"Chroma Toolbox: MATLAB implementations for extracting variants of chroma-based audio features"
In Proceedings of the International Conference on Music Information Retrieval (ISMIR), 2011.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0
sampling rate of `y`
C : np.ndarray [shape=(d, t)] [Optional]
a pre-computed constant-Q spectrogram
hop_length : int > 0
number of samples between successive chroma frames
fmin : float > 0
minimum frequency to analyze in the CQT.
Default: 'C1' ~= 32.7 Hz
norm : int > 0, +-np.inf, or None
Column-wise normalization of the chromagram.
tuning : float
Deviation (in cents) from A440 tuning
n_chroma : int > 0
Number of chroma bins to produce
n_octaves : int > 0
Number of octaves to analyze above `fmin`
window : None or np.ndarray
Optional window parameter to `filters.cq_to_chroma`
bins_per_octave : int > 0
Number of bins per octave in the CQT.
Default: matches `n_chroma`
cqt_mode : ['full', 'hybrid']
Constant-Q transform mode
win_len_smooth : int > 0 or None
Length of temporal smoothing window. `None` disables temporal smoothing.
Default: 41
smoothing_window : str, float or tuple
Type of window function for temporal smoothing. See `filters.get_window` for possible inputs.
Default: 'hann'
Returns
-------
chroma_cens : np.ndarray [shape=(n_chroma, t)]
The output cens-chromagram
See Also
--------
chroma_cqt
Compute a chromagram from a constant-Q transform.
chroma_stft
Compute a chromagram from an STFT spectrogram or waveform.
filters.get_window
Compute a window function.
Examples
--------
Compare standard cqt chroma to CENS.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=10, duration=15)
>>> chroma_cens = librosa.feature.chroma_cens(y=y, sr=sr)
>>> chroma_cq = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(chroma_cq, y_axis='chroma')
>>> plt.title('chroma_cq')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(chroma_cens, y_axis='chroma', x_axis='time')
>>> plt.title('chroma_cens')
>>> plt.colorbar()
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def chroma_cens(y=None, sr=22050, C=None, hop_length=512, fmin=None,
                tuning=None, n_chroma=12,
                n_octaves=7, bins_per_octave=None, cqt_mode='full', window=None,
                norm=2, win_len_smooth=41, smoothing_window='hann'):
    r'''Computes the chroma variant "Chroma Energy Normalized" (CENS), following [1]_.

    Starting from a `chroma_cqt` chromagram, CENS applies:

    1. L-1 normalization of each chroma vector
    2. Quantization of amplitude based on "log-like" amplitude thresholds
    3. (optional) Smoothing with sliding window. Default window length = 41 frames
    4. (not implemented) Downsampling

    CENS features are robust to dynamics, timbre and articulation, and are
    commonly used in audio matching and retrieval applications.

    .. [1] Meinard Müller and Sebastian Ewert
           "Chroma Toolbox: MATLAB implementations for extracting variants of chroma-based audio features"
           In Proceedings of the International Conference on Music Information Retrieval (ISMIR), 2011.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    sr : number > 0
        sampling rate of `y`
    C : np.ndarray [shape=(d, t)] [Optional]
        a pre-computed constant-Q spectrogram
    hop_length : int > 0
        number of samples between successive chroma frames
    fmin : float > 0
        minimum frequency to analyze in the CQT.
        Default: 'C1' ~= 32.7 Hz
    norm : int > 0, +-np.inf, or None
        Column-wise normalization of the final chromagram.
    tuning : float
        Deviation (in cents) from A440 tuning
    n_chroma : int > 0
        Number of chroma bins to produce
    n_octaves : int > 0
        Number of octaves to analyze above `fmin`
    window : None or np.ndarray
        Optional window parameter to `filters.cq_to_chroma`
    bins_per_octave : int > 0
        Number of bins per octave in the CQT.
        Default: matches `n_chroma`
    cqt_mode : ['full', 'hybrid']
        Constant-Q transform mode
    win_len_smooth : int > 0 or None
        Length of temporal smoothing window. `None` disables temporal smoothing.
        Default: 41
    smoothing_window : str, float or tuple
        Type of window function for temporal smoothing.
        See `filters.get_window` for possible inputs.
        Default: 'hann'

    Returns
    -------
    chroma_cens : np.ndarray [shape=(n_chroma, t)]
        The output cens-chromagram

    Raises
    ------
    ParameterError
        if `win_len_smooth` is neither `None` nor a positive integer

    See Also
    --------
    chroma_cqt
        Compute a chromagram from a constant-Q transform.
    chroma_stft
        Compute a chromagram from an STFT spectrogram or waveform.
    filters.get_window
        Compute a window function.
    '''
    # Validate the smoothing-window length up front.
    valid_smooth = (win_len_smooth is None or
                    (isinstance(win_len_smooth, int) and win_len_smooth > 0))
    if not valid_smooth:
        raise ParameterError('win_len_smooth={} must be a positive integer or None'.format(win_len_smooth))

    # Un-normalized CQT chromagram; normalization happens below.
    chroma = chroma_cqt(y=y, C=C, sr=sr,
                        hop_length=hop_length,
                        fmin=fmin,
                        bins_per_octave=bins_per_octave,
                        tuning=tuning,
                        norm=None,
                        n_chroma=n_chroma,
                        n_octaves=n_octaves,
                        cqt_mode=cqt_mode,
                        window=window)

    # Step 1: L1-normalize each chroma vector.
    chroma = util.normalize(chroma, norm=1, axis=0)

    # Step 2: log-like amplitude quantization — each threshold that a value
    # exceeds contributes its weight to the quantized amplitude.
    quant_steps = [0.4, 0.2, 0.1, 0.05]
    quant_weights = [0.25, 0.25, 0.25, 0.25]

    chroma_quant = np.zeros_like(chroma)
    for step, weight in zip(quant_steps, quant_weights):
        chroma_quant += weight * (chroma > step)

    # Step 3 (optional): temporal smoothing with a normalized window.
    if win_len_smooth:
        win = filters.get_window(smoothing_window, win_len_smooth + 2, fftbins=False)
        win /= np.sum(win)
        cens = scipy.signal.convolve2d(chroma_quant, np.atleast_2d(win),
                                       mode='same', boundary='fill')
    else:
        cens = chroma_quant

    # Final column-wise normalization (L2 by default).
    return util.normalize(cens, norm=norm, axis=0)
|
def chroma_cens(y=None, sr=22050, C=None, hop_length=512, fmin=None,
                tuning=None, n_chroma=12,
                n_octaves=7, bins_per_octave=None, cqt_mode='full', window=None,
                norm=2, win_len_smooth=41, smoothing_window='hann'):
    r'''Computes the chroma variant "Chroma Energy Normalized" (CENS), following [1]_.

    Starting from a `chroma_cqt` chromagram, CENS applies:

    1. L-1 normalization of each chroma vector
    2. Quantization of amplitude based on "log-like" amplitude thresholds
    3. (optional) Smoothing with sliding window. Default window length = 41 frames
    4. (not implemented) Downsampling

    CENS features are robust to dynamics, timbre and articulation, and are
    commonly used in audio matching and retrieval applications.

    .. [1] Meinard Müller and Sebastian Ewert
           "Chroma Toolbox: MATLAB implementations for extracting variants of chroma-based audio features"
           In Proceedings of the International Conference on Music Information Retrieval (ISMIR), 2011.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series
    sr : number > 0
        sampling rate of `y`
    C : np.ndarray [shape=(d, t)] [Optional]
        a pre-computed constant-Q spectrogram
    hop_length : int > 0
        number of samples between successive chroma frames
    fmin : float > 0
        minimum frequency to analyze in the CQT.
        Default: 'C1' ~= 32.7 Hz
    norm : int > 0, +-np.inf, or None
        Column-wise normalization of the final chromagram.
    tuning : float
        Deviation (in cents) from A440 tuning
    n_chroma : int > 0
        Number of chroma bins to produce
    n_octaves : int > 0
        Number of octaves to analyze above `fmin`
    window : None or np.ndarray
        Optional window parameter to `filters.cq_to_chroma`
    bins_per_octave : int > 0
        Number of bins per octave in the CQT.
        Default: matches `n_chroma`
    cqt_mode : ['full', 'hybrid']
        Constant-Q transform mode
    win_len_smooth : int > 0 or None
        Length of temporal smoothing window. `None` disables temporal smoothing.
        Default: 41
    smoothing_window : str, float or tuple
        Type of window function for temporal smoothing.
        See `filters.get_window` for possible inputs.
        Default: 'hann'

    Returns
    -------
    chroma_cens : np.ndarray [shape=(n_chroma, t)]
        The output cens-chromagram

    Raises
    ------
    ParameterError
        if `win_len_smooth` is neither `None` nor a positive integer

    See Also
    --------
    chroma_cqt
        Compute a chromagram from a constant-Q transform.
    chroma_stft
        Compute a chromagram from an STFT spectrogram or waveform.
    filters.get_window
        Compute a window function.
    '''
    # Validate the smoothing-window length up front.
    valid_smooth = (win_len_smooth is None or
                    (isinstance(win_len_smooth, int) and win_len_smooth > 0))
    if not valid_smooth:
        raise ParameterError('win_len_smooth={} must be a positive integer or None'.format(win_len_smooth))

    # Un-normalized CQT chromagram; normalization happens below.
    chroma = chroma_cqt(y=y, C=C, sr=sr,
                        hop_length=hop_length,
                        fmin=fmin,
                        bins_per_octave=bins_per_octave,
                        tuning=tuning,
                        norm=None,
                        n_chroma=n_chroma,
                        n_octaves=n_octaves,
                        cqt_mode=cqt_mode,
                        window=window)

    # Step 1: L1-normalize each chroma vector.
    chroma = util.normalize(chroma, norm=1, axis=0)

    # Step 2: log-like amplitude quantization — each threshold that a value
    # exceeds contributes its weight to the quantized amplitude.
    quant_steps = [0.4, 0.2, 0.1, 0.05]
    quant_weights = [0.25, 0.25, 0.25, 0.25]

    chroma_quant = np.zeros_like(chroma)
    for step, weight in zip(quant_steps, quant_weights):
        chroma_quant += weight * (chroma > step)

    # Step 3 (optional): temporal smoothing with a normalized window.
    if win_len_smooth:
        win = filters.get_window(smoothing_window, win_len_smooth + 2, fftbins=False)
        win /= np.sum(win)
        cens = scipy.signal.convolve2d(chroma_quant, np.atleast_2d(win),
                                       mode='same', boundary='fill')
    else:
        cens = chroma_quant

    # Final column-wise normalization (L2 by default).
    return util.normalize(cens, norm=norm, axis=0)
|
[
"r",
"Computes",
"the",
"chroma",
"variant",
"Chroma",
"Energy",
"Normalized",
"(",
"CENS",
")",
"following",
"[",
"1",
"]",
"_",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L1283-L1426
|
[
"def",
"chroma_cens",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"C",
"=",
"None",
",",
"hop_length",
"=",
"512",
",",
"fmin",
"=",
"None",
",",
"tuning",
"=",
"None",
",",
"n_chroma",
"=",
"12",
",",
"n_octaves",
"=",
"7",
",",
"bins_per_octave",
"=",
"None",
",",
"cqt_mode",
"=",
"'full'",
",",
"window",
"=",
"None",
",",
"norm",
"=",
"2",
",",
"win_len_smooth",
"=",
"41",
",",
"smoothing_window",
"=",
"'hann'",
")",
":",
"if",
"not",
"(",
"(",
"win_len_smooth",
"is",
"None",
")",
"or",
"(",
"isinstance",
"(",
"win_len_smooth",
",",
"int",
")",
"and",
"win_len_smooth",
">",
"0",
")",
")",
":",
"raise",
"ParameterError",
"(",
"'win_len_smooth={} must be a positive integer or None'",
".",
"format",
"(",
"win_len_smooth",
")",
")",
"chroma",
"=",
"chroma_cqt",
"(",
"y",
"=",
"y",
",",
"C",
"=",
"C",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
",",
"fmin",
"=",
"fmin",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
",",
"norm",
"=",
"None",
",",
"n_chroma",
"=",
"n_chroma",
",",
"n_octaves",
"=",
"n_octaves",
",",
"cqt_mode",
"=",
"cqt_mode",
",",
"window",
"=",
"window",
")",
"# L1-Normalization",
"chroma",
"=",
"util",
".",
"normalize",
"(",
"chroma",
",",
"norm",
"=",
"1",
",",
"axis",
"=",
"0",
")",
"# Quantize amplitudes",
"QUANT_STEPS",
"=",
"[",
"0.4",
",",
"0.2",
",",
"0.1",
",",
"0.05",
"]",
"QUANT_WEIGHTS",
"=",
"[",
"0.25",
",",
"0.25",
",",
"0.25",
",",
"0.25",
"]",
"chroma_quant",
"=",
"np",
".",
"zeros_like",
"(",
"chroma",
")",
"for",
"cur_quant_step_idx",
",",
"cur_quant_step",
"in",
"enumerate",
"(",
"QUANT_STEPS",
")",
":",
"chroma_quant",
"+=",
"(",
"chroma",
">",
"cur_quant_step",
")",
"*",
"QUANT_WEIGHTS",
"[",
"cur_quant_step_idx",
"]",
"if",
"win_len_smooth",
":",
"# Apply temporal smoothing",
"win",
"=",
"filters",
".",
"get_window",
"(",
"smoothing_window",
",",
"win_len_smooth",
"+",
"2",
",",
"fftbins",
"=",
"False",
")",
"win",
"/=",
"np",
".",
"sum",
"(",
"win",
")",
"win",
"=",
"np",
".",
"atleast_2d",
"(",
"win",
")",
"cens",
"=",
"scipy",
".",
"signal",
".",
"convolve2d",
"(",
"chroma_quant",
",",
"win",
",",
"mode",
"=",
"'same'",
",",
"boundary",
"=",
"'fill'",
")",
"else",
":",
"cens",
"=",
"chroma_quant",
"# L2-Normalization",
"return",
"util",
".",
"normalize",
"(",
"cens",
",",
"norm",
"=",
"norm",
",",
"axis",
"=",
"0",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
tonnetz
|
Computes the tonal centroid features (tonnetz), following the method of
[1]_.
.. [1] Harte, C., Sandler, M., & Gasser, M. (2006). "Detecting Harmonic
Change in Musical Audio." In Proceedings of the 1st ACM Workshop
on Audio and Music Computing Multimedia (pp. 21-26).
Santa Barbara, CA, USA: ACM Press. doi:10.1145/1178723.1178727.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
Audio time series.
sr : number > 0 [scalar]
sampling rate of `y`
chroma : np.ndarray [shape=(n_chroma, t)] or None
Normalized energy for each chroma bin at each frame.
If `None`, a cqt chromagram is performed.
Returns
-------
tonnetz : np.ndarray [shape(6, t)]
Tonal centroid features for each frame.
Tonnetz dimensions:
- 0: Fifth x-axis
- 1: Fifth y-axis
- 2: Minor x-axis
- 3: Minor y-axis
- 4: Major x-axis
- 5: Major y-axis
See Also
--------
chroma_cqt
Compute a chromagram from a constant-Q transform.
chroma_stft
Compute a chromagram from an STFT spectrogram or waveform.
Examples
--------
Compute tonnetz features from the harmonic component of a song
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y = librosa.effects.harmonic(y)
>>> tonnetz = librosa.feature.tonnetz(y=y, sr=sr)
>>> tonnetz
array([[-0.073, -0.053, ..., -0.054, -0.073],
[ 0.001, 0.001, ..., -0.054, -0.062],
...,
[ 0.039, 0.034, ..., 0.044, 0.064],
[ 0.005, 0.002, ..., 0.011, 0.017]])
Compare the tonnetz features to `chroma_cqt`
>>> import matplotlib.pyplot as plt
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(tonnetz, y_axis='tonnetz')
>>> plt.colorbar()
>>> plt.title('Tonal Centroids (Tonnetz)')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.feature.chroma_cqt(y, sr=sr),
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Chroma')
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def tonnetz(y=None, sr=22050, chroma=None):
    '''Computes the tonal centroid features (tonnetz), following the method of
    [1]_.

    The chromagram is projected onto a 6-dimensional basis of interval
    circles (fifths, minor thirds, major thirds).

    .. [1] Harte, C., Sandler, M., & Gasser, M. (2006). "Detecting Harmonic
           Change in Musical Audio." In Proceedings of the 1st ACM Workshop
           on Audio and Music Computing Multimedia (pp. 21-26).
           Santa Barbara, CA, USA: ACM Press. doi:10.1145/1178723.1178727.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        Audio time series.
    sr : number > 0 [scalar]
        sampling rate of `y`
    chroma : np.ndarray [shape=(n_chroma, t)] or None
        Normalized energy for each chroma bin at each frame.
        If `None`, a cqt chromagram is computed from `y`.

    Returns
    -------
    tonnetz : np.ndarray [shape(6, t)]
        Tonal centroid features for each frame.
        Tonnetz dimensions:
            - 0: Fifth x-axis
            - 1: Fifth y-axis
            - 2: Minor x-axis
            - 3: Minor y-axis
            - 4: Major x-axis
            - 5: Major y-axis

    Raises
    ------
    ParameterError
        if neither `y` nor `chroma` is provided

    See Also
    --------
    chroma_cqt
        Compute a chromagram from a constant-Q transform.
    chroma_stft
        Compute a chromagram from an STFT spectrogram or waveform.
    '''
    if chroma is None:
        if y is None:
            raise ParameterError('Either the audio samples or the chromagram must be '
                                 'passed as an argument.')
        chroma = chroma_cqt(y=y, sr=sr)

    # Pitch-class position of each chroma bin, in semitones over one octave.
    pitch_positions = np.linspace(0, 12, num=chroma.shape[0], endpoint=False)

    # Angular scale for each of the three interval circles (two axes each).
    scale = np.asarray([7. / 6, 7. / 6,
                        3. / 2, 3. / 2,
                        2. / 3, 2. / 3])

    angles = np.multiply.outer(scale, pitch_positions)

    # Shift even rows by half a period so cos() yields the sin() component.
    angles[::2] -= 0.5

    # Circle radii: fifths and minor thirds at 1, major thirds at 0.5.
    radii = np.array([1, 1,        # Fifths
                      1, 1,        # Minor
                      0.5, 0.5])   # Major

    basis = radii[:, np.newaxis] * np.cos(np.pi * angles)

    # Project the L1-normalized chromagram onto the tonnetz basis.
    return basis.dot(util.normalize(chroma, norm=1, axis=0))
|
def tonnetz(y=None, sr=22050, chroma=None):
    '''Computes the tonal centroid features (tonnetz), following the method of
    [1]_.

    The chromagram is projected onto a 6-dimensional basis of interval
    circles (fifths, minor thirds, major thirds).

    .. [1] Harte, C., Sandler, M., & Gasser, M. (2006). "Detecting Harmonic
           Change in Musical Audio." In Proceedings of the 1st ACM Workshop
           on Audio and Music Computing Multimedia (pp. 21-26).
           Santa Barbara, CA, USA: ACM Press. doi:10.1145/1178723.1178727.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        Audio time series.
    sr : number > 0 [scalar]
        sampling rate of `y`
    chroma : np.ndarray [shape=(n_chroma, t)] or None
        Normalized energy for each chroma bin at each frame.
        If `None`, a cqt chromagram is computed from `y`.

    Returns
    -------
    tonnetz : np.ndarray [shape(6, t)]
        Tonal centroid features for each frame.
        Tonnetz dimensions:
            - 0: Fifth x-axis
            - 1: Fifth y-axis
            - 2: Minor x-axis
            - 3: Minor y-axis
            - 4: Major x-axis
            - 5: Major y-axis

    Raises
    ------
    ParameterError
        if neither `y` nor `chroma` is provided

    See Also
    --------
    chroma_cqt
        Compute a chromagram from a constant-Q transform.
    chroma_stft
        Compute a chromagram from an STFT spectrogram or waveform.
    '''
    if chroma is None:
        if y is None:
            raise ParameterError('Either the audio samples or the chromagram must be '
                                 'passed as an argument.')
        chroma = chroma_cqt(y=y, sr=sr)

    # Pitch-class position of each chroma bin, in semitones over one octave.
    pitch_positions = np.linspace(0, 12, num=chroma.shape[0], endpoint=False)

    # Angular scale for each of the three interval circles (two axes each).
    scale = np.asarray([7. / 6, 7. / 6,
                        3. / 2, 3. / 2,
                        2. / 3, 2. / 3])

    angles = np.multiply.outer(scale, pitch_positions)

    # Shift even rows by half a period so cos() yields the sin() component.
    angles[::2] -= 0.5

    # Circle radii: fifths and minor thirds at 1, major thirds at 0.5.
    radii = np.array([1, 1,        # Fifths
                      1, 1,        # Minor
                      0.5, 0.5])   # Major

    basis = radii[:, np.newaxis] * np.cos(np.pi * angles)

    # Project the L1-normalized chromagram onto the tonnetz basis.
    return basis.dot(util.normalize(chroma, norm=1, axis=0))
|
[
"Computes",
"the",
"tonal",
"centroid",
"features",
"(",
"tonnetz",
")",
"following",
"the",
"method",
"of",
"[",
"1",
"]",
"_",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L1429-L1528
|
[
"def",
"tonnetz",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"chroma",
"=",
"None",
")",
":",
"if",
"y",
"is",
"None",
"and",
"chroma",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'Either the audio samples or the chromagram must be '",
"'passed as an argument.'",
")",
"if",
"chroma",
"is",
"None",
":",
"chroma",
"=",
"chroma_cqt",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
")",
"# Generate Transformation matrix",
"dim_map",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"12",
",",
"num",
"=",
"chroma",
".",
"shape",
"[",
"0",
"]",
",",
"endpoint",
"=",
"False",
")",
"scale",
"=",
"np",
".",
"asarray",
"(",
"[",
"7.",
"/",
"6",
",",
"7.",
"/",
"6",
",",
"3.",
"/",
"2",
",",
"3.",
"/",
"2",
",",
"2.",
"/",
"3",
",",
"2.",
"/",
"3",
"]",
")",
"V",
"=",
"np",
".",
"multiply",
".",
"outer",
"(",
"scale",
",",
"dim_map",
")",
"# Even rows compute sin()",
"V",
"[",
":",
":",
"2",
"]",
"-=",
"0.5",
"R",
"=",
"np",
".",
"array",
"(",
"[",
"1",
",",
"1",
",",
"# Fifths",
"1",
",",
"1",
",",
"# Minor",
"0.5",
",",
"0.5",
"]",
")",
"# Major",
"phi",
"=",
"R",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"*",
"np",
".",
"cos",
"(",
"np",
".",
"pi",
"*",
"V",
")",
"# Do the transform to tonnetz",
"return",
"phi",
".",
"dot",
"(",
"util",
".",
"normalize",
"(",
"chroma",
",",
"norm",
"=",
"1",
",",
"axis",
"=",
"0",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
mfcc
|
Mel-frequency cepstral coefficients (MFCCs)
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
log-power Mel spectrogram
n_mfcc: int > 0 [scalar]
number of MFCCs to return
dct_type : None, or {1, 2, 3}
Discrete cosine transform (DCT) type.
By default, DCT type-2 is used.
norm : None or 'ortho'
If `dct_type` is `2 or 3`, setting `norm='ortho'` uses an ortho-normal
DCT basis.
Normalization is not supported for `dct_type=1`.
kwargs : additional keyword arguments
Arguments to `melspectrogram`, if operating
on time series input
Returns
-------
M : np.ndarray [shape=(n_mfcc, t)]
MFCC sequence
See Also
--------
melspectrogram
scipy.fftpack.dct
Examples
--------
Generate mfccs from a time series
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30, duration=5)
>>> librosa.feature.mfcc(y=y, sr=sr)
array([[ -5.229e+02, -4.944e+02, ..., -5.229e+02, -5.229e+02],
[ 7.105e-15, 3.787e+01, ..., -7.105e-15, -7.105e-15],
...,
[ 1.066e-14, -7.500e+00, ..., 1.421e-14, 1.421e-14],
[ 3.109e-14, -5.058e+00, ..., 2.931e-14, 2.931e-14]])
Use a pre-computed log-power Mel spectrogram
>>> S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,
... fmax=8000)
>>> librosa.feature.mfcc(S=librosa.power_to_db(S))
array([[ -5.207e+02, -4.898e+02, ..., -5.207e+02, -5.207e+02],
[ -2.576e-14, 4.054e+01, ..., -3.997e-14, -3.997e-14],
...,
[ 7.105e-15, -3.534e+00, ..., 0.000e+00, 0.000e+00],
[ 3.020e-14, -2.613e+00, ..., 3.553e-14, 3.553e-14]])
Get more components
>>> mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
Visualize the MFCC series
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 4))
>>> librosa.display.specshow(mfccs, x_axis='time')
>>> plt.colorbar()
>>> plt.title('MFCC')
>>> plt.tight_layout()
Compare different DCT bases
>>> m_slaney = librosa.feature.mfcc(y=y, sr=sr, dct_type=2)
>>> m_htk = librosa.feature.mfcc(y=y, sr=sr, dct_type=3)
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(m_slaney, x_axis='time')
>>> plt.title('RASTAMAT / Auditory toolbox (dct_type=2)')
>>> plt.colorbar()
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(m_htk, x_axis='time')
>>> plt.title('HTK-style (dct_type=3)')
>>> plt.colorbar()
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def mfcc(y=None, sr=22050, S=None, n_mfcc=20, dct_type=2, norm='ortho', **kwargs):
    """Mel-frequency cepstral coefficients (MFCCs)

    Applies a discrete cosine transform to a log-power Mel spectrogram
    and keeps the first `n_mfcc` coefficients.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        log-power Mel spectrogram; if given, `y` is ignored
    n_mfcc : int > 0 [scalar]
        number of MFCCs to return
    dct_type : None, or {1, 2, 3}
        Discrete cosine transform (DCT) type.
        By default, DCT type-2 is used.
    norm : None or 'ortho'
        If `dct_type` is `2 or 3`, setting `norm='ortho'` uses an ortho-normal
        DCT basis.
        Normalization is not supported for `dct_type=1`.
    kwargs : additional keyword arguments
        Arguments to `melspectrogram`, if operating
        on time series input

    Returns
    -------
    M : np.ndarray [shape=(n_mfcc, t)]
        MFCC sequence

    See Also
    --------
    melspectrogram
    scipy.fftpack.dct
    """
    # Without a pre-computed spectrogram, derive one from the waveform.
    if S is None:
        mel_spec = melspectrogram(y=y, sr=sr, **kwargs)
        S = power_to_db(mel_spec)

    # Cepstral analysis: DCT along the frequency axis, truncated to n_mfcc.
    coefficients = scipy.fftpack.dct(S, axis=0, type=dct_type, norm=norm)
    return coefficients[:n_mfcc]
|
def mfcc(y=None, sr=22050, S=None, n_mfcc=20, dct_type=2, norm='ortho', **kwargs):
    """Mel-frequency cepstral coefficients (MFCCs)

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        log-power Mel spectrogram; computed from `y` when omitted
    n_mfcc : int > 0 [scalar]
        number of MFCC coefficients to keep
    dct_type : None, or {1, 2, 3}
        discrete cosine transform (DCT) type (type-2 by default)
    norm : None or 'ortho'
        with `dct_type` 2 or 3, `'ortho'` selects an ortho-normal DCT
        basis; normalization is not supported for `dct_type=1`
    kwargs : additional keyword arguments
        forwarded to `melspectrogram` when operating on time-series input

    Returns
    -------
    M : np.ndarray [shape=(n_mfcc, t)]
        MFCC sequence

    See Also
    --------
    melspectrogram
    scipy.fftpack.dct
    """
    if S is None:
        # No spectrogram supplied: derive a log-power Mel spectrogram
        # from the time series first.
        S = power_to_db(melspectrogram(y=y, sr=sr, **kwargs))
    # DCT along the frequency axis; truncate to the leading coefficients.
    coefficients = scipy.fftpack.dct(S, axis=0, type=dct_type, norm=norm)
    return coefficients[:n_mfcc]
|
[
"Mel",
"-",
"frequency",
"cepstral",
"coefficients",
"(",
"MFCCs",
")"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L1532-L1628
|
[
"def",
"mfcc",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_mfcc",
"=",
"20",
",",
"dct_type",
"=",
"2",
",",
"norm",
"=",
"'ortho'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"S",
"is",
"None",
":",
"S",
"=",
"power_to_db",
"(",
"melspectrogram",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"scipy",
".",
"fftpack",
".",
"dct",
"(",
"S",
",",
"axis",
"=",
"0",
",",
"type",
"=",
"dct_type",
",",
"norm",
"=",
"norm",
")",
"[",
":",
"n_mfcc",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
melspectrogram
|
Compute a mel-scaled spectrogram.
If a spectrogram input `S` is provided, then it is mapped directly onto
the mel basis `mel_f` by `mel_f.dot(S)`.
If a time-series input `y, sr` is provided, then its magnitude spectrogram
`S` is first computed, and then mapped onto the mel scale by
`mel_f.dot(S**power)`. By default, `power=2` operates on a power spectrum.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time-series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, t)]
spectrogram
n_fft : int > 0 [scalar]
length of the FFT window
hop_length : int > 0 [scalar]
number of samples between successive frames.
See `librosa.core.stft`
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
power : float > 0 [scalar]
Exponent for the magnitude melspectrogram.
e.g., 1 for energy, 2 for power, etc.
kwargs : additional keyword arguments
Mel filter bank parameters.
See `librosa.filters.mel` for details.
Returns
-------
S : np.ndarray [shape=(n_mels, t)]
Mel spectrogram
See Also
--------
librosa.filters.mel
Mel filter bank construction
librosa.core.stft
Short-time Fourier Transform
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.feature.melspectrogram(y=y, sr=sr)
array([[ 2.891e-07, 2.548e-03, ..., 8.116e-09, 5.633e-09],
[ 1.986e-07, 1.162e-02, ..., 9.332e-08, 6.716e-09],
...,
[ 3.668e-09, 2.029e-08, ..., 3.208e-09, 2.864e-09],
[ 2.561e-10, 2.096e-09, ..., 7.543e-10, 6.101e-10]])
Using a pre-computed power spectrogram
>>> D = np.abs(librosa.stft(y))**2
>>> S = librosa.feature.melspectrogram(S=D)
>>> # Passing through arguments to the Mel filters
>>> S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,
... fmax=8000)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 4))
>>> librosa.display.specshow(librosa.power_to_db(S,
... ref=np.max),
... y_axis='mel', fmax=8000,
... x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Mel spectrogram')
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def melspectrogram(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                   win_length=None, window='hann', center=True, pad_mode='reflect',
                   power=2.0, **kwargs):
    """Compute a mel-scaled spectrogram.

    A provided spectrogram `S` is projected directly onto the Mel basis
    (`mel_f.dot(S)`).  A time-series input `y, sr` is first turned into a
    magnitude spectrogram raised to `power` (2 by default, i.e. a power
    spectrum), then projected.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time-series
    sr : number > 0 [scalar]
        sampling rate of `y`
    S : np.ndarray [shape=(d, t)]
        pre-computed spectrogram
    n_fft : int > 0 [scalar]
        length of the FFT window
    hop_length : int > 0 [scalar]
        number of samples between successive frames (see `librosa.core.stft`)
    win_length : int <= n_fft [scalar]
        window length; defaults to `n_fft` when unspecified
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        window specification (see `filters.get_window`)
    center : boolean
        if `True`, pad `y` so frame `t` is centered at `y[t * hop_length]`
    pad_mode : string
        padding mode at the signal edges when `center=True`
    power : float > 0 [scalar]
        exponent for the magnitude melspectrogram (1 energy, 2 power, ...)
    kwargs : additional keyword arguments
        Mel filter bank parameters (see `librosa.filters.mel`)

    Returns
    -------
    S : np.ndarray [shape=(n_mels, t)]
        Mel spectrogram

    See Also
    --------
    librosa.filters.mel
    librosa.core.stft
    """
    # Resolve to a magnitude-domain spectrogram (and possibly updated n_fft).
    spec, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
                               power=power, win_length=win_length, window=window,
                               center=center, pad_mode=pad_mode)
    # Project onto the Mel filter bank.
    mel_basis = filters.mel(sr, n_fft, **kwargs)
    return mel_basis.dot(spec)
|
def melspectrogram(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                   win_length=None, window='hann', center=True, pad_mode='reflect',
                   power=2.0, **kwargs):
    """Compute a mel-scaled spectrogram.

    When a spectrogram `S` is given it is mapped straight onto the mel
    basis via `mel_f.dot(S)`.  When a time series `y, sr` is given, its
    magnitude spectrogram is computed first (raised to `power`; the
    default `power=2` yields a power spectrum) and then mapped.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time-series
    sr : number > 0 [scalar]
        sampling rate of `y`
    S : np.ndarray [shape=(d, t)]
        pre-computed spectrogram
    n_fft : int > 0 [scalar]
        FFT window length
    hop_length : int > 0 [scalar]
        samples between successive frames (see `librosa.core.stft`)
    win_length : int <= n_fft [scalar]
        window length, padded with zeros to `n_fft`; defaults to `n_fft`
    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        window specification (see `filters.get_window`)
    center : boolean
        if `True`, pad the signal so frame `t` is centered at
        `y[t * hop_length]`; otherwise frame `t` begins there
    pad_mode : string
        edge padding mode used when `center=True`
    power : float > 0 [scalar]
        exponent applied to the magnitude spectrogram
    kwargs : additional keyword arguments
        Mel filter bank parameters (see `librosa.filters.mel`)

    Returns
    -------
    S : np.ndarray [shape=(n_mels, t)]
        Mel spectrogram

    See Also
    --------
    librosa.filters.mel
    librosa.core.stft
    """
    # Obtain the underlying spectrogram and the effective FFT size.
    magnitude, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft,
                                    hop_length=hop_length, power=power,
                                    win_length=win_length, window=window,
                                    center=center, pad_mode=pad_mode)
    # Construct the Mel filter bank and apply it.
    mel_fb = filters.mel(sr, n_fft, **kwargs)
    return np.dot(mel_fb, magnitude)
|
[
"Compute",
"a",
"mel",
"-",
"scaled",
"spectrogram",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L1631-L1744
|
[
"def",
"melspectrogram",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"power",
"=",
"2.0",
",",
"*",
"*",
"kwargs",
")",
":",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"power",
"=",
"power",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"# Build a Mel filter",
"mel_basis",
"=",
"filters",
".",
"mel",
"(",
"sr",
",",
"n_fft",
",",
"*",
"*",
"kwargs",
")",
"return",
"np",
".",
"dot",
"(",
"mel_basis",
",",
"S",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
estimate_tuning
|
Load an audio file and estimate tuning (in cents)
|
examples/estimate_tuning.py
|
def estimate_tuning(input_file):
    '''Load an audio file and estimate tuning (in cents)'''
    print('Loading ', input_file)
    audio, rate = librosa.load(input_file)

    # Pitch tracking works best on the harmonic component alone.
    print('Separating harmonic component ... ')
    harmonic_part = librosa.effects.harmonic(audio)

    print('Estimating tuning ... ')
    # Just track the pitches associated with high magnitude
    deviation = librosa.estimate_tuning(y=harmonic_part, sr=rate)

    print('{:+0.2f} cents'.format(100 * deviation))
|
def estimate_tuning(input_file):
    '''Load an audio file and estimate tuning (in cents)'''
    print('Loading ', input_file)
    signal, sample_rate = librosa.load(input_file)

    print('Separating harmonic component ... ')
    # Percussive energy would pollute the pitch estimate; keep harmonics only.
    harm = librosa.effects.harmonic(signal)

    print('Estimating tuning ... ')
    # Just track the pitches associated with high magnitude
    cents_fraction = librosa.estimate_tuning(y=harm, sr=sample_rate)

    print('{:+0.2f} cents'.format(100 * cents_fraction))
|
[
"Load",
"an",
"audio",
"file",
"and",
"estimate",
"tuning",
"(",
"in",
"cents",
")"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/estimate_tuning.py#L15-L28
|
[
"def",
"estimate_tuning",
"(",
"input_file",
")",
":",
"print",
"(",
"'Loading '",
",",
"input_file",
")",
"y",
",",
"sr",
"=",
"librosa",
".",
"load",
"(",
"input_file",
")",
"print",
"(",
"'Separating harmonic component ... '",
")",
"y_harm",
"=",
"librosa",
".",
"effects",
".",
"harmonic",
"(",
"y",
")",
"print",
"(",
"'Estimating tuning ... '",
")",
"# Just track the pitches associated with high magnitude",
"tuning",
"=",
"librosa",
".",
"estimate_tuning",
"(",
"y",
"=",
"y_harm",
",",
"sr",
"=",
"sr",
")",
"print",
"(",
"'{:+0.2f} cents'",
".",
"format",
"(",
"100",
"*",
"tuning",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__jaccard
|
Jaccard similarity between two intervals
Parameters
----------
int_a, int_b : np.ndarrays, shape=(2,)
Returns
-------
Jaccard similarity between intervals
|
librosa/util/matching.py
|
def __jaccard(int_a, int_b):  # pragma: no cover
    '''Jaccard similarity between two intervals

    Parameters
    ----------
    int_a, int_b : np.ndarrays, shape=(2,)

    Returns
    -------
    Jaccard similarity between intervals
    '''
    # Overlap length: (smallest end) - (largest start); negative if disjoint.
    overlap = min(int_a[1], int_b[1]) - max(int_a[0], int_b[0])
    if overlap < 0:
        overlap = 0.

    # Union spans from the smallest start to the largest end.
    span = max(int_a[1], int_b[1]) - min(int_a[0], int_b[0])
    if span > 0:
        return overlap / span

    return 0.0
|
def __jaccard(int_a, int_b):  # pragma: no cover
    '''Jaccard similarity between two intervals

    Parameters
    ----------
    int_a, int_b : np.ndarrays, shape=(2,)

    Returns
    -------
    Jaccard similarity between intervals
    '''
    lo_a, hi_a = int_a[0], int_a[1]
    lo_b, hi_b = int_b[0], int_b[1]

    # Intersection = earlier end minus later start; clip at zero when disjoint.
    inter = min(hi_a, hi_b) - max(lo_a, lo_b)
    if inter < 0:
        inter = 0.

    # Union = later end minus earlier start.
    union = max(hi_a, hi_b) - min(lo_a, lo_b)
    return inter / union if union > 0 else 0.0
|
[
"Jaccard",
"similarity",
"between",
"two",
"intervals"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/matching.py#L17-L45
|
[
"def",
"__jaccard",
"(",
"int_a",
",",
"int_b",
")",
":",
"# pragma: no cover",
"ends",
"=",
"[",
"int_a",
"[",
"1",
"]",
",",
"int_b",
"[",
"1",
"]",
"]",
"if",
"ends",
"[",
"1",
"]",
"<",
"ends",
"[",
"0",
"]",
":",
"ends",
".",
"reverse",
"(",
")",
"starts",
"=",
"[",
"int_a",
"[",
"0",
"]",
",",
"int_b",
"[",
"0",
"]",
"]",
"if",
"starts",
"[",
"1",
"]",
"<",
"starts",
"[",
"0",
"]",
":",
"starts",
".",
"reverse",
"(",
")",
"intersection",
"=",
"ends",
"[",
"0",
"]",
"-",
"starts",
"[",
"1",
"]",
"if",
"intersection",
"<",
"0",
":",
"intersection",
"=",
"0.",
"union",
"=",
"ends",
"[",
"1",
"]",
"-",
"starts",
"[",
"0",
"]",
"if",
"union",
">",
"0",
":",
"return",
"intersection",
"/",
"union",
"return",
"0.0"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__match_interval_overlaps
|
Find the best Jaccard match from query to candidates
|
librosa/util/matching.py
|
def __match_interval_overlaps(query, intervals_to, candidates):  # pragma: no cover
    '''Find the best Jaccard match from query to candidates'''
    winner = -1
    winner_score = -1
    for cand in candidates:
        cand_score = __jaccard(query, intervals_to[cand])
        # Strictly-greater comparison keeps the first best candidate seen.
        if cand_score > winner_score:
            winner = cand
            winner_score = cand_score
    return winner
|
def __match_interval_overlaps(query, intervals_to, candidates):  # pragma: no cover
    '''Find the best Jaccard match from query to candidates'''
    # Track the best (score, index) pair; -1 sentinel when candidates is empty.
    top_idx = -1
    top_score = -1
    for idx in candidates:
        score = __jaccard(query, intervals_to[idx])
        if score > top_score:
            top_score = score
            top_idx = idx
    return top_idx
|
[
"Find",
"the",
"best",
"Jaccard",
"match",
"from",
"query",
"to",
"candidates"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/matching.py#L49-L59
|
[
"def",
"__match_interval_overlaps",
"(",
"query",
",",
"intervals_to",
",",
"candidates",
")",
":",
"# pragma: no cover",
"best_score",
"=",
"-",
"1",
"best_idx",
"=",
"-",
"1",
"for",
"idx",
"in",
"candidates",
":",
"score",
"=",
"__jaccard",
"(",
"query",
",",
"intervals_to",
"[",
"idx",
"]",
")",
"if",
"score",
">",
"best_score",
":",
"best_score",
",",
"best_idx",
"=",
"score",
",",
"idx",
"return",
"best_idx"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__match_intervals
|
Numba-accelerated interval matching algorithm.
|
librosa/util/matching.py
|
def __match_intervals(intervals_from, intervals_to, strict=True):  # pragma: no cover
    '''Numba-accelerated interval matching algorithm.

    Maps each row of `intervals_from` to the index of the best-overlapping
    row of `intervals_to` (by Jaccard similarity).  With `strict=False`,
    a query disjoint from every target falls back to a nearby interval
    chosen by boundary distance.
    '''
    # sort index of the interval starts
    start_index = np.argsort(intervals_to[:, 0])

    # sort index of the interval ends
    end_index = np.argsort(intervals_to[:, 1])

    # and sorted values of starts
    start_sorted = intervals_to[start_index, 0]
    # and ends
    end_sorted = intervals_to[end_index, 1]

    # search_ends[i]: count of targets whose start is <= query i's end.
    # search_starts[i]: count of targets whose end is strictly < query i's start.
    search_ends = np.searchsorted(start_sorted, intervals_from[:, 1], side='right')
    search_starts = np.searchsorted(end_sorted, intervals_from[:, 0], side='left')

    output = np.empty(len(intervals_from), dtype=numba.uint32)
    for i in range(len(intervals_from)):
        query = intervals_from[i]

        # Find the intervals that start after our query ends
        after_query = search_ends[i]
        # And the intervals that end after our query begins
        before_query = search_starts[i]

        # Candidates for overlapping have to (end after we start) and (begin before we end)
        candidates = set(start_index[:after_query]) & set(end_index[before_query:])

        # Proceed as before
        if len(candidates) > 0:
            output[i] = __match_interval_overlaps(query, intervals_to, candidates)
        elif strict:
            # Numba only lets us use compile-time constants in exception messages
            raise ParameterError
        else:
            # Find the closest interval
            # (start_index[after_query] - query[1]) is the distance to the next interval
            # (query[0] - end_index[before_query])
            dist_before = np.inf
            dist_after = np.inf
            if search_starts[i] > 0:
                # Gap back to the latest-ending target before the query start.
                dist_before = query[0] - end_sorted[search_starts[i]-1]
            if search_ends[i] + 1 < len(intervals_to):
                # NOTE(review): the "+ 1" skips the first target whose start
                # follows the query end (start_sorted[search_ends[i]]) —
                # confirm this offset is intentional and not an off-by-one.
                dist_after = start_sorted[search_ends[i]+1] - query[1]
            if dist_before < dist_after:
                output[i] = end_index[search_starts[i]-1]
            else:
                output[i] = start_index[search_ends[i]+1]
    return output
|
def __match_intervals(intervals_from, intervals_to, strict=True):  # pragma: no cover
    '''Numba-accelerated interval matching algorithm.

    For each source interval, selects the index of the target interval
    with the highest Jaccard overlap; when `strict` is False, a source
    with no overlapping target is assigned to a nearby target instead.
    '''
    # sort index of the interval starts
    start_index = np.argsort(intervals_to[:, 0])

    # sort index of the interval ends
    end_index = np.argsort(intervals_to[:, 1])

    # and sorted values of starts
    start_sorted = intervals_to[start_index, 0]
    # and ends
    end_sorted = intervals_to[end_index, 1]

    # For each source end: how many target starts lie at or before it.
    # For each source start: how many target ends lie strictly before it.
    search_ends = np.searchsorted(start_sorted, intervals_from[:, 1], side='right')
    search_starts = np.searchsorted(end_sorted, intervals_from[:, 0], side='left')

    output = np.empty(len(intervals_from), dtype=numba.uint32)
    for i in range(len(intervals_from)):
        query = intervals_from[i]

        # Find the intervals that start after our query ends
        after_query = search_ends[i]
        # And the intervals that end after our query begins
        before_query = search_starts[i]

        # Candidates for overlapping have to (end after we start) and (begin before we end)
        candidates = set(start_index[:after_query]) & set(end_index[before_query:])

        # Proceed as before
        if len(candidates) > 0:
            output[i] = __match_interval_overlaps(query, intervals_to, candidates)
        elif strict:
            # Numba only lets us use compile-time constants in exception messages
            raise ParameterError
        else:
            # Find the closest interval
            # (start_index[after_query] - query[1]) is the distance to the next interval
            # (query[0] - end_index[before_query])
            dist_before = np.inf
            dist_after = np.inf
            if search_starts[i] > 0:
                # Distance from the query start back to the nearest earlier end.
                dist_before = query[0] - end_sorted[search_starts[i]-1]
            if search_ends[i] + 1 < len(intervals_to):
                # NOTE(review): indexing with search_ends[i] + 1 bypasses
                # start_sorted[search_ends[i]], the first start after the
                # query end — verify this is not an off-by-one.
                dist_after = start_sorted[search_ends[i]+1] - query[1]
            if dist_before < dist_after:
                output[i] = end_index[search_starts[i]-1]
            else:
                output[i] = start_index[search_ends[i]+1]
    return output
|
[
"Numba",
"-",
"accelerated",
"interval",
"matching",
"algorithm",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/matching.py#L63-L113
|
[
"def",
"__match_intervals",
"(",
"intervals_from",
",",
"intervals_to",
",",
"strict",
"=",
"True",
")",
":",
"# pragma: no cover",
"# sort index of the interval starts",
"start_index",
"=",
"np",
".",
"argsort",
"(",
"intervals_to",
"[",
":",
",",
"0",
"]",
")",
"# sort index of the interval ends",
"end_index",
"=",
"np",
".",
"argsort",
"(",
"intervals_to",
"[",
":",
",",
"1",
"]",
")",
"# and sorted values of starts",
"start_sorted",
"=",
"intervals_to",
"[",
"start_index",
",",
"0",
"]",
"# and ends",
"end_sorted",
"=",
"intervals_to",
"[",
"end_index",
",",
"1",
"]",
"search_ends",
"=",
"np",
".",
"searchsorted",
"(",
"start_sorted",
",",
"intervals_from",
"[",
":",
",",
"1",
"]",
",",
"side",
"=",
"'right'",
")",
"search_starts",
"=",
"np",
".",
"searchsorted",
"(",
"end_sorted",
",",
"intervals_from",
"[",
":",
",",
"0",
"]",
",",
"side",
"=",
"'left'",
")",
"output",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"intervals_from",
")",
",",
"dtype",
"=",
"numba",
".",
"uint32",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"intervals_from",
")",
")",
":",
"query",
"=",
"intervals_from",
"[",
"i",
"]",
"# Find the intervals that start after our query ends",
"after_query",
"=",
"search_ends",
"[",
"i",
"]",
"# And the intervals that end after our query begins",
"before_query",
"=",
"search_starts",
"[",
"i",
"]",
"# Candidates for overlapping have to (end after we start) and (begin before we end)",
"candidates",
"=",
"set",
"(",
"start_index",
"[",
":",
"after_query",
"]",
")",
"&",
"set",
"(",
"end_index",
"[",
"before_query",
":",
"]",
")",
"# Proceed as before",
"if",
"len",
"(",
"candidates",
")",
">",
"0",
":",
"output",
"[",
"i",
"]",
"=",
"__match_interval_overlaps",
"(",
"query",
",",
"intervals_to",
",",
"candidates",
")",
"elif",
"strict",
":",
"# Numba only lets us use compile-time constants in exception messages",
"raise",
"ParameterError",
"else",
":",
"# Find the closest interval",
"# (start_index[after_query] - query[1]) is the distance to the next interval",
"# (query[0] - end_index[before_query])",
"dist_before",
"=",
"np",
".",
"inf",
"dist_after",
"=",
"np",
".",
"inf",
"if",
"search_starts",
"[",
"i",
"]",
">",
"0",
":",
"dist_before",
"=",
"query",
"[",
"0",
"]",
"-",
"end_sorted",
"[",
"search_starts",
"[",
"i",
"]",
"-",
"1",
"]",
"if",
"search_ends",
"[",
"i",
"]",
"+",
"1",
"<",
"len",
"(",
"intervals_to",
")",
":",
"dist_after",
"=",
"start_sorted",
"[",
"search_ends",
"[",
"i",
"]",
"+",
"1",
"]",
"-",
"query",
"[",
"1",
"]",
"if",
"dist_before",
"<",
"dist_after",
":",
"output",
"[",
"i",
"]",
"=",
"end_index",
"[",
"search_starts",
"[",
"i",
"]",
"-",
"1",
"]",
"else",
":",
"output",
"[",
"i",
"]",
"=",
"start_index",
"[",
"search_ends",
"[",
"i",
"]",
"+",
"1",
"]",
"return",
"output"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
match_intervals
|
Match one set of time intervals to another.
This can be useful for tasks such as mapping beat timings
to segments.
Each element `[a, b]` of `intervals_from` is matched to the
element `[c, d]` of `intervals_to` which maximizes the
Jaccard similarity between the intervals:
`max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|`
In `strict=True` mode, if there is no interval with positive
intersection with `[a,b]`, an exception is thrown.
In `strict=False` mode, any interval `[a, b]` that has no
intersection with any element of `intervals_to` is instead
matched to the interval `[c, d]` which minimizes
`min(|b - c|, |a - d|)`
that is, the disjoint interval `[c, d]` with a boundary closest
to `[a, b]`.
.. note:: An element of `intervals_to` may be matched to multiple
entries of `intervals_from`.
Parameters
----------
intervals_from : np.ndarray [shape=(n, 2)]
The time range for source intervals.
The `i` th interval spans time `intervals_from[i, 0]`
to `intervals_from[i, 1]`.
`intervals_from[0, 0]` should be 0, `intervals_from[-1, 1]`
should be the track duration.
intervals_to : np.ndarray [shape=(m, 2)]
Analogous to `intervals_from`.
strict : bool
If `True`, intervals can only match if they intersect.
If `False`, disjoint intervals can match.
Returns
-------
interval_mapping : np.ndarray [shape=(n,)]
For each interval in `intervals_from`, the
corresponding interval in `intervals_to`.
See Also
--------
match_events
Raises
------
ParameterError
If either array of input intervals is not the correct shape
If `strict=True` and some element of `intervals_from` is disjoint from
every element of `intervals_to`.
Examples
--------
>>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])
>>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])
>>> librosa.util.match_intervals(ints_from, ints_to)
array([2, 1, 2], dtype=uint32)
>>> # [3, 5] => [4, 5] (ints_to[2])
>>> # [1, 4] => [1, 3] (ints_to[1])
>>> # [4, 5] => [4, 5] (ints_to[2])
The reverse matching of the above is not possible in `strict` mode
because `[6, 7]` is disjoint from all intervals in `ints_from`.
With `strict=False`, we get the following:
>>> librosa.util.match_intervals(ints_to, ints_from, strict=False)
array([1, 1, 2, 2], dtype=uint32)
>>> # [0, 2] => [1, 4] (ints_from[1])
>>> # [1, 3] => [1, 4] (ints_from[1])
>>> # [4, 5] => [4, 5] (ints_from[2])
>>> # [6, 7] => [4, 5] (ints_from[2])
|
librosa/util/matching.py
|
def match_intervals(intervals_from, intervals_to, strict=True):
    '''Match one set of time intervals to another.

    Each interval `[a, b]` of `intervals_from` is mapped to the index of
    the interval `[c, d]` in `intervals_to` that maximizes the Jaccard
    similarity

        `max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|`

    With `strict=True`, a query with no positively-intersecting target
    raises an exception.  With `strict=False`, a disjoint query is
    instead mapped to the target minimizing `min(|b - c|, |a - d|)`,
    i.e. the disjoint interval with the closest boundary.  A single
    target interval may be matched by multiple queries.

    This is useful, e.g., for mapping beat timings onto segments.

    Parameters
    ----------
    intervals_from : np.ndarray [shape=(n, 2)]
        source intervals; row `i` spans `intervals_from[i, 0]` to
        `intervals_from[i, 1]`.  `intervals_from[0, 0]` should be 0 and
        `intervals_from[-1, 1]` the track duration.
    intervals_to : np.ndarray [shape=(m, 2)]
        target intervals, analogous to `intervals_from`
    strict : bool
        if `True`, intervals can only match if they intersect;
        if `False`, disjoint intervals can match

    Returns
    -------
    interval_mapping : np.ndarray [shape=(n,)]
        for each interval in `intervals_from`, the index of the
        corresponding interval in `intervals_to`

    Raises
    ------
    ParameterError
        if either input interval array is empty or incorrectly shaped,
        or if `strict=True` and some element of `intervals_from` is
        disjoint from every element of `intervals_to`

    See Also
    --------
    match_events

    Examples
    --------
    >>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])
    >>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])
    >>> librosa.util.match_intervals(ints_from, ints_to)
    array([2, 1, 2], dtype=uint32)
    >>> librosa.util.match_intervals(ints_to, ints_from, strict=False)
    array([1, 1, 2, 2], dtype=uint32)
    '''
    if not (len(intervals_from) and len(intervals_to)):
        raise ParameterError('Attempting to match empty interval list')

    # Shape/validity checks on both inputs.
    valid_intervals(intervals_from)
    valid_intervals(intervals_to)

    try:
        return __match_intervals(intervals_from, intervals_to, strict=strict)
    except ParameterError:
        # Re-raise with a more informative message, preserving the
        # original traceback.
        six.reraise(ParameterError,
                    ParameterError('Unable to match intervals with strict={}'.format(strict)),
                    sys.exc_info()[2])
|
def match_intervals(intervals_from, intervals_to, strict=True):
    '''Match one set of time intervals to another.

    Each source interval ``[a, b]`` of `intervals_from` is mapped to the
    target interval ``[c, d]`` of `intervals_to` maximizing the Jaccard
    similarity::

        max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|

    With `strict=True`, a source interval that intersects no target
    interval raises an exception.  With `strict=False`, such an interval
    is instead matched to the disjoint target ``[c, d]`` whose boundary is
    nearest, i.e. the one minimizing ``min(|b - c|, |a - d|)``.

    .. note:: A single element of `intervals_to` may be matched to
        multiple entries of `intervals_from`.

    Parameters
    ----------
    intervals_from : np.ndarray [shape=(n, 2)]
        Source intervals; row ``i`` spans time ``intervals_from[i, 0]``
        to ``intervals_from[i, 1]``.
    intervals_to : np.ndarray [shape=(m, 2)]
        Target intervals, in the same format.
    strict : bool
        If `True`, intervals can only match if they intersect.
        If `False`, disjoint intervals can match.

    Returns
    -------
    interval_mapping : np.ndarray [shape=(n,)]
        For each row of `intervals_from`, the index of the matched
        row in `intervals_to`.

    See Also
    --------
    match_events

    Raises
    ------
    ParameterError
        If either input array has the wrong shape, or if `strict=True`
        and some element of `intervals_from` is disjoint from every
        element of `intervals_to`.

    Examples
    --------
    >>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])
    >>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])
    >>> librosa.util.match_intervals(ints_from, ints_to)
    array([2, 1, 2], dtype=uint32)
    '''
    # Matching is undefined when either side is empty
    if min(len(intervals_from), len(intervals_to)) == 0:
        raise ParameterError('Attempting to match empty interval list')

    # Validate shape and size of both interval arrays
    for _intervals in (intervals_from, intervals_to):
        valid_intervals(_intervals)

    try:
        return __match_intervals(intervals_from, intervals_to, strict=strict)
    except ParameterError:
        # Re-raise with a more informative message while preserving
        # the original traceback (py2/py3 compatible via six)
        six.reraise(ParameterError,
                    ParameterError('Unable to match intervals with strict={}'.format(strict)),
                    sys.exc_info()[2])
|
[
"Match",
"one",
"set",
"of",
"time",
"intervals",
"to",
"another",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/matching.py#L116-L210
|
[
"def",
"match_intervals",
"(",
"intervals_from",
",",
"intervals_to",
",",
"strict",
"=",
"True",
")",
":",
"if",
"len",
"(",
"intervals_from",
")",
"==",
"0",
"or",
"len",
"(",
"intervals_to",
")",
"==",
"0",
":",
"raise",
"ParameterError",
"(",
"'Attempting to match empty interval list'",
")",
"# Verify that the input intervals has correct shape and size",
"valid_intervals",
"(",
"intervals_from",
")",
"valid_intervals",
"(",
"intervals_to",
")",
"try",
":",
"return",
"__match_intervals",
"(",
"intervals_from",
",",
"intervals_to",
",",
"strict",
"=",
"strict",
")",
"except",
"ParameterError",
":",
"six",
".",
"reraise",
"(",
"ParameterError",
",",
"ParameterError",
"(",
"'Unable to match intervals with strict={}'",
".",
"format",
"(",
"strict",
")",
")",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
match_events
|
Match one set of events to another.
This is useful for tasks such as matching beats to the nearest
detected onsets, or frame-aligned events to the nearest zero-crossing.
.. note:: A target event may be matched to multiple source events.
Examples
--------
>>> # Sources are multiples of 7
>>> s_from = np.arange(0, 100, 7)
>>> s_from
array([ 0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 91,
98])
>>> # Targets are multiples of 10
>>> s_to = np.arange(0, 100, 10)
>>> s_to
array([ 0, 10, 20, 30, 40, 50, 60, 70, 80, 90])
>>> # Find the matching
>>> idx = librosa.util.match_events(s_from, s_to)
>>> idx
array([0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9])
>>> # Print each source value to its matching target
>>> zip(s_from, s_to[idx])
[(0, 0), (7, 10), (14, 10), (21, 20), (28, 30), (35, 30),
(42, 40), (49, 50), (56, 60), (63, 60), (70, 70), (77, 80),
(84, 80), (91, 90), (98, 90)]
Parameters
----------
events_from : ndarray [shape=(n,)]
Array of events (eg, times, sample or frame indices) to match from.
events_to : ndarray [shape=(m,)]
Array of events (eg, times, sample or frame indices) to
match against.
left : bool
right : bool
If `False`, then matched events cannot be to the left (or right)
of source events.
Returns
-------
event_mapping : np.ndarray [shape=(n,)]
For each event in `events_from`, the corresponding event
index in `events_to`.
`event_mapping[i] == arg min |events_from[i] - events_to[:]|`
See Also
--------
match_intervals
Raises
------
ParameterError
If either array of input events is not the correct shape
|
librosa/util/matching.py
|
def match_events(events_from, events_to, left=True, right=True):
    '''Match one set of events to another.

    This is useful for tasks such as matching beats to the nearest
    detected onsets, or frame-aligned events to the nearest
    zero-crossing.

    .. note:: A target event may be matched to multiple source events.

    Parameters
    ----------
    events_from : ndarray [shape=(n,)]
        Array of events (eg, times, sample or frame indices) to match from.
    events_to : ndarray [shape=(m,)]
        Array of events (eg, times, sample or frame indices) to
        match against.
    left : bool
    right : bool
        If `False`, then matched events cannot be to the left (or right)
        of source events.

    Returns
    -------
    event_mapping : np.ndarray [shape=(n,)]
        For each event in `events_from`, the corresponding event
        index in `events_to`:
        `event_mapping[i] == arg min |events_from[i] - events_to[:]|`

    See Also
    --------
    match_intervals

    Raises
    ------
    ParameterError
        If either array of input events is not the correct shape,
        or if the `left`/`right` constraints cannot be satisfied.

    Examples
    --------
    >>> s_from = np.arange(0, 100, 7)
    >>> s_to = np.arange(0, 100, 10)
    >>> librosa.util.match_events(s_from, s_to)
    array([0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9])
    '''
    if len(events_from) == 0 or len(events_to) == 0:
        raise ParameterError('Attempting to match empty event list')

    # If we can't match left or right, then only strict equivalence
    # counts as a match.
    if not (left or right) and not np.all(np.in1d(events_from, events_to)):
        raise ParameterError('Cannot match events with left=right=False '
                             'and events_from is not contained '
                             'in events_to')

    # If we can't match to the left, then there should be at least one
    # target event greater-equal to every source event
    if (not left) and max(events_to) < max(events_from):
        raise ParameterError('Cannot match events with left=False '
                             'and max(events_to) < max(events_from)')

    # If we can't match to the right, then there should be at least one
    # target event less-equal to every source event
    if (not right) and min(events_to) > min(events_from):
        raise ParameterError('Cannot match events with right=False '
                             'and min(events_to) > min(events_from)')

    # Pre-allocate the array of matched indices.
    # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` yields the identical platform default int dtype.
    output = np.empty_like(events_from, dtype=int)

    return __match_events_helper(output, events_from, events_to, left, right)
|
def match_events(events_from, events_to, left=True, right=True):
    '''Match one set of events to another.

    This is useful for tasks such as matching beats to the nearest
    detected onsets, or frame-aligned events to the nearest
    zero-crossing.

    .. note:: A target event may be matched to multiple source events.

    Parameters
    ----------
    events_from : ndarray [shape=(n,)]
        Array of events (eg, times, sample or frame indices) to match from.
    events_to : ndarray [shape=(m,)]
        Array of events (eg, times, sample or frame indices) to
        match against.
    left : bool
    right : bool
        If `False`, then matched events cannot be to the left (or right)
        of source events.

    Returns
    -------
    event_mapping : np.ndarray [shape=(n,)]
        For each event in `events_from`, the corresponding event
        index in `events_to`:
        `event_mapping[i] == arg min |events_from[i] - events_to[:]|`

    See Also
    --------
    match_intervals

    Raises
    ------
    ParameterError
        If either array of input events is not the correct shape,
        or if the `left`/`right` constraints cannot be satisfied.

    Examples
    --------
    >>> s_from = np.arange(0, 100, 7)
    >>> s_to = np.arange(0, 100, 10)
    >>> librosa.util.match_events(s_from, s_to)
    array([0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9])
    '''
    if len(events_from) == 0 or len(events_to) == 0:
        raise ParameterError('Attempting to match empty event list')

    # If we can't match left or right, then only strict equivalence
    # counts as a match.
    if not (left or right) and not np.all(np.in1d(events_from, events_to)):
        raise ParameterError('Cannot match events with left=right=False '
                             'and events_from is not contained '
                             'in events_to')

    # If we can't match to the left, then there should be at least one
    # target event greater-equal to every source event
    if (not left) and max(events_to) < max(events_from):
        raise ParameterError('Cannot match events with left=False '
                             'and max(events_to) < max(events_from)')

    # If we can't match to the right, then there should be at least one
    # target event less-equal to every source event
    if (not right) and min(events_to) > min(events_from):
        raise ParameterError('Cannot match events with right=False '
                             'and min(events_to) > min(events_from)')

    # Pre-allocate the array of matched indices.
    # FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` yields the identical platform default int dtype.
    output = np.empty_like(events_from, dtype=int)

    return __match_events_helper(output, events_from, events_to, left, right)
|
[
"Match",
"one",
"set",
"of",
"events",
"to",
"another",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/matching.py#L213-L298
|
[
"def",
"match_events",
"(",
"events_from",
",",
"events_to",
",",
"left",
"=",
"True",
",",
"right",
"=",
"True",
")",
":",
"if",
"len",
"(",
"events_from",
")",
"==",
"0",
"or",
"len",
"(",
"events_to",
")",
"==",
"0",
":",
"raise",
"ParameterError",
"(",
"'Attempting to match empty event list'",
")",
"# If we can't match left or right, then only strict equivalence",
"# counts as a match.",
"if",
"not",
"(",
"left",
"or",
"right",
")",
"and",
"not",
"np",
".",
"all",
"(",
"np",
".",
"in1d",
"(",
"events_from",
",",
"events_to",
")",
")",
":",
"raise",
"ParameterError",
"(",
"'Cannot match events with left=right=False '",
"'and events_from is not contained '",
"'in events_to'",
")",
"# If we can't match to the left, then there should be at least one",
"# target event greater-equal to every source event",
"if",
"(",
"not",
"left",
")",
"and",
"max",
"(",
"events_to",
")",
"<",
"max",
"(",
"events_from",
")",
":",
"raise",
"ParameterError",
"(",
"'Cannot match events with left=False '",
"'and max(events_to) < max(events_from)'",
")",
"# If we can't match to the right, then there should be at least one",
"# target event less-equal to every source event",
"if",
"(",
"not",
"right",
")",
"and",
"min",
"(",
"events_to",
")",
">",
"min",
"(",
"events_from",
")",
":",
"raise",
"ParameterError",
"(",
"'Cannot match events with right=False '",
"'and min(events_to) > min(events_from)'",
")",
"# array of matched items",
"output",
"=",
"np",
".",
"empty_like",
"(",
"events_from",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"return",
"__match_events_helper",
"(",
"output",
",",
"events_from",
",",
"events_to",
",",
"left",
",",
"right",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
salience
|
Harmonic salience function.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input time frequency magnitude representation (stft, ifgram, etc).
Must be real-valued and non-negative.
freqs : np.ndarray, shape=(S.shape[axis])
The frequency values corresponding to S's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to include in salience computation. The first harmonic (1)
corresponds to `S` itself. Values less than one (e.g., 1/2) correspond
to sub-harmonics.
weights : list-like
The weight to apply to each harmonic in the summation. (default:
uniform weights). Must be the same length as `harmonics`.
aggregate : function
aggregation function (default: `np.average`)
If `aggregate=np.average`, then a weighted average is
computed per-harmonic according to the specified weights.
For all other aggregation functions, all harmonics
are treated equally.
filter_peaks : bool
If true, returns harmonic summation only on frequencies of peak
magnitude. Otherwise returns harmonic summation over the full spectrum.
Defaults to True.
fill_value : float
The value to fill non-peaks in the output representation. (default:
np.nan) Only used if `filter_peaks == True`.
kind : str
Interpolation type for harmonic estimation.
See `scipy.interpolate.interp1d`.
axis : int
The axis along which to compute harmonics
Returns
-------
S_sal : np.ndarray, shape=(len(h_range), [x.shape])
`S_sal` will have the same shape as `S`, and measure
        the overall harmonic energy at each frequency.
See Also
--------
interp_harmonics
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> S = np.abs(librosa.stft(y))
>>> freqs = librosa.core.fft_frequencies(sr)
>>> harms = [1, 2, 3, 4]
>>> weights = [1.0, 0.5, 0.33, 0.25]
>>> S_sal = librosa.salience(S, freqs, harms, weights, fill_value=0)
>>> print(S_sal.shape)
(1025, 646)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(librosa.amplitude_to_db(S_sal,
... ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Salience spectrogram')
>>> plt.tight_layout()
|
librosa/core/harmonic.py
|
def salience(S, freqs, h_range, weights=None, aggregate=None,
             filter_peaks=True, fill_value=np.nan, kind='linear', axis=0):
    """Harmonic salience function.

    Parameters
    ----------
    S : np.ndarray [shape=(d, n)]
        input time frequency magnitude representation (stft, ifgram, etc).
        Must be real-valued and non-negative.
    freqs : np.ndarray, shape=(S.shape[axis])
        The frequency values corresponding to S's elements along the
        chosen axis.
    h_range : list-like, non-negative
        Harmonics to include in salience computation. The first harmonic (1)
        corresponds to `S` itself. Values less than one (e.g., 1/2) correspond
        to sub-harmonics.
    weights : list-like
        The weight to apply to each harmonic in the summation. (default:
        uniform weights). Must be the same length as `h_range`.
    aggregate : function
        aggregation function (default: `np.average`)
        If `aggregate=np.average`, then a weighted average is
        computed per-harmonic according to the specified weights.
        For all other aggregation functions, all harmonics
        are treated equally.
    filter_peaks : bool
        If true, returns harmonic summation only on frequencies of peak
        magnitude. Otherwise returns harmonic summation over the full spectrum.
        Defaults to True.
    fill_value : float
        The value to fill non-peaks in the output representation. (default:
        np.nan) Only used if `filter_peaks == True`.
    kind : str
        Interpolation type for harmonic estimation.
        See `scipy.interpolate.interp1d`.
    axis : int
        The axis along which to compute harmonics

    Returns
    -------
    S_sal : np.ndarray, shape=(len(h_range), [x.shape])
        `S_sal` will have the same shape as `S`, and measure
        the overall harmonic energy at each frequency.

    See Also
    --------
    interp_harmonics
    """
    if aggregate is None:
        aggregate = np.average

    if weights is None:
        weights = np.ones((len(h_range), ))
    else:
        weights = np.array(weights, dtype=float)

    S_harm = interp_harmonics(S, freqs, h_range, kind=kind, axis=axis)

    # np.average is the only aggregator that accepts per-harmonic weights
    if aggregate is np.average:
        S_sal = aggregate(S_harm, axis=0, weights=weights)
    else:
        S_sal = aggregate(S_harm, axis=0)

    if filter_peaks:
        # BUG FIX: peak-picking previously hard-coded axis=0 here even when
        # harmonics were computed over a different `axis`; locate peaks along
        # the same frequency axis used for harmonic interpolation.
        S_peaks = scipy.signal.argrelmax(S, axis=axis)

        S_out = np.empty(S.shape)
        S_out.fill(fill_value)
        # argrelmax returns a tuple of index arrays (one per dimension), so
        # tuple indexing works for inputs of any dimensionality.
        S_out[S_peaks] = S_sal[S_peaks]

        S_sal = S_out

    return S_sal
|
def salience(S, freqs, h_range, weights=None, aggregate=None,
             filter_peaks=True, fill_value=np.nan, kind='linear', axis=0):
    """Harmonic salience function.

    Parameters
    ----------
    S : np.ndarray [shape=(d, n)]
        input time frequency magnitude representation (stft, ifgram, etc).
        Must be real-valued and non-negative.
    freqs : np.ndarray, shape=(S.shape[axis])
        The frequency values corresponding to S's elements along the
        chosen axis.
    h_range : list-like, non-negative
        Harmonics to include in salience computation. The first harmonic (1)
        corresponds to `S` itself. Values less than one (e.g., 1/2) correspond
        to sub-harmonics.
    weights : list-like
        The weight to apply to each harmonic in the summation. (default:
        uniform weights). Must be the same length as `h_range`.
    aggregate : function
        aggregation function (default: `np.average`)
        If `aggregate=np.average`, then a weighted average is
        computed per-harmonic according to the specified weights.
        For all other aggregation functions, all harmonics
        are treated equally.
    filter_peaks : bool
        If true, returns harmonic summation only on frequencies of peak
        magnitude. Otherwise returns harmonic summation over the full spectrum.
        Defaults to True.
    fill_value : float
        The value to fill non-peaks in the output representation. (default:
        np.nan) Only used if `filter_peaks == True`.
    kind : str
        Interpolation type for harmonic estimation.
        See `scipy.interpolate.interp1d`.
    axis : int
        The axis along which to compute harmonics

    Returns
    -------
    S_sal : np.ndarray, shape=(len(h_range), [x.shape])
        `S_sal` will have the same shape as `S`, and measure
        the overall harmonic energy at each frequency.

    See Also
    --------
    interp_harmonics
    """
    if aggregate is None:
        aggregate = np.average

    if weights is None:
        weights = np.ones((len(h_range), ))
    else:
        weights = np.array(weights, dtype=float)

    S_harm = interp_harmonics(S, freqs, h_range, kind=kind, axis=axis)

    # np.average is the only aggregator that accepts per-harmonic weights
    if aggregate is np.average:
        S_sal = aggregate(S_harm, axis=0, weights=weights)
    else:
        S_sal = aggregate(S_harm, axis=0)

    if filter_peaks:
        # BUG FIX: peak-picking previously hard-coded axis=0 here even when
        # harmonics were computed over a different `axis`; locate peaks along
        # the same frequency axis used for harmonic interpolation.
        S_peaks = scipy.signal.argrelmax(S, axis=axis)

        S_out = np.empty(S.shape)
        S_out.fill(fill_value)
        # argrelmax returns a tuple of index arrays (one per dimension), so
        # tuple indexing works for inputs of any dimensionality.
        S_out[S_peaks] = S_sal[S_peaks]

        S_sal = S_out

    return S_sal
|
[
"Harmonic",
"salience",
"function",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/harmonic.py#L13-L104
|
[
"def",
"salience",
"(",
"S",
",",
"freqs",
",",
"h_range",
",",
"weights",
"=",
"None",
",",
"aggregate",
"=",
"None",
",",
"filter_peaks",
"=",
"True",
",",
"fill_value",
"=",
"np",
".",
"nan",
",",
"kind",
"=",
"'linear'",
",",
"axis",
"=",
"0",
")",
":",
"if",
"aggregate",
"is",
"None",
":",
"aggregate",
"=",
"np",
".",
"average",
"if",
"weights",
"is",
"None",
":",
"weights",
"=",
"np",
".",
"ones",
"(",
"(",
"len",
"(",
"h_range",
")",
",",
")",
")",
"else",
":",
"weights",
"=",
"np",
".",
"array",
"(",
"weights",
",",
"dtype",
"=",
"float",
")",
"S_harm",
"=",
"interp_harmonics",
"(",
"S",
",",
"freqs",
",",
"h_range",
",",
"kind",
"=",
"kind",
",",
"axis",
"=",
"axis",
")",
"if",
"aggregate",
"is",
"np",
".",
"average",
":",
"S_sal",
"=",
"aggregate",
"(",
"S_harm",
",",
"axis",
"=",
"0",
",",
"weights",
"=",
"weights",
")",
"else",
":",
"S_sal",
"=",
"aggregate",
"(",
"S_harm",
",",
"axis",
"=",
"0",
")",
"if",
"filter_peaks",
":",
"S_peaks",
"=",
"scipy",
".",
"signal",
".",
"argrelmax",
"(",
"S",
",",
"axis",
"=",
"0",
")",
"S_out",
"=",
"np",
".",
"empty",
"(",
"S",
".",
"shape",
")",
"S_out",
".",
"fill",
"(",
"fill_value",
")",
"S_out",
"[",
"S_peaks",
"[",
"0",
"]",
",",
"S_peaks",
"[",
"1",
"]",
"]",
"=",
"S_sal",
"[",
"S_peaks",
"[",
"0",
"]",
",",
"S_peaks",
"[",
"1",
"]",
"]",
"S_sal",
"=",
"S_out",
"return",
"S_sal"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
interp_harmonics
|
Compute the energy at harmonics of time-frequency representation.
Given a frequency-based energy representation such as a spectrogram
or tempogram, this function computes the energy at the chosen harmonics
of the frequency axis. (See examples below.)
The resulting harmonic array can then be used as input to a salience
computation.
Parameters
----------
x : np.ndarray
The input energy
freqs : np.ndarray, shape=(X.shape[axis])
The frequency values corresponding to X's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself.
Values less than one (e.g., 1/2) correspond to sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
Returns
-------
x_harm : np.ndarray, shape=(len(h_range), [x.shape])
`x_harm[i]` will have the same shape as `x`, and measure
the energy at the `h_range[i]` harmonic of each frequency.
See Also
--------
scipy.interpolate.interp1d
Examples
--------
Estimate the harmonics of a time-averaged tempogram
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> # Compute the time-varying tempogram and average over time
>>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)
>>> # We'll measure the first five harmonics
>>> h_range = [1, 2, 3, 4, 5]
>>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)
>>> # Build the harmonic tensor
>>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)
>>> print(t_harmonics.shape)
(5, 384)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr)
>>> plt.yticks(0.5 + np.arange(len(h_range)),
... ['{:.3g}'.format(_) for _ in h_range])
>>> plt.ylabel('Harmonic')
>>> plt.xlabel('Tempo (BPM)')
>>> plt.tight_layout()
We can also compute frequency harmonics for spectrograms.
To calculate sub-harmonic energy, use values < 1.
>>> h_range = [1./3, 1./2, 1, 2, 3, 4]
>>> S = np.abs(librosa.stft(y))
>>> fft_freqs = librosa.fft_frequencies(sr=sr)
>>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)
>>> print(S_harm.shape)
(6, 1025, 646)
>>> plt.figure()
>>> for i, _sh in enumerate(S_harm, 1):
... plt.subplot(3, 2, i)
... librosa.display.specshow(librosa.amplitude_to_db(_sh,
... ref=S.max()),
... sr=sr, y_axis='log')
... plt.title('h={:.3g}'.format(h_range[i-1]))
... plt.yticks([])
>>> plt.tight_layout()
|
librosa/core/harmonic.py
|
def interp_harmonics(x, freqs, h_range, kind='linear', fill_value=0, axis=0):
    '''Compute the energy at harmonics of a time-frequency representation.

    Given a frequency-based energy representation such as a spectrogram
    or tempogram, evaluate the energy at each requested harmonic of the
    frequency axis by interpolation.  The resulting harmonic array can
    then be used as input to a salience computation.

    Parameters
    ----------
    x : np.ndarray
        The input energy
    freqs : np.ndarray, shape=(x.shape[axis]) or shape=x.shape
        The frequency values corresponding to x's elements along the
        chosen axis: either one frequency per position along `axis`
        (1-d), or one frequency per element of `x` (2-d).
    h_range : list-like, non-negative
        Harmonics to compute. The first harmonic (1) corresponds to `x`
        itself; values less than one (e.g., 1/2) give sub-harmonics.
    kind : str
        Interpolation type. See `scipy.interpolate.interp1d`.
    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.
    axis : int
        The axis along which to compute harmonics

    Returns
    -------
    x_harm : np.ndarray, shape=(len(h_range), [x.shape])
        `x_harm[i]` has the same shape as `x`, and measures the energy
        at the `h_range[i]` harmonic of each frequency.

    See Also
    --------
    scipy.interpolate.interp1d

    Raises
    ------
    ParameterError
        If `freqs` matches neither the length of `x` along `axis`
        nor the full shape of `x`.
    '''
    # The output stacks one copy of x's shape per requested harmonic:
    # leading axis of length len(h_range), then x.shape.
    x_out = np.zeros([len(h_range)] + list(x.shape), dtype=x.dtype)

    # Choose the interpolator based on whether frequencies are shared
    # along the axis (1-d) or specified per element (2-d).
    if freqs.ndim == 1 and len(freqs) == x.shape[axis]:
        interp = harmonics_1d
    elif freqs.ndim == 2 and freqs.shape == x.shape:
        interp = harmonics_2d
    else:
        raise ParameterError('freqs.shape={} does not match '
                             'input shape={}'.format(freqs.shape, x.shape))

    interp(x_out, x, freqs, h_range,
           kind=kind, fill_value=fill_value, axis=axis)

    return x_out
|
def interp_harmonics(x, freqs, h_range, kind='linear', fill_value=0, axis=0):
    '''Compute the energy at harmonics of a time-frequency representation.

    Given a frequency-based energy representation such as a spectrogram
    or tempogram, compute the energy at the requested harmonics of the
    frequency axis.  The result stacks one harmonic-shifted copy of `x`
    per entry of `h_range` along a new leading axis, and can be used as
    input to a salience computation.

    Parameters
    ----------
    x : np.ndarray
        The input energy

    freqs : np.ndarray, shape=(x.shape[axis],) or shape=x.shape
        The frequency values corresponding to x's elements along the
        chosen axis.  A 2d `freqs` provides time-varying frequencies.

    h_range : list-like, non-negative
        Harmonics to compute.  The first harmonic (1) corresponds to `x`
        itself.  Values less than one (e.g., 1/2) correspond to
        sub-harmonics.

    kind : str
        Interpolation type.  See `scipy.interpolate.interp1d`.

    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.

    axis : int
        The axis along which to compute harmonics

    Returns
    -------
    x_harm : np.ndarray, shape=(len(h_range), [x.shape])
        `x_harm[i]` has the same shape as `x`, and measures the energy
        at the `h_range[i]` harmonic of each frequency.

    Raises
    ------
    ParameterError
        If `freqs` matches neither the 1d nor the 2d layout described
        above.

    See Also
    --------
    scipy.interpolate.interp1d
    '''
    # One harmonic-shifted copy of x per requested harmonic, stacked on
    # a new leading axis.
    x_out = np.zeros((len(h_range),) + x.shape, dtype=x.dtype)

    # Dispatch on the layout of the frequency array:
    # 1d -> a fixed frequency grid; 2d -> time-varying frequencies.
    if freqs.ndim == 1 and len(freqs) == x.shape[axis]:
        populate = harmonics_1d
    elif freqs.ndim == 2 and freqs.shape == x.shape:
        populate = harmonics_2d
    else:
        raise ParameterError('freqs.shape={} does not match '
                             'input shape={}'.format(freqs.shape, x.shape))

    populate(x_out, x, freqs, h_range,
             kind=kind, fill_value=fill_value, axis=axis)

    return x_out
|
[
"Compute",
"the",
"energy",
"at",
"harmonics",
"of",
"time",
"-",
"frequency",
"representation",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/harmonic.py#L107-L218
|
[
"def",
"interp_harmonics",
"(",
"x",
",",
"freqs",
",",
"h_range",
",",
"kind",
"=",
"'linear'",
",",
"fill_value",
"=",
"0",
",",
"axis",
"=",
"0",
")",
":",
"# X_out will be the same shape as X, plus a leading",
"# axis that has length = len(h_range)",
"out_shape",
"=",
"[",
"len",
"(",
"h_range",
")",
"]",
"out_shape",
".",
"extend",
"(",
"x",
".",
"shape",
")",
"x_out",
"=",
"np",
".",
"zeros",
"(",
"out_shape",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"if",
"freqs",
".",
"ndim",
"==",
"1",
"and",
"len",
"(",
"freqs",
")",
"==",
"x",
".",
"shape",
"[",
"axis",
"]",
":",
"harmonics_1d",
"(",
"x_out",
",",
"x",
",",
"freqs",
",",
"h_range",
",",
"kind",
"=",
"kind",
",",
"fill_value",
"=",
"fill_value",
",",
"axis",
"=",
"axis",
")",
"elif",
"freqs",
".",
"ndim",
"==",
"2",
"and",
"freqs",
".",
"shape",
"==",
"x",
".",
"shape",
":",
"harmonics_2d",
"(",
"x_out",
",",
"x",
",",
"freqs",
",",
"h_range",
",",
"kind",
"=",
"kind",
",",
"fill_value",
"=",
"fill_value",
",",
"axis",
"=",
"axis",
")",
"else",
":",
"raise",
"ParameterError",
"(",
"'freqs.shape={} does not match '",
"'input shape={}'",
".",
"format",
"(",
"freqs",
".",
"shape",
",",
"x",
".",
"shape",
")",
")",
"return",
"x_out"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
harmonics_1d
|
Populate a harmonic tensor from a time-frequency representation.
Parameters
----------
harmonic_out : np.ndarray, shape=(len(h_range), X.shape)
The output array to store harmonics
X : np.ndarray
The input energy
freqs : np.ndarray, shape=(x.shape[axis])
The frequency values corresponding to x's elements along the
chosen axis.
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself.
Values less than one (e.g., 1/2) correspond to sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
See Also
--------
harmonics
scipy.interpolate.interp1d
Examples
--------
Estimate the harmonics of a time-averaged tempogram
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=15, offset=30)
>>> # Compute the time-varying tempogram and average over time
>>> tempi = np.mean(librosa.feature.tempogram(y=y, sr=sr), axis=1)
>>> # We'll measure the first five harmonics
>>> h_range = [1, 2, 3, 4, 5]
>>> f_tempo = librosa.tempo_frequencies(len(tempi), sr=sr)
>>> # Build the harmonic tensor
>>> t_harmonics = librosa.interp_harmonics(tempi, f_tempo, h_range)
>>> print(t_harmonics.shape)
(5, 384)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(t_harmonics, x_axis='tempo', sr=sr)
>>> plt.yticks(0.5 + np.arange(len(h_range)),
... ['{:.3g}'.format(_) for _ in h_range])
>>> plt.ylabel('Harmonic')
>>> plt.xlabel('Tempo (BPM)')
>>> plt.tight_layout()
We can also compute frequency harmonics for spectrograms.
To calculate subharmonic energy, use values < 1.
>>> h_range = [1./3, 1./2, 1, 2, 3, 4]
>>> S = np.abs(librosa.stft(y))
>>> fft_freqs = librosa.fft_frequencies(sr=sr)
>>> S_harm = librosa.interp_harmonics(S, fft_freqs, h_range, axis=0)
>>> print(S_harm.shape)
(6, 1025, 646)
>>> plt.figure()
>>> for i, _sh in enumerate(S_harm, 1):
... plt.subplot(3,2,i)
... librosa.display.specshow(librosa.amplitude_to_db(_sh,
... ref=S.max()),
... sr=sr, y_axis='log')
... plt.title('h={:.3g}'.format(h_range[i-1]))
... plt.yticks([])
>>> plt.tight_layout()
|
librosa/core/harmonic.py
|
def harmonics_1d(harmonic_out, x, freqs, h_range, kind='linear',
                 fill_value=0, axis=0):
    '''Populate a harmonic tensor from a time-frequency representation.

    Parameters
    ----------
    harmonic_out : np.ndarray, shape=(len(h_range),) + x.shape
        The output array to store harmonics

    x : np.ndarray
        The input energy

    freqs : np.ndarray, shape=(x.shape[axis],)
        The frequency values corresponding to x's elements along the
        chosen axis.

    h_range : list-like, non-negative
        Harmonics to compute.  The first harmonic (1) corresponds to `x`
        itself.  Values less than one (e.g., 1/2) correspond to
        sub-harmonics.

    kind : str
        Interpolation type.  See `scipy.interpolate.interp1d`.

    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.

    axis : int
        The axis along which to compute harmonics

    See Also
    --------
    harmonics
    scipy.interpolate.interp1d
    '''
    # Note: this only works for fixed-grid, 1d interpolation
    f_interp = scipy.interpolate.interp1d(freqs, x,
                                          kind=kind,
                                          axis=axis,
                                          copy=False,
                                          bounds_error=False,
                                          fill_value=fill_value)

    idx_out = [slice(None)] * harmonic_out.ndim

    # Iterate over the harmonics range
    for h_index, harmonic in enumerate(h_range):
        idx_out[0] = h_index

        # Evaluate all target frequencies (harmonic * freqs) in one
        # vectorized call rather than one scalar interpolation per bin.
        # interp1d places the interpolated values back along `axis`, and
        # len(freqs) == x.shape[axis], so the result lines up exactly
        # with harmonic_out[h_index].
        harmonic_out[tuple(idx_out)] = f_interp(harmonic * freqs)
|
def harmonics_1d(harmonic_out, x, freqs, h_range, kind='linear',
                 fill_value=0, axis=0):
    '''Fill a pre-allocated harmonic tensor from a time-frequency
    representation with a fixed frequency grid.

    Parameters
    ----------
    harmonic_out : np.ndarray, shape=(len(h_range),) + x.shape
        The output array to store harmonics

    x : np.ndarray
        The input energy

    freqs : np.ndarray, shape=(x.shape[axis],)
        The frequency values corresponding to x's elements along the
        chosen axis.

    h_range : list-like, non-negative
        Harmonics to compute.  The first harmonic (1) corresponds to `x`
        itself.  Values less than one (e.g., 1/2) correspond to
        sub-harmonics.

    kind : str
        Interpolation type.  See `scipy.interpolate.interp1d`.

    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.

    axis : int
        The axis along which to compute harmonics

    See Also
    --------
    harmonics
    scipy.interpolate.interp1d
    '''
    # Build a fixed-grid 1d interpolator over the frequency axis of x.
    interpolator = scipy.interpolate.interp1d(freqs, x,
                                              kind=kind,
                                              axis=axis,
                                              copy=False,
                                              bounds_error=False,
                                              fill_value=fill_value)

    # Output selector: position 0 indexes the harmonic, and the
    # frequency axis of x is shifted right by one in harmonic_out.
    selector = [slice(None)] * harmonic_out.ndim
    freq_axis = 1 + (axis % x.ndim)

    for h_pos, h_factor in enumerate(h_range):
        selector[0] = h_pos

        for f_pos, f_val in enumerate(freqs):
            selector[freq_axis] = f_pos
            # Sample the energy at the h_factor-th harmonic of f_val
            harmonic_out[tuple(selector)] = interpolator(h_factor * f_val)
|
[
"Populate",
"a",
"harmonic",
"tensor",
"from",
"a",
"time",
"-",
"frequency",
"representation",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/harmonic.py#L221-L328
|
[
"def",
"harmonics_1d",
"(",
"harmonic_out",
",",
"x",
",",
"freqs",
",",
"h_range",
",",
"kind",
"=",
"'linear'",
",",
"fill_value",
"=",
"0",
",",
"axis",
"=",
"0",
")",
":",
"# Note: this only works for fixed-grid, 1d interpolation",
"f_interp",
"=",
"scipy",
".",
"interpolate",
".",
"interp1d",
"(",
"freqs",
",",
"x",
",",
"kind",
"=",
"kind",
",",
"axis",
"=",
"axis",
",",
"copy",
"=",
"False",
",",
"bounds_error",
"=",
"False",
",",
"fill_value",
"=",
"fill_value",
")",
"idx_out",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"harmonic_out",
".",
"ndim",
"# Compute the output index of the interpolated values",
"interp_axis",
"=",
"1",
"+",
"(",
"axis",
"%",
"x",
".",
"ndim",
")",
"# Iterate over the harmonics range",
"for",
"h_index",
",",
"harmonic",
"in",
"enumerate",
"(",
"h_range",
")",
":",
"idx_out",
"[",
"0",
"]",
"=",
"h_index",
"# Iterate over frequencies",
"for",
"f_index",
",",
"frequency",
"in",
"enumerate",
"(",
"freqs",
")",
":",
"# Offset the output axis by 1 to account for the harmonic index",
"idx_out",
"[",
"interp_axis",
"]",
"=",
"f_index",
"# Estimate the harmonic energy at this frequency across time",
"harmonic_out",
"[",
"tuple",
"(",
"idx_out",
")",
"]",
"=",
"f_interp",
"(",
"harmonic",
"*",
"frequency",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
harmonics_2d
|
Populate a harmonic tensor from a time-frequency representation with
time-varying frequencies.
Parameters
----------
harmonic_out : np.ndarray
The output array to store harmonics
x : np.ndarray
The input energy
freqs : np.ndarray, shape=x.shape
The frequency values corresponding to each element of `x`
h_range : list-like, non-negative
Harmonics to compute. The first harmonic (1) corresponds to `x`
itself. Values less than one (e.g., 1/2) correspond to
sub-harmonics.
kind : str
Interpolation type. See `scipy.interpolate.interp1d`.
fill_value : float
The value to fill when extrapolating beyond the observed
frequency range.
axis : int
The axis along which to compute harmonics
See Also
--------
harmonics
harmonics_1d
|
librosa/core/harmonic.py
|
def harmonics_2d(harmonic_out, x, freqs, h_range, kind='linear', fill_value=0,
                 axis=0):
    '''Populate a harmonic tensor from a time-frequency representation
    whose frequencies vary over time.

    Parameters
    ----------
    harmonic_out : np.ndarray
        The output array to store harmonics

    x : np.ndarray
        The input energy

    freqs : np.ndarray, shape=x.shape
        The frequency values corresponding to each element of `x`

    h_range : list-like, non-negative
        Harmonics to compute.  The first harmonic (1) corresponds to `x`
        itself.  Values less than one (e.g., 1/2) correspond to
        sub-harmonics.

    kind : str
        Interpolation type.  See `scipy.interpolate.interp1d`.

    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.

    axis : int
        The axis along which to compute harmonics

    See Also
    --------
    harmonics
    harmonics_1d
    '''
    sel_x = [slice(None)] * x.ndim
    sel_freq = [slice(None)] * x.ndim
    sel_out = [slice(None)] * harmonic_out.ndim

    # The axis we step over is the one NOT being interpolated.
    other_axis = (1 + axis) % x.ndim

    # Run a fixed-grid 1d interpolation for each position along the
    # non-interpolated axis, using that position's frequency row.
    for pos in range(x.shape[other_axis]):
        # Keep a length-1 slice of x so its dimensionality is preserved,
        # but pick the matching 1d frequency vector for the interpolator.
        sel_x[other_axis] = slice(pos, pos + 1)
        sel_freq[other_axis] = pos
        # harmonic_out carries an extra leading axis for the harmonic index.
        sel_out[1 + other_axis] = sel_x[other_axis]

        harmonics_1d(harmonic_out[tuple(sel_out)],
                     x[tuple(sel_x)],
                     freqs[tuple(sel_freq)],
                     h_range,
                     kind=kind, fill_value=fill_value,
                     axis=axis)
|
def harmonics_2d(harmonic_out, x, freqs, h_range, kind='linear', fill_value=0,
                 axis=0):
    '''Populate a harmonic tensor from a time-frequency representation with
    time-varying frequencies.
    Parameters
    ----------
    harmonic_out : np.ndarray
        The output array to store harmonics
    x : np.ndarray
        The input energy
    freqs : np.ndarray, shape=x.shape
        The frequency values corresponding to each element of `x`
    h_range : list-like, non-negative
        Harmonics to compute.  The first harmonic (1) corresponds to `x`
        itself.  Values less than one (e.g., 1/2) correspond to
        sub-harmonics.
    kind : str
        Interpolation type.  See `scipy.interpolate.interp1d`.
    fill_value : float
        The value to fill when extrapolating beyond the observed
        frequency range.
    axis : int
        The axis along which to compute harmonics
    See Also
    --------
    harmonics
    harmonics_1d
    '''
    # Selectors into x, freqs, and the output; each starts as "take
    # everything" and is narrowed along the non-interpolated axis below.
    idx_in = [slice(None)] * x.ndim
    idx_freq = [slice(None)] * x.ndim
    idx_out = [slice(None)] * harmonic_out.ndim
    # This is the non-interpolation axis
    ni_axis = (1 + axis) % x.ndim
    # For each value in the non-interpolated axis, compute its harmonics
    for i in range(x.shape[ni_axis]):
        # Length-1 slice keeps x's dimensionality intact for harmonics_1d,
        # while the integer index yields a 1d frequency vector.
        idx_in[ni_axis] = slice(i, i + 1)
        idx_freq[ni_axis] = i
        # Output is offset by one axis: position 0 indexes the harmonic.
        idx_out[1 + ni_axis] = idx_in[ni_axis]
        harmonics_1d(harmonic_out[tuple(idx_out)], x[tuple(idx_in)], freqs[tuple(idx_freq)],
                     h_range, kind=kind, fill_value=fill_value,
                     axis=axis)
|
[
"Populate",
"a",
"harmonic",
"tensor",
"from",
"a",
"time",
"-",
"frequency",
"representation",
"with",
"time",
"-",
"varying",
"frequencies",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/harmonic.py#L331-L382
|
[
"def",
"harmonics_2d",
"(",
"harmonic_out",
",",
"x",
",",
"freqs",
",",
"h_range",
",",
"kind",
"=",
"'linear'",
",",
"fill_value",
"=",
"0",
",",
"axis",
"=",
"0",
")",
":",
"idx_in",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"x",
".",
"ndim",
"idx_freq",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"x",
".",
"ndim",
"idx_out",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"harmonic_out",
".",
"ndim",
"# This is the non-interpolation axis",
"ni_axis",
"=",
"(",
"1",
"+",
"axis",
")",
"%",
"x",
".",
"ndim",
"# For each value in the non-interpolated axis, compute its harmonics",
"for",
"i",
"in",
"range",
"(",
"x",
".",
"shape",
"[",
"ni_axis",
"]",
")",
":",
"idx_in",
"[",
"ni_axis",
"]",
"=",
"slice",
"(",
"i",
",",
"i",
"+",
"1",
")",
"idx_freq",
"[",
"ni_axis",
"]",
"=",
"i",
"idx_out",
"[",
"1",
"+",
"ni_axis",
"]",
"=",
"idx_in",
"[",
"ni_axis",
"]",
"harmonics_1d",
"(",
"harmonic_out",
"[",
"tuple",
"(",
"idx_out",
")",
"]",
",",
"x",
"[",
"tuple",
"(",
"idx_in",
")",
"]",
",",
"freqs",
"[",
"tuple",
"(",
"idx_freq",
")",
"]",
",",
"h_range",
",",
"kind",
"=",
"kind",
",",
"fill_value",
"=",
"fill_value",
",",
"axis",
"=",
"axis",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
load
|
Load an audio file as a floating point time series.
Audio will be automatically resampled to the given rate
(default `sr=22050`).
To preserve the native sampling rate of the file, use `sr=None`.
Parameters
----------
path : string, int, or file-like object
path to the input file.
Any codec supported by `soundfile` or `audioread` will work.
If the codec is supported by `soundfile`, then `path` can also be
an open file descriptor (int), or any object implementing Python's
file interface.
If the codec is not supported by `soundfile` (e.g., MP3), then only
string file paths are supported.
sr : number > 0 [scalar]
target sampling rate
'None' uses the native sampling rate
mono : bool
convert signal to mono
offset : float
start reading after this time (in seconds)
duration : float
only load up to this much audio (in seconds)
dtype : numeric type
data type of `y`
res_type : str
resample type (see note)
.. note::
By default, this uses `resampy`'s high-quality mode ('kaiser_best').
For alternative resampling modes, see `resample`
.. note::
`audioread` may truncate the precision of the audio data to 16 bits.
See https://librosa.github.io/librosa/ioformats.html for alternate
loading methods.
Returns
-------
y : np.ndarray [shape=(n,) or (2, n)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
Examples
--------
>>> # Load an ogg vorbis file
>>> filename = librosa.util.example_audio_file()
>>> y, sr = librosa.load(filename)
>>> y
array([ -4.756e-06, -6.020e-06, ..., -1.040e-06, 0.000e+00], dtype=float32)
>>> sr
22050
>>> # Load a file and resample to 11 KHz
>>> filename = librosa.util.example_audio_file()
>>> y, sr = librosa.load(filename, sr=11025)
>>> y
array([ -2.077e-06, -2.928e-06, ..., -4.395e-06, 0.000e+00], dtype=float32)
>>> sr
11025
>>> # Load 5 seconds of a file, starting 15 seconds in
>>> filename = librosa.util.example_audio_file()
>>> y, sr = librosa.load(filename, offset=15.0, duration=5.0)
>>> y
array([ 0.069, 0.1 , ..., -0.101, 0. ], dtype=float32)
>>> sr
22050
|
librosa/core/audio.py
|
def load(path, sr=22050, mono=True, offset=0.0, duration=None,
         dtype=np.float32, res_type='kaiser_best'):
    """Load an audio file as a floating point time series.
    Audio will be automatically resampled to the given rate
    (default `sr=22050`).
    To preserve the native sampling rate of the file, use `sr=None`.
    Parameters
    ----------
    path : string, int, or file-like object
        path to the input file.
        Any codec supported by `soundfile` or `audioread` will work.
        If the codec is supported by `soundfile`, then `path` can also be
        an open file descriptor (int), or any object implementing Python's
        file interface.
        If the codec is not supported by `soundfile` (e.g., MP3), then only
        string file paths are supported.
    sr : number > 0 [scalar]
        target sampling rate
        'None' uses the native sampling rate
    mono : bool
        convert signal to mono
    offset : float
        start reading after this time (in seconds)
    duration : float
        only load up to this much audio (in seconds)
    dtype : numeric type
        data type of `y`
    res_type : str
        resample type (see note)
    .. note::
        By default, this uses `resampy`'s high-quality mode ('kaiser_best').
        For alternative resampling modes, see `resample`
    .. note::
        `audioread` may truncate the precision of the audio data to 16 bits.
        See https://librosa.github.io/librosa/ioformats.html for alternate
        loading methods.
    Returns
    -------
    y : np.ndarray [shape=(n,) or (2, n)]
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    Examples
    --------
    >>> # Load an ogg vorbis file
    >>> filename = librosa.util.example_audio_file()
    >>> y, sr = librosa.load(filename)
    >>> y
    array([ -4.756e-06,  -6.020e-06, ...,  -1.040e-06,   0.000e+00], dtype=float32)
    >>> sr
    22050
    >>> # Load a file and resample to 11 KHz
    >>> filename = librosa.util.example_audio_file()
    >>> y, sr = librosa.load(filename, sr=11025)
    >>> y
    array([ -2.077e-06,  -2.928e-06, ...,  -4.395e-06,   0.000e+00], dtype=float32)
    >>> sr
    11025
    >>> # Load 5 seconds of a file, starting 15 seconds in
    >>> filename = librosa.util.example_audio_file()
    >>> y, sr = librosa.load(filename, offset=15.0, duration=5.0)
    >>> y
    array([ 0.069,  0.1  , ..., -0.101,  0.   ], dtype=float32)
    >>> sr
    22050
    """
    try:
        with sf.SoundFile(path) as sf_desc:
            sr_native = sf_desc.samplerate
            if offset:
                # Seek to the start of the target read
                sf_desc.seek(int(offset * sr_native))
            if duration is not None:
                frame_duration = int(duration * sr_native)
            else:
                # -1 asks soundfile to read through to the end of the file
                frame_duration = -1
            # Load the target number of frames, and transpose to match librosa form
            y = sf_desc.read(frames=frame_duration, dtype=dtype, always_2d=False).T
    except RuntimeError:
        # soundfile could not decode this input (e.g. an unsupported codec
        # such as MP3): fall back to the audioread loader.
        # NOTE(review): the original soundfile error is discarded here; if
        # audioread also fails, its error is what the caller sees.
        y, sr_native = __audioread_load(path, offset, duration, dtype)
    # Post-processing: channel reduction and resampling
    if mono:
        y = to_mono(y)
    if sr is not None:
        y = resample(y, sr_native, sr, res_type=res_type)
    else:
        # Report the file's native rate when no target rate was requested
        sr = sr_native
    return y, sr
|
def load(path, sr=22050, mono=True, offset=0.0, duration=None,
         dtype=np.float32, res_type='kaiser_best'):
    """Load an audio file as a floating point time series.
    Audio will be automatically resampled to the given rate
    (default `sr=22050`).
    To preserve the native sampling rate of the file, use `sr=None`.
    Parameters
    ----------
    path : string, int, or file-like object
        path to the input file.
        Any codec supported by `soundfile` or `audioread` will work.
        If the codec is supported by `soundfile`, then `path` can also be
        an open file descriptor (int), or any object implementing Python's
        file interface.
        If the codec is not supported by `soundfile` (e.g., MP3), then only
        string file paths are supported.
    sr : number > 0 [scalar]
        target sampling rate
        'None' uses the native sampling rate
    mono : bool
        convert signal to mono
    offset : float
        start reading after this time (in seconds)
    duration : float
        only load up to this much audio (in seconds)
    dtype : numeric type
        data type of `y`
    res_type : str
        resample type (see note)
    .. note::
        By default, this uses `resampy`'s high-quality mode ('kaiser_best').
        For alternative resampling modes, see `resample`
    .. note::
        `audioread` may truncate the precision of the audio data to 16 bits.
        See https://librosa.github.io/librosa/ioformats.html for alternate
        loading methods.
    Returns
    -------
    y : np.ndarray [shape=(n,) or (2, n)]
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    Examples
    --------
    >>> # Load an ogg vorbis file
    >>> filename = librosa.util.example_audio_file()
    >>> y, sr = librosa.load(filename)
    >>> y
    array([ -4.756e-06,  -6.020e-06, ...,  -1.040e-06,   0.000e+00], dtype=float32)
    >>> sr
    22050
    >>> # Load a file and resample to 11 KHz
    >>> filename = librosa.util.example_audio_file()
    >>> y, sr = librosa.load(filename, sr=11025)
    >>> y
    array([ -2.077e-06,  -2.928e-06, ...,  -4.395e-06,   0.000e+00], dtype=float32)
    >>> sr
    11025
    >>> # Load 5 seconds of a file, starting 15 seconds in
    >>> filename = librosa.util.example_audio_file()
    >>> y, sr = librosa.load(filename, offset=15.0, duration=5.0)
    >>> y
    array([ 0.069,  0.1  , ..., -0.101,  0.   ], dtype=float32)
    >>> sr
    22050
    """
    try:
        with sf.SoundFile(path) as sf_desc:
            sr_native = sf_desc.samplerate
            if offset:
                # Seek to the start of the target read
                sf_desc.seek(int(offset * sr_native))
            if duration is not None:
                frame_duration = int(duration * sr_native)
            else:
                # -1 asks soundfile to read through to the end of the file
                frame_duration = -1
            # Load the target number of frames, and transpose to match librosa form
            y = sf_desc.read(frames=frame_duration, dtype=dtype, always_2d=False).T
    except RuntimeError as exc:
        # If soundfile failed, fall back to the audioread loader
        # NOTE(review): `exc` is bound but never used; the soundfile error
        # is silently discarded before trying audioread.
        y, sr_native = __audioread_load(path, offset, duration, dtype)
    # Final cleanup for dtype and contiguity
    if mono:
        # Collapse multi-channel input to a single channel
        y = to_mono(y)
    if sr is not None:
        # Resample from the file's native rate to the requested rate
        y = resample(y, sr_native, sr, res_type=res_type)
    else:
        # sr=None: report the native sampling rate back to the caller
        sr = sr_native
    return y, sr
|
[
"Load",
"an",
"audio",
"file",
"as",
"a",
"floating",
"point",
"time",
"series",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L32-L152
|
[
"def",
"load",
"(",
"path",
",",
"sr",
"=",
"22050",
",",
"mono",
"=",
"True",
",",
"offset",
"=",
"0.0",
",",
"duration",
"=",
"None",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"res_type",
"=",
"'kaiser_best'",
")",
":",
"try",
":",
"with",
"sf",
".",
"SoundFile",
"(",
"path",
")",
"as",
"sf_desc",
":",
"sr_native",
"=",
"sf_desc",
".",
"samplerate",
"if",
"offset",
":",
"# Seek to the start of the target read",
"sf_desc",
".",
"seek",
"(",
"int",
"(",
"offset",
"*",
"sr_native",
")",
")",
"if",
"duration",
"is",
"not",
"None",
":",
"frame_duration",
"=",
"int",
"(",
"duration",
"*",
"sr_native",
")",
"else",
":",
"frame_duration",
"=",
"-",
"1",
"# Load the target number of frames, and transpose to match librosa form",
"y",
"=",
"sf_desc",
".",
"read",
"(",
"frames",
"=",
"frame_duration",
",",
"dtype",
"=",
"dtype",
",",
"always_2d",
"=",
"False",
")",
".",
"T",
"except",
"RuntimeError",
"as",
"exc",
":",
"# If soundfile failed, fall back to the audioread loader",
"y",
",",
"sr_native",
"=",
"__audioread_load",
"(",
"path",
",",
"offset",
",",
"duration",
",",
"dtype",
")",
"# Final cleanup for dtype and contiguity",
"if",
"mono",
":",
"y",
"=",
"to_mono",
"(",
"y",
")",
"if",
"sr",
"is",
"not",
"None",
":",
"y",
"=",
"resample",
"(",
"y",
",",
"sr_native",
",",
"sr",
",",
"res_type",
"=",
"res_type",
")",
"else",
":",
"sr",
"=",
"sr_native",
"return",
"y",
",",
"sr"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__audioread_load
|
Load an audio buffer using audioread.
This loads one block at a time, and then concatenates the results.
|
librosa/core/audio.py
|
def __audioread_load(path, offset, duration, dtype):
    '''Load an audio buffer using audioread.
    This loads one block at a time, and then concatenates the results.
    '''
    blocks = []
    with audioread.audio_open(path) as input_file:
        sr_native = input_file.samplerate
        n_channels = input_file.channels

        # Positions below are counted in interleaved samples
        # (frames * channels), matching the decoded buffer layout.
        s_start = int(np.round(sr_native * offset)) * n_channels
        s_end = np.inf if duration is None else (
            s_start + int(np.round(sr_native * duration)) * n_channels)

        n = 0
        for block in input_file:
            block = util.buf_to_float(block, dtype=dtype)
            n_prev, n = n, n + len(block)

            if n < s_start:
                # Requested offset lies after this block: keep reading
                continue

            if s_end < n_prev:
                # We are past the requested end: stop reading
                break

            if s_end < n:
                # The requested end falls inside this block: crop the tail
                block = block[:s_end - n_prev]

            if n_prev <= s_start <= n:
                # The requested start falls inside this block: crop the head
                block = block[(s_start - n_prev):]

            blocks.append(block)

    if blocks:
        y = np.concatenate(blocks)
        if n_channels > 1:
            # De-interleave into (channels, samples) form
            y = y.reshape((-1, n_channels)).T
    else:
        y = np.empty(0, dtype=dtype)

    return y, sr_native
|
def __audioread_load(path, offset, duration, dtype):
    '''Load an audio buffer using audioread.
    This loads one block at a time, and then concatenates the results.

    Parameters
    ----------
    path : path-like or URL accepted by `audioread.audio_open`
    offset : number
        start reading after this time (in seconds)
    duration : number or None
        only load up to this much audio (in seconds); `None` reads to the end
    dtype : numeric type
        data type of the returned buffer

    Returns
    -------
    y : np.ndarray
        decoded samples; shape is (n_channels, n_samples) when the stream
        has more than one channel, (n_samples,) otherwise
    sr_native : int
        the stream's native sampling rate
    '''
    y = []
    with audioread.audio_open(path) as input_file:
        sr_native = input_file.samplerate
        n_channels = input_file.channels
        # Start/end positions are counted in *interleaved* samples:
        # (seconds * rate), rounded, times the channel count.
        s_start = int(np.round(sr_native * offset)) * n_channels
        if duration is None:
            # No duration limit: read through the end of the stream
            s_end = np.inf
        else:
            s_end = s_start + (int(np.round(sr_native * duration))
                               * n_channels)
        # n counts how many interleaved samples have been seen so far
        n = 0
        for frame in input_file:
            frame = util.buf_to_float(frame, dtype=dtype)
            n_prev = n
            n = n + len(frame)
            if n < s_start:
                # offset is after the current frame
                # keep reading
                continue
            if s_end < n_prev:
                # we're off the end. stop reading
                break
            if s_end < n:
                # the end is in this frame. crop.
                frame = frame[:s_end - n_prev]
            if n_prev <= s_start <= n:
                # beginning is in this frame
                frame = frame[(s_start - n_prev):]
            # tack on the current frame
            y.append(frame)
    if y:
        y = np.concatenate(y)
        if n_channels > 1:
            # De-interleave into (n_channels, n_samples) layout
            y = y.reshape((-1, n_channels)).T
    else:
        # Nothing was read (e.g., offset past end of stream): empty buffer
        y = np.empty(0, dtype=dtype)
    return y, sr_native
|
[
"Load",
"an",
"audio",
"buffer",
"using",
"audioread",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L155-L208
|
[
"def",
"__audioread_load",
"(",
"path",
",",
"offset",
",",
"duration",
",",
"dtype",
")",
":",
"y",
"=",
"[",
"]",
"with",
"audioread",
".",
"audio_open",
"(",
"path",
")",
"as",
"input_file",
":",
"sr_native",
"=",
"input_file",
".",
"samplerate",
"n_channels",
"=",
"input_file",
".",
"channels",
"s_start",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"sr_native",
"*",
"offset",
")",
")",
"*",
"n_channels",
"if",
"duration",
"is",
"None",
":",
"s_end",
"=",
"np",
".",
"inf",
"else",
":",
"s_end",
"=",
"s_start",
"+",
"(",
"int",
"(",
"np",
".",
"round",
"(",
"sr_native",
"*",
"duration",
")",
")",
"*",
"n_channels",
")",
"n",
"=",
"0",
"for",
"frame",
"in",
"input_file",
":",
"frame",
"=",
"util",
".",
"buf_to_float",
"(",
"frame",
",",
"dtype",
"=",
"dtype",
")",
"n_prev",
"=",
"n",
"n",
"=",
"n",
"+",
"len",
"(",
"frame",
")",
"if",
"n",
"<",
"s_start",
":",
"# offset is after the current frame",
"# keep reading",
"continue",
"if",
"s_end",
"<",
"n_prev",
":",
"# we're off the end. stop reading",
"break",
"if",
"s_end",
"<",
"n",
":",
"# the end is in this frame. crop.",
"frame",
"=",
"frame",
"[",
":",
"s_end",
"-",
"n_prev",
"]",
"if",
"n_prev",
"<=",
"s_start",
"<=",
"n",
":",
"# beginning is in this frame",
"frame",
"=",
"frame",
"[",
"(",
"s_start",
"-",
"n_prev",
")",
":",
"]",
"# tack on the current frame",
"y",
".",
"append",
"(",
"frame",
")",
"if",
"y",
":",
"y",
"=",
"np",
".",
"concatenate",
"(",
"y",
")",
"if",
"n_channels",
">",
"1",
":",
"y",
"=",
"y",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"n_channels",
")",
")",
".",
"T",
"else",
":",
"y",
"=",
"np",
".",
"empty",
"(",
"0",
",",
"dtype",
"=",
"dtype",
")",
"return",
"y",
",",
"sr_native"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
to_mono
|
Force an audio signal down to mono.
Parameters
----------
y : np.ndarray [shape=(2,n) or shape=(n,)]
audio time series, either stereo or mono
Returns
-------
y_mono : np.ndarray [shape=(n,)]
`y` as a monophonic time-series
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False)
>>> y.shape
(2, 1355168)
>>> y_mono = librosa.to_mono(y)
>>> y_mono.shape
(1355168,)
|
librosa/core/audio.py
|
def to_mono(y):
    '''Force an audio signal down to mono.

    Parameters
    ----------
    y : np.ndarray [shape=(2,n) or shape=(n,)]
        audio time series, either stereo or mono

    Returns
    -------
    y_mono : np.ndarray [shape=(n,)]
        `y` as a monophonic time-series

    Notes
    -----
    This function caches at level 20.
    '''
    # Validation accepts stereo input here; only the output is mono.
    util.valid_audio(y, mono=False)

    # Multi-channel input is collapsed by averaging across channels;
    # mono input passes through untouched.
    return np.mean(y, axis=0) if y.ndim > 1 else y
|
def to_mono(y):
    '''Force an audio signal down to mono.

    Parameters
    ----------
    y : np.ndarray [shape=(2,n) or shape=(n,)]
        audio time series, either stereo or mono

    Returns
    -------
    y_mono : np.ndarray [shape=(n,)]
        `y` as a monophonic time-series

    Notes
    -----
    This function caches at level 20.
    '''
    # Validation accepts stereo input here; only the output is mono.
    util.valid_audio(y, mono=False)

    # Multi-channel input is collapsed by averaging across channels;
    # mono input passes through untouched.
    return np.mean(y, axis=0) if y.ndim > 1 else y
|
[
"Force",
"an",
"audio",
"signal",
"down",
"to",
"mono",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L212-L246
|
[
"def",
"to_mono",
"(",
"y",
")",
":",
"# Validate the buffer. Stereo is ok here.",
"util",
".",
"valid_audio",
"(",
"y",
",",
"mono",
"=",
"False",
")",
"if",
"y",
".",
"ndim",
">",
"1",
":",
"y",
"=",
"np",
".",
"mean",
"(",
"y",
",",
"axis",
"=",
"0",
")",
"return",
"y"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
resample
|
Resample a time series from orig_sr to target_sr
Parameters
----------
y : np.ndarray [shape=(n,) or shape=(2, n)]
audio time series. Can be mono or stereo.
orig_sr : number > 0 [scalar]
original sampling rate of `y`
target_sr : number > 0 [scalar]
target sampling rate
res_type : str
resample type (see note)
.. note::
By default, this uses `resampy`'s high-quality mode ('kaiser_best').
To use a faster method, set `res_type='kaiser_fast'`.
To use `scipy.signal.resample`, set `res_type='fft'` or `res_type='scipy'`.
To use `scipy.signal.resample_poly`, set `res_type='polyphase'`.
.. note::
When using `res_type='polyphase'`, only integer sampling rates are
supported.
fix : bool
adjust the length of the resampled signal to be of size exactly
`ceil(target_sr * len(y) / orig_sr)`
scale : bool
Scale the resampled signal so that `y` and `y_hat` have approximately
equal total energy.
kwargs : additional keyword arguments
If `fix==True`, additional keyword arguments to pass to
`librosa.util.fix_length`.
Returns
-------
y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]
`y` resampled from `orig_sr` to `target_sr`
Raises
------
ParameterError
If `res_type='polyphase'` and `orig_sr` or `target_sr` are not both
integer-valued.
See Also
--------
librosa.util.fix_length
scipy.signal.resample
resampy.resample
Notes
-----
This function caches at level 20.
Examples
--------
Downsample from 22 KHz to 8 KHz
>>> y, sr = librosa.load(librosa.util.example_audio_file(), sr=22050)
>>> y_8k = librosa.resample(y, sr, 8000)
>>> y.shape, y_8k.shape
((1355168,), (491671,))
|
librosa/core/audio.py
|
def resample(y, orig_sr, target_sr, res_type='kaiser_best', fix=True, scale=False, **kwargs):
    """Resample a time series from `orig_sr` to `target_sr`.

    Parameters
    ----------
    y : np.ndarray [shape=(n,) or shape=(2, n)]
        audio time series; can be mono or stereo
    orig_sr : number > 0 [scalar]
        original sampling rate of `y`
    target_sr : number > 0 [scalar]
        target sampling rate
    res_type : str
        resample type: 'kaiser_best' (default) or 'kaiser_fast' use
        `resampy`; 'fft' or 'scipy' use `scipy.signal.resample`;
        'polyphase' uses `scipy.signal.resample_poly` and supports
        integer-valued sampling rates only
    fix : bool
        adjust the length of the resampled signal to be exactly
        `ceil(target_sr * len(y) / orig_sr)`
    scale : bool
        scale the resampled signal so that `y` and `y_hat` have
        approximately equal total energy
    kwargs : additional keyword arguments
        passed to `librosa.util.fix_length` when `fix` is true

    Returns
    -------
    y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]
        `y` resampled from `orig_sr` to `target_sr`

    Raises
    ------
    ParameterError
        if `res_type='polyphase'` and either sampling rate is not
        integer-valued

    Notes
    -----
    This function caches at level 20.
    """
    # Reject malformed buffers early; stereo is acceptable.
    util.valid_audio(y, mono=False)

    # Nothing to do when the rates already match.
    if orig_sr == target_sr:
        return y

    rate_ratio = float(target_sr) / orig_sr
    target_length = int(np.ceil(y.shape[-1] * rate_ratio))

    if res_type in ('scipy', 'fft'):
        y_out = scipy.signal.resample(y, target_length, axis=-1)
    elif res_type == 'polyphase':
        if int(orig_sr) != orig_sr or int(target_sr) != target_sr:
            raise ParameterError('polyphase resampling is only supported for integer-valued sampling rates.')

        # Polyphase resampling needs integer up/down factors; reduce the
        # pair of rates by their greatest common divisor.
        sr_in = int(orig_sr)
        sr_out = int(target_sr)
        common = np.gcd(sr_in, sr_out)
        y_out = scipy.signal.resample_poly(y, sr_out // common, sr_in // common, axis=-1)
    else:
        y_out = resampy.resample(y, orig_sr, target_sr, filter=res_type, axis=-1)

    if fix:
        y_out = util.fix_length(y_out, target_length, **kwargs)

    if scale:
        # Compensate for the energy change introduced by resampling.
        y_out /= np.sqrt(rate_ratio)

    return np.ascontiguousarray(y_out, dtype=y.dtype)
|
def resample(y, orig_sr, target_sr, res_type='kaiser_best', fix=True, scale=False, **kwargs):
    """Resample a time series from `orig_sr` to `target_sr`.

    Parameters
    ----------
    y : np.ndarray [shape=(n,) or shape=(2, n)]
        audio time series; can be mono or stereo
    orig_sr : number > 0 [scalar]
        original sampling rate of `y`
    target_sr : number > 0 [scalar]
        target sampling rate
    res_type : str
        resample type: 'kaiser_best' (default) or 'kaiser_fast' use
        `resampy`; 'fft' or 'scipy' use `scipy.signal.resample`;
        'polyphase' uses `scipy.signal.resample_poly` and supports
        integer-valued sampling rates only
    fix : bool
        adjust the length of the resampled signal to be exactly
        `ceil(target_sr * len(y) / orig_sr)`
    scale : bool
        scale the resampled signal so that `y` and `y_hat` have
        approximately equal total energy
    kwargs : additional keyword arguments
        passed to `librosa.util.fix_length` when `fix` is true

    Returns
    -------
    y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]
        `y` resampled from `orig_sr` to `target_sr`

    Raises
    ------
    ParameterError
        if `res_type='polyphase'` and either sampling rate is not
        integer-valued

    Notes
    -----
    This function caches at level 20.
    """
    # Reject malformed buffers early; stereo is acceptable.
    util.valid_audio(y, mono=False)

    # Nothing to do when the rates already match.
    if orig_sr == target_sr:
        return y

    rate_ratio = float(target_sr) / orig_sr
    target_length = int(np.ceil(y.shape[-1] * rate_ratio))

    if res_type in ('scipy', 'fft'):
        y_out = scipy.signal.resample(y, target_length, axis=-1)
    elif res_type == 'polyphase':
        if int(orig_sr) != orig_sr or int(target_sr) != target_sr:
            raise ParameterError('polyphase resampling is only supported for integer-valued sampling rates.')

        # Polyphase resampling needs integer up/down factors; reduce the
        # pair of rates by their greatest common divisor.
        sr_in = int(orig_sr)
        sr_out = int(target_sr)
        common = np.gcd(sr_in, sr_out)
        y_out = scipy.signal.resample_poly(y, sr_out // common, sr_in // common, axis=-1)
    else:
        y_out = resampy.resample(y, orig_sr, target_sr, filter=res_type, axis=-1)

    if fix:
        y_out = util.fix_length(y_out, target_length, **kwargs)

    if scale:
        # Compensate for the energy change introduced by resampling.
        y_out /= np.sqrt(rate_ratio)

    return np.ascontiguousarray(y_out, dtype=y.dtype)
|
[
"Resample",
"a",
"time",
"series",
"from",
"orig_sr",
"to",
"target_sr"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L250-L355
|
[
"def",
"resample",
"(",
"y",
",",
"orig_sr",
",",
"target_sr",
",",
"res_type",
"=",
"'kaiser_best'",
",",
"fix",
"=",
"True",
",",
"scale",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# First, validate the audio buffer",
"util",
".",
"valid_audio",
"(",
"y",
",",
"mono",
"=",
"False",
")",
"if",
"orig_sr",
"==",
"target_sr",
":",
"return",
"y",
"ratio",
"=",
"float",
"(",
"target_sr",
")",
"/",
"orig_sr",
"n_samples",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"y",
".",
"shape",
"[",
"-",
"1",
"]",
"*",
"ratio",
")",
")",
"if",
"res_type",
"in",
"(",
"'scipy'",
",",
"'fft'",
")",
":",
"y_hat",
"=",
"scipy",
".",
"signal",
".",
"resample",
"(",
"y",
",",
"n_samples",
",",
"axis",
"=",
"-",
"1",
")",
"elif",
"res_type",
"==",
"'polyphase'",
":",
"if",
"int",
"(",
"orig_sr",
")",
"!=",
"orig_sr",
"or",
"int",
"(",
"target_sr",
")",
"!=",
"target_sr",
":",
"raise",
"ParameterError",
"(",
"'polyphase resampling is only supported for integer-valued sampling rates.'",
")",
"# For polyphase resampling, we need up- and down-sampling ratios",
"# We can get those from the greatest common divisor of the rates",
"# as long as the rates are integrable",
"orig_sr",
"=",
"int",
"(",
"orig_sr",
")",
"target_sr",
"=",
"int",
"(",
"target_sr",
")",
"gcd",
"=",
"np",
".",
"gcd",
"(",
"orig_sr",
",",
"target_sr",
")",
"y_hat",
"=",
"scipy",
".",
"signal",
".",
"resample_poly",
"(",
"y",
",",
"target_sr",
"//",
"gcd",
",",
"orig_sr",
"//",
"gcd",
",",
"axis",
"=",
"-",
"1",
")",
"else",
":",
"y_hat",
"=",
"resampy",
".",
"resample",
"(",
"y",
",",
"orig_sr",
",",
"target_sr",
",",
"filter",
"=",
"res_type",
",",
"axis",
"=",
"-",
"1",
")",
"if",
"fix",
":",
"y_hat",
"=",
"util",
".",
"fix_length",
"(",
"y_hat",
",",
"n_samples",
",",
"*",
"*",
"kwargs",
")",
"if",
"scale",
":",
"y_hat",
"/=",
"np",
".",
"sqrt",
"(",
"ratio",
")",
"return",
"np",
".",
"ascontiguousarray",
"(",
"y_hat",
",",
"dtype",
"=",
"y",
".",
"dtype",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
get_duration
|
Compute the duration (in seconds) of an audio time series,
feature matrix, or filename.
Examples
--------
>>> # Load the example audio file
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.get_duration(y=y, sr=sr)
61.45886621315193
>>> # Or directly from an audio file
>>> librosa.get_duration(filename=librosa.util.example_audio_file())
61.4
>>> # Or compute duration from an STFT matrix
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = librosa.stft(y)
>>> librosa.get_duration(S=S, sr=sr)
61.44
>>> # Or a non-centered STFT matrix
>>> S_left = librosa.stft(y, center=False)
>>> librosa.get_duration(S=S_left, sr=sr)
61.3471201814059
Parameters
----------
y : np.ndarray [shape=(n,), (2, n)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
STFT matrix, or any STFT-derived matrix (e.g., chromagram
or mel spectrogram).
Durations calculated from spectrogram inputs are only accurate
up to the frame resolution. If high precision is required,
it is better to use the audio time series directly.
n_fft : int > 0 [scalar]
FFT window size for `S`
hop_length : int > 0 [ scalar]
number of audio samples between columns of `S`
center : boolean
- If `True`, `S[:, t]` is centered at `y[t * hop_length]`
- If `False`, then `S[:, t]` begins at `y[t * hop_length]`
filename : str
If provided, all other parameters are ignored, and the
duration is calculated directly from the audio file.
Note that this avoids loading the contents into memory,
and is therefore useful for querying the duration of
long files.
Returns
-------
d : float >= 0
Duration (in seconds) of the input time series or spectrogram.
Raises
------
ParameterError
if none of `y`, `S`, or `filename` are provided.
Notes
-----
`get_duration` can be applied to a file (`filename`), a spectrogram (`S`),
or audio buffer (`y, sr`). Only one of these three options should be
provided. If you do provide multiple options (e.g., `filename` and `S`),
then `filename` takes precedence over `S`, and `S` takes precedence over
`(y, sr)`.
|
librosa/core/audio.py
|
def get_duration(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                 center=True, filename=None):
    """Compute the duration (in seconds) of an audio time series,
    feature matrix, or filename.

    Parameters
    ----------
    y : np.ndarray [shape=(n,), (2, n)] or None
        audio time series
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        STFT matrix, or any STFT-derived matrix (e.g., chromagram or mel
        spectrogram).  Durations calculated from spectrogram inputs are
        only accurate up to the frame resolution; for high precision,
        use the audio time series directly.
    n_fft : int > 0 [scalar]
        FFT window size for `S`
    hop_length : int > 0 [scalar]
        number of audio samples between columns of `S`
    center : boolean
        - If `True`, `S[:, t]` is centered at `y[t * hop_length]`
        - If `False`, then `S[:, t]` begins at `y[t * hop_length]`
    filename : str
        If provided, all other parameters are ignored, and the duration
        is calculated directly from the audio file.  This avoids loading
        the contents into memory, and is therefore useful for querying
        the duration of long files.

    Returns
    -------
    d : float >= 0
        Duration (in seconds) of the input time series or spectrogram.

    Raises
    ------
    ParameterError
        if none of `y`, `S`, or `filename` are provided.

    Notes
    -----
    `filename` takes precedence over `S`, and `S` takes precedence over
    `(y, sr)`.
    """
    if filename is not None:
        # Prefer soundfile: it reads the duration from the header without
        # decoding the audio.  Fall back to audioread for formats that
        # soundfile cannot parse.
        # NOTE: catch Exception rather than a bare `except:` so that
        # KeyboardInterrupt and SystemExit still propagate.
        try:
            return sf.info(filename).duration
        except Exception:
            with audioread.audio_open(filename) as fdesc:
                return fdesc.duration

    if y is None:
        if S is None:
            raise ParameterError('At least one of (y, sr), S, or filename must be provided')

        n_frames = S.shape[1]
        n_samples = n_fft + hop_length * (n_frames - 1)

        # If centered, we lose half a window from each end of S
        if center:
            n_samples = n_samples - 2 * int(n_fft / 2)
    else:
        # Validate the audio buffer.  Stereo is okay here.
        util.valid_audio(y, mono=False)
        if y.ndim == 1:
            n_samples = len(y)
        else:
            n_samples = y.shape[-1]

    return float(n_samples) / sr
|
def get_duration(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
                 center=True, filename=None):
    """Compute the duration (in seconds) of an audio time series,
    feature matrix, or filename.

    Parameters
    ----------
    y : np.ndarray [shape=(n,), (2, n)] or None
        audio time series
    sr : number > 0 [scalar]
        audio sampling rate of `y`
    S : np.ndarray [shape=(d, t)] or None
        STFT matrix, or any STFT-derived matrix (e.g., chromagram or mel
        spectrogram).  Durations calculated from spectrogram inputs are
        only accurate up to the frame resolution; for high precision,
        use the audio time series directly.
    n_fft : int > 0 [scalar]
        FFT window size for `S`
    hop_length : int > 0 [scalar]
        number of audio samples between columns of `S`
    center : boolean
        - If `True`, `S[:, t]` is centered at `y[t * hop_length]`
        - If `False`, then `S[:, t]` begins at `y[t * hop_length]`
    filename : str
        If provided, all other parameters are ignored, and the duration
        is calculated directly from the audio file.  This avoids loading
        the contents into memory, and is therefore useful for querying
        the duration of long files.

    Returns
    -------
    d : float >= 0
        Duration (in seconds) of the input time series or spectrogram.

    Raises
    ------
    ParameterError
        if none of `y`, `S`, or `filename` are provided.

    Notes
    -----
    `filename` takes precedence over `S`, and `S` takes precedence over
    `(y, sr)`.
    """
    if filename is not None:
        # Prefer soundfile: it reads the duration from the header without
        # decoding the audio.  Fall back to audioread for formats that
        # soundfile cannot parse.
        # NOTE: catch Exception rather than a bare `except:` so that
        # KeyboardInterrupt and SystemExit still propagate.
        try:
            return sf.info(filename).duration
        except Exception:
            with audioread.audio_open(filename) as fdesc:
                return fdesc.duration

    if y is None:
        if S is None:
            raise ParameterError('At least one of (y, sr), S, or filename must be provided')

        n_frames = S.shape[1]
        n_samples = n_fft + hop_length * (n_frames - 1)

        # If centered, we lose half a window from each end of S
        if center:
            n_samples = n_samples - 2 * int(n_fft / 2)
    else:
        # Validate the audio buffer.  Stereo is okay here.
        util.valid_audio(y, mono=False)
        if y.ndim == 1:
            n_samples = len(y)
        else:
            n_samples = y.shape[-1]

    return float(n_samples) / sr
|
[
"Compute",
"the",
"duration",
"(",
"in",
"seconds",
")",
"of",
"an",
"audio",
"time",
"series",
"feature",
"matrix",
"or",
"filename",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L358-L462
|
[
"def",
"get_duration",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"center",
"=",
"True",
",",
"filename",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"not",
"None",
":",
"try",
":",
"return",
"sf",
".",
"info",
"(",
"filename",
")",
".",
"duration",
"except",
":",
"with",
"audioread",
".",
"audio_open",
"(",
"filename",
")",
"as",
"fdesc",
":",
"return",
"fdesc",
".",
"duration",
"if",
"y",
"is",
"None",
":",
"if",
"S",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'At least one of (y, sr), S, or filename must be provided'",
")",
"n_frames",
"=",
"S",
".",
"shape",
"[",
"1",
"]",
"n_samples",
"=",
"n_fft",
"+",
"hop_length",
"*",
"(",
"n_frames",
"-",
"1",
")",
"# If centered, we lose half a window from each end of S",
"if",
"center",
":",
"n_samples",
"=",
"n_samples",
"-",
"2",
"*",
"int",
"(",
"n_fft",
"/",
"2",
")",
"else",
":",
"# Validate the audio buffer. Stereo is okay here.",
"util",
".",
"valid_audio",
"(",
"y",
",",
"mono",
"=",
"False",
")",
"if",
"y",
".",
"ndim",
"==",
"1",
":",
"n_samples",
"=",
"len",
"(",
"y",
")",
"else",
":",
"n_samples",
"=",
"y",
".",
"shape",
"[",
"-",
"1",
"]",
"return",
"float",
"(",
"n_samples",
")",
"/",
"sr"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
autocorrelate
|
Bounded auto-correlation
Parameters
----------
y : np.ndarray
array to autocorrelate
max_size : int > 0 or None
maximum correlation lag.
If unspecified, defaults to `y.shape[axis]` (unbounded)
axis : int
The axis along which to autocorrelate.
By default, the last axis (-1) is taken.
Returns
-------
z : np.ndarray
truncated autocorrelation `y*y` along the specified axis.
If `max_size` is specified, then `z.shape[axis]` is bounded
to `max_size`.
Notes
-----
This function caches at level 20.
Examples
--------
Compute full autocorrelation of y
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=20, duration=10)
>>> librosa.autocorrelate(y)
array([ 3.226e+03, 3.217e+03, ..., 8.277e-04, 3.575e-04], dtype=float32)
Compute onset strength auto-correlation up to 4 seconds
>>> import matplotlib.pyplot as plt
>>> odf = librosa.onset.onset_strength(y=y, sr=sr, hop_length=512)
>>> ac = librosa.autocorrelate(odf, max_size=4* sr / 512)
>>> plt.plot(ac)
>>> plt.title('Auto-correlation')
>>> plt.xlabel('Lag (frames)')
|
librosa/core/audio.py
|
def autocorrelate(y, max_size=None, axis=-1):
    """Bounded auto-correlation.

    Parameters
    ----------
    y : np.ndarray
        array to autocorrelate
    max_size : int > 0 or None
        maximum correlation lag.
        If unspecified, defaults to `y.shape[axis]` (unbounded)
    axis : int
        The axis along which to autocorrelate.
        By default, the last axis (-1) is taken.

    Returns
    -------
    z : np.ndarray
        truncated autocorrelation `y*y` along the specified axis.
        If `max_size` is specified, then `z.shape[axis]` is bounded
        to `max_size`.

    Notes
    -----
    This function caches at level 20.
    """
    n = y.shape[axis]

    # Clamp the lag bound to the signal length.
    if max_size is None:
        max_size = n
    max_size = int(min(max_size, n))

    # Wiener-Khinchin: autocorr = ifft(|fft(y)|^2).  Zero-pad the
    # transform so the correlation is linear rather than circular.
    fft = get_fftlib()
    spectrum = fft.fft(y, n=2 * n + 1, axis=axis)
    acorr = fft.ifft(np.abs(spectrum) ** 2, axis=axis)

    # Keep only the first max_size lags along the target axis.
    index = [slice(None)] * acorr.ndim
    index[axis] = slice(max_size)
    acorr = acorr[tuple(index)]

    # Real input yields a real autocorrelation; drop the imaginary part.
    if not np.iscomplexobj(y):
        acorr = acorr.real

    return acorr
|
def autocorrelate(y, max_size=None, axis=-1):
    """Bounded auto-correlation.

    Parameters
    ----------
    y : np.ndarray
        array to autocorrelate
    max_size : int > 0 or None
        maximum correlation lag.
        If unspecified, defaults to `y.shape[axis]` (unbounded)
    axis : int
        The axis along which to autocorrelate.
        By default, the last axis (-1) is taken.

    Returns
    -------
    z : np.ndarray
        truncated autocorrelation `y*y` along the specified axis.
        If `max_size` is specified, then `z.shape[axis]` is bounded
        to `max_size`.

    Notes
    -----
    This function caches at level 20.
    """
    n = y.shape[axis]

    # Clamp the lag bound to the signal length.
    if max_size is None:
        max_size = n
    max_size = int(min(max_size, n))

    # Wiener-Khinchin: autocorr = ifft(|fft(y)|^2).  Zero-pad the
    # transform so the correlation is linear rather than circular.
    fft = get_fftlib()
    spectrum = fft.fft(y, n=2 * n + 1, axis=axis)
    acorr = fft.ifft(np.abs(spectrum) ** 2, axis=axis)

    # Keep only the first max_size lags along the target axis.
    index = [slice(None)] * acorr.ndim
    index[axis] = slice(max_size)
    acorr = acorr[tuple(index)]

    # Real input yields a real autocorrelation; drop the imaginary part.
    if not np.iscomplexobj(y):
        acorr = acorr.real

    return acorr
|
[
"Bounded",
"auto",
"-",
"correlation"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L465-L533
|
[
"def",
"autocorrelate",
"(",
"y",
",",
"max_size",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
")",
":",
"if",
"max_size",
"is",
"None",
":",
"max_size",
"=",
"y",
".",
"shape",
"[",
"axis",
"]",
"max_size",
"=",
"int",
"(",
"min",
"(",
"max_size",
",",
"y",
".",
"shape",
"[",
"axis",
"]",
")",
")",
"# Compute the power spectrum along the chosen axis",
"# Pad out the signal to support full-length auto-correlation.",
"fft",
"=",
"get_fftlib",
"(",
")",
"powspec",
"=",
"np",
".",
"abs",
"(",
"fft",
".",
"fft",
"(",
"y",
",",
"n",
"=",
"2",
"*",
"y",
".",
"shape",
"[",
"axis",
"]",
"+",
"1",
",",
"axis",
"=",
"axis",
")",
")",
"**",
"2",
"# Convert back to time domain",
"autocorr",
"=",
"fft",
".",
"ifft",
"(",
"powspec",
",",
"axis",
"=",
"axis",
")",
"# Slice down to max_size",
"subslice",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"autocorr",
".",
"ndim",
"subslice",
"[",
"axis",
"]",
"=",
"slice",
"(",
"max_size",
")",
"autocorr",
"=",
"autocorr",
"[",
"tuple",
"(",
"subslice",
")",
"]",
"if",
"not",
"np",
".",
"iscomplexobj",
"(",
"y",
")",
":",
"autocorr",
"=",
"autocorr",
".",
"real",
"return",
"autocorr"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
lpc
|
Linear Prediction Coefficients via Burg's method
This function applies Burg's method to estimate coefficients of a linear
filter on `y` of order `order`. Burg's method is an extension to the
Yule-Walker approach, which are both sometimes referred to as LPC parameter
estimation by autocorrelation.
It follows the description and implementation approach described in the
introduction in [1]_. N.B. This paper describes a different method, which
is not implemented here, but has been chosen for its clear explanation of
Burg's technique in its introduction.
.. [1] Larry Marple
A New Autoregressive Spectrum Analysis Algorithm
    IEEE Transactions on Acoustics, Speech, and Signal Processing
vol 28, no. 4, 1980
Parameters
----------
y : np.ndarray
Time series to fit
order : int > 0
Order of the linear filter
Returns
-------
a : np.ndarray of length order + 1
LP prediction error coefficients, i.e. filter denominator polynomial
Raises
------
ParameterError
- If y is not valid audio as per `util.valid_audio`
- If order < 1 or not integer
FloatingPointError
- If y is ill-conditioned
See also
--------
scipy.signal.lfilter
Examples
--------
Compute LP coefficients of y at order 16 on entire series
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
... duration=10)
>>> librosa.lpc(y, 16)
Compute LP coefficients, and plot LP estimate of original series
>>> import matplotlib.pyplot as plt
>>> import scipy
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
... duration=0.020)
>>> a = librosa.lpc(y, 2)
>>> y_hat = scipy.signal.lfilter([0] + -1*a[1:], [1], y)
>>> plt.figure()
>>> plt.plot(y)
>>> plt.plot(y_hat)
>>> plt.legend(['y', 'y_hat'])
>>> plt.title('LP Model Forward Prediction')
|
librosa/core/audio.py
|
def lpc(y, order):
"""Linear Prediction Coefficients via Burg's method
This function applies Burg's method to estimate coefficients of a linear
filter on `y` of order `order`. Burg's method is an extension to the
Yule-Walker approach, which are both sometimes referred to as LPC parameter
estimation by autocorrelation.
It follows the description and implementation approach described in the
introduction in [1]_. N.B. This paper describes a different method, which
is not implemented here, but has been chosen for its clear explanation of
Burg's technique in its introduction.
.. [1] Larry Marple
A New Autoregressive Spectrum Analysis Algorithm
IEEE Transactions on Accoustics, Speech, and Signal Processing
vol 28, no. 4, 1980
Parameters
----------
y : np.ndarray
Time series to fit
order : int > 0
Order of the linear filter
Returns
-------
a : np.ndarray of length order + 1
LP prediction error coefficients, i.e. filter denominator polynomial
Raises
------
ParameterError
- If y is not valid audio as per `util.valid_audio`
- If order < 1 or not integer
FloatingPointError
- If y is ill-conditioned
See also
--------
scipy.signal.lfilter
Examples
--------
Compute LP coefficients of y at order 16 on entire series
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
... duration=10)
>>> librosa.lpc(y, 16)
Compute LP coefficients, and plot LP estimate of original series
>>> import matplotlib.pyplot as plt
>>> import scipy
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
... duration=0.020)
>>> a = librosa.lpc(y, 2)
>>> y_hat = scipy.signal.lfilter([0] + -1*a[1:], [1], y)
>>> plt.figure()
>>> plt.plot(y)
>>> plt.plot(y_hat)
>>> plt.legend(['y', 'y_hat'])
>>> plt.title('LP Model Forward Prediction')
"""
if not isinstance(order, int) or order < 1:
raise ParameterError("order must be an integer > 0")
util.valid_audio(y, mono=True)
return __lpc(y, order)
|
def lpc(y, order):
"""Linear Prediction Coefficients via Burg's method
This function applies Burg's method to estimate coefficients of a linear
filter on `y` of order `order`. Burg's method is an extension to the
Yule-Walker approach, which are both sometimes referred to as LPC parameter
estimation by autocorrelation.
It follows the description and implementation approach described in the
introduction in [1]_. N.B. This paper describes a different method, which
is not implemented here, but has been chosen for its clear explanation of
Burg's technique in its introduction.
.. [1] Larry Marple
A New Autoregressive Spectrum Analysis Algorithm
IEEE Transactions on Accoustics, Speech, and Signal Processing
vol 28, no. 4, 1980
Parameters
----------
y : np.ndarray
Time series to fit
order : int > 0
Order of the linear filter
Returns
-------
a : np.ndarray of length order + 1
LP prediction error coefficients, i.e. filter denominator polynomial
Raises
------
ParameterError
- If y is not valid audio as per `util.valid_audio`
- If order < 1 or not integer
FloatingPointError
- If y is ill-conditioned
See also
--------
scipy.signal.lfilter
Examples
--------
Compute LP coefficients of y at order 16 on entire series
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
... duration=10)
>>> librosa.lpc(y, 16)
Compute LP coefficients, and plot LP estimate of original series
>>> import matplotlib.pyplot as plt
>>> import scipy
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=30,
... duration=0.020)
>>> a = librosa.lpc(y, 2)
>>> y_hat = scipy.signal.lfilter([0] + -1*a[1:], [1], y)
>>> plt.figure()
>>> plt.plot(y)
>>> plt.plot(y_hat)
>>> plt.legend(['y', 'y_hat'])
>>> plt.title('LP Model Forward Prediction')
"""
if not isinstance(order, int) or order < 1:
raise ParameterError("order must be an integer > 0")
util.valid_audio(y, mono=True)
return __lpc(y, order)
|
[
"Linear",
"Prediction",
"Coefficients",
"via",
"Burg",
"s",
"method"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L536-L607
|
[
"def",
"lpc",
"(",
"y",
",",
"order",
")",
":",
"if",
"not",
"isinstance",
"(",
"order",
",",
"int",
")",
"or",
"order",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"\"order must be an integer > 0\"",
")",
"util",
".",
"valid_audio",
"(",
"y",
",",
"mono",
"=",
"True",
")",
"return",
"__lpc",
"(",
"y",
",",
"order",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
zero_crossings
|
Find the zero-crossings of a signal `y`: indices `i` such that
`sign(y[i]) != sign(y[j])`.
If `y` is multi-dimensional, then zero-crossings are computed along
the specified `axis`.
Parameters
----------
y : np.ndarray
The input array
threshold : float > 0 or None
If specified, values where `-threshold <= y <= threshold` are
clipped to 0.
ref_magnitude : float > 0 or callable
If numeric, the threshold is scaled relative to `ref_magnitude`.
If callable, the threshold is scaled relative to
`ref_magnitude(np.abs(y))`.
pad : boolean
If `True`, then `y[0]` is considered a valid zero-crossing.
zero_pos : boolean
If `True` then the value 0 is interpreted as having positive sign.
If `False`, then 0, -1, and +1 all have distinct signs.
axis : int
Axis along which to compute zero-crossings.
Returns
-------
zero_crossings : np.ndarray [shape=y.shape, dtype=boolean]
Indicator array of zero-crossings in `y` along the selected axis.
Notes
-----
This function caches at level 20.
Examples
--------
>>> # Generate a time-series
>>> y = np.sin(np.linspace(0, 4 * 2 * np.pi, 20))
>>> y
array([ 0.000e+00, 9.694e-01, 4.759e-01, -7.357e-01,
-8.372e-01, 3.247e-01, 9.966e-01, 1.646e-01,
-9.158e-01, -6.142e-01, 6.142e-01, 9.158e-01,
-1.646e-01, -9.966e-01, -3.247e-01, 8.372e-01,
7.357e-01, -4.759e-01, -9.694e-01, -9.797e-16])
>>> # Compute zero-crossings
>>> z = librosa.zero_crossings(y)
>>> z
array([ True, False, False, True, False, True, False, False,
True, False, True, False, True, False, False, True,
False, True, False, True], dtype=bool)
>>> # Stack y against the zero-crossing indicator
>>> np.vstack([y, z]).T
array([[ 0.000e+00, 1.000e+00],
[ 9.694e-01, 0.000e+00],
[ 4.759e-01, 0.000e+00],
[ -7.357e-01, 1.000e+00],
[ -8.372e-01, 0.000e+00],
[ 3.247e-01, 1.000e+00],
[ 9.966e-01, 0.000e+00],
[ 1.646e-01, 0.000e+00],
[ -9.158e-01, 1.000e+00],
[ -6.142e-01, 0.000e+00],
[ 6.142e-01, 1.000e+00],
[ 9.158e-01, 0.000e+00],
[ -1.646e-01, 1.000e+00],
[ -9.966e-01, 0.000e+00],
[ -3.247e-01, 0.000e+00],
[ 8.372e-01, 1.000e+00],
[ 7.357e-01, 0.000e+00],
[ -4.759e-01, 1.000e+00],
[ -9.694e-01, 0.000e+00],
[ -9.797e-16, 1.000e+00]])
>>> # Find the indices of zero-crossings
>>> np.nonzero(z)
(array([ 0, 3, 5, 8, 10, 12, 15, 17, 19]),)
|
librosa/core/audio.py
|
def zero_crossings(y, threshold=1e-10, ref_magnitude=None, pad=True,
zero_pos=True, axis=-1):
'''Find the zero-crossings of a signal `y`: indices `i` such that
`sign(y[i]) != sign(y[j])`.
If `y` is multi-dimensional, then zero-crossings are computed along
the specified `axis`.
Parameters
----------
y : np.ndarray
The input array
threshold : float > 0 or None
If specified, values where `-threshold <= y <= threshold` are
clipped to 0.
ref_magnitude : float > 0 or callable
If numeric, the threshold is scaled relative to `ref_magnitude`.
If callable, the threshold is scaled relative to
`ref_magnitude(np.abs(y))`.
pad : boolean
If `True`, then `y[0]` is considered a valid zero-crossing.
zero_pos : boolean
If `True` then the value 0 is interpreted as having positive sign.
If `False`, then 0, -1, and +1 all have distinct signs.
axis : int
Axis along which to compute zero-crossings.
Returns
-------
zero_crossings : np.ndarray [shape=y.shape, dtype=boolean]
Indicator array of zero-crossings in `y` along the selected axis.
Notes
-----
This function caches at level 20.
Examples
--------
>>> # Generate a time-series
>>> y = np.sin(np.linspace(0, 4 * 2 * np.pi, 20))
>>> y
array([ 0.000e+00, 9.694e-01, 4.759e-01, -7.357e-01,
-8.372e-01, 3.247e-01, 9.966e-01, 1.646e-01,
-9.158e-01, -6.142e-01, 6.142e-01, 9.158e-01,
-1.646e-01, -9.966e-01, -3.247e-01, 8.372e-01,
7.357e-01, -4.759e-01, -9.694e-01, -9.797e-16])
>>> # Compute zero-crossings
>>> z = librosa.zero_crossings(y)
>>> z
array([ True, False, False, True, False, True, False, False,
True, False, True, False, True, False, False, True,
False, True, False, True], dtype=bool)
>>> # Stack y against the zero-crossing indicator
>>> np.vstack([y, z]).T
array([[ 0.000e+00, 1.000e+00],
[ 9.694e-01, 0.000e+00],
[ 4.759e-01, 0.000e+00],
[ -7.357e-01, 1.000e+00],
[ -8.372e-01, 0.000e+00],
[ 3.247e-01, 1.000e+00],
[ 9.966e-01, 0.000e+00],
[ 1.646e-01, 0.000e+00],
[ -9.158e-01, 1.000e+00],
[ -6.142e-01, 0.000e+00],
[ 6.142e-01, 1.000e+00],
[ 9.158e-01, 0.000e+00],
[ -1.646e-01, 1.000e+00],
[ -9.966e-01, 0.000e+00],
[ -3.247e-01, 0.000e+00],
[ 8.372e-01, 1.000e+00],
[ 7.357e-01, 0.000e+00],
[ -4.759e-01, 1.000e+00],
[ -9.694e-01, 0.000e+00],
[ -9.797e-16, 1.000e+00]])
>>> # Find the indices of zero-crossings
>>> np.nonzero(z)
(array([ 0, 3, 5, 8, 10, 12, 15, 17, 19]),)
'''
# Clip within the threshold
if threshold is None:
threshold = 0.0
if six.callable(ref_magnitude):
threshold = threshold * ref_magnitude(np.abs(y))
elif ref_magnitude is not None:
threshold = threshold * ref_magnitude
if threshold > 0:
y = y.copy()
y[np.abs(y) <= threshold] = 0
# Extract the sign bit
if zero_pos:
y_sign = np.signbit(y)
else:
y_sign = np.sign(y)
# Find the change-points by slicing
slice_pre = [slice(None)] * y.ndim
slice_pre[axis] = slice(1, None)
slice_post = [slice(None)] * y.ndim
slice_post[axis] = slice(-1)
# Since we've offset the input by one, pad back onto the front
padding = [(0, 0)] * y.ndim
padding[axis] = (1, 0)
return np.pad((y_sign[tuple(slice_post)] != y_sign[tuple(slice_pre)]),
padding,
mode='constant',
constant_values=pad)
|
def zero_crossings(y, threshold=1e-10, ref_magnitude=None, pad=True,
zero_pos=True, axis=-1):
'''Find the zero-crossings of a signal `y`: indices `i` such that
`sign(y[i]) != sign(y[j])`.
If `y` is multi-dimensional, then zero-crossings are computed along
the specified `axis`.
Parameters
----------
y : np.ndarray
The input array
threshold : float > 0 or None
If specified, values where `-threshold <= y <= threshold` are
clipped to 0.
ref_magnitude : float > 0 or callable
If numeric, the threshold is scaled relative to `ref_magnitude`.
If callable, the threshold is scaled relative to
`ref_magnitude(np.abs(y))`.
pad : boolean
If `True`, then `y[0]` is considered a valid zero-crossing.
zero_pos : boolean
If `True` then the value 0 is interpreted as having positive sign.
If `False`, then 0, -1, and +1 all have distinct signs.
axis : int
Axis along which to compute zero-crossings.
Returns
-------
zero_crossings : np.ndarray [shape=y.shape, dtype=boolean]
Indicator array of zero-crossings in `y` along the selected axis.
Notes
-----
This function caches at level 20.
Examples
--------
>>> # Generate a time-series
>>> y = np.sin(np.linspace(0, 4 * 2 * np.pi, 20))
>>> y
array([ 0.000e+00, 9.694e-01, 4.759e-01, -7.357e-01,
-8.372e-01, 3.247e-01, 9.966e-01, 1.646e-01,
-9.158e-01, -6.142e-01, 6.142e-01, 9.158e-01,
-1.646e-01, -9.966e-01, -3.247e-01, 8.372e-01,
7.357e-01, -4.759e-01, -9.694e-01, -9.797e-16])
>>> # Compute zero-crossings
>>> z = librosa.zero_crossings(y)
>>> z
array([ True, False, False, True, False, True, False, False,
True, False, True, False, True, False, False, True,
False, True, False, True], dtype=bool)
>>> # Stack y against the zero-crossing indicator
>>> np.vstack([y, z]).T
array([[ 0.000e+00, 1.000e+00],
[ 9.694e-01, 0.000e+00],
[ 4.759e-01, 0.000e+00],
[ -7.357e-01, 1.000e+00],
[ -8.372e-01, 0.000e+00],
[ 3.247e-01, 1.000e+00],
[ 9.966e-01, 0.000e+00],
[ 1.646e-01, 0.000e+00],
[ -9.158e-01, 1.000e+00],
[ -6.142e-01, 0.000e+00],
[ 6.142e-01, 1.000e+00],
[ 9.158e-01, 0.000e+00],
[ -1.646e-01, 1.000e+00],
[ -9.966e-01, 0.000e+00],
[ -3.247e-01, 0.000e+00],
[ 8.372e-01, 1.000e+00],
[ 7.357e-01, 0.000e+00],
[ -4.759e-01, 1.000e+00],
[ -9.694e-01, 0.000e+00],
[ -9.797e-16, 1.000e+00]])
>>> # Find the indices of zero-crossings
>>> np.nonzero(z)
(array([ 0, 3, 5, 8, 10, 12, 15, 17, 19]),)
'''
# Clip within the threshold
if threshold is None:
threshold = 0.0
if six.callable(ref_magnitude):
threshold = threshold * ref_magnitude(np.abs(y))
elif ref_magnitude is not None:
threshold = threshold * ref_magnitude
if threshold > 0:
y = y.copy()
y[np.abs(y) <= threshold] = 0
# Extract the sign bit
if zero_pos:
y_sign = np.signbit(y)
else:
y_sign = np.sign(y)
# Find the change-points by slicing
slice_pre = [slice(None)] * y.ndim
slice_pre[axis] = slice(1, None)
slice_post = [slice(None)] * y.ndim
slice_post[axis] = slice(-1)
# Since we've offset the input by one, pad back onto the front
padding = [(0, 0)] * y.ndim
padding[axis] = (1, 0)
return np.pad((y_sign[tuple(slice_post)] != y_sign[tuple(slice_pre)]),
padding,
mode='constant',
constant_values=pad)
|
[
"Find",
"the",
"zero",
"-",
"crossings",
"of",
"a",
"signal",
"y",
":",
"indices",
"i",
"such",
"that",
"sign",
"(",
"y",
"[",
"i",
"]",
")",
"!",
"=",
"sign",
"(",
"y",
"[",
"j",
"]",
")",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L694-L815
|
[
"def",
"zero_crossings",
"(",
"y",
",",
"threshold",
"=",
"1e-10",
",",
"ref_magnitude",
"=",
"None",
",",
"pad",
"=",
"True",
",",
"zero_pos",
"=",
"True",
",",
"axis",
"=",
"-",
"1",
")",
":",
"# Clip within the threshold",
"if",
"threshold",
"is",
"None",
":",
"threshold",
"=",
"0.0",
"if",
"six",
".",
"callable",
"(",
"ref_magnitude",
")",
":",
"threshold",
"=",
"threshold",
"*",
"ref_magnitude",
"(",
"np",
".",
"abs",
"(",
"y",
")",
")",
"elif",
"ref_magnitude",
"is",
"not",
"None",
":",
"threshold",
"=",
"threshold",
"*",
"ref_magnitude",
"if",
"threshold",
">",
"0",
":",
"y",
"=",
"y",
".",
"copy",
"(",
")",
"y",
"[",
"np",
".",
"abs",
"(",
"y",
")",
"<=",
"threshold",
"]",
"=",
"0",
"# Extract the sign bit",
"if",
"zero_pos",
":",
"y_sign",
"=",
"np",
".",
"signbit",
"(",
"y",
")",
"else",
":",
"y_sign",
"=",
"np",
".",
"sign",
"(",
"y",
")",
"# Find the change-points by slicing",
"slice_pre",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"y",
".",
"ndim",
"slice_pre",
"[",
"axis",
"]",
"=",
"slice",
"(",
"1",
",",
"None",
")",
"slice_post",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"y",
".",
"ndim",
"slice_post",
"[",
"axis",
"]",
"=",
"slice",
"(",
"-",
"1",
")",
"# Since we've offset the input by one, pad back onto the front",
"padding",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
"*",
"y",
".",
"ndim",
"padding",
"[",
"axis",
"]",
"=",
"(",
"1",
",",
"0",
")",
"return",
"np",
".",
"pad",
"(",
"(",
"y_sign",
"[",
"tuple",
"(",
"slice_post",
")",
"]",
"!=",
"y_sign",
"[",
"tuple",
"(",
"slice_pre",
")",
"]",
")",
",",
"padding",
",",
"mode",
"=",
"'constant'",
",",
"constant_values",
"=",
"pad",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
clicks
|
Returns a signal with the signal `click` placed at each specified time
Parameters
----------
times : np.ndarray or None
times to place clicks, in seconds
frames : np.ndarray or None
frame indices to place clicks
sr : number > 0
desired sampling rate of the output signal
hop_length : int > 0
if positions are specified by `frames`, the number of samples between frames.
click_freq : float > 0
frequency (in Hz) of the default click signal. Default is 1KHz.
click_duration : float > 0
duration (in seconds) of the default click signal. Default is 100ms.
click : np.ndarray or None
optional click signal sample to use instead of the default blip.
length : int > 0
desired number of samples in the output signal
Returns
-------
click_signal : np.ndarray
Synthesized click signal
Raises
------
ParameterError
- If neither `times` nor `frames` are provided.
- If any of `click_freq`, `click_duration`, or `length` are out of range.
Examples
--------
>>> # Sonify detected beat events
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> y_beats = librosa.clicks(frames=beats, sr=sr)
>>> # Or generate a signal of the same length as y
>>> y_beats = librosa.clicks(frames=beats, sr=sr, length=len(y))
>>> # Or use timing instead of frame indices
>>> times = librosa.frames_to_time(beats, sr=sr)
>>> y_beat_times = librosa.clicks(times=times, sr=sr)
>>> # Or with a click frequency of 880Hz and a 500ms sample
>>> y_beat_times880 = librosa.clicks(times=times, sr=sr,
... click_freq=880, click_duration=0.5)
Display click waveform next to the spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> ax = plt.subplot(2,1,2)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,1, sharex=ax)
>>> librosa.display.waveplot(y_beat_times, sr=sr, label='Beat clicks')
>>> plt.legend()
>>> plt.xlim(15, 30)
>>> plt.tight_layout()
|
librosa/core/audio.py
|
def clicks(times=None, frames=None, sr=22050, hop_length=512,
click_freq=1000.0, click_duration=0.1, click=None, length=None):
"""Returns a signal with the signal `click` placed at each specified time
Parameters
----------
times : np.ndarray or None
times to place clicks, in seconds
frames : np.ndarray or None
frame indices to place clicks
sr : number > 0
desired sampling rate of the output signal
hop_length : int > 0
if positions are specified by `frames`, the number of samples between frames.
click_freq : float > 0
frequency (in Hz) of the default click signal. Default is 1KHz.
click_duration : float > 0
duration (in seconds) of the default click signal. Default is 100ms.
click : np.ndarray or None
optional click signal sample to use instead of the default blip.
length : int > 0
desired number of samples in the output signal
Returns
-------
click_signal : np.ndarray
Synthesized click signal
Raises
------
ParameterError
- If neither `times` nor `frames` are provided.
- If any of `click_freq`, `click_duration`, or `length` are out of range.
Examples
--------
>>> # Sonify detected beat events
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> y_beats = librosa.clicks(frames=beats, sr=sr)
>>> # Or generate a signal of the same length as y
>>> y_beats = librosa.clicks(frames=beats, sr=sr, length=len(y))
>>> # Or use timing instead of frame indices
>>> times = librosa.frames_to_time(beats, sr=sr)
>>> y_beat_times = librosa.clicks(times=times, sr=sr)
>>> # Or with a click frequency of 880Hz and a 500ms sample
>>> y_beat_times880 = librosa.clicks(times=times, sr=sr,
... click_freq=880, click_duration=0.5)
Display click waveform next to the spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> ax = plt.subplot(2,1,2)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,1, sharex=ax)
>>> librosa.display.waveplot(y_beat_times, sr=sr, label='Beat clicks')
>>> plt.legend()
>>> plt.xlim(15, 30)
>>> plt.tight_layout()
"""
# Compute sample positions from time or frames
if times is None:
if frames is None:
raise ParameterError('either "times" or "frames" must be provided')
positions = frames_to_samples(frames, hop_length=hop_length)
else:
# Convert times to positions
positions = time_to_samples(times, sr=sr)
if click is not None:
# Check that we have a well-formed audio buffer
util.valid_audio(click, mono=True)
else:
# Create default click signal
if click_duration <= 0:
raise ParameterError('click_duration must be strictly positive')
if click_freq <= 0:
raise ParameterError('click_freq must be strictly positive')
angular_freq = 2 * np.pi * click_freq / float(sr)
click = np.logspace(0, -10,
num=int(np.round(sr * click_duration)),
base=2.0)
click *= np.sin(angular_freq * np.arange(len(click)))
# Set default length
if length is None:
length = positions.max() + click.shape[0]
else:
if length < 1:
raise ParameterError('length must be a positive integer')
# Filter out any positions past the length boundary
positions = positions[positions < length]
# Pre-allocate click signal
click_signal = np.zeros(length, dtype=np.float32)
# Place clicks
for start in positions:
# Compute the end-point of this click
end = start + click.shape[0]
if end >= length:
click_signal[start:] += click[:length - start]
else:
# Normally, just add a click here
click_signal[start:end] += click
return click_signal
|
def clicks(times=None, frames=None, sr=22050, hop_length=512,
click_freq=1000.0, click_duration=0.1, click=None, length=None):
"""Returns a signal with the signal `click` placed at each specified time
Parameters
----------
times : np.ndarray or None
times to place clicks, in seconds
frames : np.ndarray or None
frame indices to place clicks
sr : number > 0
desired sampling rate of the output signal
hop_length : int > 0
if positions are specified by `frames`, the number of samples between frames.
click_freq : float > 0
frequency (in Hz) of the default click signal. Default is 1KHz.
click_duration : float > 0
duration (in seconds) of the default click signal. Default is 100ms.
click : np.ndarray or None
optional click signal sample to use instead of the default blip.
length : int > 0
desired number of samples in the output signal
Returns
-------
click_signal : np.ndarray
Synthesized click signal
Raises
------
ParameterError
- If neither `times` nor `frames` are provided.
- If any of `click_freq`, `click_duration`, or `length` are out of range.
Examples
--------
>>> # Sonify detected beat events
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> y_beats = librosa.clicks(frames=beats, sr=sr)
>>> # Or generate a signal of the same length as y
>>> y_beats = librosa.clicks(frames=beats, sr=sr, length=len(y))
>>> # Or use timing instead of frame indices
>>> times = librosa.frames_to_time(beats, sr=sr)
>>> y_beat_times = librosa.clicks(times=times, sr=sr)
>>> # Or with a click frequency of 880Hz and a 500ms sample
>>> y_beat_times880 = librosa.clicks(times=times, sr=sr,
... click_freq=880, click_duration=0.5)
Display click waveform next to the spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=y, sr=sr)
>>> ax = plt.subplot(2,1,2)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,1, sharex=ax)
>>> librosa.display.waveplot(y_beat_times, sr=sr, label='Beat clicks')
>>> plt.legend()
>>> plt.xlim(15, 30)
>>> plt.tight_layout()
"""
# Compute sample positions from time or frames
if times is None:
if frames is None:
raise ParameterError('either "times" or "frames" must be provided')
positions = frames_to_samples(frames, hop_length=hop_length)
else:
# Convert times to positions
positions = time_to_samples(times, sr=sr)
if click is not None:
# Check that we have a well-formed audio buffer
util.valid_audio(click, mono=True)
else:
# Create default click signal
if click_duration <= 0:
raise ParameterError('click_duration must be strictly positive')
if click_freq <= 0:
raise ParameterError('click_freq must be strictly positive')
angular_freq = 2 * np.pi * click_freq / float(sr)
click = np.logspace(0, -10,
num=int(np.round(sr * click_duration)),
base=2.0)
click *= np.sin(angular_freq * np.arange(len(click)))
# Set default length
if length is None:
length = positions.max() + click.shape[0]
else:
if length < 1:
raise ParameterError('length must be a positive integer')
# Filter out any positions past the length boundary
positions = positions[positions < length]
# Pre-allocate click signal
click_signal = np.zeros(length, dtype=np.float32)
# Place clicks
for start in positions:
# Compute the end-point of this click
end = start + click.shape[0]
if end >= length:
click_signal[start:] += click[:length - start]
else:
# Normally, just add a click here
click_signal[start:end] += click
return click_signal
|
[
"Returns",
"a",
"signal",
"with",
"the",
"signal",
"click",
"placed",
"at",
"each",
"specified",
"time"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L818-L949
|
[
"def",
"clicks",
"(",
"times",
"=",
"None",
",",
"frames",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"click_freq",
"=",
"1000.0",
",",
"click_duration",
"=",
"0.1",
",",
"click",
"=",
"None",
",",
"length",
"=",
"None",
")",
":",
"# Compute sample positions from time or frames",
"if",
"times",
"is",
"None",
":",
"if",
"frames",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'either \"times\" or \"frames\" must be provided'",
")",
"positions",
"=",
"frames_to_samples",
"(",
"frames",
",",
"hop_length",
"=",
"hop_length",
")",
"else",
":",
"# Convert times to positions",
"positions",
"=",
"time_to_samples",
"(",
"times",
",",
"sr",
"=",
"sr",
")",
"if",
"click",
"is",
"not",
"None",
":",
"# Check that we have a well-formed audio buffer",
"util",
".",
"valid_audio",
"(",
"click",
",",
"mono",
"=",
"True",
")",
"else",
":",
"# Create default click signal",
"if",
"click_duration",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'click_duration must be strictly positive'",
")",
"if",
"click_freq",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'click_freq must be strictly positive'",
")",
"angular_freq",
"=",
"2",
"*",
"np",
".",
"pi",
"*",
"click_freq",
"/",
"float",
"(",
"sr",
")",
"click",
"=",
"np",
".",
"logspace",
"(",
"0",
",",
"-",
"10",
",",
"num",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"sr",
"*",
"click_duration",
")",
")",
",",
"base",
"=",
"2.0",
")",
"click",
"*=",
"np",
".",
"sin",
"(",
"angular_freq",
"*",
"np",
".",
"arange",
"(",
"len",
"(",
"click",
")",
")",
")",
"# Set default length",
"if",
"length",
"is",
"None",
":",
"length",
"=",
"positions",
".",
"max",
"(",
")",
"+",
"click",
".",
"shape",
"[",
"0",
"]",
"else",
":",
"if",
"length",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"'length must be a positive integer'",
")",
"# Filter out any positions past the length boundary",
"positions",
"=",
"positions",
"[",
"positions",
"<",
"length",
"]",
"# Pre-allocate click signal",
"click_signal",
"=",
"np",
".",
"zeros",
"(",
"length",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# Place clicks",
"for",
"start",
"in",
"positions",
":",
"# Compute the end-point of this click",
"end",
"=",
"start",
"+",
"click",
".",
"shape",
"[",
"0",
"]",
"if",
"end",
">=",
"length",
":",
"click_signal",
"[",
"start",
":",
"]",
"+=",
"click",
"[",
":",
"length",
"-",
"start",
"]",
"else",
":",
"# Normally, just add a click here",
"click_signal",
"[",
"start",
":",
"end",
"]",
"+=",
"click",
"return",
"click_signal"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
tone
|
Returns a pure tone signal. The signal generated is a cosine wave.
Parameters
----------
frequency : float > 0
frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal. When both `duration` and `length` are defined,
`length` would take priority.
duration : float > 0
desired duration in seconds. When both `duration` and `length` are defined, `length` would take priority.
phi : float or None
phase offset, in radians. If unspecified, defaults to `-np.pi * 0.5`.
Returns
-------
tone_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized pure sine tone signal
Raises
------
ParameterError
- If `frequency` is not provided.
- If neither `length` nor `duration` are provided.
Examples
--------
>>> # Generate a pure sine tone A4
>>> tone = librosa.tone(440, duration=1)
>>> # Or generate the same signal using `length`
>>> tone = librosa.tone(440, sr=22050, length=22050)
Display spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=tone)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
|
librosa/core/audio.py
|
def tone(frequency, sr=22050, length=None, duration=None, phi=None):
"""Returns a pure tone signal. The signal generated is a cosine wave.
Parameters
----------
frequency : float > 0
frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal. When both `duration` and `length` are defined,
`length` would take priority.
duration : float > 0
desired duration in seconds. When both `duration` and `length` are defined, `length` would take priority.
phi : float or None
phase offset, in radians. If unspecified, defaults to `-np.pi * 0.5`.
Returns
-------
tone_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized pure sine tone signal
Raises
------
ParameterError
- If `frequency` is not provided.
- If neither `length` nor `duration` are provided.
Examples
--------
>>> # Generate a pure sine tone A4
>>> tone = librosa.tone(440, duration=1)
>>> # Or generate the same signal using `length`
>>> tone = librosa.tone(440, sr=22050, length=22050)
Display spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=tone)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
"""
if frequency is None:
raise ParameterError('"frequency" must be provided')
# Compute signal length
if length is None:
if duration is None:
raise ParameterError('either "length" or "duration" must be provided')
length = duration * sr
if phi is None:
phi = -np.pi * 0.5
step = 1.0 / sr
return np.cos(2 * np.pi * frequency * (np.arange(step * length, step=step)) + phi)
|
def tone(frequency, sr=22050, length=None, duration=None, phi=None):
"""Returns a pure tone signal. The signal generated is a cosine wave.
Parameters
----------
frequency : float > 0
frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal. When both `duration` and `length` are defined,
`length` would take priority.
duration : float > 0
desired duration in seconds. When both `duration` and `length` are defined, `length` would take priority.
phi : float or None
phase offset, in radians. If unspecified, defaults to `-np.pi * 0.5`.
Returns
-------
tone_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized pure sine tone signal
Raises
------
ParameterError
- If `frequency` is not provided.
- If neither `length` nor `duration` are provided.
Examples
--------
>>> # Generate a pure sine tone A4
>>> tone = librosa.tone(440, duration=1)
>>> # Or generate the same signal using `length`
>>> tone = librosa.tone(440, sr=22050, length=22050)
Display spectrogram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S = librosa.feature.melspectrogram(y=tone)
>>> librosa.display.specshow(librosa.power_to_db(S, ref=np.max),
... x_axis='time', y_axis='mel')
"""
if frequency is None:
raise ParameterError('"frequency" must be provided')
# Compute signal length
if length is None:
if duration is None:
raise ParameterError('either "length" or "duration" must be provided')
length = duration * sr
if phi is None:
phi = -np.pi * 0.5
step = 1.0 / sr
return np.cos(2 * np.pi * frequency * (np.arange(step * length, step=step)) + phi)
|
[
"Returns",
"a",
"pure",
"tone",
"signal",
".",
"The",
"signal",
"generated",
"is",
"a",
"cosine",
"wave",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L952-L1017
|
[
"def",
"tone",
"(",
"frequency",
",",
"sr",
"=",
"22050",
",",
"length",
"=",
"None",
",",
"duration",
"=",
"None",
",",
"phi",
"=",
"None",
")",
":",
"if",
"frequency",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'\"frequency\" must be provided'",
")",
"# Compute signal length",
"if",
"length",
"is",
"None",
":",
"if",
"duration",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'either \"length\" or \"duration\" must be provided'",
")",
"length",
"=",
"duration",
"*",
"sr",
"if",
"phi",
"is",
"None",
":",
"phi",
"=",
"-",
"np",
".",
"pi",
"*",
"0.5",
"step",
"=",
"1.0",
"/",
"sr",
"return",
"np",
".",
"cos",
"(",
"2",
"*",
"np",
".",
"pi",
"*",
"frequency",
"*",
"(",
"np",
".",
"arange",
"(",
"step",
"*",
"length",
",",
"step",
"=",
"step",
")",
")",
"+",
"phi",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
chirp
|
Returns a chirp signal that goes from frequency `fmin` to frequency `fmax`
Parameters
----------
fmin : float > 0
initial frequency
fmax : float > 0
final frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal.
When both `duration` and `length` are defined, `length` would take priority.
duration : float > 0
desired duration in seconds.
When both `duration` and `length` are defined, `length` would take priority.
linear : boolean
- If `True`, use a linear sweep, i.e., frequency changes linearly with time
- If `False`, use a exponential sweep.
Default is `False`.
phi : float or None
phase offset, in radians.
If unspecified, defaults to `-np.pi * 0.5`.
Returns
-------
chirp_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized chirp signal
Raises
------
ParameterError
- If either `fmin` or `fmax` are not provided.
- If neither `length` nor `duration` are provided.
See Also
--------
scipy.signal.chirp
Examples
--------
>>> # Generate a exponential chirp from A4 to A5
>>> exponential_chirp = librosa.chirp(440, 880, duration=1)
>>> # Or generate the same signal using `length`
>>> exponential_chirp = librosa.chirp(440, 880, sr=22050, length=22050)
>>> # Or generate a linear chirp instead
>>> linear_chirp = librosa.chirp(440, 880, duration=1, linear=True)
Display spectrogram for both exponential and linear chirps
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S_exponential = librosa.feature.melspectrogram(y=exponential_chirp)
>>> ax = plt.subplot(2,1,1)
>>> librosa.display.specshow(librosa.power_to_db(S_exponential, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,2, sharex=ax)
>>> S_linear = librosa.feature.melspectrogram(y=linear_chirp)
>>> librosa.display.specshow(librosa.power_to_db(S_linear, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.tight_layout()
|
librosa/core/audio.py
|
def chirp(fmin, fmax, sr=22050, length=None, duration=None, linear=False, phi=None):
"""Returns a chirp signal that goes from frequency `fmin` to frequency `fmax`
Parameters
----------
fmin : float > 0
initial frequency
fmax : float > 0
final frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal.
When both `duration` and `length` are defined, `length` would take priority.
duration : float > 0
desired duration in seconds.
When both `duration` and `length` are defined, `length` would take priority.
linear : boolean
- If `True`, use a linear sweep, i.e., frequency changes linearly with time
- If `False`, use a exponential sweep.
Default is `False`.
phi : float or None
phase offset, in radians.
If unspecified, defaults to `-np.pi * 0.5`.
Returns
-------
chirp_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized chirp signal
Raises
------
ParameterError
- If either `fmin` or `fmax` are not provided.
- If neither `length` nor `duration` are provided.
See Also
--------
scipy.signal.chirp
Examples
--------
>>> # Generate a exponential chirp from A4 to A5
>>> exponential_chirp = librosa.chirp(440, 880, duration=1)
>>> # Or generate the same signal using `length`
>>> exponential_chirp = librosa.chirp(440, 880, sr=22050, length=22050)
>>> # Or generate a linear chirp instead
>>> linear_chirp = librosa.chirp(440, 880, duration=1, linear=True)
Display spectrogram for both exponential and linear chirps
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S_exponential = librosa.feature.melspectrogram(y=exponential_chirp)
>>> ax = plt.subplot(2,1,1)
>>> librosa.display.specshow(librosa.power_to_db(S_exponential, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,2, sharex=ax)
>>> S_linear = librosa.feature.melspectrogram(y=linear_chirp)
>>> librosa.display.specshow(librosa.power_to_db(S_linear, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.tight_layout()
"""
if fmin is None or fmax is None:
raise ParameterError('both "fmin" and "fmax" must be provided')
# Compute signal duration
period = 1.0 / sr
if length is None:
if duration is None:
raise ParameterError('either "length" or "duration" must be provided')
else:
duration = period * length
if phi is None:
phi = -np.pi * 0.5
method = 'linear' if linear else 'logarithmic'
return scipy.signal.chirp(
np.arange(duration, step=period),
fmin,
duration,
fmax,
method=method,
phi=phi / np.pi * 180, # scipy.signal.chirp uses degrees for phase offset
)
|
def chirp(fmin, fmax, sr=22050, length=None, duration=None, linear=False, phi=None):
"""Returns a chirp signal that goes from frequency `fmin` to frequency `fmax`
Parameters
----------
fmin : float > 0
initial frequency
fmax : float > 0
final frequency
sr : number > 0
desired sampling rate of the output signal
length : int > 0
desired number of samples in the output signal.
When both `duration` and `length` are defined, `length` would take priority.
duration : float > 0
desired duration in seconds.
When both `duration` and `length` are defined, `length` would take priority.
linear : boolean
- If `True`, use a linear sweep, i.e., frequency changes linearly with time
- If `False`, use a exponential sweep.
Default is `False`.
phi : float or None
phase offset, in radians.
If unspecified, defaults to `-np.pi * 0.5`.
Returns
-------
chirp_signal : np.ndarray [shape=(length,), dtype=float64]
Synthesized chirp signal
Raises
------
ParameterError
- If either `fmin` or `fmax` are not provided.
- If neither `length` nor `duration` are provided.
See Also
--------
scipy.signal.chirp
Examples
--------
>>> # Generate a exponential chirp from A4 to A5
>>> exponential_chirp = librosa.chirp(440, 880, duration=1)
>>> # Or generate the same signal using `length`
>>> exponential_chirp = librosa.chirp(440, 880, sr=22050, length=22050)
>>> # Or generate a linear chirp instead
>>> linear_chirp = librosa.chirp(440, 880, duration=1, linear=True)
Display spectrogram for both exponential and linear chirps
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> S_exponential = librosa.feature.melspectrogram(y=exponential_chirp)
>>> ax = plt.subplot(2,1,1)
>>> librosa.display.specshow(librosa.power_to_db(S_exponential, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.subplot(2,1,2, sharex=ax)
>>> S_linear = librosa.feature.melspectrogram(y=linear_chirp)
>>> librosa.display.specshow(librosa.power_to_db(S_linear, ref=np.max),
... x_axis='time', y_axis='mel')
>>> plt.tight_layout()
"""
if fmin is None or fmax is None:
raise ParameterError('both "fmin" and "fmax" must be provided')
# Compute signal duration
period = 1.0 / sr
if length is None:
if duration is None:
raise ParameterError('either "length" or "duration" must be provided')
else:
duration = period * length
if phi is None:
phi = -np.pi * 0.5
method = 'linear' if linear else 'logarithmic'
return scipy.signal.chirp(
np.arange(duration, step=period),
fmin,
duration,
fmax,
method=method,
phi=phi / np.pi * 180, # scipy.signal.chirp uses degrees for phase offset
)
|
[
"Returns",
"a",
"chirp",
"signal",
"that",
"goes",
"from",
"frequency",
"fmin",
"to",
"frequency",
"fmax"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/audio.py#L1020-L1118
|
[
"def",
"chirp",
"(",
"fmin",
",",
"fmax",
",",
"sr",
"=",
"22050",
",",
"length",
"=",
"None",
",",
"duration",
"=",
"None",
",",
"linear",
"=",
"False",
",",
"phi",
"=",
"None",
")",
":",
"if",
"fmin",
"is",
"None",
"or",
"fmax",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'both \"fmin\" and \"fmax\" must be provided'",
")",
"# Compute signal duration",
"period",
"=",
"1.0",
"/",
"sr",
"if",
"length",
"is",
"None",
":",
"if",
"duration",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'either \"length\" or \"duration\" must be provided'",
")",
"else",
":",
"duration",
"=",
"period",
"*",
"length",
"if",
"phi",
"is",
"None",
":",
"phi",
"=",
"-",
"np",
".",
"pi",
"*",
"0.5",
"method",
"=",
"'linear'",
"if",
"linear",
"else",
"'logarithmic'",
"return",
"scipy",
".",
"signal",
".",
"chirp",
"(",
"np",
".",
"arange",
"(",
"duration",
",",
"step",
"=",
"period",
")",
",",
"fmin",
",",
"duration",
",",
"fmax",
",",
"method",
"=",
"method",
",",
"phi",
"=",
"phi",
"/",
"np",
".",
"pi",
"*",
"180",
",",
"# scipy.signal.chirp uses degrees for phase offset",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
tempogram
|
Compute the tempogram: local autocorrelation of the onset strength envelope. [1]_
.. [1] Grosche, Peter, Meinard Müller, and Frank Kurth.
"Cyclic tempogram - A mid-level tempo representation for music signals."
ICASSP, 2010.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
Audio time series.
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,) or (m, n)] or None
Optional pre-computed onset strength envelope as provided by
`onset.onset_strength`.
If multi-dimensional, tempograms are computed independently for each
band (first dimension).
hop_length : int > 0
number of audio samples between successive onset measurements
win_length : int > 0
length of the onset autocorrelation window (in frames/onset measurements)
The default settings (384) corresponds to `384 * hop_length / sr ~= 8.9s`.
center : bool
If `True`, onset autocorrelation windows are centered.
If `False`, windows are left-aligned.
window : string, function, number, tuple, or np.ndarray [shape=(win_length,)]
A window specification as in `core.stft`.
norm : {np.inf, -np.inf, 0, float > 0, None}
Normalization mode. Set to `None` to disable normalization.
Returns
-------
tempogram : np.ndarray [shape=(win_length, n) or (m, win_length, n)]
Localized autocorrelation of the onset strength envelope.
If given multi-band input (`onset_envelope.shape==(m,n)`) then
`tempogram[i]` is the tempogram of `onset_envelope[i]`.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
if `win_length < 1`
See Also
--------
librosa.onset.onset_strength
librosa.util.normalize
librosa.core.stft
Examples
--------
>>> # Compute local onset autocorrelation
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> hop_length = 512
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
>>> tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)
>>> # Compute global onset autocorrelation
>>> ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
>>> ac_global = librosa.util.normalize(ac_global)
>>> # Estimate the global tempo for display purposes
>>> tempo = librosa.beat.tempo(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)[0]
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 8))
>>> plt.subplot(4, 1, 1)
>>> plt.plot(oenv, label='Onset strength')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.subplot(4, 1, 2)
>>> # We'll truncate the display to a narrower range of tempi
>>> librosa.display.specshow(tempogram, sr=sr, hop_length=hop_length,
>>> x_axis='time', y_axis='tempo')
>>> plt.axhline(tempo, color='w', linestyle='--', alpha=1,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.subplot(4, 1, 3)
>>> x = np.linspace(0, tempogram.shape[0] * float(hop_length) / sr,
... num=tempogram.shape[0])
>>> plt.plot(x, np.mean(tempogram, axis=1), label='Mean local autocorrelation')
>>> plt.plot(x, ac_global, '--', alpha=0.75, label='Global autocorrelation')
>>> plt.xlabel('Lag (seconds)')
>>> plt.axis('tight')
>>> plt.legend(frameon=True)
>>> plt.subplot(4,1,4)
>>> # We can also plot on a BPM axis
>>> freqs = librosa.tempo_frequencies(tempogram.shape[0], hop_length=hop_length, sr=sr)
>>> plt.semilogx(freqs[1:], np.mean(tempogram[1:], axis=1),
... label='Mean local autocorrelation', basex=2)
>>> plt.semilogx(freqs[1:], ac_global[1:], '--', alpha=0.75,
... label='Global autocorrelation', basex=2)
>>> plt.axvline(tempo, color='black', linestyle='--', alpha=.8,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True)
>>> plt.xlabel('BPM')
>>> plt.axis('tight')
>>> plt.grid()
>>> plt.tight_layout()
|
librosa/feature/rhythm.py
|
def tempogram(y=None, sr=22050, onset_envelope=None, hop_length=512,
win_length=384, center=True, window='hann', norm=np.inf):
'''Compute the tempogram: local autocorrelation of the onset strength envelope. [1]_
.. [1] Grosche, Peter, Meinard Müller, and Frank Kurth.
"Cyclic tempogram - A mid-level tempo representation for music signals."
ICASSP, 2010.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
Audio time series.
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,) or (m, n)] or None
Optional pre-computed onset strength envelope as provided by
`onset.onset_strength`.
If multi-dimensional, tempograms are computed independently for each
band (first dimension).
hop_length : int > 0
number of audio samples between successive onset measurements
win_length : int > 0
length of the onset autocorrelation window (in frames/onset measurements)
The default settings (384) corresponds to `384 * hop_length / sr ~= 8.9s`.
center : bool
If `True`, onset autocorrelation windows are centered.
If `False`, windows are left-aligned.
window : string, function, number, tuple, or np.ndarray [shape=(win_length,)]
A window specification as in `core.stft`.
norm : {np.inf, -np.inf, 0, float > 0, None}
Normalization mode. Set to `None` to disable normalization.
Returns
-------
tempogram : np.ndarray [shape=(win_length, n) or (m, win_length, n)]
Localized autocorrelation of the onset strength envelope.
If given multi-band input (`onset_envelope.shape==(m,n)`) then
`tempogram[i]` is the tempogram of `onset_envelope[i]`.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
if `win_length < 1`
See Also
--------
librosa.onset.onset_strength
librosa.util.normalize
librosa.core.stft
Examples
--------
>>> # Compute local onset autocorrelation
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> hop_length = 512
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
>>> tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)
>>> # Compute global onset autocorrelation
>>> ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
>>> ac_global = librosa.util.normalize(ac_global)
>>> # Estimate the global tempo for display purposes
>>> tempo = librosa.beat.tempo(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)[0]
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 8))
>>> plt.subplot(4, 1, 1)
>>> plt.plot(oenv, label='Onset strength')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.subplot(4, 1, 2)
>>> # We'll truncate the display to a narrower range of tempi
>>> librosa.display.specshow(tempogram, sr=sr, hop_length=hop_length,
>>> x_axis='time', y_axis='tempo')
>>> plt.axhline(tempo, color='w', linestyle='--', alpha=1,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.subplot(4, 1, 3)
>>> x = np.linspace(0, tempogram.shape[0] * float(hop_length) / sr,
... num=tempogram.shape[0])
>>> plt.plot(x, np.mean(tempogram, axis=1), label='Mean local autocorrelation')
>>> plt.plot(x, ac_global, '--', alpha=0.75, label='Global autocorrelation')
>>> plt.xlabel('Lag (seconds)')
>>> plt.axis('tight')
>>> plt.legend(frameon=True)
>>> plt.subplot(4,1,4)
>>> # We can also plot on a BPM axis
>>> freqs = librosa.tempo_frequencies(tempogram.shape[0], hop_length=hop_length, sr=sr)
>>> plt.semilogx(freqs[1:], np.mean(tempogram[1:], axis=1),
... label='Mean local autocorrelation', basex=2)
>>> plt.semilogx(freqs[1:], ac_global[1:], '--', alpha=0.75,
... label='Global autocorrelation', basex=2)
>>> plt.axvline(tempo, color='black', linestyle='--', alpha=.8,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True)
>>> plt.xlabel('BPM')
>>> plt.axis('tight')
>>> plt.grid()
>>> plt.tight_layout()
'''
from ..onset import onset_strength
if win_length < 1:
raise ParameterError('win_length must be a positive integer')
ac_window = get_window(window, win_length, fftbins=True)
if onset_envelope is None:
if y is None:
raise ParameterError('Either y or onset_envelope must be provided')
onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)
else:
# Force row-contiguity to avoid framing errors below
onset_envelope = np.ascontiguousarray(onset_envelope)
if onset_envelope.ndim > 1:
# If we have multi-band input, iterate over rows
return np.asarray([tempogram(onset_envelope=oe_subband,
hop_length=hop_length,
win_length=win_length,
center=center,
window=window,
norm=norm) for oe_subband in onset_envelope])
# Center the autocorrelation windows
n = len(onset_envelope)
if center:
onset_envelope = np.pad(onset_envelope, int(win_length // 2),
mode='linear_ramp', end_values=[0, 0])
# Carve onset envelope into frames
odf_frame = util.frame(onset_envelope,
frame_length=win_length,
hop_length=1)
# Truncate to the length of the original signal
if center:
odf_frame = odf_frame[:, :n]
# Window, autocorrelate, and normalize
return util.normalize(autocorrelate(odf_frame * ac_window[:, np.newaxis],
axis=0),
norm=norm, axis=0)
|
def tempogram(y=None, sr=22050, onset_envelope=None, hop_length=512,
win_length=384, center=True, window='hann', norm=np.inf):
'''Compute the tempogram: local autocorrelation of the onset strength envelope. [1]_
.. [1] Grosche, Peter, Meinard Müller, and Frank Kurth.
"Cyclic tempogram - A mid-level tempo representation for music signals."
ICASSP, 2010.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
Audio time series.
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,) or (m, n)] or None
Optional pre-computed onset strength envelope as provided by
`onset.onset_strength`.
If multi-dimensional, tempograms are computed independently for each
band (first dimension).
hop_length : int > 0
number of audio samples between successive onset measurements
win_length : int > 0
length of the onset autocorrelation window (in frames/onset measurements)
The default settings (384) corresponds to `384 * hop_length / sr ~= 8.9s`.
center : bool
If `True`, onset autocorrelation windows are centered.
If `False`, windows are left-aligned.
window : string, function, number, tuple, or np.ndarray [shape=(win_length,)]
A window specification as in `core.stft`.
norm : {np.inf, -np.inf, 0, float > 0, None}
Normalization mode. Set to `None` to disable normalization.
Returns
-------
tempogram : np.ndarray [shape=(win_length, n) or (m, win_length, n)]
Localized autocorrelation of the onset strength envelope.
If given multi-band input (`onset_envelope.shape==(m,n)`) then
`tempogram[i]` is the tempogram of `onset_envelope[i]`.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
if `win_length < 1`
See Also
--------
librosa.onset.onset_strength
librosa.util.normalize
librosa.core.stft
Examples
--------
>>> # Compute local onset autocorrelation
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> hop_length = 512
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
>>> tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)
>>> # Compute global onset autocorrelation
>>> ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
>>> ac_global = librosa.util.normalize(ac_global)
>>> # Estimate the global tempo for display purposes
>>> tempo = librosa.beat.tempo(onset_envelope=oenv, sr=sr,
... hop_length=hop_length)[0]
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 8))
>>> plt.subplot(4, 1, 1)
>>> plt.plot(oenv, label='Onset strength')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.subplot(4, 1, 2)
>>> # We'll truncate the display to a narrower range of tempi
>>> librosa.display.specshow(tempogram, sr=sr, hop_length=hop_length,
>>> x_axis='time', y_axis='tempo')
>>> plt.axhline(tempo, color='w', linestyle='--', alpha=1,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.subplot(4, 1, 3)
>>> x = np.linspace(0, tempogram.shape[0] * float(hop_length) / sr,
... num=tempogram.shape[0])
>>> plt.plot(x, np.mean(tempogram, axis=1), label='Mean local autocorrelation')
>>> plt.plot(x, ac_global, '--', alpha=0.75, label='Global autocorrelation')
>>> plt.xlabel('Lag (seconds)')
>>> plt.axis('tight')
>>> plt.legend(frameon=True)
>>> plt.subplot(4,1,4)
>>> # We can also plot on a BPM axis
>>> freqs = librosa.tempo_frequencies(tempogram.shape[0], hop_length=hop_length, sr=sr)
>>> plt.semilogx(freqs[1:], np.mean(tempogram[1:], axis=1),
... label='Mean local autocorrelation', basex=2)
>>> plt.semilogx(freqs[1:], ac_global[1:], '--', alpha=0.75,
... label='Global autocorrelation', basex=2)
>>> plt.axvline(tempo, color='black', linestyle='--', alpha=.8,
... label='Estimated tempo={:g}'.format(tempo))
>>> plt.legend(frameon=True)
>>> plt.xlabel('BPM')
>>> plt.axis('tight')
>>> plt.grid()
>>> plt.tight_layout()
'''
from ..onset import onset_strength
if win_length < 1:
raise ParameterError('win_length must be a positive integer')
ac_window = get_window(window, win_length, fftbins=True)
if onset_envelope is None:
if y is None:
raise ParameterError('Either y or onset_envelope must be provided')
onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)
else:
# Force row-contiguity to avoid framing errors below
onset_envelope = np.ascontiguousarray(onset_envelope)
if onset_envelope.ndim > 1:
# If we have multi-band input, iterate over rows
return np.asarray([tempogram(onset_envelope=oe_subband,
hop_length=hop_length,
win_length=win_length,
center=center,
window=window,
norm=norm) for oe_subband in onset_envelope])
# Center the autocorrelation windows
n = len(onset_envelope)
if center:
onset_envelope = np.pad(onset_envelope, int(win_length // 2),
mode='linear_ramp', end_values=[0, 0])
# Carve onset envelope into frames
odf_frame = util.frame(onset_envelope,
frame_length=win_length,
hop_length=1)
# Truncate to the length of the original signal
if center:
odf_frame = odf_frame[:, :n]
# Window, autocorrelate, and normalize
return util.normalize(autocorrelate(odf_frame * ac_window[:, np.newaxis],
axis=0),
norm=norm, axis=0)
|
[
"Compute",
"the",
"tempogram",
":",
"local",
"autocorrelation",
"of",
"the",
"onset",
"strength",
"envelope",
".",
"[",
"1",
"]",
"_"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/rhythm.py#L18-L178
|
[
"def",
"tempogram",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"onset_envelope",
"=",
"None",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"384",
",",
"center",
"=",
"True",
",",
"window",
"=",
"'hann'",
",",
"norm",
"=",
"np",
".",
"inf",
")",
":",
"from",
".",
".",
"onset",
"import",
"onset_strength",
"if",
"win_length",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"'win_length must be a positive integer'",
")",
"ac_window",
"=",
"get_window",
"(",
"window",
",",
"win_length",
",",
"fftbins",
"=",
"True",
")",
"if",
"onset_envelope",
"is",
"None",
":",
"if",
"y",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'Either y or onset_envelope must be provided'",
")",
"onset_envelope",
"=",
"onset_strength",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
")",
"else",
":",
"# Force row-contiguity to avoid framing errors below",
"onset_envelope",
"=",
"np",
".",
"ascontiguousarray",
"(",
"onset_envelope",
")",
"if",
"onset_envelope",
".",
"ndim",
">",
"1",
":",
"# If we have multi-band input, iterate over rows",
"return",
"np",
".",
"asarray",
"(",
"[",
"tempogram",
"(",
"onset_envelope",
"=",
"oe_subband",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"center",
"=",
"center",
",",
"window",
"=",
"window",
",",
"norm",
"=",
"norm",
")",
"for",
"oe_subband",
"in",
"onset_envelope",
"]",
")",
"# Center the autocorrelation windows",
"n",
"=",
"len",
"(",
"onset_envelope",
")",
"if",
"center",
":",
"onset_envelope",
"=",
"np",
".",
"pad",
"(",
"onset_envelope",
",",
"int",
"(",
"win_length",
"//",
"2",
")",
",",
"mode",
"=",
"'linear_ramp'",
",",
"end_values",
"=",
"[",
"0",
",",
"0",
"]",
")",
"# Carve onset envelope into frames",
"odf_frame",
"=",
"util",
".",
"frame",
"(",
"onset_envelope",
",",
"frame_length",
"=",
"win_length",
",",
"hop_length",
"=",
"1",
")",
"# Truncate to the length of the original signal",
"if",
"center",
":",
"odf_frame",
"=",
"odf_frame",
"[",
":",
",",
":",
"n",
"]",
"# Window, autocorrelate, and normalize",
"return",
"util",
".",
"normalize",
"(",
"autocorrelate",
"(",
"odf_frame",
"*",
"ac_window",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"axis",
"=",
"0",
")",
",",
"norm",
"=",
"norm",
",",
"axis",
"=",
"0",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
find_files
|
Get a sorted list of (audio) files in a directory or directory sub-tree.
Examples
--------
>>> # Get all audio files in a directory sub-tree
>>> files = librosa.util.find_files('~/Music')
>>> # Look only within a specific directory, not the sub-tree
>>> files = librosa.util.find_files('~/Music', recurse=False)
>>> # Only look for mp3 files
>>> files = librosa.util.find_files('~/Music', ext='mp3')
>>> # Or just mp3 and ogg
>>> files = librosa.util.find_files('~/Music', ext=['mp3', 'ogg'])
>>> # Only get the first 10 files
>>> files = librosa.util.find_files('~/Music', limit=10)
>>> # Or last 10 files
>>> files = librosa.util.find_files('~/Music', offset=-10)
Parameters
----------
directory : str
Path to look for files
ext : str or list of str
A file extension or list of file extensions to include in the search.
Default: `['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']`
recurse : boolean
If `True`, then all subfolders of `directory` will be searched.
Otherwise, only `directory` will be searched.
case_sensitive : boolean
If `False`, files matching upper-case version of
extensions will be included.
limit : int > 0 or None
Return at most `limit` files. If `None`, all files are returned.
offset : int
Return files starting at `offset` within the list.
Use negative values to offset from the end of the list.
Returns
-------
files : list of str
The list of audio files.
|
librosa/util/files.py
|
def find_files(directory, ext=None, recurse=True, case_sensitive=False,
limit=None, offset=0):
'''Get a sorted list of (audio) files in a directory or directory sub-tree.
Examples
--------
>>> # Get all audio files in a directory sub-tree
>>> files = librosa.util.find_files('~/Music')
>>> # Look only within a specific directory, not the sub-tree
>>> files = librosa.util.find_files('~/Music', recurse=False)
>>> # Only look for mp3 files
>>> files = librosa.util.find_files('~/Music', ext='mp3')
>>> # Or just mp3 and ogg
>>> files = librosa.util.find_files('~/Music', ext=['mp3', 'ogg'])
>>> # Only get the first 10 files
>>> files = librosa.util.find_files('~/Music', limit=10)
>>> # Or last 10 files
>>> files = librosa.util.find_files('~/Music', offset=-10)
Parameters
----------
directory : str
Path to look for files
ext : str or list of str
A file extension or list of file extensions to include in the search.
Default: `['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']`
recurse : boolean
If `True`, then all subfolders of `directory` will be searched.
Otherwise, only `directory` will be searched.
case_sensitive : boolean
If `False`, files matching upper-case version of
extensions will be included.
limit : int > 0 or None
Return at most `limit` files. If `None`, all files are returned.
offset : int
Return files starting at `offset` within the list.
Use negative values to offset from the end of the list.
Returns
-------
files : list of str
The list of audio files.
'''
if ext is None:
ext = ['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']
elif isinstance(ext, six.string_types):
ext = [ext]
# Cast into a set
ext = set(ext)
# Generate upper-case versions
if not case_sensitive:
# Force to lower-case
ext = set([e.lower() for e in ext])
# Add in upper-case versions
ext |= set([e.upper() for e in ext])
files = set()
if recurse:
for walk in os.walk(directory):
files |= __get_files(walk[0], ext)
else:
files = __get_files(directory, ext)
files = list(files)
files.sort()
files = files[offset:]
if limit is not None:
files = files[:limit]
return files
|
def find_files(directory, ext=None, recurse=True, case_sensitive=False,
limit=None, offset=0):
'''Get a sorted list of (audio) files in a directory or directory sub-tree.
Examples
--------
>>> # Get all audio files in a directory sub-tree
>>> files = librosa.util.find_files('~/Music')
>>> # Look only within a specific directory, not the sub-tree
>>> files = librosa.util.find_files('~/Music', recurse=False)
>>> # Only look for mp3 files
>>> files = librosa.util.find_files('~/Music', ext='mp3')
>>> # Or just mp3 and ogg
>>> files = librosa.util.find_files('~/Music', ext=['mp3', 'ogg'])
>>> # Only get the first 10 files
>>> files = librosa.util.find_files('~/Music', limit=10)
>>> # Or last 10 files
>>> files = librosa.util.find_files('~/Music', offset=-10)
Parameters
----------
directory : str
Path to look for files
ext : str or list of str
A file extension or list of file extensions to include in the search.
Default: `['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']`
recurse : boolean
If `True`, then all subfolders of `directory` will be searched.
Otherwise, only `directory` will be searched.
case_sensitive : boolean
If `False`, files matching upper-case version of
extensions will be included.
limit : int > 0 or None
Return at most `limit` files. If `None`, all files are returned.
offset : int
Return files starting at `offset` within the list.
Use negative values to offset from the end of the list.
Returns
-------
files : list of str
The list of audio files.
'''
if ext is None:
ext = ['aac', 'au', 'flac', 'm4a', 'mp3', 'ogg', 'wav']
elif isinstance(ext, six.string_types):
ext = [ext]
# Cast into a set
ext = set(ext)
# Generate upper-case versions
if not case_sensitive:
# Force to lower-case
ext = set([e.lower() for e in ext])
# Add in upper-case versions
ext |= set([e.upper() for e in ext])
files = set()
if recurse:
for walk in os.walk(directory):
files |= __get_files(walk[0], ext)
else:
files = __get_files(directory, ext)
files = list(files)
files.sort()
files = files[offset:]
if limit is not None:
files = files[:limit]
return files
|
[
"Get",
"a",
"sorted",
"list",
"of",
"(",
"audio",
")",
"files",
"in",
"a",
"directory",
"or",
"directory",
"sub",
"-",
"tree",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/files.py#L49-L136
|
[
"def",
"find_files",
"(",
"directory",
",",
"ext",
"=",
"None",
",",
"recurse",
"=",
"True",
",",
"case_sensitive",
"=",
"False",
",",
"limit",
"=",
"None",
",",
"offset",
"=",
"0",
")",
":",
"if",
"ext",
"is",
"None",
":",
"ext",
"=",
"[",
"'aac'",
",",
"'au'",
",",
"'flac'",
",",
"'m4a'",
",",
"'mp3'",
",",
"'ogg'",
",",
"'wav'",
"]",
"elif",
"isinstance",
"(",
"ext",
",",
"six",
".",
"string_types",
")",
":",
"ext",
"=",
"[",
"ext",
"]",
"# Cast into a set",
"ext",
"=",
"set",
"(",
"ext",
")",
"# Generate upper-case versions",
"if",
"not",
"case_sensitive",
":",
"# Force to lower-case",
"ext",
"=",
"set",
"(",
"[",
"e",
".",
"lower",
"(",
")",
"for",
"e",
"in",
"ext",
"]",
")",
"# Add in upper-case versions",
"ext",
"|=",
"set",
"(",
"[",
"e",
".",
"upper",
"(",
")",
"for",
"e",
"in",
"ext",
"]",
")",
"files",
"=",
"set",
"(",
")",
"if",
"recurse",
":",
"for",
"walk",
"in",
"os",
".",
"walk",
"(",
"directory",
")",
":",
"files",
"|=",
"__get_files",
"(",
"walk",
"[",
"0",
"]",
",",
"ext",
")",
"else",
":",
"files",
"=",
"__get_files",
"(",
"directory",
",",
"ext",
")",
"files",
"=",
"list",
"(",
"files",
")",
"files",
".",
"sort",
"(",
")",
"files",
"=",
"files",
"[",
"offset",
":",
"]",
"if",
"limit",
"is",
"not",
"None",
":",
"files",
"=",
"files",
"[",
":",
"limit",
"]",
"return",
"files"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__get_files
|
Helper function to get files in a single directory
|
librosa/util/files.py
|
def __get_files(dir_name, extensions):
'''Helper function to get files in a single directory'''
# Expand out the directory
dir_name = os.path.abspath(os.path.expanduser(dir_name))
myfiles = set()
for sub_ext in extensions:
globstr = os.path.join(dir_name, '*' + os.path.extsep + sub_ext)
myfiles |= set(glob.glob(globstr))
return myfiles
|
def __get_files(dir_name, extensions):
'''Helper function to get files in a single directory'''
# Expand out the directory
dir_name = os.path.abspath(os.path.expanduser(dir_name))
myfiles = set()
for sub_ext in extensions:
globstr = os.path.join(dir_name, '*' + os.path.extsep + sub_ext)
myfiles |= set(glob.glob(globstr))
return myfiles
|
[
"Helper",
"function",
"to",
"get",
"files",
"in",
"a",
"single",
"directory"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/files.py#L139-L151
|
[
"def",
"__get_files",
"(",
"dir_name",
",",
"extensions",
")",
":",
"# Expand out the directory",
"dir_name",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"dir_name",
")",
")",
"myfiles",
"=",
"set",
"(",
")",
"for",
"sub_ext",
"in",
"extensions",
":",
"globstr",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_name",
",",
"'*'",
"+",
"os",
".",
"path",
".",
"extsep",
"+",
"sub_ext",
")",
"myfiles",
"|=",
"set",
"(",
"glob",
".",
"glob",
"(",
"globstr",
")",
")",
"return",
"myfiles"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
stretch_demo
|
Phase-vocoder time stretch demo function.
:parameters:
- input_file : str
path to input audio
- output_file : str
path to save output (wav)
- speed : float > 0
speed up by this factor
|
examples/time_stretch.py
|
def stretch_demo(input_file, output_file, speed):
'''Phase-vocoder time stretch demo function.
:parameters:
- input_file : str
path to input audio
- output_file : str
path to save output (wav)
- speed : float > 0
speed up by this factor
'''
# 1. Load the wav file, resample
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# 2. Time-stretch through effects module
print('Playing back at {:3.0f}% speed'.format(speed * 100))
y_stretch = librosa.effects.time_stretch(y, speed)
print('Saving stretched audio to: ', output_file)
librosa.output.write_wav(output_file, y_stretch, sr)
|
def stretch_demo(input_file, output_file, speed):
'''Phase-vocoder time stretch demo function.
:parameters:
- input_file : str
path to input audio
- output_file : str
path to save output (wav)
- speed : float > 0
speed up by this factor
'''
# 1. Load the wav file, resample
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# 2. Time-stretch through effects module
print('Playing back at {:3.0f}% speed'.format(speed * 100))
y_stretch = librosa.effects.time_stretch(y, speed)
print('Saving stretched audio to: ', output_file)
librosa.output.write_wav(output_file, y_stretch, sr)
|
[
"Phase",
"-",
"vocoder",
"time",
"stretch",
"demo",
"function",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/time_stretch.py#L13-L36
|
[
"def",
"stretch_demo",
"(",
"input_file",
",",
"output_file",
",",
"speed",
")",
":",
"# 1. Load the wav file, resample",
"print",
"(",
"'Loading '",
",",
"input_file",
")",
"y",
",",
"sr",
"=",
"librosa",
".",
"load",
"(",
"input_file",
")",
"# 2. Time-stretch through effects module",
"print",
"(",
"'Playing back at {:3.0f}% speed'",
".",
"format",
"(",
"speed",
"*",
"100",
")",
")",
"y_stretch",
"=",
"librosa",
".",
"effects",
".",
"time_stretch",
"(",
"y",
",",
"speed",
")",
"print",
"(",
"'Saving stretched audio to: '",
",",
"output_file",
")",
"librosa",
".",
"output",
".",
"write_wav",
"(",
"output_file",
",",
"y_stretch",
",",
"sr",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
process_arguments
|
Argparse function to get the program parameters
|
examples/time_stretch.py
|
def process_arguments(args):
'''Argparse function to get the program parameters'''
parser = argparse.ArgumentParser(description='Time stretching example')
parser.add_argument('input_file',
action='store',
help='path to the input file (wav, mp3, etc)')
parser.add_argument('output_file',
action='store',
help='path to the stretched output (wav)')
parser.add_argument('-s', '--speed',
action='store',
type=float,
default=2.0,
required=False,
help='speed')
return vars(parser.parse_args(args))
|
def process_arguments(args):
'''Argparse function to get the program parameters'''
parser = argparse.ArgumentParser(description='Time stretching example')
parser.add_argument('input_file',
action='store',
help='path to the input file (wav, mp3, etc)')
parser.add_argument('output_file',
action='store',
help='path to the stretched output (wav)')
parser.add_argument('-s', '--speed',
action='store',
type=float,
default=2.0,
required=False,
help='speed')
return vars(parser.parse_args(args))
|
[
"Argparse",
"function",
"to",
"get",
"the",
"program",
"parameters"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/time_stretch.py#L39-L59
|
[
"def",
"process_arguments",
"(",
"args",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Time stretching example'",
")",
"parser",
".",
"add_argument",
"(",
"'input_file'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the input file (wav, mp3, etc)'",
")",
"parser",
".",
"add_argument",
"(",
"'output_file'",
",",
"action",
"=",
"'store'",
",",
"help",
"=",
"'path to the stretched output (wav)'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--speed'",
",",
"action",
"=",
"'store'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"2.0",
",",
"required",
"=",
"False",
",",
"help",
"=",
"'speed'",
")",
"return",
"vars",
"(",
"parser",
".",
"parse_args",
"(",
"args",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
hpss_demo
|
HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
path to save output harmonic (wav)
|
examples/hpss.py
|
def hpss_demo(input_file, output_harmonic, output_percussive):
'''HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
path to save output harmonic (wav)
'''
# 1. Load the wav file, resample
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# Separate components with the effects module
print('Separating harmonics and percussives... ')
y_harmonic, y_percussive = librosa.effects.hpss(y)
# 5. Save the results
print('Saving harmonic audio to: ', output_harmonic)
librosa.output.write_wav(output_harmonic, y_harmonic, sr)
print('Saving percussive audio to: ', output_percussive)
librosa.output.write_wav(output_percussive, y_percussive, sr)
|
def hpss_demo(input_file, output_harmonic, output_percussive):
'''HPSS demo function.
:parameters:
- input_file : str
path to input audio
- output_harmonic : str
path to save output harmonic (wav)
- output_percussive : str
path to save output harmonic (wav)
'''
# 1. Load the wav file, resample
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# Separate components with the effects module
print('Separating harmonics and percussives... ')
y_harmonic, y_percussive = librosa.effects.hpss(y)
# 5. Save the results
print('Saving harmonic audio to: ', output_harmonic)
librosa.output.write_wav(output_harmonic, y_harmonic, sr)
print('Saving percussive audio to: ', output_percussive)
librosa.output.write_wav(output_percussive, y_percussive, sr)
|
[
"HPSS",
"demo",
"function",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/hpss.py#L13-L39
|
[
"def",
"hpss_demo",
"(",
"input_file",
",",
"output_harmonic",
",",
"output_percussive",
")",
":",
"# 1. Load the wav file, resample",
"print",
"(",
"'Loading '",
",",
"input_file",
")",
"y",
",",
"sr",
"=",
"librosa",
".",
"load",
"(",
"input_file",
")",
"# Separate components with the effects module",
"print",
"(",
"'Separating harmonics and percussives... '",
")",
"y_harmonic",
",",
"y_percussive",
"=",
"librosa",
".",
"effects",
".",
"hpss",
"(",
"y",
")",
"# 5. Save the results",
"print",
"(",
"'Saving harmonic audio to: '",
",",
"output_harmonic",
")",
"librosa",
".",
"output",
".",
"write_wav",
"(",
"output_harmonic",
",",
"y_harmonic",
",",
"sr",
")",
"print",
"(",
"'Saving percussive audio to: '",
",",
"output_percussive",
")",
"librosa",
".",
"output",
".",
"write_wav",
"(",
"output_percussive",
",",
"y_percussive",
",",
"sr",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
beat_track
|
r'''Dynamic programming beat tracker.
Beats are detected in three stages, following the method of [1]_:
1. Measure onset strength
2. Estimate tempo from onset correlation
3. Pick peaks in onset strength approximately consistent with estimated
tempo
.. [1] Ellis, Daniel PW. "Beat tracking by dynamic programming."
Journal of New Music Research 36.1 (2007): 51-60.
http://labrosa.ee.columbia.edu/projects/beattrack/
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,)] or None
(optional) pre-computed onset strength envelope.
hop_length : int > 0 [scalar]
number of audio samples between successive `onset_envelope` values
start_bpm : float > 0 [scalar]
initial guess for the tempo estimator (in beats per minute)
tightness : float [scalar]
tightness of beat distribution around tempo
trim : bool [scalar]
trim leading/trailing beats with weak onsets
bpm : float [scalar]
(optional) If provided, use `bpm` as the tempo instead of
estimating it from `onsets`.
units : {'frames', 'samples', 'time'}
The units to encode detected beat events in.
By default, 'frames' are used.
Returns
-------
tempo : float [scalar, non-negative]
estimated global tempo (in beats per minute)
beats : np.ndarray [shape=(m,)]
estimated beat event locations in the specified units
(default is frame indices)
.. note::
If no onset strength could be detected, beat_tracker estimates 0 BPM
and returns an empty list.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
or if `units` is not one of 'frames', 'samples', or 'time'
See Also
--------
librosa.onset.onset_strength
Examples
--------
Track beats using time series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> tempo
64.599609375
Print the first 20 beat frames
>>> beats[:20]
array([ 320, 357, 397, 436, 480, 525, 569, 609, 658,
698, 737, 777, 817, 857, 896, 936, 976, 1016,
1055, 1095])
Or print them as timestamps
>>> librosa.frames_to_time(beats[:20], sr=sr)
array([ 7.43 , 8.29 , 9.218, 10.124, 11.146, 12.19 ,
13.212, 14.141, 15.279, 16.208, 17.113, 18.042,
18.971, 19.9 , 20.805, 21.734, 22.663, 23.591,
24.497, 25.426])
Track beats using a pre-computed onset envelope
>>> onset_env = librosa.onset.onset_strength(y, sr=sr,
... aggregate=np.median)
>>> tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env,
... sr=sr)
>>> tempo
64.599609375
>>> beats[:20]
array([ 320, 357, 397, 436, 480, 525, 569, 609, 658,
698, 737, 777, 817, 857, 896, 936, 976, 1016,
1055, 1095])
Plot the beat events against the onset strength envelope
>>> import matplotlib.pyplot as plt
>>> hop_length = 512
>>> plt.figure(figsize=(8, 4))
>>> times = librosa.frames_to_time(np.arange(len(onset_env)),
... sr=sr, hop_length=hop_length)
>>> plt.plot(times, librosa.util.normalize(onset_env),
... label='Onset strength')
>>> plt.vlines(times[beats], 0, 1, alpha=0.5, color='r',
... linestyle='--', label='Beats')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> # Limit the plot to a 15-second window
>>> plt.xlim(15, 30)
>>> plt.gca().xaxis.set_major_formatter(librosa.display.TimeFormatter())
>>> plt.tight_layout()
|
librosa/beat.py
|
def beat_track(y=None, sr=22050, onset_envelope=None, hop_length=512,
start_bpm=120.0, tightness=100, trim=True, bpm=None,
units='frames'):
r'''Dynamic programming beat tracker.
Beats are detected in three stages, following the method of [1]_:
1. Measure onset strength
2. Estimate tempo from onset correlation
3. Pick peaks in onset strength approximately consistent with estimated
tempo
.. [1] Ellis, Daniel PW. "Beat tracking by dynamic programming."
Journal of New Music Research 36.1 (2007): 51-60.
http://labrosa.ee.columbia.edu/projects/beattrack/
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,)] or None
(optional) pre-computed onset strength envelope.
hop_length : int > 0 [scalar]
number of audio samples between successive `onset_envelope` values
start_bpm : float > 0 [scalar]
initial guess for the tempo estimator (in beats per minute)
tightness : float [scalar]
tightness of beat distribution around tempo
trim : bool [scalar]
trim leading/trailing beats with weak onsets
bpm : float [scalar]
(optional) If provided, use `bpm` as the tempo instead of
estimating it from `onsets`.
units : {'frames', 'samples', 'time'}
The units to encode detected beat events in.
By default, 'frames' are used.
Returns
-------
tempo : float [scalar, non-negative]
estimated global tempo (in beats per minute)
beats : np.ndarray [shape=(m,)]
estimated beat event locations in the specified units
(default is frame indices)
.. note::
If no onset strength could be detected, beat_tracker estimates 0 BPM
and returns an empty list.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
or if `units` is not one of 'frames', 'samples', or 'time'
See Also
--------
librosa.onset.onset_strength
Examples
--------
Track beats using time series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> tempo
64.599609375
Print the first 20 beat frames
>>> beats[:20]
array([ 320, 357, 397, 436, 480, 525, 569, 609, 658,
698, 737, 777, 817, 857, 896, 936, 976, 1016,
1055, 1095])
Or print them as timestamps
>>> librosa.frames_to_time(beats[:20], sr=sr)
array([ 7.43 , 8.29 , 9.218, 10.124, 11.146, 12.19 ,
13.212, 14.141, 15.279, 16.208, 17.113, 18.042,
18.971, 19.9 , 20.805, 21.734, 22.663, 23.591,
24.497, 25.426])
Track beats using a pre-computed onset envelope
>>> onset_env = librosa.onset.onset_strength(y, sr=sr,
... aggregate=np.median)
>>> tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env,
... sr=sr)
>>> tempo
64.599609375
>>> beats[:20]
array([ 320, 357, 397, 436, 480, 525, 569, 609, 658,
698, 737, 777, 817, 857, 896, 936, 976, 1016,
1055, 1095])
Plot the beat events against the onset strength envelope
>>> import matplotlib.pyplot as plt
>>> hop_length = 512
>>> plt.figure(figsize=(8, 4))
>>> times = librosa.frames_to_time(np.arange(len(onset_env)),
... sr=sr, hop_length=hop_length)
>>> plt.plot(times, librosa.util.normalize(onset_env),
... label='Onset strength')
>>> plt.vlines(times[beats], 0, 1, alpha=0.5, color='r',
... linestyle='--', label='Beats')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> # Limit the plot to a 15-second window
>>> plt.xlim(15, 30)
>>> plt.gca().xaxis.set_major_formatter(librosa.display.TimeFormatter())
>>> plt.tight_layout()
'''
# First, get the frame->beat strength profile if we don't already have one
if onset_envelope is None:
if y is None:
raise ParameterError('y or onset_envelope must be provided')
onset_envelope = onset.onset_strength(y=y,
sr=sr,
hop_length=hop_length,
aggregate=np.median)
# Do we have any onsets to grab?
if not onset_envelope.any():
return (0, np.array([], dtype=int))
# Estimate BPM if one was not provided
if bpm is None:
bpm = tempo(onset_envelope=onset_envelope,
sr=sr,
hop_length=hop_length,
start_bpm=start_bpm)[0]
# Then, run the tracker
beats = __beat_tracker(onset_envelope,
bpm,
float(sr) / hop_length,
tightness,
trim)
if units == 'frames':
pass
elif units == 'samples':
beats = core.frames_to_samples(beats, hop_length=hop_length)
elif units == 'time':
beats = core.frames_to_time(beats, hop_length=hop_length, sr=sr)
else:
raise ParameterError('Invalid unit type: {}'.format(units))
return (bpm, beats)
|
def beat_track(y=None, sr=22050, onset_envelope=None, hop_length=512,
start_bpm=120.0, tightness=100, trim=True, bpm=None,
units='frames'):
r'''Dynamic programming beat tracker.
Beats are detected in three stages, following the method of [1]_:
1. Measure onset strength
2. Estimate tempo from onset correlation
3. Pick peaks in onset strength approximately consistent with estimated
tempo
.. [1] Ellis, Daniel PW. "Beat tracking by dynamic programming."
Journal of New Music Research 36.1 (2007): 51-60.
http://labrosa.ee.columbia.edu/projects/beattrack/
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(n,)] or None
(optional) pre-computed onset strength envelope.
hop_length : int > 0 [scalar]
number of audio samples between successive `onset_envelope` values
start_bpm : float > 0 [scalar]
initial guess for the tempo estimator (in beats per minute)
tightness : float [scalar]
tightness of beat distribution around tempo
trim : bool [scalar]
trim leading/trailing beats with weak onsets
bpm : float [scalar]
(optional) If provided, use `bpm` as the tempo instead of
estimating it from `onsets`.
units : {'frames', 'samples', 'time'}
The units to encode detected beat events in.
By default, 'frames' are used.
Returns
-------
tempo : float [scalar, non-negative]
estimated global tempo (in beats per minute)
beats : np.ndarray [shape=(m,)]
estimated beat event locations in the specified units
(default is frame indices)
.. note::
If no onset strength could be detected, beat_tracker estimates 0 BPM
and returns an empty list.
Raises
------
ParameterError
if neither `y` nor `onset_envelope` are provided
or if `units` is not one of 'frames', 'samples', or 'time'
See Also
--------
librosa.onset.onset_strength
Examples
--------
Track beats using time series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
>>> tempo
64.599609375
Print the first 20 beat frames
>>> beats[:20]
array([ 320, 357, 397, 436, 480, 525, 569, 609, 658,
698, 737, 777, 817, 857, 896, 936, 976, 1016,
1055, 1095])
Or print them as timestamps
>>> librosa.frames_to_time(beats[:20], sr=sr)
array([ 7.43 , 8.29 , 9.218, 10.124, 11.146, 12.19 ,
13.212, 14.141, 15.279, 16.208, 17.113, 18.042,
18.971, 19.9 , 20.805, 21.734, 22.663, 23.591,
24.497, 25.426])
Track beats using a pre-computed onset envelope
>>> onset_env = librosa.onset.onset_strength(y, sr=sr,
... aggregate=np.median)
>>> tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env,
... sr=sr)
>>> tempo
64.599609375
>>> beats[:20]
array([ 320, 357, 397, 436, 480, 525, 569, 609, 658,
698, 737, 777, 817, 857, 896, 936, 976, 1016,
1055, 1095])
Plot the beat events against the onset strength envelope
>>> import matplotlib.pyplot as plt
>>> hop_length = 512
>>> plt.figure(figsize=(8, 4))
>>> times = librosa.frames_to_time(np.arange(len(onset_env)),
... sr=sr, hop_length=hop_length)
>>> plt.plot(times, librosa.util.normalize(onset_env),
... label='Onset strength')
>>> plt.vlines(times[beats], 0, 1, alpha=0.5, color='r',
... linestyle='--', label='Beats')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> # Limit the plot to a 15-second window
>>> plt.xlim(15, 30)
>>> plt.gca().xaxis.set_major_formatter(librosa.display.TimeFormatter())
>>> plt.tight_layout()
'''
# First, get the frame->beat strength profile if we don't already have one
if onset_envelope is None:
if y is None:
raise ParameterError('y or onset_envelope must be provided')
onset_envelope = onset.onset_strength(y=y,
sr=sr,
hop_length=hop_length,
aggregate=np.median)
# Do we have any onsets to grab?
if not onset_envelope.any():
return (0, np.array([], dtype=int))
# Estimate BPM if one was not provided
if bpm is None:
bpm = tempo(onset_envelope=onset_envelope,
sr=sr,
hop_length=hop_length,
start_bpm=start_bpm)[0]
# Then, run the tracker
beats = __beat_tracker(onset_envelope,
bpm,
float(sr) / hop_length,
tightness,
trim)
if units == 'frames':
pass
elif units == 'samples':
beats = core.frames_to_samples(beats, hop_length=hop_length)
elif units == 'time':
beats = core.frames_to_time(beats, hop_length=hop_length, sr=sr)
else:
raise ParameterError('Invalid unit type: {}'.format(units))
return (bpm, beats)
|
[
"r",
"Dynamic",
"programming",
"beat",
"tracker",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L26-L199
|
[
"def",
"beat_track",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"onset_envelope",
"=",
"None",
",",
"hop_length",
"=",
"512",
",",
"start_bpm",
"=",
"120.0",
",",
"tightness",
"=",
"100",
",",
"trim",
"=",
"True",
",",
"bpm",
"=",
"None",
",",
"units",
"=",
"'frames'",
")",
":",
"# First, get the frame->beat strength profile if we don't already have one",
"if",
"onset_envelope",
"is",
"None",
":",
"if",
"y",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'y or onset_envelope must be provided'",
")",
"onset_envelope",
"=",
"onset",
".",
"onset_strength",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
",",
"aggregate",
"=",
"np",
".",
"median",
")",
"# Do we have any onsets to grab?",
"if",
"not",
"onset_envelope",
".",
"any",
"(",
")",
":",
"return",
"(",
"0",
",",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"int",
")",
")",
"# Estimate BPM if one was not provided",
"if",
"bpm",
"is",
"None",
":",
"bpm",
"=",
"tempo",
"(",
"onset_envelope",
"=",
"onset_envelope",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
",",
"start_bpm",
"=",
"start_bpm",
")",
"[",
"0",
"]",
"# Then, run the tracker",
"beats",
"=",
"__beat_tracker",
"(",
"onset_envelope",
",",
"bpm",
",",
"float",
"(",
"sr",
")",
"/",
"hop_length",
",",
"tightness",
",",
"trim",
")",
"if",
"units",
"==",
"'frames'",
":",
"pass",
"elif",
"units",
"==",
"'samples'",
":",
"beats",
"=",
"core",
".",
"frames_to_samples",
"(",
"beats",
",",
"hop_length",
"=",
"hop_length",
")",
"elif",
"units",
"==",
"'time'",
":",
"beats",
"=",
"core",
".",
"frames_to_time",
"(",
"beats",
",",
"hop_length",
"=",
"hop_length",
",",
"sr",
"=",
"sr",
")",
"else",
":",
"raise",
"ParameterError",
"(",
"'Invalid unit type: {}'",
".",
"format",
"(",
"units",
")",
")",
"return",
"(",
"bpm",
",",
"beats",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
tempo
|
Estimate the tempo (beats per minute)
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of the time series
onset_envelope : np.ndarray [shape=(n,)]
pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length of the time series
start_bpm : float [scalar]
initial guess of the BPM
std_bpm : float > 0 [scalar]
standard deviation of tempo distribution
ac_size : float > 0 [scalar]
length (in seconds) of the auto-correlation window
max_tempo : float > 0 [scalar, optional]
If provided, only estimate tempo below this threshold
aggregate : callable [optional]
Aggregation function for estimating global tempo.
If `None`, then tempo is estimated independently for each frame.
Returns
-------
tempo : np.ndarray [scalar]
estimated tempo (beats per minute)
See Also
--------
librosa.onset.onset_strength
librosa.feature.tempogram
Notes
-----
This function caches at level 30.
Examples
--------
>>> # Estimate a static tempo
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> onset_env = librosa.onset.onset_strength(y, sr=sr)
>>> tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
>>> tempo
array([129.199])
>>> # Or a dynamic tempo
>>> dtempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr,
... aggregate=None)
>>> dtempo
array([ 143.555, 143.555, 143.555, ..., 161.499, 161.499,
172.266])
Plot the estimated tempo against the onset autocorrelation
>>> import matplotlib.pyplot as plt
>>> # Convert to scalar
>>> tempo = np.asscalar(tempo)
>>> # Compute 2-second windowed autocorrelation
>>> hop_length = 512
>>> ac = librosa.autocorrelate(onset_env, 2 * sr // hop_length)
>>> freqs = librosa.tempo_frequencies(len(ac), sr=sr,
... hop_length=hop_length)
>>> # Plot on a BPM axis. We skip the first (0-lag) bin.
>>> plt.figure(figsize=(8,4))
>>> plt.semilogx(freqs[1:], librosa.util.normalize(ac)[1:],
... label='Onset autocorrelation', basex=2)
>>> plt.axvline(tempo, 0, 1, color='r', alpha=0.75, linestyle='--',
... label='Tempo: {:.2f} BPM'.format(tempo))
>>> plt.xlabel('Tempo (BPM)')
>>> plt.grid()
>>> plt.title('Static tempo estimation')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
Plot dynamic tempo estimates over a tempogram
>>> plt.figure()
>>> tg = librosa.feature.tempogram(onset_envelope=onset_env, sr=sr,
... hop_length=hop_length)
>>> librosa.display.specshow(tg, x_axis='time', y_axis='tempo')
>>> plt.plot(librosa.frames_to_time(np.arange(len(dtempo))), dtempo,
... color='w', linewidth=1.5, label='Tempo estimate')
>>> plt.title('Dynamic tempo estimation')
>>> plt.legend(frameon=True, framealpha=0.75)
|
librosa/beat.py
|
def tempo(y=None, sr=22050, onset_envelope=None, hop_length=512, start_bpm=120,
std_bpm=1.0, ac_size=8.0, max_tempo=320.0, aggregate=np.mean):
"""Estimate the tempo (beats per minute)
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of the time series
onset_envelope : np.ndarray [shape=(n,)]
pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length of the time series
start_bpm : float [scalar]
initial guess of the BPM
std_bpm : float > 0 [scalar]
standard deviation of tempo distribution
ac_size : float > 0 [scalar]
length (in seconds) of the auto-correlation window
max_tempo : float > 0 [scalar, optional]
If provided, only estimate tempo below this threshold
aggregate : callable [optional]
Aggregation function for estimating global tempo.
If `None`, then tempo is estimated independently for each frame.
Returns
-------
tempo : np.ndarray [scalar]
estimated tempo (beats per minute)
See Also
--------
librosa.onset.onset_strength
librosa.feature.tempogram
Notes
-----
This function caches at level 30.
Examples
--------
>>> # Estimate a static tempo
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> onset_env = librosa.onset.onset_strength(y, sr=sr)
>>> tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
>>> tempo
array([129.199])
>>> # Or a dynamic tempo
>>> dtempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr,
... aggregate=None)
>>> dtempo
array([ 143.555, 143.555, 143.555, ..., 161.499, 161.499,
172.266])
Plot the estimated tempo against the onset autocorrelation
>>> import matplotlib.pyplot as plt
>>> # Convert to scalar
>>> tempo = np.asscalar(tempo)
>>> # Compute 2-second windowed autocorrelation
>>> hop_length = 512
>>> ac = librosa.autocorrelate(onset_env, 2 * sr // hop_length)
>>> freqs = librosa.tempo_frequencies(len(ac), sr=sr,
... hop_length=hop_length)
>>> # Plot on a BPM axis. We skip the first (0-lag) bin.
>>> plt.figure(figsize=(8,4))
>>> plt.semilogx(freqs[1:], librosa.util.normalize(ac)[1:],
... label='Onset autocorrelation', basex=2)
>>> plt.axvline(tempo, 0, 1, color='r', alpha=0.75, linestyle='--',
... label='Tempo: {:.2f} BPM'.format(tempo))
>>> plt.xlabel('Tempo (BPM)')
>>> plt.grid()
>>> plt.title('Static tempo estimation')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
Plot dynamic tempo estimates over a tempogram
>>> plt.figure()
>>> tg = librosa.feature.tempogram(onset_envelope=onset_env, sr=sr,
... hop_length=hop_length)
>>> librosa.display.specshow(tg, x_axis='time', y_axis='tempo')
>>> plt.plot(librosa.frames_to_time(np.arange(len(dtempo))), dtempo,
... color='w', linewidth=1.5, label='Tempo estimate')
>>> plt.title('Dynamic tempo estimation')
>>> plt.legend(frameon=True, framealpha=0.75)
"""
if start_bpm <= 0:
raise ParameterError('start_bpm must be strictly positive')
win_length = np.asscalar(core.time_to_frames(ac_size, sr=sr,
hop_length=hop_length))
tg = tempogram(y=y, sr=sr,
onset_envelope=onset_envelope,
hop_length=hop_length,
win_length=win_length)
# Eventually, we want this to work for time-varying tempo
if aggregate is not None:
tg = aggregate(tg, axis=1, keepdims=True)
# Get the BPM values for each bin, skipping the 0-lag bin
bpms = core.tempo_frequencies(tg.shape[0], hop_length=hop_length, sr=sr)
# Weight the autocorrelation by a log-normal distribution
prior = np.exp(-0.5 * ((np.log2(bpms) - np.log2(start_bpm)) / std_bpm)**2)
# Kill everything above the max tempo
if max_tempo is not None:
max_idx = np.argmax(bpms < max_tempo)
prior[:max_idx] = 0
# Really, instead of multiplying by the prior, we should set up a
# probabilistic model for tempo and add log-probabilities.
# This would give us a chance to recover from null signals and
# rely on the prior.
# it would also make time aggregation much more natural
# Get the maximum, weighted by the prior
best_period = np.argmax(tg * prior[:, np.newaxis], axis=0)
tempi = bpms[best_period]
# Wherever the best tempo is index 0, return start_bpm
tempi[best_period == 0] = start_bpm
return tempi
|
def tempo(y=None, sr=22050, onset_envelope=None, hop_length=512, start_bpm=120,
std_bpm=1.0, ac_size=8.0, max_tempo=320.0, aggregate=np.mean):
"""Estimate the tempo (beats per minute)
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
sampling rate of the time series
onset_envelope : np.ndarray [shape=(n,)]
pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length of the time series
start_bpm : float [scalar]
initial guess of the BPM
std_bpm : float > 0 [scalar]
standard deviation of tempo distribution
ac_size : float > 0 [scalar]
length (in seconds) of the auto-correlation window
max_tempo : float > 0 [scalar, optional]
If provided, only estimate tempo below this threshold
aggregate : callable [optional]
Aggregation function for estimating global tempo.
If `None`, then tempo is estimated independently for each frame.
Returns
-------
tempo : np.ndarray [scalar]
estimated tempo (beats per minute)
See Also
--------
librosa.onset.onset_strength
librosa.feature.tempogram
Notes
-----
This function caches at level 30.
Examples
--------
>>> # Estimate a static tempo
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> onset_env = librosa.onset.onset_strength(y, sr=sr)
>>> tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr)
>>> tempo
array([129.199])
>>> # Or a dynamic tempo
>>> dtempo = librosa.beat.tempo(onset_envelope=onset_env, sr=sr,
... aggregate=None)
>>> dtempo
array([ 143.555, 143.555, 143.555, ..., 161.499, 161.499,
172.266])
Plot the estimated tempo against the onset autocorrelation
>>> import matplotlib.pyplot as plt
>>> # Convert to scalar
>>> tempo = np.asscalar(tempo)
>>> # Compute 2-second windowed autocorrelation
>>> hop_length = 512
>>> ac = librosa.autocorrelate(onset_env, 2 * sr // hop_length)
>>> freqs = librosa.tempo_frequencies(len(ac), sr=sr,
... hop_length=hop_length)
>>> # Plot on a BPM axis. We skip the first (0-lag) bin.
>>> plt.figure(figsize=(8,4))
>>> plt.semilogx(freqs[1:], librosa.util.normalize(ac)[1:],
... label='Onset autocorrelation', basex=2)
>>> plt.axvline(tempo, 0, 1, color='r', alpha=0.75, linestyle='--',
... label='Tempo: {:.2f} BPM'.format(tempo))
>>> plt.xlabel('Tempo (BPM)')
>>> plt.grid()
>>> plt.title('Static tempo estimation')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
Plot dynamic tempo estimates over a tempogram
>>> plt.figure()
>>> tg = librosa.feature.tempogram(onset_envelope=onset_env, sr=sr,
... hop_length=hop_length)
>>> librosa.display.specshow(tg, x_axis='time', y_axis='tempo')
>>> plt.plot(librosa.frames_to_time(np.arange(len(dtempo))), dtempo,
... color='w', linewidth=1.5, label='Tempo estimate')
>>> plt.title('Dynamic tempo estimation')
>>> plt.legend(frameon=True, framealpha=0.75)
"""
if start_bpm <= 0:
raise ParameterError('start_bpm must be strictly positive')
win_length = np.asscalar(core.time_to_frames(ac_size, sr=sr,
hop_length=hop_length))
tg = tempogram(y=y, sr=sr,
onset_envelope=onset_envelope,
hop_length=hop_length,
win_length=win_length)
# Eventually, we want this to work for time-varying tempo
if aggregate is not None:
tg = aggregate(tg, axis=1, keepdims=True)
# Get the BPM values for each bin, skipping the 0-lag bin
bpms = core.tempo_frequencies(tg.shape[0], hop_length=hop_length, sr=sr)
# Weight the autocorrelation by a log-normal distribution
prior = np.exp(-0.5 * ((np.log2(bpms) - np.log2(start_bpm)) / std_bpm)**2)
# Kill everything above the max tempo
if max_tempo is not None:
max_idx = np.argmax(bpms < max_tempo)
prior[:max_idx] = 0
# Really, instead of multiplying by the prior, we should set up a
# probabilistic model for tempo and add log-probabilities.
# This would give us a chance to recover from null signals and
# rely on the prior.
# it would also make time aggregation much more natural
# Get the maximum, weighted by the prior
best_period = np.argmax(tg * prior[:, np.newaxis], axis=0)
tempi = bpms[best_period]
# Wherever the best tempo is index 0, return start_bpm
tempi[best_period == 0] = start_bpm
return tempi
|
[
"Estimate",
"the",
"tempo",
"(",
"beats",
"per",
"minute",
")"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L203-L340
|
[
"def",
"tempo",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"onset_envelope",
"=",
"None",
",",
"hop_length",
"=",
"512",
",",
"start_bpm",
"=",
"120",
",",
"std_bpm",
"=",
"1.0",
",",
"ac_size",
"=",
"8.0",
",",
"max_tempo",
"=",
"320.0",
",",
"aggregate",
"=",
"np",
".",
"mean",
")",
":",
"if",
"start_bpm",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'start_bpm must be strictly positive'",
")",
"win_length",
"=",
"np",
".",
"asscalar",
"(",
"core",
".",
"time_to_frames",
"(",
"ac_size",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
")",
")",
"tg",
"=",
"tempogram",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"onset_envelope",
"=",
"onset_envelope",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
")",
"# Eventually, we want this to work for time-varying tempo",
"if",
"aggregate",
"is",
"not",
"None",
":",
"tg",
"=",
"aggregate",
"(",
"tg",
",",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"# Get the BPM values for each bin, skipping the 0-lag bin",
"bpms",
"=",
"core",
".",
"tempo_frequencies",
"(",
"tg",
".",
"shape",
"[",
"0",
"]",
",",
"hop_length",
"=",
"hop_length",
",",
"sr",
"=",
"sr",
")",
"# Weight the autocorrelation by a log-normal distribution",
"prior",
"=",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"(",
"np",
".",
"log2",
"(",
"bpms",
")",
"-",
"np",
".",
"log2",
"(",
"start_bpm",
")",
")",
"/",
"std_bpm",
")",
"**",
"2",
")",
"# Kill everything above the max tempo",
"if",
"max_tempo",
"is",
"not",
"None",
":",
"max_idx",
"=",
"np",
".",
"argmax",
"(",
"bpms",
"<",
"max_tempo",
")",
"prior",
"[",
":",
"max_idx",
"]",
"=",
"0",
"# Really, instead of multiplying by the prior, we should set up a",
"# probabilistic model for tempo and add log-probabilities.",
"# This would give us a chance to recover from null signals and",
"# rely on the prior.",
"# it would also make time aggregation much more natural",
"# Get the maximum, weighted by the prior",
"best_period",
"=",
"np",
".",
"argmax",
"(",
"tg",
"*",
"prior",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"axis",
"=",
"0",
")",
"tempi",
"=",
"bpms",
"[",
"best_period",
"]",
"# Wherever the best tempo is index 0, return start_bpm",
"tempi",
"[",
"best_period",
"==",
"0",
"]",
"=",
"start_bpm",
"return",
"tempi"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__beat_tracker
|
Internal function that tracks beats in an onset strength envelope.
Parameters
----------
onset_envelope : np.ndarray [shape=(n,)]
onset strength envelope
bpm : float [scalar]
tempo estimate
fft_res : float [scalar]
resolution of the fft (sr / hop_length)
tightness: float [scalar]
how closely do we adhere to bpm?
trim : bool [scalar]
trim leading/trailing beats with weak onsets?
Returns
-------
beats : np.ndarray [shape=(n,)]
frame numbers of beat events
|
librosa/beat.py
|
def __beat_tracker(onset_envelope, bpm, fft_res, tightness, trim):
"""Internal function that tracks beats in an onset strength envelope.
Parameters
----------
onset_envelope : np.ndarray [shape=(n,)]
onset strength envelope
bpm : float [scalar]
tempo estimate
fft_res : float [scalar]
resolution of the fft (sr / hop_length)
tightness: float [scalar]
how closely do we adhere to bpm?
trim : bool [scalar]
trim leading/trailing beats with weak onsets?
Returns
-------
beats : np.ndarray [shape=(n,)]
frame numbers of beat events
"""
if bpm <= 0:
raise ParameterError('bpm must be strictly positive')
# convert bpm to a sample period for searching
period = round(60.0 * fft_res / bpm)
# localscore is a smoothed version of AGC'd onset envelope
localscore = __beat_local_score(onset_envelope, period)
# run the DP
backlink, cumscore = __beat_track_dp(localscore, period, tightness)
# get the position of the last beat
beats = [__last_beat(cumscore)]
# Reconstruct the beat path from backlinks
while backlink[beats[-1]] >= 0:
beats.append(backlink[beats[-1]])
# Put the beats in ascending order
# Convert into an array of frame numbers
beats = np.array(beats[::-1], dtype=int)
# Discard spurious trailing beats
beats = __trim_beats(localscore, beats, trim)
return beats
|
def __beat_tracker(onset_envelope, bpm, fft_res, tightness, trim):
"""Internal function that tracks beats in an onset strength envelope.
Parameters
----------
onset_envelope : np.ndarray [shape=(n,)]
onset strength envelope
bpm : float [scalar]
tempo estimate
fft_res : float [scalar]
resolution of the fft (sr / hop_length)
tightness: float [scalar]
how closely do we adhere to bpm?
trim : bool [scalar]
trim leading/trailing beats with weak onsets?
Returns
-------
beats : np.ndarray [shape=(n,)]
frame numbers of beat events
"""
if bpm <= 0:
raise ParameterError('bpm must be strictly positive')
# convert bpm to a sample period for searching
period = round(60.0 * fft_res / bpm)
# localscore is a smoothed version of AGC'd onset envelope
localscore = __beat_local_score(onset_envelope, period)
# run the DP
backlink, cumscore = __beat_track_dp(localscore, period, tightness)
# get the position of the last beat
beats = [__last_beat(cumscore)]
# Reconstruct the beat path from backlinks
while backlink[beats[-1]] >= 0:
beats.append(backlink[beats[-1]])
# Put the beats in ascending order
# Convert into an array of frame numbers
beats = np.array(beats[::-1], dtype=int)
# Discard spurious trailing beats
beats = __trim_beats(localscore, beats, trim)
return beats
|
[
"Internal",
"function",
"that",
"tracks",
"beats",
"in",
"an",
"onset",
"strength",
"envelope",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L343-L395
|
[
"def",
"__beat_tracker",
"(",
"onset_envelope",
",",
"bpm",
",",
"fft_res",
",",
"tightness",
",",
"trim",
")",
":",
"if",
"bpm",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'bpm must be strictly positive'",
")",
"# convert bpm to a sample period for searching",
"period",
"=",
"round",
"(",
"60.0",
"*",
"fft_res",
"/",
"bpm",
")",
"# localscore is a smoothed version of AGC'd onset envelope",
"localscore",
"=",
"__beat_local_score",
"(",
"onset_envelope",
",",
"period",
")",
"# run the DP",
"backlink",
",",
"cumscore",
"=",
"__beat_track_dp",
"(",
"localscore",
",",
"period",
",",
"tightness",
")",
"# get the position of the last beat",
"beats",
"=",
"[",
"__last_beat",
"(",
"cumscore",
")",
"]",
"# Reconstruct the beat path from backlinks",
"while",
"backlink",
"[",
"beats",
"[",
"-",
"1",
"]",
"]",
">=",
"0",
":",
"beats",
".",
"append",
"(",
"backlink",
"[",
"beats",
"[",
"-",
"1",
"]",
"]",
")",
"# Put the beats in ascending order",
"# Convert into an array of frame numbers",
"beats",
"=",
"np",
".",
"array",
"(",
"beats",
"[",
":",
":",
"-",
"1",
"]",
",",
"dtype",
"=",
"int",
")",
"# Discard spurious trailing beats",
"beats",
"=",
"__trim_beats",
"(",
"localscore",
",",
"beats",
",",
"trim",
")",
"return",
"beats"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__normalize_onsets
|
Maps onset strength function into the range [0, 1]
|
librosa/beat.py
|
def __normalize_onsets(onsets):
'''Maps onset strength function into the range [0, 1]'''
norm = onsets.std(ddof=1)
if norm > 0:
onsets = onsets / norm
return onsets
|
def __normalize_onsets(onsets):
'''Maps onset strength function into the range [0, 1]'''
norm = onsets.std(ddof=1)
if norm > 0:
onsets = onsets / norm
return onsets
|
[
"Maps",
"onset",
"strength",
"function",
"into",
"the",
"range",
"[",
"0",
"1",
"]"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L399-L405
|
[
"def",
"__normalize_onsets",
"(",
"onsets",
")",
":",
"norm",
"=",
"onsets",
".",
"std",
"(",
"ddof",
"=",
"1",
")",
"if",
"norm",
">",
"0",
":",
"onsets",
"=",
"onsets",
"/",
"norm",
"return",
"onsets"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__beat_local_score
|
Construct the local score for an onset envlope and given period
|
librosa/beat.py
|
def __beat_local_score(onset_envelope, period):
'''Construct the local score for an onset envlope and given period'''
window = np.exp(-0.5 * (np.arange(-period, period+1)*32.0/period)**2)
return scipy.signal.convolve(__normalize_onsets(onset_envelope),
window,
'same')
|
def __beat_local_score(onset_envelope, period):
'''Construct the local score for an onset envlope and given period'''
window = np.exp(-0.5 * (np.arange(-period, period+1)*32.0/period)**2)
return scipy.signal.convolve(__normalize_onsets(onset_envelope),
window,
'same')
|
[
"Construct",
"the",
"local",
"score",
"for",
"an",
"onset",
"envlope",
"and",
"given",
"period"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L408-L414
|
[
"def",
"__beat_local_score",
"(",
"onset_envelope",
",",
"period",
")",
":",
"window",
"=",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"np",
".",
"arange",
"(",
"-",
"period",
",",
"period",
"+",
"1",
")",
"*",
"32.0",
"/",
"period",
")",
"**",
"2",
")",
"return",
"scipy",
".",
"signal",
".",
"convolve",
"(",
"__normalize_onsets",
"(",
"onset_envelope",
")",
",",
"window",
",",
"'same'",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__beat_track_dp
|
Core dynamic program for beat tracking
|
librosa/beat.py
|
def __beat_track_dp(localscore, period, tightness):
"""Core dynamic program for beat tracking"""
backlink = np.zeros_like(localscore, dtype=int)
cumscore = np.zeros_like(localscore)
# Search range for previous beat
window = np.arange(-2 * period, -np.round(period / 2) + 1, dtype=int)
# Make a score window, which begins biased toward start_bpm and skewed
if tightness <= 0:
raise ParameterError('tightness must be strictly positive')
txwt = -tightness * (np.log(-window / period) ** 2)
# Are we on the first beat?
first_beat = True
for i, score_i in enumerate(localscore):
# Are we reaching back before time 0?
z_pad = np.maximum(0, min(- window[0], len(window)))
# Search over all possible predecessors
candidates = txwt.copy()
candidates[z_pad:] = candidates[z_pad:] + cumscore[window[z_pad:]]
# Find the best preceding beat
beat_location = np.argmax(candidates)
# Add the local score
cumscore[i] = score_i + candidates[beat_location]
# Special case the first onset. Stop if the localscore is small
if first_beat and score_i < 0.01 * localscore.max():
backlink[i] = -1
else:
backlink[i] = window[beat_location]
first_beat = False
# Update the time range
window = window + 1
return backlink, cumscore
|
def __beat_track_dp(localscore, period, tightness):
"""Core dynamic program for beat tracking"""
backlink = np.zeros_like(localscore, dtype=int)
cumscore = np.zeros_like(localscore)
# Search range for previous beat
window = np.arange(-2 * period, -np.round(period / 2) + 1, dtype=int)
# Make a score window, which begins biased toward start_bpm and skewed
if tightness <= 0:
raise ParameterError('tightness must be strictly positive')
txwt = -tightness * (np.log(-window / period) ** 2)
# Are we on the first beat?
first_beat = True
for i, score_i in enumerate(localscore):
# Are we reaching back before time 0?
z_pad = np.maximum(0, min(- window[0], len(window)))
# Search over all possible predecessors
candidates = txwt.copy()
candidates[z_pad:] = candidates[z_pad:] + cumscore[window[z_pad:]]
# Find the best preceding beat
beat_location = np.argmax(candidates)
# Add the local score
cumscore[i] = score_i + candidates[beat_location]
# Special case the first onset. Stop if the localscore is small
if first_beat and score_i < 0.01 * localscore.max():
backlink[i] = -1
else:
backlink[i] = window[beat_location]
first_beat = False
# Update the time range
window = window + 1
return backlink, cumscore
|
[
"Core",
"dynamic",
"program",
"for",
"beat",
"tracking"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L417-L459
|
[
"def",
"__beat_track_dp",
"(",
"localscore",
",",
"period",
",",
"tightness",
")",
":",
"backlink",
"=",
"np",
".",
"zeros_like",
"(",
"localscore",
",",
"dtype",
"=",
"int",
")",
"cumscore",
"=",
"np",
".",
"zeros_like",
"(",
"localscore",
")",
"# Search range for previous beat",
"window",
"=",
"np",
".",
"arange",
"(",
"-",
"2",
"*",
"period",
",",
"-",
"np",
".",
"round",
"(",
"period",
"/",
"2",
")",
"+",
"1",
",",
"dtype",
"=",
"int",
")",
"# Make a score window, which begins biased toward start_bpm and skewed",
"if",
"tightness",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'tightness must be strictly positive'",
")",
"txwt",
"=",
"-",
"tightness",
"*",
"(",
"np",
".",
"log",
"(",
"-",
"window",
"/",
"period",
")",
"**",
"2",
")",
"# Are we on the first beat?",
"first_beat",
"=",
"True",
"for",
"i",
",",
"score_i",
"in",
"enumerate",
"(",
"localscore",
")",
":",
"# Are we reaching back before time 0?",
"z_pad",
"=",
"np",
".",
"maximum",
"(",
"0",
",",
"min",
"(",
"-",
"window",
"[",
"0",
"]",
",",
"len",
"(",
"window",
")",
")",
")",
"# Search over all possible predecessors",
"candidates",
"=",
"txwt",
".",
"copy",
"(",
")",
"candidates",
"[",
"z_pad",
":",
"]",
"=",
"candidates",
"[",
"z_pad",
":",
"]",
"+",
"cumscore",
"[",
"window",
"[",
"z_pad",
":",
"]",
"]",
"# Find the best preceding beat",
"beat_location",
"=",
"np",
".",
"argmax",
"(",
"candidates",
")",
"# Add the local score",
"cumscore",
"[",
"i",
"]",
"=",
"score_i",
"+",
"candidates",
"[",
"beat_location",
"]",
"# Special case the first onset. Stop if the localscore is small",
"if",
"first_beat",
"and",
"score_i",
"<",
"0.01",
"*",
"localscore",
".",
"max",
"(",
")",
":",
"backlink",
"[",
"i",
"]",
"=",
"-",
"1",
"else",
":",
"backlink",
"[",
"i",
"]",
"=",
"window",
"[",
"beat_location",
"]",
"first_beat",
"=",
"False",
"# Update the time range",
"window",
"=",
"window",
"+",
"1",
"return",
"backlink",
",",
"cumscore"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__last_beat
|
Get the last beat from the cumulative score array
|
librosa/beat.py
|
def __last_beat(cumscore):
"""Get the last beat from the cumulative score array"""
maxes = util.localmax(cumscore)
med_score = np.median(cumscore[np.argwhere(maxes)])
# The last of these is the last beat (since score generally increases)
return np.argwhere((cumscore * maxes * 2 > med_score)).max()
|
def __last_beat(cumscore):
"""Get the last beat from the cumulative score array"""
maxes = util.localmax(cumscore)
med_score = np.median(cumscore[np.argwhere(maxes)])
# The last of these is the last beat (since score generally increases)
return np.argwhere((cumscore * maxes * 2 > med_score)).max()
|
[
"Get",
"the",
"last",
"beat",
"from",
"the",
"cumulative",
"score",
"array"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L462-L469
|
[
"def",
"__last_beat",
"(",
"cumscore",
")",
":",
"maxes",
"=",
"util",
".",
"localmax",
"(",
"cumscore",
")",
"med_score",
"=",
"np",
".",
"median",
"(",
"cumscore",
"[",
"np",
".",
"argwhere",
"(",
"maxes",
")",
"]",
")",
"# The last of these is the last beat (since score generally increases)",
"return",
"np",
".",
"argwhere",
"(",
"(",
"cumscore",
"*",
"maxes",
"*",
"2",
">",
"med_score",
")",
")",
".",
"max",
"(",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__trim_beats
|
Final post-processing: throw out spurious leading/trailing beats
|
librosa/beat.py
|
def __trim_beats(localscore, beats, trim):
"""Final post-processing: throw out spurious leading/trailing beats"""
smooth_boe = scipy.signal.convolve(localscore[beats],
scipy.signal.hann(5),
'same')
if trim:
threshold = 0.5 * ((smooth_boe**2).mean()**0.5)
else:
threshold = 0.0
valid = np.argwhere(smooth_boe > threshold)
return beats[valid.min():valid.max()]
|
def __trim_beats(localscore, beats, trim):
"""Final post-processing: throw out spurious leading/trailing beats"""
smooth_boe = scipy.signal.convolve(localscore[beats],
scipy.signal.hann(5),
'same')
if trim:
threshold = 0.5 * ((smooth_boe**2).mean()**0.5)
else:
threshold = 0.0
valid = np.argwhere(smooth_boe > threshold)
return beats[valid.min():valid.max()]
|
[
"Final",
"post",
"-",
"processing",
":",
"throw",
"out",
"spurious",
"leading",
"/",
"trailing",
"beats"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/beat.py#L472-L486
|
[
"def",
"__trim_beats",
"(",
"localscore",
",",
"beats",
",",
"trim",
")",
":",
"smooth_boe",
"=",
"scipy",
".",
"signal",
".",
"convolve",
"(",
"localscore",
"[",
"beats",
"]",
",",
"scipy",
".",
"signal",
".",
"hann",
"(",
"5",
")",
",",
"'same'",
")",
"if",
"trim",
":",
"threshold",
"=",
"0.5",
"*",
"(",
"(",
"smooth_boe",
"**",
"2",
")",
".",
"mean",
"(",
")",
"**",
"0.5",
")",
"else",
":",
"threshold",
"=",
"0.0",
"valid",
"=",
"np",
".",
"argwhere",
"(",
"smooth_boe",
">",
"threshold",
")",
"return",
"beats",
"[",
"valid",
".",
"min",
"(",
")",
":",
"valid",
".",
"max",
"(",
")",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
recurrence_matrix
|
Compute a recurrence matrix from a data matrix.
`rec[i, j]` is non-zero if (`data[:, i]`, `data[:, j]`) are
k-nearest-neighbors and `|i - j| >= width`
The specific value of `rec[i, j]` can have several forms, governed
by the `mode` parameter below:
- Connectivity: `rec[i, j] = 1 or 0` indicates that frames `i` and `j` are repetitions
- Affinity: `rec[i, j] > 0` measures how similar frames `i` and `j` are. This is also
known as a (sparse) self-similarity matrix.
- Distance: `rec[, j] > 0` measures how distant frames `i` and `j` are. This is also
known as a (sparse) self-distance matrix.
The general term *recurrence matrix* can refer to any of the three forms above.
Parameters
----------
data : np.ndarray
A feature matrix
k : int > 0 [scalar] or None
the number of nearest-neighbors for each sample
Default: `k = 2 * ceil(sqrt(t - 2 * width + 1))`,
or `k = 2` if `t <= 2 * width + 1`
width : int >= 1 [scalar]
only link neighbors `(data[:, i], data[:, j])`
if `|i - j| >= width`
`width` cannot exceed the length of the data.
metric : str
Distance metric to use for nearest-neighbor calculation.
See `sklearn.neighbors.NearestNeighbors` for details.
sym : bool [scalar]
set `sym=True` to only link mutual nearest-neighbors
sparse : bool [scalar]
if False, returns a dense type (ndarray)
if True, returns a sparse type (scipy.sparse.csr_matrix)
mode : str, {'connectivity', 'distance', 'affinity'}
If 'connectivity', a binary connectivity matrix is produced.
If 'distance', then a non-zero entry contains the distance between
points.
If 'affinity', then non-zero entries are mapped to
`exp( - distance(i, j) / bandwidth)` where `bandwidth` is
as specified below.
bandwidth : None or float > 0
If using ``mode='affinity'``, this can be used to set the
bandwidth on the affinity kernel.
If no value is provided, it is set automatically to the median
distance between furthest nearest neighbors.
self : bool
If `True`, then the main diagonal is populated with self-links:
0 if ``mode='distance'``, and 1 otherwise.
If `False`, the main diagonal is left empty.
axis : int
The axis along which to compute recurrence.
By default, the last index (-1) is taken.
Returns
-------
rec : np.ndarray or scipy.sparse.csr_matrix, [shape=(t, t)]
Recurrence matrix
See Also
--------
sklearn.neighbors.NearestNeighbors
scipy.spatial.distance.cdist
librosa.feature.stack_memory
recurrence_to_lag
Notes
-----
This function caches at level 30.
Examples
--------
Find nearest neighbors in MFCC space
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
>>> R = librosa.segment.recurrence_matrix(mfcc)
Or fix the number of nearest neighbors to 5
>>> R = librosa.segment.recurrence_matrix(mfcc, k=5)
Suppress neighbors within +- 7 samples
>>> R = librosa.segment.recurrence_matrix(mfcc, width=7)
Use cosine similarity instead of Euclidean distance
>>> R = librosa.segment.recurrence_matrix(mfcc, metric='cosine')
Require mutual nearest neighbors
>>> R = librosa.segment.recurrence_matrix(mfcc, sym=True)
Use an affinity matrix instead of binary connectivity
>>> R_aff = librosa.segment.recurrence_matrix(mfcc, mode='affinity')
Plot the feature and recurrence matrices
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 4))
>>> plt.subplot(1, 2, 1)
>>> librosa.display.specshow(R, x_axis='time', y_axis='time')
>>> plt.title('Binary recurrence (symmetric)')
>>> plt.subplot(1, 2, 2)
>>> librosa.display.specshow(R_aff, x_axis='time', y_axis='time',
... cmap='magma_r')
>>> plt.title('Affinity recurrence')
>>> plt.tight_layout()
|
librosa/segment.py
|
def recurrence_matrix(data, k=None, width=1, metric='euclidean',
sym=False, sparse=False, mode='connectivity',
bandwidth=None, self=False, axis=-1):
'''Compute a recurrence matrix from a data matrix.
`rec[i, j]` is non-zero if (`data[:, i]`, `data[:, j]`) are
k-nearest-neighbors and `|i - j| >= width`
The specific value of `rec[i, j]` can have several forms, governed
by the `mode` parameter below:
- Connectivity: `rec[i, j] = 1 or 0` indicates that frames `i` and `j` are repetitions
- Affinity: `rec[i, j] > 0` measures how similar frames `i` and `j` are. This is also
known as a (sparse) self-similarity matrix.
- Distance: `rec[, j] > 0` measures how distant frames `i` and `j` are. This is also
known as a (sparse) self-distance matrix.
The general term *recurrence matrix* can refer to any of the three forms above.
Parameters
----------
data : np.ndarray
A feature matrix
k : int > 0 [scalar] or None
the number of nearest-neighbors for each sample
Default: `k = 2 * ceil(sqrt(t - 2 * width + 1))`,
or `k = 2` if `t <= 2 * width + 1`
width : int >= 1 [scalar]
only link neighbors `(data[:, i], data[:, j])`
if `|i - j| >= width`
`width` cannot exceed the length of the data.
metric : str
Distance metric to use for nearest-neighbor calculation.
See `sklearn.neighbors.NearestNeighbors` for details.
sym : bool [scalar]
set `sym=True` to only link mutual nearest-neighbors
sparse : bool [scalar]
if False, returns a dense type (ndarray)
if True, returns a sparse type (scipy.sparse.csr_matrix)
mode : str, {'connectivity', 'distance', 'affinity'}
If 'connectivity', a binary connectivity matrix is produced.
If 'distance', then a non-zero entry contains the distance between
points.
If 'affinity', then non-zero entries are mapped to
`exp( - distance(i, j) / bandwidth)` where `bandwidth` is
as specified below.
bandwidth : None or float > 0
If using ``mode='affinity'``, this can be used to set the
bandwidth on the affinity kernel.
If no value is provided, it is set automatically to the median
distance between furthest nearest neighbors.
self : bool
If `True`, then the main diagonal is populated with self-links:
0 if ``mode='distance'``, and 1 otherwise.
If `False`, the main diagonal is left empty.
axis : int
The axis along which to compute recurrence.
By default, the last index (-1) is taken.
Returns
-------
rec : np.ndarray or scipy.sparse.csr_matrix, [shape=(t, t)]
Recurrence matrix
See Also
--------
sklearn.neighbors.NearestNeighbors
scipy.spatial.distance.cdist
librosa.feature.stack_memory
recurrence_to_lag
Notes
-----
This function caches at level 30.
Examples
--------
Find nearest neighbors in MFCC space
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
>>> R = librosa.segment.recurrence_matrix(mfcc)
Or fix the number of nearest neighbors to 5
>>> R = librosa.segment.recurrence_matrix(mfcc, k=5)
Suppress neighbors within +- 7 samples
>>> R = librosa.segment.recurrence_matrix(mfcc, width=7)
Use cosine similarity instead of Euclidean distance
>>> R = librosa.segment.recurrence_matrix(mfcc, metric='cosine')
Require mutual nearest neighbors
>>> R = librosa.segment.recurrence_matrix(mfcc, sym=True)
Use an affinity matrix instead of binary connectivity
>>> R_aff = librosa.segment.recurrence_matrix(mfcc, mode='affinity')
Plot the feature and recurrence matrices
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 4))
>>> plt.subplot(1, 2, 1)
>>> librosa.display.specshow(R, x_axis='time', y_axis='time')
>>> plt.title('Binary recurrence (symmetric)')
>>> plt.subplot(1, 2, 2)
>>> librosa.display.specshow(R_aff, x_axis='time', y_axis='time',
... cmap='magma_r')
>>> plt.title('Affinity recurrence')
>>> plt.tight_layout()
'''
data = np.atleast_2d(data)
# Swap observations to the first dimension and flatten the rest
data = np.swapaxes(data, axis, 0)
t = data.shape[0]
data = data.reshape((t, -1))
if width < 1 or width > t:
raise ParameterError('width={} must be at least 1 and at most data.shape[{}]={}'.format(width, axis, t))
if mode not in ['connectivity', 'distance', 'affinity']:
raise ParameterError(("Invalid mode='{}'. Must be one of "
"['connectivity', 'distance', "
"'affinity']").format(mode))
if k is None:
if t > 2 * width + 1:
k = 2 * np.ceil(np.sqrt(t - 2 * width + 1))
else:
k = 2
if bandwidth is not None:
if bandwidth <= 0:
raise ParameterError('Invalid bandwidth={}. '
'Must be strictly positive.'.format(bandwidth))
k = int(k)
# Build the neighbor search object
try:
knn = sklearn.neighbors.NearestNeighbors(n_neighbors=min(t-1, k + 2 * width),
metric=metric,
algorithm='auto')
except ValueError:
knn = sklearn.neighbors.NearestNeighbors(n_neighbors=min(t-1, k + 2 * width),
metric=metric,
algorithm='brute')
knn.fit(data)
# Get the knn graph
if mode == 'affinity':
kng_mode = 'distance'
else:
kng_mode = mode
rec = knn.kneighbors_graph(mode=kng_mode).tolil()
# Remove connections within width
for diag in range(-width + 1, width):
rec.setdiag(0, diag)
# Retain only the top-k links per point
for i in range(t):
# Get the links from point i
links = rec[i].nonzero()[1]
# Order them ascending
idx = links[np.argsort(rec[i, links].toarray())][0]
# Everything past the kth closest gets squashed
rec[i, idx[k:]] = 0
if self:
if mode == 'connectivity':
rec.setdiag(1)
elif mode == 'affinity':
# we need to keep the self-loop in here, but not mess up the
# bandwidth estimation
#
# using negative distances here preserves the structure without changing
# the statistics of the data
rec.setdiag(-1)
# symmetrize
if sym:
# Note: this operation produces a CSR (compressed sparse row) matrix!
# This is why we have to do it after filling the diagonal in self-mode
rec = rec.minimum(rec.T)
rec = rec.tocsr()
rec.eliminate_zeros()
if mode == 'connectivity':
rec = rec.astype(np.bool)
elif mode == 'affinity':
if bandwidth is None:
bandwidth = np.nanmedian(rec.max(axis=1).data)
# Set all the negatives back to 0
# Negatives are temporarily inserted above to preserve the sparsity structure
# of the matrix without corrupting the bandwidth calculations
rec.data[rec.data < 0] = 0.0
rec.data[:] = np.exp(rec.data / (-1 * bandwidth))
if not sparse:
rec = rec.toarray()
return rec
|
def recurrence_matrix(data, k=None, width=1, metric='euclidean',
                      sym=False, sparse=False, mode='connectivity',
                      bandwidth=None, self=False, axis=-1):
    '''Compute a recurrence matrix from a data matrix.

    `rec[i, j]` is non-zero if (`data[:, i]`, `data[:, j]`) are
    k-nearest-neighbors and `|i - j| >= width`

    The specific value of `rec[i, j]` can have several forms, governed
    by the `mode` parameter below:

        - Connectivity: `rec[i, j] = 1 or 0` indicates that frames `i` and `j` are repetitions
        - Affinity: `rec[i, j] > 0` measures how similar frames `i` and `j` are. This is also
          known as a (sparse) self-similarity matrix.
        - Distance: `rec[i, j] > 0` measures how distant frames `i` and `j` are. This is also
          known as a (sparse) self-distance matrix.

    The general term *recurrence matrix* can refer to any of the three forms above.

    Parameters
    ----------
    data : np.ndarray
        A feature matrix

    k : int > 0 [scalar] or None
        the number of nearest-neighbors for each sample
        Default: `k = 2 * ceil(sqrt(t - 2 * width + 1))`,
        or `k = 2` if `t <= 2 * width + 1`

    width : int >= 1 [scalar]
        only link neighbors `(data[:, i], data[:, j])`
        if `|i - j| >= width`
        `width` cannot exceed the length of the data.

    metric : str
        Distance metric to use for nearest-neighbor calculation.
        See `sklearn.neighbors.NearestNeighbors` for details.

    sym : bool [scalar]
        set `sym=True` to only link mutual nearest-neighbors

    sparse : bool [scalar]
        if False, returns a dense type (ndarray)
        if True, returns a sparse type (scipy.sparse.csr_matrix)

    mode : str, {'connectivity', 'distance', 'affinity'}
        If 'connectivity', a binary connectivity matrix is produced.
        If 'distance', then a non-zero entry contains the distance between
        points.
        If 'affinity', then non-zero entries are mapped to
        `exp( - distance(i, j) / bandwidth)` where `bandwidth` is
        as specified below.

    bandwidth : None or float > 0
        If using ``mode='affinity'``, this can be used to set the
        bandwidth on the affinity kernel.
        If no value is provided, it is set automatically to the median
        distance between furthest nearest neighbors.

    self : bool
        If `True`, then the main diagonal is populated with self-links:
        0 if ``mode='distance'``, and 1 otherwise.
        If `False`, the main diagonal is left empty.

    axis : int
        The axis along which to compute recurrence.
        By default, the last index (-1) is taken.

    Returns
    -------
    rec : np.ndarray or scipy.sparse.csr_matrix, [shape=(t, t)]
        Recurrence matrix

    See Also
    --------
    sklearn.neighbors.NearestNeighbors
    scipy.spatial.distance.cdist
    librosa.feature.stack_memory
    recurrence_to_lag
    '''
    data = np.atleast_2d(data)

    # Swap observations to the first dimension and flatten the rest
    data = np.swapaxes(data, axis, 0)
    t = data.shape[0]
    data = data.reshape((t, -1))

    if width < 1 or width > t:
        raise ParameterError('width={} must be at least 1 and at most data.shape[{}]={}'.format(width, axis, t))

    if mode not in ['connectivity', 'distance', 'affinity']:
        raise ParameterError(("Invalid mode='{}'. Must be one of "
                              "['connectivity', 'distance', "
                              "'affinity']").format(mode))

    if k is None:
        # Default neighbor count scales with the square root of the
        # number of usable (off-band) samples
        if t > 2 * width + 1:
            k = 2 * np.ceil(np.sqrt(t - 2 * width + 1))
        else:
            k = 2

    if bandwidth is not None:
        if bandwidth <= 0:
            raise ParameterError('Invalid bandwidth={}. '
                                 'Must be strictly positive.'.format(bandwidth))

    k = int(k)

    # Build the neighbor search object.
    # We request k + 2*width neighbors because up to 2*width of them
    # will be discarded when the diagonal band is removed below.
    try:
        knn = sklearn.neighbors.NearestNeighbors(n_neighbors=min(t-1, k + 2 * width),
                                                 metric=metric,
                                                 algorithm='auto')
    except ValueError:
        # Some metrics are only available with brute-force search
        knn = sklearn.neighbors.NearestNeighbors(n_neighbors=min(t-1, k + 2 * width),
                                                 metric=metric,
                                                 algorithm='brute')

    knn.fit(data)

    # Get the knn graph; affinities are derived from distances afterwards
    if mode == 'affinity':
        kng_mode = 'distance'
    else:
        kng_mode = mode

    rec = knn.kneighbors_graph(mode=kng_mode).tolil()

    # Remove connections within width of the main diagonal
    for diag in range(-width + 1, width):
        rec.setdiag(0, diag)

    # Retain only the top-k links per point
    for i in range(t):
        # Get the links from point i
        links = rec[i].nonzero()[1]

        # Order them ascending by stored weight
        idx = links[np.argsort(rec[i, links].toarray())][0]

        # Everything past the kth closest gets squashed
        rec[i, idx[k:]] = 0

    if self:
        if mode == 'connectivity':
            rec.setdiag(1)
        elif mode == 'affinity':
            # we need to keep the self-loop in here, but not mess up the
            # bandwidth estimation
            #
            # using negative distances here preserves the structure without changing
            # the statistics of the data
            rec.setdiag(-1)

    # symmetrize
    if sym:
        # Note: this operation produces a CSR (compressed sparse row) matrix!
        # This is why we have to do it after filling the diagonal in self-mode
        rec = rec.minimum(rec.T)

    rec = rec.tocsr()
    rec.eliminate_zeros()

    if mode == 'connectivity':
        # Fixed: use the builtin `bool` here.  The `np.bool` alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24, so
        # `rec.astype(np.bool)` raises AttributeError on modern NumPy.
        rec = rec.astype(bool)
    elif mode == 'affinity':
        if bandwidth is None:
            # Median of each point's furthest-neighbor distance
            bandwidth = np.nanmedian(rec.max(axis=1).data)

        # Set all the negatives back to 0
        # Negatives are temporarily inserted above to preserve the sparsity structure
        # of the matrix without corrupting the bandwidth calculations
        rec.data[rec.data < 0] = 0.0
        rec.data[:] = np.exp(rec.data / (-1 * bandwidth))

    if not sparse:
        rec = rec.toarray()

    return rec
|
[
"Compute",
"a",
"recurrence",
"matrix",
"from",
"a",
"data",
"matrix",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/segment.py#L54-L287
|
[
"def",
"recurrence_matrix",
"(",
"data",
",",
"k",
"=",
"None",
",",
"width",
"=",
"1",
",",
"metric",
"=",
"'euclidean'",
",",
"sym",
"=",
"False",
",",
"sparse",
"=",
"False",
",",
"mode",
"=",
"'connectivity'",
",",
"bandwidth",
"=",
"None",
",",
"self",
"=",
"False",
",",
"axis",
"=",
"-",
"1",
")",
":",
"data",
"=",
"np",
".",
"atleast_2d",
"(",
"data",
")",
"# Swap observations to the first dimension and flatten the rest",
"data",
"=",
"np",
".",
"swapaxes",
"(",
"data",
",",
"axis",
",",
"0",
")",
"t",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"data",
"=",
"data",
".",
"reshape",
"(",
"(",
"t",
",",
"-",
"1",
")",
")",
"if",
"width",
"<",
"1",
"or",
"width",
">",
"t",
":",
"raise",
"ParameterError",
"(",
"'width={} must be at least 1 and at most data.shape[{}]={}'",
".",
"format",
"(",
"width",
",",
"axis",
",",
"t",
")",
")",
"if",
"mode",
"not",
"in",
"[",
"'connectivity'",
",",
"'distance'",
",",
"'affinity'",
"]",
":",
"raise",
"ParameterError",
"(",
"(",
"\"Invalid mode='{}'. Must be one of \"",
"\"['connectivity', 'distance', \"",
"\"'affinity']\"",
")",
".",
"format",
"(",
"mode",
")",
")",
"if",
"k",
"is",
"None",
":",
"if",
"t",
">",
"2",
"*",
"width",
"+",
"1",
":",
"k",
"=",
"2",
"*",
"np",
".",
"ceil",
"(",
"np",
".",
"sqrt",
"(",
"t",
"-",
"2",
"*",
"width",
"+",
"1",
")",
")",
"else",
":",
"k",
"=",
"2",
"if",
"bandwidth",
"is",
"not",
"None",
":",
"if",
"bandwidth",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'Invalid bandwidth={}. '",
"'Must be strictly positive.'",
".",
"format",
"(",
"bandwidth",
")",
")",
"k",
"=",
"int",
"(",
"k",
")",
"# Build the neighbor search object",
"try",
":",
"knn",
"=",
"sklearn",
".",
"neighbors",
".",
"NearestNeighbors",
"(",
"n_neighbors",
"=",
"min",
"(",
"t",
"-",
"1",
",",
"k",
"+",
"2",
"*",
"width",
")",
",",
"metric",
"=",
"metric",
",",
"algorithm",
"=",
"'auto'",
")",
"except",
"ValueError",
":",
"knn",
"=",
"sklearn",
".",
"neighbors",
".",
"NearestNeighbors",
"(",
"n_neighbors",
"=",
"min",
"(",
"t",
"-",
"1",
",",
"k",
"+",
"2",
"*",
"width",
")",
",",
"metric",
"=",
"metric",
",",
"algorithm",
"=",
"'brute'",
")",
"knn",
".",
"fit",
"(",
"data",
")",
"# Get the knn graph",
"if",
"mode",
"==",
"'affinity'",
":",
"kng_mode",
"=",
"'distance'",
"else",
":",
"kng_mode",
"=",
"mode",
"rec",
"=",
"knn",
".",
"kneighbors_graph",
"(",
"mode",
"=",
"kng_mode",
")",
".",
"tolil",
"(",
")",
"# Remove connections within width",
"for",
"diag",
"in",
"range",
"(",
"-",
"width",
"+",
"1",
",",
"width",
")",
":",
"rec",
".",
"setdiag",
"(",
"0",
",",
"diag",
")",
"# Retain only the top-k links per point",
"for",
"i",
"in",
"range",
"(",
"t",
")",
":",
"# Get the links from point i",
"links",
"=",
"rec",
"[",
"i",
"]",
".",
"nonzero",
"(",
")",
"[",
"1",
"]",
"# Order them ascending",
"idx",
"=",
"links",
"[",
"np",
".",
"argsort",
"(",
"rec",
"[",
"i",
",",
"links",
"]",
".",
"toarray",
"(",
")",
")",
"]",
"[",
"0",
"]",
"# Everything past the kth closest gets squashed",
"rec",
"[",
"i",
",",
"idx",
"[",
"k",
":",
"]",
"]",
"=",
"0",
"if",
"self",
":",
"if",
"mode",
"==",
"'connectivity'",
":",
"rec",
".",
"setdiag",
"(",
"1",
")",
"elif",
"mode",
"==",
"'affinity'",
":",
"# we need to keep the self-loop in here, but not mess up the",
"# bandwidth estimation",
"#",
"# using negative distances here preserves the structure without changing",
"# the statistics of the data",
"rec",
".",
"setdiag",
"(",
"-",
"1",
")",
"# symmetrize",
"if",
"sym",
":",
"# Note: this operation produces a CSR (compressed sparse row) matrix!",
"# This is why we have to do it after filling the diagonal in self-mode",
"rec",
"=",
"rec",
".",
"minimum",
"(",
"rec",
".",
"T",
")",
"rec",
"=",
"rec",
".",
"tocsr",
"(",
")",
"rec",
".",
"eliminate_zeros",
"(",
")",
"if",
"mode",
"==",
"'connectivity'",
":",
"rec",
"=",
"rec",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"elif",
"mode",
"==",
"'affinity'",
":",
"if",
"bandwidth",
"is",
"None",
":",
"bandwidth",
"=",
"np",
".",
"nanmedian",
"(",
"rec",
".",
"max",
"(",
"axis",
"=",
"1",
")",
".",
"data",
")",
"# Set all the negatives back to 0",
"# Negatives are temporarily inserted above to preserve the sparsity structure",
"# of the matrix without corrupting the bandwidth calculations",
"rec",
".",
"data",
"[",
"rec",
".",
"data",
"<",
"0",
"]",
"=",
"0.0",
"rec",
".",
"data",
"[",
":",
"]",
"=",
"np",
".",
"exp",
"(",
"rec",
".",
"data",
"/",
"(",
"-",
"1",
"*",
"bandwidth",
")",
")",
"if",
"not",
"sparse",
":",
"rec",
"=",
"rec",
".",
"toarray",
"(",
")",
"return",
"rec"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
recurrence_to_lag
|
Convert a recurrence matrix into a lag matrix.
`lag[i, j] == rec[i+j, j]`
Parameters
----------
rec : np.ndarray, or scipy.sparse.spmatrix [shape=(n, n)]
A (binary) recurrence matrix, as returned by `recurrence_matrix`
pad : bool
If False, `lag` matrix is square, which is equivalent to
assuming that the signal repeats itself indefinitely.
If True, `lag` is padded with `n` zeros, which eliminates
the assumption of repetition.
axis : int
The axis to keep as the `time` axis.
The alternate axis will be converted to lag coordinates.
Returns
-------
lag : np.ndarray
The recurrence matrix in (lag, time) (if `axis=1`)
or (time, lag) (if `axis=0`) coordinates
Raises
------
ParameterError : if `rec` is non-square
See Also
--------
recurrence_matrix
lag_to_recurrence
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfccs = librosa.feature.mfcc(y=y, sr=sr)
>>> recurrence = librosa.segment.recurrence_matrix(mfccs)
>>> lag_pad = librosa.segment.recurrence_to_lag(recurrence, pad=True)
>>> lag_nopad = librosa.segment.recurrence_to_lag(recurrence, pad=False)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 4))
>>> plt.subplot(1, 2, 1)
>>> librosa.display.specshow(lag_pad, x_axis='time', y_axis='lag')
>>> plt.title('Lag (zero-padded)')
>>> plt.subplot(1, 2, 2)
>>> librosa.display.specshow(lag_nopad, x_axis='time')
>>> plt.title('Lag (no padding)')
>>> plt.tight_layout()
|
librosa/segment.py
|
def recurrence_to_lag(rec, pad=True, axis=-1):
    '''Convert a recurrence matrix into a lag matrix.

        `lag[i, j] == rec[i+j, j]`

    Parameters
    ----------
    rec : np.ndarray, or scipy.sparse.spmatrix [shape=(n, n)]
        A (binary) recurrence matrix, as returned by `recurrence_matrix`

    pad : bool
        If False, `lag` matrix is square, which is equivalent to
        assuming that the signal repeats itself indefinitely.
        If True, `lag` is padded with `n` zeros, which eliminates
        the assumption of repetition.

    axis : int
        The axis to keep as the `time` axis.
        The alternate axis will be converted to lag coordinates.

    Returns
    -------
    lag : np.ndarray
        The recurrence matrix in (lag, time) (if `axis=1`)
        or (time, lag) (if `axis=0`) coordinates

    Raises
    ------
    ParameterError : if `rec` is non-square

    See Also
    --------
    recurrence_matrix
    lag_to_recurrence

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> mfccs = librosa.feature.mfcc(y=y, sr=sr)
    >>> recurrence = librosa.segment.recurrence_matrix(mfccs)
    >>> lag_pad = librosa.segment.recurrence_to_lag(recurrence, pad=True)
    >>> lag_nopad = librosa.segment.recurrence_to_lag(recurrence, pad=False)
    >>> import matplotlib.pyplot as plt
    >>> plt.figure(figsize=(8, 4))
    >>> plt.subplot(1, 2, 1)
    >>> librosa.display.specshow(lag_pad, x_axis='time', y_axis='lag')
    >>> plt.title('Lag (zero-padded)')
    >>> plt.subplot(1, 2, 2)
    >>> librosa.display.specshow(lag_nopad, x_axis='time')
    >>> plt.title('Lag (no padding)')
    >>> plt.tight_layout()
    '''
    # rec must be 2-D and square, so abs() folds axis=-1 onto axis=1
    axis = np.abs(axis)

    if rec.ndim != 2 or rec.shape[0] != rec.shape[1]:
        raise ParameterError('non-square recurrence matrix shape: '
                             '{}'.format(rec.shape))

    sparse = scipy.sparse.issparse(rec)
    # roll_ax stays None for dense input; util.roll_sparse receives it as axis
    roll_ax = None
    if sparse:
        roll_ax = 1 - axis
        # Remember the input storage format so the output can match it
        lag_format = rec.format
        # Pick the compressed layout that slices cheaply along `axis`
        if axis == 0:
            rec = rec.tocsc()
        elif axis in (-1, 1):
            rec = rec.tocsr()

    t = rec.shape[axis]

    if sparse:
        if pad:
            # Kronecker product with a [1, 0] selector appends an explicit
            # all-zero block along the lag axis, doubling its length
            kron = np.asarray([[1, 0]]).swapaxes(axis, 0)
            lag = scipy.sparse.kron(kron.astype(rec.dtype), rec, format='lil')
        else:
            # LIL format supports the per-slice assignment in the loop below
            lag = scipy.sparse.lil_matrix(rec)
    else:
        if pad:
            padding = [(0, 0), (0, 0)]
            padding[(1-axis)] = (0, t)
            lag = np.pad(rec, padding, mode='constant')
        else:
            lag = rec.copy()

    idx_slice = [slice(None)] * lag.ndim

    # Roll each time slice backwards by its own index so that the main
    # diagonal of `rec` lands on lag 0
    for i in range(1, t):
        idx_slice[axis] = i
        lag[tuple(idx_slice)] = util.roll_sparse(lag[tuple(idx_slice)], -i, axis=roll_ax)

    if sparse:
        return lag.asformat(lag_format)

    # Normalize the memory layout of the dense result
    return np.ascontiguousarray(lag.T).T
|
def recurrence_to_lag(rec, pad=True, axis=-1):
    '''Convert a recurrence matrix into a lag matrix.

        `lag[i, j] == rec[i+j, j]`

    Each slice along the time axis is rolled so that the main diagonal
    of `rec` maps to lag 0.

    Parameters
    ----------
    rec : np.ndarray, or scipy.sparse.spmatrix [shape=(n, n)]
        A (binary) recurrence matrix, as returned by `recurrence_matrix`

    pad : bool
        If False, `lag` matrix is square, which is equivalent to
        assuming that the signal repeats itself indefinitely.
        If True, `lag` is padded with `n` zeros, which eliminates
        the assumption of repetition.

    axis : int
        The axis to keep as the `time` axis.
        The alternate axis will be converted to lag coordinates.

    Returns
    -------
    lag : np.ndarray
        The recurrence matrix in (lag, time) (if `axis=1`)
        or (time, lag) (if `axis=0`) coordinates

    Raises
    ------
    ParameterError : if `rec` is non-square

    See Also
    --------
    recurrence_matrix
    lag_to_recurrence
    '''
    # rec is required to be square, so abs() folds axis=-1 onto axis=1
    axis = np.abs(axis)

    if rec.ndim != 2 or rec.shape[0] != rec.shape[1]:
        raise ParameterError('non-square recurrence matrix shape: '
                             '{}'.format(rec.shape))

    use_sparse = scipy.sparse.issparse(rec)

    shift_axis = None
    if use_sparse:
        shift_axis = 1 - axis
        # Preserve the caller's storage format for the output
        out_format = rec.format
        # Choose the compressed layout that slices cheaply along `axis`
        if axis == 0:
            rec = rec.tocsc()
        elif axis in (-1, 1):
            rec = rec.tocsr()

    t = rec.shape[axis]

    if use_sparse:
        if pad:
            # A Kronecker product with a [1, 0] selector appends an
            # explicit zero block along the lag axis
            selector = np.asarray([[1, 0]]).swapaxes(axis, 0)
            lag = scipy.sparse.kron(selector.astype(rec.dtype),
                                    rec, format='lil')
        else:
            # LIL format supports per-slice assignment below
            lag = scipy.sparse.lil_matrix(rec)
    elif pad:
        amounts = [(0, 0), (0, 0)]
        amounts[1 - axis] = (0, t)
        lag = np.pad(rec, amounts, mode='constant')
    else:
        lag = rec.copy()

    window = [slice(None)] * lag.ndim
    for offset in range(1, t):
        # Roll each time slice backwards by its own index
        window[axis] = offset
        lag[tuple(window)] = util.roll_sparse(lag[tuple(window)],
                                              -offset, axis=shift_axis)

    if use_sparse:
        return lag.asformat(out_format)

    # Normalize the memory layout of the dense result
    return np.ascontiguousarray(lag.T).T
|
[
"Convert",
"a",
"recurrence",
"matrix",
"into",
"a",
"lag",
"matrix",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/segment.py#L290-L386
|
[
"def",
"recurrence_to_lag",
"(",
"rec",
",",
"pad",
"=",
"True",
",",
"axis",
"=",
"-",
"1",
")",
":",
"axis",
"=",
"np",
".",
"abs",
"(",
"axis",
")",
"if",
"rec",
".",
"ndim",
"!=",
"2",
"or",
"rec",
".",
"shape",
"[",
"0",
"]",
"!=",
"rec",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ParameterError",
"(",
"'non-square recurrence matrix shape: '",
"'{}'",
".",
"format",
"(",
"rec",
".",
"shape",
")",
")",
"sparse",
"=",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"rec",
")",
"roll_ax",
"=",
"None",
"if",
"sparse",
":",
"roll_ax",
"=",
"1",
"-",
"axis",
"lag_format",
"=",
"rec",
".",
"format",
"if",
"axis",
"==",
"0",
":",
"rec",
"=",
"rec",
".",
"tocsc",
"(",
")",
"elif",
"axis",
"in",
"(",
"-",
"1",
",",
"1",
")",
":",
"rec",
"=",
"rec",
".",
"tocsr",
"(",
")",
"t",
"=",
"rec",
".",
"shape",
"[",
"axis",
"]",
"if",
"sparse",
":",
"if",
"pad",
":",
"kron",
"=",
"np",
".",
"asarray",
"(",
"[",
"[",
"1",
",",
"0",
"]",
"]",
")",
".",
"swapaxes",
"(",
"axis",
",",
"0",
")",
"lag",
"=",
"scipy",
".",
"sparse",
".",
"kron",
"(",
"kron",
".",
"astype",
"(",
"rec",
".",
"dtype",
")",
",",
"rec",
",",
"format",
"=",
"'lil'",
")",
"else",
":",
"lag",
"=",
"scipy",
".",
"sparse",
".",
"lil_matrix",
"(",
"rec",
")",
"else",
":",
"if",
"pad",
":",
"padding",
"=",
"[",
"(",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"0",
")",
"]",
"padding",
"[",
"(",
"1",
"-",
"axis",
")",
"]",
"=",
"(",
"0",
",",
"t",
")",
"lag",
"=",
"np",
".",
"pad",
"(",
"rec",
",",
"padding",
",",
"mode",
"=",
"'constant'",
")",
"else",
":",
"lag",
"=",
"rec",
".",
"copy",
"(",
")",
"idx_slice",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"lag",
".",
"ndim",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"t",
")",
":",
"idx_slice",
"[",
"axis",
"]",
"=",
"i",
"lag",
"[",
"tuple",
"(",
"idx_slice",
")",
"]",
"=",
"util",
".",
"roll_sparse",
"(",
"lag",
"[",
"tuple",
"(",
"idx_slice",
")",
"]",
",",
"-",
"i",
",",
"axis",
"=",
"roll_ax",
")",
"if",
"sparse",
":",
"return",
"lag",
".",
"asformat",
"(",
"lag_format",
")",
"return",
"np",
".",
"ascontiguousarray",
"(",
"lag",
".",
"T",
")",
".",
"T"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
lag_to_recurrence
|
Convert a lag matrix into a recurrence matrix.
Parameters
----------
lag : np.ndarray or scipy.sparse.spmatrix
A lag matrix, as produced by `recurrence_to_lag`
axis : int
The axis corresponding to the time dimension.
The alternate axis will be interpreted in lag coordinates.
Returns
-------
rec : np.ndarray or scipy.sparse.spmatrix [shape=(n, n)]
A recurrence matrix in (time, time) coordinates
For sparse matrices, format will match that of `lag`.
Raises
------
ParameterError : if `lag` does not have the correct shape
See Also
--------
recurrence_to_lag
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfccs = librosa.feature.mfcc(y=y, sr=sr)
>>> recurrence = librosa.segment.recurrence_matrix(mfccs)
>>> lag_pad = librosa.segment.recurrence_to_lag(recurrence, pad=True)
>>> lag_nopad = librosa.segment.recurrence_to_lag(recurrence, pad=False)
>>> rec_pad = librosa.segment.lag_to_recurrence(lag_pad)
>>> rec_nopad = librosa.segment.lag_to_recurrence(lag_nopad)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 4))
>>> plt.subplot(2, 2, 1)
>>> librosa.display.specshow(lag_pad, x_axis='time', y_axis='lag')
>>> plt.title('Lag (zero-padded)')
>>> plt.subplot(2, 2, 2)
>>> librosa.display.specshow(lag_nopad, x_axis='time', y_axis='time')
>>> plt.title('Lag (no padding)')
>>> plt.subplot(2, 2, 3)
>>> librosa.display.specshow(rec_pad, x_axis='time', y_axis='time')
>>> plt.title('Recurrence (with padding)')
>>> plt.subplot(2, 2, 4)
>>> librosa.display.specshow(rec_nopad, x_axis='time', y_axis='time')
>>> plt.title('Recurrence (without padding)')
>>> plt.tight_layout()
|
librosa/segment.py
|
def lag_to_recurrence(lag, axis=-1):
    '''Convert a lag matrix into a recurrence matrix.

    Parameters
    ----------
    lag : np.ndarray or scipy.sparse.spmatrix
        A lag matrix, as produced by `recurrence_to_lag`

    axis : int
        The axis corresponding to the time dimension.
        The alternate axis will be interpreted in lag coordinates.

    Returns
    -------
    rec : np.ndarray or scipy.sparse.spmatrix [shape=(n, n)]
        A recurrence matrix in (time, time) coordinates
        For sparse matrices, format will match that of `lag`.

    Raises
    ------
    ParameterError : if `lag` does not have the correct shape

    See Also
    --------
    recurrence_to_lag

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> mfccs = librosa.feature.mfcc(y=y, sr=sr)
    >>> recurrence = librosa.segment.recurrence_matrix(mfccs)
    >>> lag_pad = librosa.segment.recurrence_to_lag(recurrence, pad=True)
    >>> lag_nopad = librosa.segment.recurrence_to_lag(recurrence, pad=False)
    >>> rec_pad = librosa.segment.lag_to_recurrence(lag_pad)
    >>> rec_nopad = librosa.segment.lag_to_recurrence(lag_nopad)
    >>> import matplotlib.pyplot as plt
    >>> plt.figure(figsize=(8, 4))
    >>> plt.subplot(2, 2, 1)
    >>> librosa.display.specshow(lag_pad, x_axis='time', y_axis='lag')
    >>> plt.title('Lag (zero-padded)')
    >>> plt.subplot(2, 2, 2)
    >>> librosa.display.specshow(lag_nopad, x_axis='time', y_axis='time')
    >>> plt.title('Lag (no padding)')
    >>> plt.subplot(2, 2, 3)
    >>> librosa.display.specshow(rec_pad, x_axis='time', y_axis='time')
    >>> plt.title('Recurrence (with padding)')
    >>> plt.subplot(2, 2, 4)
    >>> librosa.display.specshow(rec_nopad, x_axis='time', y_axis='time')
    >>> plt.title('Recurrence (without padding)')
    >>> plt.tight_layout()
    '''
    if axis not in [0, 1, -1]:
        raise ParameterError('Invalid target axis: {}'.format(axis))

    axis = np.abs(axis)

    # A valid lag matrix is either square (unpadded) or twice as long
    # on the lag axis as on the time axis (zero-padded)
    if lag.ndim != 2 or (lag.shape[0] != lag.shape[1] and
                         lag.shape[1 - axis] != 2 * lag.shape[axis]):
        raise ParameterError('Invalid lag matrix shape: {}'.format(lag.shape))

    # Since lag must be 2-dimensional, abs(axis) = axis
    t = lag.shape[axis]

    sparse = scipy.sparse.issparse(lag)
    if sparse:
        # LIL format supports the per-slice assignment in the loop below
        rec = scipy.sparse.lil_matrix(lag)
        roll_ax = 1 - axis
    else:
        rec = lag.copy()
        # roll_ax stays None for dense input; util.roll_sparse receives it as axis
        roll_ax = None

    idx_slice = [slice(None)] * lag.ndim

    # Undo the backwards roll applied by recurrence_to_lag: each time
    # slice is rolled forward by its own index
    for i in range(1, t):
        idx_slice[axis] = i
        rec[tuple(idx_slice)] = util.roll_sparse(lag[tuple(idx_slice)], i, axis=roll_ax)

    # Trim any zero-padding off the lag axis so the output is square
    sub_slice = [slice(None)] * rec.ndim
    sub_slice[1 - axis] = slice(t)
    rec = rec[tuple(sub_slice)]

    if sparse:
        return rec.asformat(lag.format)

    # Normalize the memory layout of the dense result
    return np.ascontiguousarray(rec.T).T
|
def lag_to_recurrence(lag, axis=-1):
    '''Convert a lag matrix into a recurrence matrix.

    This inverts `recurrence_to_lag`: each time slice is rolled forward
    by its lag offset, and any zero-padding introduced by
    `recurrence_to_lag(..., pad=True)` is trimmed away.

    Parameters
    ----------
    lag : np.ndarray or scipy.sparse.spmatrix
        A lag matrix, as produced by `recurrence_to_lag`

    axis : int
        The axis corresponding to the time dimension.
        The alternate axis will be interpreted in lag coordinates.

    Returns
    -------
    rec : np.ndarray or scipy.sparse.spmatrix [shape=(n, n)]
        A recurrence matrix in (time, time) coordinates
        For sparse matrices, format will match that of `lag`.

    Raises
    ------
    ParameterError : if `lag` does not have the correct shape

    See Also
    --------
    recurrence_to_lag
    '''
    if axis not in [0, 1, -1]:
        raise ParameterError('Invalid target axis: {}'.format(axis))

    # lag is 2-dimensional, so abs() folds axis=-1 onto axis=1
    axis = np.abs(axis)

    # Valid shapes: square (unpadded) or 2:1 on the lag axis (padded)
    if lag.ndim != 2 or (lag.shape[0] != lag.shape[1] and
                         lag.shape[1 - axis] != 2 * lag.shape[axis]):
        raise ParameterError('Invalid lag matrix shape: {}'.format(lag.shape))

    t = lag.shape[axis]

    use_sparse = scipy.sparse.issparse(lag)
    if use_sparse:
        # LIL format supports per-slice assignment below
        out = scipy.sparse.lil_matrix(lag)
        shift_axis = 1 - axis
    else:
        out = lag.copy()
        shift_axis = None

    window = [slice(None)] * lag.ndim
    for offset in range(1, t):
        # Roll each time slice forward by its own index, undoing the
        # backwards roll applied by recurrence_to_lag
        window[axis] = offset
        out[tuple(window)] = util.roll_sparse(lag[tuple(window)],
                                              offset, axis=shift_axis)

    # Trim any zero-padding off the lag axis so the result is square
    keep = [slice(None)] * out.ndim
    keep[1 - axis] = slice(t)
    out = out[tuple(keep)]

    if use_sparse:
        return out.asformat(lag.format)

    # Normalize the memory layout of the dense result
    return np.ascontiguousarray(out.T).T
|
[
"Convert",
"a",
"lag",
"matrix",
"into",
"a",
"recurrence",
"matrix",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/segment.py#L389-L474
|
[
"def",
"lag_to_recurrence",
"(",
"lag",
",",
"axis",
"=",
"-",
"1",
")",
":",
"if",
"axis",
"not",
"in",
"[",
"0",
",",
"1",
",",
"-",
"1",
"]",
":",
"raise",
"ParameterError",
"(",
"'Invalid target axis: {}'",
".",
"format",
"(",
"axis",
")",
")",
"axis",
"=",
"np",
".",
"abs",
"(",
"axis",
")",
"if",
"lag",
".",
"ndim",
"!=",
"2",
"or",
"(",
"lag",
".",
"shape",
"[",
"0",
"]",
"!=",
"lag",
".",
"shape",
"[",
"1",
"]",
"and",
"lag",
".",
"shape",
"[",
"1",
"-",
"axis",
"]",
"!=",
"2",
"*",
"lag",
".",
"shape",
"[",
"axis",
"]",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid lag matrix shape: {}'",
".",
"format",
"(",
"lag",
".",
"shape",
")",
")",
"# Since lag must be 2-dimensional, abs(axis) = axis",
"t",
"=",
"lag",
".",
"shape",
"[",
"axis",
"]",
"sparse",
"=",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"lag",
")",
"if",
"sparse",
":",
"rec",
"=",
"scipy",
".",
"sparse",
".",
"lil_matrix",
"(",
"lag",
")",
"roll_ax",
"=",
"1",
"-",
"axis",
"else",
":",
"rec",
"=",
"lag",
".",
"copy",
"(",
")",
"roll_ax",
"=",
"None",
"idx_slice",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"lag",
".",
"ndim",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"t",
")",
":",
"idx_slice",
"[",
"axis",
"]",
"=",
"i",
"rec",
"[",
"tuple",
"(",
"idx_slice",
")",
"]",
"=",
"util",
".",
"roll_sparse",
"(",
"lag",
"[",
"tuple",
"(",
"idx_slice",
")",
"]",
",",
"i",
",",
"axis",
"=",
"roll_ax",
")",
"sub_slice",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"rec",
".",
"ndim",
"sub_slice",
"[",
"1",
"-",
"axis",
"]",
"=",
"slice",
"(",
"t",
")",
"rec",
"=",
"rec",
"[",
"tuple",
"(",
"sub_slice",
")",
"]",
"if",
"sparse",
":",
"return",
"rec",
".",
"asformat",
"(",
"lag",
".",
"format",
")",
"return",
"np",
".",
"ascontiguousarray",
"(",
"rec",
".",
"T",
")",
".",
"T"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
timelag_filter
|
Filtering in the time-lag domain.
This is primarily useful for adapting image filters to operate on
`recurrence_to_lag` output.
Using `timelag_filter` is equivalent to the following sequence of
operations:
>>> data_tl = librosa.segment.recurrence_to_lag(data)
>>> data_filtered_tl = function(data_tl)
>>> data_filtered = librosa.segment.lag_to_recurrence(data_filtered_tl)
Parameters
----------
function : callable
The filtering function to wrap, e.g., `scipy.ndimage.median_filter`
pad : bool
Whether to zero-pad the structure feature matrix
index : int >= 0
If `function` accepts input data as a positional argument, it should be
indexed by `index`
Returns
-------
wrapped_function : callable
A new filter function which applies in time-lag space rather than
time-time space.
Examples
--------
Apply a 5-bin median filter to the diagonal of a recurrence matrix
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> rec = librosa.segment.recurrence_matrix(chroma)
>>> from scipy.ndimage import median_filter
>>> diagonal_median = librosa.segment.timelag_filter(median_filter)
>>> rec_filtered = diagonal_median(rec, size=(1, 3), mode='mirror')
Or with affinity weights
>>> rec_aff = librosa.segment.recurrence_matrix(chroma, mode='affinity')
>>> rec_aff_fil = diagonal_median(rec_aff, size=(1, 3), mode='mirror')
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8,8))
>>> plt.subplot(2, 2, 1)
>>> librosa.display.specshow(rec, y_axis='time')
>>> plt.title('Raw recurrence matrix')
>>> plt.subplot(2, 2, 2)
>>> librosa.display.specshow(rec_filtered)
>>> plt.title('Filtered recurrence matrix')
>>> plt.subplot(2, 2, 3)
>>> librosa.display.specshow(rec_aff, x_axis='time', y_axis='time',
... cmap='magma_r')
>>> plt.title('Raw affinity matrix')
>>> plt.subplot(2, 2, 4)
>>> librosa.display.specshow(rec_aff_fil, x_axis='time',
... cmap='magma_r')
>>> plt.title('Filtered affinity matrix')
>>> plt.tight_layout()
|
librosa/segment.py
|
def timelag_filter(function, pad=True, index=0):
    '''Filtering in the time-lag domain.

    This is primarily useful for adapting image filters to operate on
    `recurrence_to_lag` output.

    Using `timelag_filter` is equivalent to the following sequence of
    operations:

    >>> data_tl = librosa.segment.recurrence_to_lag(data)
    >>> data_filtered_tl = function(data_tl)
    >>> data_filtered = librosa.segment.lag_to_recurrence(data_filtered_tl)

    Parameters
    ----------
    function : callable
        The filtering function to wrap, e.g., `scipy.ndimage.median_filter`

    pad : bool
        Whether to zero-pad the structure feature matrix

    index : int >= 0
        If `function` accepts input data as a positional argument, it should be
        indexed by `index`

    Returns
    -------
    wrapped_function : callable
        A new filter function which applies in time-lag space rather than
        time-time space.

    See Also
    --------
    recurrence_to_lag
    lag_to_recurrence

    Examples
    --------
    Apply a median filter to the diagonal of a recurrence matrix

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
    >>> rec = librosa.segment.recurrence_matrix(chroma)
    >>> from scipy.ndimage import median_filter
    >>> diagonal_median = librosa.segment.timelag_filter(median_filter)
    >>> rec_filtered = diagonal_median(rec, size=(1, 3), mode='mirror')
    '''
    def _timelag_wrapper(wrapped_f, *args, **kwargs):
        '''Apply `wrapped_f` in lag coordinates, then map back to time-time.'''
        positional = list(args)
        # Map the target argument into time-lag space
        positional[index] = recurrence_to_lag(positional[index], pad=pad)
        # Filter there, then convert the result back to time-time coordinates
        return lag_to_recurrence(wrapped_f(*positional, **kwargs))

    return decorator(_timelag_wrapper, function)
|
def timelag_filter(function, pad=True, index=0):
    '''Filtering in the time-lag domain.

    This is primarily useful for adapting image filters to operate on
    `recurrence_to_lag` output.

    Using `timelag_filter` is equivalent to the following sequence of
    operations:

    >>> data_tl = librosa.segment.recurrence_to_lag(data)
    >>> data_filtered_tl = function(data_tl)
    >>> data_filtered = librosa.segment.lag_to_recurrence(data_filtered_tl)

    Parameters
    ----------
    function : callable
        The filtering function to wrap, e.g., `scipy.ndimage.median_filter`
    pad : bool
        Whether to zero-pad the structure feature matrix
    index : int >= 0
        If `function` accepts input data as a positional argument, it should be
        indexed by `index`

    Returns
    -------
    wrapped_function : callable
        A new filter function which applies in time-lag space rather than
        time-time space.

    Examples
    --------
    Apply a median filter along the diagonals of a recurrence matrix

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
    >>> rec = librosa.segment.recurrence_matrix(chroma)
    >>> from scipy.ndimage import median_filter
    >>> diagonal_median = librosa.segment.timelag_filter(median_filter)
    >>> rec_filtered = diagonal_median(rec, size=(1, 3), mode='mirror')

    Or with affinity weights

    >>> rec_aff = librosa.segment.recurrence_matrix(chroma, mode='affinity')
    >>> rec_aff_fil = diagonal_median(rec_aff, size=(1, 3), mode='mirror')
    '''
    def __my_filter(wrapped_f, *args, **kwargs):
        '''Decorator to wrap the filter'''
        # Convert the positional input at `index` into time-lag space
        arg_list = list(args)
        arg_list[index] = recurrence_to_lag(arg_list[index], pad=pad)
        # Run the wrapped filter, then map the result back to time-time space
        return lag_to_recurrence(wrapped_f(*arg_list, **kwargs))

    return decorator(__my_filter, function)
|
[
"Filtering",
"in",
"the",
"time",
"-",
"lag",
"domain",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/segment.py#L477-L559
|
[
"def",
"timelag_filter",
"(",
"function",
",",
"pad",
"=",
"True",
",",
"index",
"=",
"0",
")",
":",
"def",
"__my_filter",
"(",
"wrapped_f",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"'''Decorator to wrap the filter'''",
"# Map the input data into time-lag space",
"args",
"=",
"list",
"(",
"args",
")",
"args",
"[",
"index",
"]",
"=",
"recurrence_to_lag",
"(",
"args",
"[",
"index",
"]",
",",
"pad",
"=",
"pad",
")",
"# Apply the filtering function",
"result",
"=",
"wrapped_f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Map back into time-time and return",
"return",
"lag_to_recurrence",
"(",
"result",
")",
"return",
"decorator",
"(",
"__my_filter",
",",
"function",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
subsegment
|
Sub-divide a segmentation by feature clustering.
Given a set of frame boundaries (`frames`), and a data matrix (`data`),
each successive interval defined by `frames` is partitioned into
`n_segments` by constrained agglomerative clustering.
.. note::
If an interval spans fewer than `n_segments` frames, then each
frame becomes a sub-segment.
Parameters
----------
data : np.ndarray
Data matrix to use in clustering
frames : np.ndarray [shape=(n_boundaries,), dtype=int, non-negative]
Array of beat or segment boundaries, as provided by
`librosa.beat.beat_track`,
`librosa.onset.onset_detect`,
or `agglomerative`.
n_segments : int > 0
Maximum number of frames to sub-divide each interval.
axis : int
Axis along which to apply the segmentation.
By default, the last index (-1) is taken.
Returns
-------
boundaries : np.ndarray [shape=(n_subboundaries,)]
List of sub-divided segment boundaries
See Also
--------
agglomerative : Temporal segmentation
librosa.onset.onset_detect : Onset detection
librosa.beat.beat_track : Beat tracking
Notes
-----
This function caches at level 30.
Examples
--------
Load audio, detect beat frames, and subdivide in twos by CQT
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=8)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> cqt = np.abs(librosa.cqt(y, sr=sr, hop_length=512))
>>> subseg = librosa.segment.subsegment(cqt, beats, n_segments=2)
>>> subseg_t = librosa.frames_to_time(subseg, sr=sr, hop_length=512)
>>> subseg
array([ 0, 2, 4, 21, 23, 26, 43, 55, 63, 72, 83,
97, 102, 111, 122, 137, 142, 153, 162, 180, 182, 185,
202, 210, 221, 231, 241, 256, 261, 271, 281, 296, 301,
310, 320, 339, 341, 344, 361, 368, 382, 389, 401, 416,
420, 430, 436, 451, 456, 465, 476, 489, 496, 503, 515,
527, 535, 544, 553, 558, 571, 578, 590, 607, 609, 638])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(librosa.amplitude_to_db(cqt,
... ref=np.max),
... y_axis='cqt_hz', x_axis='time')
>>> lims = plt.gca().get_ylim()
>>> plt.vlines(beat_times, lims[0], lims[1], color='lime', alpha=0.9,
... linewidth=2, label='Beats')
>>> plt.vlines(subseg_t, lims[0], lims[1], color='linen', linestyle='--',
... linewidth=1.5, alpha=0.5, label='Sub-beats')
>>> plt.legend(frameon=True, shadow=True)
>>> plt.title('CQT + Beat and sub-beat markers')
>>> plt.tight_layout()
|
librosa/segment.py
|
def subsegment(data, frames, n_segments=4, axis=-1):
    '''Sub-divide a segmentation by feature clustering.

    Given a set of frame boundaries (`frames`) and a data matrix (`data`),
    each successive interval defined by `frames` is partitioned into
    `n_segments` by constrained agglomerative clustering.

    .. note::
        If an interval spans fewer than `n_segments` frames, then each
        frame becomes a sub-segment.

    Parameters
    ----------
    data : np.ndarray
        Data matrix to use in clustering

    frames : np.ndarray [shape=(n_boundaries,), dtype=int, non-negative]
        Array of beat or segment boundaries, as provided by
        `librosa.beat.beat_track`, `librosa.onset.onset_detect`,
        or `agglomerative`.

    n_segments : int > 0
        Maximum number of frames to sub-divide each interval.

    axis : int
        Axis along which to apply the segmentation.
        By default, the last index (-1) is taken.

    Returns
    -------
    boundaries : np.ndarray [shape=(n_subboundaries,)]
        List of sub-divided segment boundaries

    See Also
    --------
    agglomerative : Temporal segmentation
    librosa.onset.onset_detect : Onset detection
    librosa.beat.beat_track : Beat tracking

    Notes
    -----
    This function caches at level 30.

    Examples
    --------
    Load audio, detect beat frames, and subdivide in twos by CQT

    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=8)
    >>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
    >>> cqt = np.abs(librosa.cqt(y, sr=sr, hop_length=512))
    >>> subseg = librosa.segment.subsegment(cqt, beats, n_segments=2)
    >>> subseg_t = librosa.frames_to_time(subseg, sr=sr, hop_length=512)
    '''
    # Clamp the boundaries to [0, n_frames] and pad with the endpoints
    frames = util.fix_frames(frames, x_min=0, x_max=data.shape[axis], pad=True)

    if n_segments < 1:
        raise ParameterError('n_segments must be a positive integer')

    sub_boundaries = []
    selector = [slice(None)] * data.ndim

    # Cluster each inter-boundary interval independently
    for start, end in zip(frames[:-1], frames[1:]):
        selector[axis] = slice(start, end)
        # Never request more clusters than there are frames in the interval
        k = min(end - start, n_segments)
        sub_boundaries.extend(start + agglomerative(data[tuple(selector)],
                                                    k,
                                                    axis=axis))

    return np.ascontiguousarray(sub_boundaries)
|
def subsegment(data, frames, n_segments=4, axis=-1):
    '''Sub-divide a segmentation by feature clustering.

    Given a set of frame boundaries (`frames`) and a data matrix (`data`),
    each successive interval defined by `frames` is partitioned into
    `n_segments` by constrained agglomerative clustering.

    .. note::
        If an interval spans fewer than `n_segments` frames, then each
        frame becomes a sub-segment.

    Parameters
    ----------
    data : np.ndarray
        Data matrix to use in clustering

    frames : np.ndarray [shape=(n_boundaries,), dtype=int, non-negative]
        Array of beat or segment boundaries, as provided by
        `librosa.beat.beat_track`, `librosa.onset.onset_detect`,
        or `agglomerative`.

    n_segments : int > 0
        Maximum number of frames to sub-divide each interval.

    axis : int
        Axis along which to apply the segmentation.
        By default, the last index (-1) is taken.

    Returns
    -------
    boundaries : np.ndarray [shape=(n_subboundaries,)]
        List of sub-divided segment boundaries

    See Also
    --------
    agglomerative : Temporal segmentation
    librosa.onset.onset_detect : Onset detection
    librosa.beat.beat_track : Beat tracking

    Notes
    -----
    This function caches at level 30.

    Examples
    --------
    Load audio, detect beat frames, and subdivide in twos by CQT

    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=8)
    >>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
    >>> cqt = np.abs(librosa.cqt(y, sr=sr, hop_length=512))
    >>> subseg = librosa.segment.subsegment(cqt, beats, n_segments=2)
    >>> subseg_t = librosa.frames_to_time(subseg, sr=sr, hop_length=512)
    '''
    # Clamp the boundaries to [0, n_frames] and pad with the endpoints
    frames = util.fix_frames(frames, x_min=0, x_max=data.shape[axis], pad=True)

    if n_segments < 1:
        raise ParameterError('n_segments must be a positive integer')

    sub_boundaries = []
    selector = [slice(None)] * data.ndim

    # Cluster each inter-boundary interval independently
    for start, end in zip(frames[:-1], frames[1:]):
        selector[axis] = slice(start, end)
        # Never request more clusters than there are frames in the interval
        k = min(end - start, n_segments)
        sub_boundaries.extend(start + agglomerative(data[tuple(selector)],
                                                    k,
                                                    axis=axis))

    return np.ascontiguousarray(sub_boundaries)
|
[
"Sub",
"-",
"divide",
"a",
"segmentation",
"by",
"feature",
"clustering",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/segment.py#L563-L655
|
[
"def",
"subsegment",
"(",
"data",
",",
"frames",
",",
"n_segments",
"=",
"4",
",",
"axis",
"=",
"-",
"1",
")",
":",
"frames",
"=",
"util",
".",
"fix_frames",
"(",
"frames",
",",
"x_min",
"=",
"0",
",",
"x_max",
"=",
"data",
".",
"shape",
"[",
"axis",
"]",
",",
"pad",
"=",
"True",
")",
"if",
"n_segments",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"'n_segments must be a positive integer'",
")",
"boundaries",
"=",
"[",
"]",
"idx_slices",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"data",
".",
"ndim",
"for",
"seg_start",
",",
"seg_end",
"in",
"zip",
"(",
"frames",
"[",
":",
"-",
"1",
"]",
",",
"frames",
"[",
"1",
":",
"]",
")",
":",
"idx_slices",
"[",
"axis",
"]",
"=",
"slice",
"(",
"seg_start",
",",
"seg_end",
")",
"boundaries",
".",
"extend",
"(",
"seg_start",
"+",
"agglomerative",
"(",
"data",
"[",
"tuple",
"(",
"idx_slices",
")",
"]",
",",
"min",
"(",
"seg_end",
"-",
"seg_start",
",",
"n_segments",
")",
",",
"axis",
"=",
"axis",
")",
")",
"return",
"np",
".",
"ascontiguousarray",
"(",
"boundaries",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
agglomerative
|
Bottom-up temporal segmentation.
Use a temporally-constrained agglomerative clustering routine to partition
`data` into `k` contiguous segments.
Parameters
----------
data : np.ndarray
data to cluster
k : int > 0 [scalar]
number of segments to produce
clusterer : sklearn.cluster.AgglomerativeClustering, optional
An optional AgglomerativeClustering object.
If `None`, a constrained Ward object is instantiated.
axis : int
axis along which to cluster.
By default, the last axis (-1) is chosen.
Returns
-------
boundaries : np.ndarray [shape=(k,)]
left-boundaries (frame numbers) of detected segments. This
will always include `0` as the first left-boundary.
See Also
--------
sklearn.cluster.AgglomerativeClustering
Examples
--------
Cluster by chroma similarity, break into 20 segments
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> bounds = librosa.segment.agglomerative(chroma, 20)
>>> bound_times = librosa.frames_to_time(bounds, sr=sr)
>>> bound_times
array([ 0. , 1.672, 2.322, 2.624, 3.251, 3.506,
4.18 , 5.387, 6.014, 6.293, 6.943, 7.198,
7.848, 9.033, 9.706, 9.961, 10.635, 10.89 ,
11.54 , 12.539])
Plot the segmentation over the chromagram
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.vlines(bound_times, 0, chroma.shape[0], color='linen', linestyle='--',
... linewidth=2, alpha=0.9, label='Segment boundaries')
>>> plt.axis('tight')
>>> plt.legend(frameon=True, shadow=True)
>>> plt.title('Power spectrogram')
>>> plt.tight_layout()
|
librosa/segment.py
|
def agglomerative(data, k, clusterer=None, axis=-1):
    """Bottom-up temporal segmentation.

    Use a temporally-constrained agglomerative clustering routine to partition
    `data` into `k` contiguous segments.

    Parameters
    ----------
    data : np.ndarray
        data to cluster

    k : int > 0 [scalar]
        number of segments to produce

    clusterer : sklearn.cluster.AgglomerativeClustering, optional
        An optional AgglomerativeClustering object.
        If `None`, a constrained Ward object is instantiated.

    axis : int
        axis along which to cluster.
        By default, the last axis (-1) is chosen.

    Returns
    -------
    boundaries : np.ndarray [shape=(k,)]
        left-boundaries (frame numbers) of detected segments. This
        will always include `0` as the first left-boundary.

    See Also
    --------
    sklearn.cluster.AgglomerativeClustering

    Examples
    --------
    Cluster by chroma similarity, break into 20 segments

    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
    >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
    >>> bounds = librosa.segment.agglomerative(chroma, 20)
    >>> bound_times = librosa.frames_to_time(bounds, sr=sr)
    """
    # Ensure at least 2 dimensions, then bring the clustering axis to front
    data = np.swapaxes(np.atleast_2d(data), axis, 0)

    # Collapse all remaining dimensions into one feature vector per frame
    n_frames = data.shape[0]
    features = data.reshape((n_frames, -1))

    if clusterer is None:
        # Temporal constraint: each frame may only merge with its neighbors
        connectivity = sklearn.feature_extraction.image.grid_to_graph(
            n_x=n_frames, n_y=1, n_z=1)

        clusterer = sklearn.cluster.AgglomerativeClustering(
            n_clusters=k, connectivity=connectivity, memory=cache.memory)

    clusterer.fit(features)

    # A segment boundary occurs wherever consecutive labels differ;
    # frame 0 is always a left-boundary
    change_points = 1 + np.nonzero(np.diff(clusterer.labels_))[0].astype(int)
    return np.asarray([0] + list(change_points))
|
def agglomerative(data, k, clusterer=None, axis=-1):
    """Bottom-up temporal segmentation.

    Use a temporally-constrained agglomerative clustering routine to partition
    `data` into `k` contiguous segments.

    Parameters
    ----------
    data : np.ndarray
        data to cluster

    k : int > 0 [scalar]
        number of segments to produce

    clusterer : sklearn.cluster.AgglomerativeClustering, optional
        An optional AgglomerativeClustering object.
        If `None`, a constrained Ward object is instantiated.

    axis : int
        axis along which to cluster.
        By default, the last axis (-1) is chosen.

    Returns
    -------
    boundaries : np.ndarray [shape=(k,)]
        left-boundaries (frame numbers) of detected segments. This
        will always include `0` as the first left-boundary.

    See Also
    --------
    sklearn.cluster.AgglomerativeClustering

    Examples
    --------
    Cluster by chroma similarity, break into 20 segments

    >>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
    >>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
    >>> bounds = librosa.segment.agglomerative(chroma, 20)
    >>> bound_times = librosa.frames_to_time(bounds, sr=sr)
    """
    # Ensure at least 2 dimensions, then bring the clustering axis to front
    data = np.swapaxes(np.atleast_2d(data), axis, 0)

    # Collapse all remaining dimensions into one feature vector per frame
    n_frames = data.shape[0]
    features = data.reshape((n_frames, -1))

    if clusterer is None:
        # Temporal constraint: each frame may only merge with its neighbors
        connectivity = sklearn.feature_extraction.image.grid_to_graph(
            n_x=n_frames, n_y=1, n_z=1)

        clusterer = sklearn.cluster.AgglomerativeClustering(
            n_clusters=k, connectivity=connectivity, memory=cache.memory)

    clusterer.fit(features)

    # A segment boundary occurs wherever consecutive labels differ;
    # frame 0 is always a left-boundary
    change_points = 1 + np.nonzero(np.diff(clusterer.labels_))[0].astype(int)
    return np.asarray([0] + list(change_points))
|
[
"Bottom",
"-",
"up",
"temporal",
"segmentation",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/segment.py#L658-L745
|
[
"def",
"agglomerative",
"(",
"data",
",",
"k",
",",
"clusterer",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
")",
":",
"# Make sure we have at least two dimensions",
"data",
"=",
"np",
".",
"atleast_2d",
"(",
"data",
")",
"# Swap data index to position 0",
"data",
"=",
"np",
".",
"swapaxes",
"(",
"data",
",",
"axis",
",",
"0",
")",
"# Flatten the features",
"n",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"data",
"=",
"data",
".",
"reshape",
"(",
"(",
"n",
",",
"-",
"1",
")",
")",
"if",
"clusterer",
"is",
"None",
":",
"# Connect the temporal connectivity graph",
"grid",
"=",
"sklearn",
".",
"feature_extraction",
".",
"image",
".",
"grid_to_graph",
"(",
"n_x",
"=",
"n",
",",
"n_y",
"=",
"1",
",",
"n_z",
"=",
"1",
")",
"# Instantiate the clustering object",
"clusterer",
"=",
"sklearn",
".",
"cluster",
".",
"AgglomerativeClustering",
"(",
"n_clusters",
"=",
"k",
",",
"connectivity",
"=",
"grid",
",",
"memory",
"=",
"cache",
".",
"memory",
")",
"# Fit the model",
"clusterer",
".",
"fit",
"(",
"data",
")",
"# Find the change points from the labels",
"boundaries",
"=",
"[",
"0",
"]",
"boundaries",
".",
"extend",
"(",
"list",
"(",
"1",
"+",
"np",
".",
"nonzero",
"(",
"np",
".",
"diff",
"(",
"clusterer",
".",
"labels_",
")",
")",
"[",
"0",
"]",
".",
"astype",
"(",
"int",
")",
")",
")",
"return",
"np",
".",
"asarray",
"(",
"boundaries",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
path_enhance
|
Multi-angle path enhancement for self- and cross-similarity matrices.
This function convolves multiple diagonal smoothing filters with a self-similarity (or
recurrence) matrix R, and aggregates the result by an element-wise maximum.
Technically, the output is a matrix R_smooth such that
`R_smooth[i, j] = max_theta (R * filter_theta)[i, j]`
where `*` denotes 2-dimensional convolution, and `filter_theta` is a smoothing filter at
orientation theta.
This is intended to provide coherent temporal smoothing of self-similarity matrices
when there are changes in tempo.
Smoothing filters are generated at evenly spaced orientations between min_ratio and
max_ratio.
This function is inspired by the multi-angle path enhancement of [1]_, but differs by
modeling tempo differences in the space of similarity matrices rather than re-sampling
the underlying features prior to generating the self-similarity matrix.
.. [1] Müller, Meinard and Frank Kurth.
"Enhancing similarity matrices for music audio analysis."
2006 IEEE International Conference on Acoustics Speech and Signal Processing Proceedings.
Vol. 5. IEEE, 2006.
.. note:: if using recurrence_matrix to construct the input similarity matrix, be sure to include the main
diagonal by setting `self=True`. Otherwise, the diagonal will be suppressed, and this is likely to
produce discontinuities which will pollute the smoothing filter response.
Parameters
----------
R : np.ndarray
The self- or cross-similarity matrix to be smoothed.
Note: sparse inputs are not supported.
n : int > 0
The length of the smoothing filter
window : window specification
The type of smoothing filter to use. See `filters.get_window` for more information
on window specification formats.
max_ratio : float > 0
The maximum tempo ratio to support
min_ratio : float > 0
The minimum tempo ratio to support.
If not provided, it will default to `1/max_ratio`
n_filters : int >= 1
The number of different smoothing filters to use, evenly spaced
between `min_ratio` and `max_ratio`.
If `min_ratio = 1/max_ratio` (the default), using an odd number
of filters will ensure that the main diagonal (ratio=1) is included.
zero_mean : bool
By default, the smoothing filters are non-negative and sum to one (i.e. are averaging
filters).
If `zero_mean=True`, then the smoothing filters are made to sum to zero by subtracting
a constant value from the non-diagonal coordinates of the filter. This is primarily
useful for suppressing blocks while enhancing diagonals.
clip : bool
If True, the smoothed similarity matrix will be thresholded at 0, and will not contain
negative entries.
kwargs : additional keyword arguments
Additional arguments to pass to `scipy.ndimage.convolve`
Returns
-------
R_smooth : np.ndarray, shape=R.shape
The smoothed self- or cross-similarity matrix
See Also
--------
filters.diagonal_filter
recurrence_matrix
Examples
--------
Use a 51-frame diagonal smoothing filter to enhance paths in a recurrence matrix
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=30)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity', self=True)
>>> rec_smooth = librosa.segment.path_enhance(rec, 51, window='hann', n_filters=7)
Plot the recurrence matrix before and after smoothing
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 4))
>>> plt.subplot(1,2,1)
>>> librosa.display.specshow(rec, x_axis='time', y_axis='time')
>>> plt.title('Unfiltered recurrence')
>>> plt.subplot(1,2,2)
>>> librosa.display.specshow(rec_smooth, x_axis='time', y_axis='time')
>>> plt.title('Multi-angle enhanced recurrence')
>>> plt.tight_layout()
|
librosa/segment.py
|
def path_enhance(R, n, window='hann', max_ratio=2.0, min_ratio=None, n_filters=7,
                 zero_mean=False, clip=True, **kwargs):
    '''Multi-angle path enhancement for self- and cross-similarity matrices.

    This function convolves multiple diagonal smoothing filters with a
    self-similarity (or recurrence) matrix R, and aggregates the result by an
    element-wise maximum.

    Technically, the output is a matrix R_smooth such that

        `R_smooth[i, j] = max_theta (R * filter_theta)[i, j]`

    where `*` denotes 2-dimensional convolution, and `filter_theta` is a
    smoothing filter at orientation theta.

    This is intended to provide coherent temporal smoothing of self-similarity
    matrices when there are changes in tempo.  Smoothing filters are generated
    at evenly spaced orientations between min_ratio and max_ratio.

    This function is inspired by the multi-angle path enhancement of [1]_, but
    differs by modeling tempo differences in the space of similarity matrices
    rather than re-sampling the underlying features prior to generating the
    self-similarity matrix.

    .. [1] Müller, Meinard and Frank Kurth.
        "Enhancing similarity matrices for music audio analysis."
        2006 IEEE International Conference on Acoustics Speech and Signal
        Processing Proceedings. Vol. 5. IEEE, 2006.

    .. note:: if using recurrence_matrix to construct the input similarity
        matrix, be sure to include the main diagonal by setting `self=True`.
        Otherwise, the diagonal will be suppressed, and this is likely to
        produce discontinuities which will pollute the smoothing filter
        response.

    Parameters
    ----------
    R : np.ndarray
        The self- or cross-similarity matrix to be smoothed.
        Note: sparse inputs are not supported.

    n : int > 0
        The length of the smoothing filter

    window : window specification
        The type of smoothing filter to use.  See `filters.get_window` for
        more information on window specification formats.

    max_ratio : float > 0
        The maximum tempo ratio to support

    min_ratio : float > 0
        The minimum tempo ratio to support.
        If not provided, it will default to `1/max_ratio`

    n_filters : int >= 1
        The number of different smoothing filters to use, evenly spaced
        between `min_ratio` and `max_ratio`.
        If `min_ratio = 1/max_ratio` (the default), using an odd number
        of filters will ensure that the main diagonal (ratio=1) is included.

    zero_mean : bool
        By default, the smoothing filters are non-negative and sum to one
        (i.e. are averaging filters).  If `zero_mean=True`, then the smoothing
        filters are made to sum to zero by subtracting a constant value from
        the non-diagonal coordinates of the filter.  This is primarily useful
        for suppressing blocks while enhancing diagonals.

    clip : bool
        If True, the smoothed similarity matrix will be thresholded at 0, and
        will not contain negative entries.

    kwargs : additional keyword arguments
        Additional arguments to pass to `scipy.ndimage.convolve`

    Returns
    -------
    R_smooth : np.ndarray, shape=R.shape
        The smoothed self- or cross-similarity matrix

    See Also
    --------
    filters.diagonal_filter
    recurrence_matrix
    '''
    if min_ratio is None:
        min_ratio = 1./max_ratio
    elif min_ratio > max_ratio:
        raise ParameterError('min_ratio={} cannot exceed max_ratio={}'.format(min_ratio, max_ratio))

    # Geometrically spaced tempo ratios between min_ratio and max_ratio
    ratios = np.logspace(np.log2(min_ratio), np.log2(max_ratio),
                         num=n_filters, base=2)

    R_smooth = None
    for slope in ratios:
        kernel = diagonal_filter(window, n, slope=slope, zero_mean=zero_mean)
        response = scipy.ndimage.convolve(R, kernel, **kwargs)
        if R_smooth is None:
            R_smooth = response
        else:
            # Aggregate across orientations by element-wise maximum, in place
            np.maximum(R_smooth, response, out=R_smooth)

    if clip:
        # Remove negative entries in place
        np.clip(R_smooth, 0, None, out=R_smooth)

    return R_smooth
|
def path_enhance(R, n, window='hann', max_ratio=2.0, min_ratio=None, n_filters=7,
                 zero_mean=False, clip=True, **kwargs):
    '''Multi-angle path enhancement for self- and cross-similarity matrices.

    Convolves ``R`` with a bank of diagonal smoothing filters at several
    orientations (tempo ratios) and keeps, per cell, the strongest filter
    response:

        `R_smooth[i, j] = max_theta (R * filter_theta)[i, j]`

    Filter orientations are spaced geometrically between ``min_ratio`` and
    ``max_ratio``, which lets the smoothing follow paths whose local tempo
    ratio deviates from 1.

    Parameters
    ----------
    R : np.ndarray
        Self- or cross-similarity matrix to be smoothed (dense only).
    n : int > 0
        Length of each smoothing filter.
    window : window specification
        Shape of the smoothing filter; see `filters.get_window`.
    max_ratio : float > 0
        Largest tempo ratio covered by the filter bank.
    min_ratio : float > 0 or None
        Smallest tempo ratio; defaults to ``1/max_ratio`` when None.
    n_filters : int >= 1
        Number of orientations spaced between ``min_ratio`` and ``max_ratio``.
    zero_mean : bool
        If True, filters are offset to sum to zero, suppressing block
        structure while enhancing diagonals.
    clip : bool
        If True, negative entries of the result are set to zero.
    kwargs : additional keyword arguments
        Passed through to `scipy.ndimage.convolve`.

    Returns
    -------
    R_smooth : np.ndarray, shape=R.shape
        The smoothed self- or cross-similarity matrix.

    See Also
    --------
    filters.diagonal_filter
    recurrence_matrix
    '''
    if min_ratio is None:
        min_ratio = 1./max_ratio
    elif min_ratio > max_ratio:
        raise ParameterError('min_ratio={} cannot exceed max_ratio={}'.format(min_ratio, max_ratio))

    # Geometrically spaced tempo ratios for the filter bank
    ratios = np.logspace(np.log2(min_ratio), np.log2(max_ratio),
                         num=n_filters, base=2)

    result = None
    for slope in ratios:
        smoother = diagonal_filter(window, n, slope=slope, zero_mean=zero_mean)
        response = scipy.ndimage.convolve(R, smoother, **kwargs)
        if result is None:
            result = response
        else:
            # Keep the strongest response per cell (in-place)
            np.maximum(result, response, out=result)

    if clip:
        # Threshold at zero, in-place
        np.clip(result, 0, None, out=result)

    return result
|
[
"Multi",
"-",
"angle",
"path",
"enhancement",
"for",
"self",
"-",
"and",
"cross",
"-",
"similarity",
"matrices",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/segment.py#L748-L877
|
[
"def",
"path_enhance",
"(",
"R",
",",
"n",
",",
"window",
"=",
"'hann'",
",",
"max_ratio",
"=",
"2.0",
",",
"min_ratio",
"=",
"None",
",",
"n_filters",
"=",
"7",
",",
"zero_mean",
"=",
"False",
",",
"clip",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"min_ratio",
"is",
"None",
":",
"min_ratio",
"=",
"1.",
"/",
"max_ratio",
"elif",
"min_ratio",
">",
"max_ratio",
":",
"raise",
"ParameterError",
"(",
"'min_ratio={} cannot exceed max_ratio={}'",
".",
"format",
"(",
"min_ratio",
",",
"max_ratio",
")",
")",
"R_smooth",
"=",
"None",
"for",
"ratio",
"in",
"np",
".",
"logspace",
"(",
"np",
".",
"log2",
"(",
"min_ratio",
")",
",",
"np",
".",
"log2",
"(",
"max_ratio",
")",
",",
"num",
"=",
"n_filters",
",",
"base",
"=",
"2",
")",
":",
"kernel",
"=",
"diagonal_filter",
"(",
"window",
",",
"n",
",",
"slope",
"=",
"ratio",
",",
"zero_mean",
"=",
"zero_mean",
")",
"if",
"R_smooth",
"is",
"None",
":",
"R_smooth",
"=",
"scipy",
".",
"ndimage",
".",
"convolve",
"(",
"R",
",",
"kernel",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"# Compute the point-wise maximum in-place",
"np",
".",
"maximum",
"(",
"R_smooth",
",",
"scipy",
".",
"ndimage",
".",
"convolve",
"(",
"R",
",",
"kernel",
",",
"*",
"*",
"kwargs",
")",
",",
"out",
"=",
"R_smooth",
")",
"if",
"clip",
":",
"# Clip the output in-place",
"np",
".",
"clip",
"(",
"R_smooth",
",",
"0",
",",
"None",
",",
"out",
"=",
"R_smooth",
")",
"return",
"R_smooth"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
onset_detect
|
Onset detection function
:parameters:
- input_file : str
Path to input audio file (wav, mp3, m4a, flac, etc.)
- output_file : str
Path to save onset timestamps as a CSV file
|
examples/onset_detector.py
|
def onset_detect(input_file, output_csv):
    '''Detect onsets in an audio file and export their timestamps.

    :parameters:
      - input_file : str
          Path to input audio file (wav, mp3, m4a, flac, etc.)
      - output_csv : str
          Path to save onset timestamps as a CSV file
    '''
    # Step 1: load the audio, resampling to 22.050 kHz
    print('Loading ', input_file)
    y, sr = librosa.load(input_file, sr=22050)

    # 512-sample hop at 22 kHz is roughly 23 ms per frame
    hop_length = 512

    # Step 2: locate onset events (frame indices)
    print('Detecting onsets...')
    onset_frames = librosa.onset.onset_detect(y=y, sr=sr,
                                              hop_length=hop_length)
    print("Found {:d} onsets.".format(onset_frames.shape[0]))

    # Step 3: convert frame indices to seconds and write them out
    onset_times = librosa.frames_to_time(onset_frames, sr=sr,
                                         hop_length=hop_length)
    print('Saving output to ', output_csv)
    librosa.output.times_csv(output_csv, onset_times)
    print('done!')
|
def onset_detect(input_file, output_csv):
    '''Detect onsets in an audio file and export their timestamps.

    :parameters:
      - input_file : str
          Path to input audio file (wav, mp3, m4a, flac, etc.)
      - output_csv : str
          Path to save onset timestamps as a CSV file
    '''
    # Step 1: load the audio, resampling to 22.050 kHz
    print('Loading ', input_file)
    y, sr = librosa.load(input_file, sr=22050)

    # 512-sample hop at 22 kHz is roughly 23 ms per frame
    hop_length = 512

    # Step 2: locate onset events (frame indices)
    print('Detecting onsets...')
    onset_frames = librosa.onset.onset_detect(y=y, sr=sr,
                                              hop_length=hop_length)
    print("Found {:d} onsets.".format(onset_frames.shape[0]))

    # Step 3: convert frame indices to seconds and write them out
    onset_times = librosa.frames_to_time(onset_frames, sr=sr,
                                         hop_length=hop_length)
    print('Saving output to ', output_csv)
    librosa.output.times_csv(output_csv, onset_times)
    print('done!')
|
[
"Onset",
"detection",
"function"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/onset_detector.py#L16-L51
|
[
"def",
"onset_detect",
"(",
"input_file",
",",
"output_csv",
")",
":",
"# 1. load the wav file and resample to 22.050 KHz",
"print",
"(",
"'Loading '",
",",
"input_file",
")",
"y",
",",
"sr",
"=",
"librosa",
".",
"load",
"(",
"input_file",
",",
"sr",
"=",
"22050",
")",
"# Use a default hop size of 512 frames @ 22KHz ~= 23ms",
"hop_length",
"=",
"512",
"# 2. run onset detection",
"print",
"(",
"'Detecting onsets...'",
")",
"onsets",
"=",
"librosa",
".",
"onset",
".",
"onset_detect",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
")",
"print",
"(",
"\"Found {:d} onsets.\"",
".",
"format",
"(",
"onsets",
".",
"shape",
"[",
"0",
"]",
")",
")",
"# 3. save output",
"# 'beats' will contain the frame numbers of beat events.",
"onset_times",
"=",
"librosa",
".",
"frames_to_time",
"(",
"onsets",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
")",
"print",
"(",
"'Saving output to '",
",",
"output_csv",
")",
"librosa",
".",
"output",
".",
"times_csv",
"(",
"output_csv",
",",
"onset_times",
")",
"print",
"(",
"'done!'",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
frame
|
Slice a time series into overlapping frames.
This implementation uses low-level stride manipulation to avoid
redundant copies of the time series data.
Parameters
----------
y : np.ndarray [shape=(n,)]
Time series to frame. Must be one-dimensional and contiguous
in memory.
frame_length : int > 0 [scalar]
Length of the frame in samples
hop_length : int > 0 [scalar]
Number of samples to hop between frames
Returns
-------
y_frames : np.ndarray [shape=(frame_length, N_FRAMES)]
An array of frames sampled from `y`:
`y_frames[i, j] == y[j * hop_length + i]`
Raises
------
ParameterError
If `y` is not contiguous in memory, not an `np.ndarray`, or
not one-dimensional. See `np.ascontiguous()` for details.
If `hop_length < 1`, frames cannot advance.
If `len(y) < frame_length`.
Examples
--------
Extract 2048-sample frames from `y` with a hop of 64 samples per frame
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.util.frame(y, frame_length=2048, hop_length=64)
array([[ -9.216e-06, 7.710e-06, ..., -2.117e-06, -4.362e-07],
[ 2.518e-06, -6.294e-06, ..., -1.775e-05, -6.365e-06],
...,
[ -7.429e-04, 5.173e-03, ..., 1.105e-05, -5.074e-06],
[ 2.169e-03, 4.867e-03, ..., 3.666e-06, -5.571e-06]], dtype=float32)
|
librosa/util/utils.py
|
def frame(y, frame_length=2048, hop_length=512):
    '''Slice a time series into overlapping frames.

    This implementation uses low-level stride manipulation to avoid
    redundant copies of the time series data.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Time series to frame.  Must be one-dimensional and contiguous
        in memory.
    frame_length : int > 0 [scalar]
        Length of the frame in samples
    hop_length : int > 0 [scalar]
        Number of samples to hop between frames

    Returns
    -------
    y_frames : np.ndarray [shape=(frame_length, N_FRAMES)]
        An array of frames sampled from `y`:
        `y_frames[i, j] == y[j * hop_length + i]`

    Raises
    ------
    ParameterError
        If `y` is not contiguous in memory, not an `np.ndarray`, or
        not one-dimensional.
        If `hop_length < 1`, frames cannot advance.
        If `len(y) < frame_length`.
    '''
    if not isinstance(y, np.ndarray):
        raise ParameterError('Input must be of type numpy.ndarray, '
                             'given type(y)={}'.format(type(y)))
    if y.ndim != 1:
        raise ParameterError('Input must be one-dimensional, '
                             'given y.ndim={}'.format(y.ndim))
    if len(y) < frame_length:
        raise ParameterError('Buffer is too short (n={:d})'
                             ' for frame_length={:d}'.format(len(y), frame_length))
    if hop_length < 1:
        raise ParameterError('Invalid hop_length: {:d}'.format(hop_length))
    if not y.flags['C_CONTIGUOUS']:
        raise ParameterError('Input buffer must be contiguous.')

    # Compute the number of frames that will fit. The end may get truncated.
    # Integer floor division (rather than int(float-division)) avoids any
    # float round-off for very long buffers and is exact for all sizes.
    n_frames = 1 + (len(y) - frame_length) // hop_length

    # Vertical stride is one sample; horizontal stride is `hop_length` samples
    y_frames = as_strided(y, shape=(frame_length, n_frames),
                          strides=(y.itemsize, hop_length * y.itemsize))
    return y_frames
|
def frame(y, frame_length=2048, hop_length=512):
    '''Slice a time series into overlapping frames.

    Uses stride tricks so that the frames are views into `y`, not copies.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        Time series to frame.  Must be one-dimensional and contiguous
        in memory.
    frame_length : int > 0 [scalar]
        Length of each frame, in samples
    hop_length : int > 0 [scalar]
        Number of samples to advance between frames

    Returns
    -------
    y_frames : np.ndarray [shape=(frame_length, N_FRAMES)]
        Frames sampled from `y`:
        `y_frames[i, j] == y[j * hop_length + i]`

    Raises
    ------
    ParameterError
        If `y` is not a contiguous, one-dimensional `np.ndarray`;
        if `hop_length < 1`; or if `len(y) < frame_length`.
    '''
    if not isinstance(y, np.ndarray):
        raise ParameterError('Input must be of type numpy.ndarray, '
                             'given type(y)={}'.format(type(y)))
    if y.ndim != 1:
        raise ParameterError('Input must be one-dimensional, '
                             'given y.ndim={}'.format(y.ndim))
    if len(y) < frame_length:
        raise ParameterError('Buffer is too short (n={:d})'
                             ' for frame_length={:d}'.format(len(y), frame_length))
    if hop_length < 1:
        raise ParameterError('Invalid hop_length: {:d}'.format(hop_length))
    if not y.flags['C_CONTIGUOUS']:
        raise ParameterError('Input buffer must be contiguous.')

    # Number of complete frames that fit; any leftover tail is dropped.
    num_frames = 1 + int((len(y) - frame_length) / hop_length)

    # One sample down the column, `hop_length` samples across the row.
    sample_stride = y.itemsize
    hop_stride = hop_length * sample_stride
    return as_strided(y, shape=(frame_length, num_frames),
                      strides=(sample_stride, hop_stride))
|
[
"Slice",
"a",
"time",
"series",
"into",
"overlapping",
"frames",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L34-L107
|
[
"def",
"frame",
"(",
"y",
",",
"frame_length",
"=",
"2048",
",",
"hop_length",
"=",
"512",
")",
":",
"if",
"not",
"isinstance",
"(",
"y",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ParameterError",
"(",
"'Input must be of type numpy.ndarray, '",
"'given type(y)={}'",
".",
"format",
"(",
"type",
"(",
"y",
")",
")",
")",
"if",
"y",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ParameterError",
"(",
"'Input must be one-dimensional, '",
"'given y.ndim={}'",
".",
"format",
"(",
"y",
".",
"ndim",
")",
")",
"if",
"len",
"(",
"y",
")",
"<",
"frame_length",
":",
"raise",
"ParameterError",
"(",
"'Buffer is too short (n={:d})'",
"' for frame_length={:d}'",
".",
"format",
"(",
"len",
"(",
"y",
")",
",",
"frame_length",
")",
")",
"if",
"hop_length",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"'Invalid hop_length: {:d}'",
".",
"format",
"(",
"hop_length",
")",
")",
"if",
"not",
"y",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
":",
"raise",
"ParameterError",
"(",
"'Input buffer must be contiguous.'",
")",
"# Compute the number of frames that will fit. The end may get truncated.",
"n_frames",
"=",
"1",
"+",
"int",
"(",
"(",
"len",
"(",
"y",
")",
"-",
"frame_length",
")",
"/",
"hop_length",
")",
"# Vertical stride is one sample",
"# Horizontal stride is `hop_length` samples",
"y_frames",
"=",
"as_strided",
"(",
"y",
",",
"shape",
"=",
"(",
"frame_length",
",",
"n_frames",
")",
",",
"strides",
"=",
"(",
"y",
".",
"itemsize",
",",
"hop_length",
"*",
"y",
".",
"itemsize",
")",
")",
"return",
"y_frames"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
valid_audio
|
Validate whether a variable contains valid, mono audio data.
Parameters
----------
y : np.ndarray
The input data to validate
mono : bool
Whether or not to force monophonic audio
Returns
-------
valid : bool
True if all tests pass
Raises
------
ParameterError
If `y` fails to meet the following criteria:
- `type(y)` is `np.ndarray`
- `y.dtype` is floating-point
- `mono == True` and `y.ndim` is not 1
- `mono == False` and `y.ndim` is not 1 or 2
- `np.isfinite(y).all()` is not True
Notes
-----
This function caches at level 20.
Examples
--------
>>> # Only allow monophonic signals
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.util.valid_audio(y)
True
>>> # If we want to allow stereo signals
>>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False)
>>> librosa.util.valid_audio(y, mono=False)
True
|
librosa/util/utils.py
|
def valid_audio(y, mono=True):
    '''Validate whether a variable contains valid, mono audio data.

    Parameters
    ----------
    y : np.ndarray
        The input data to validate
    mono : bool
        Whether or not to force monophonic audio

    Returns
    -------
    valid : bool
        True if all tests pass

    Raises
    ------
    ParameterError
        If `y` is not a floating-point `np.ndarray`, has an invalid
        number of dimensions for the requested channel layout, or
        contains non-finite values.
    '''
    if not isinstance(y, np.ndarray):
        raise ParameterError('data must be of type numpy.ndarray')

    if not np.issubdtype(y.dtype, np.floating):
        raise ParameterError('data must be floating-point')

    # When mono is forced, anything but a 1-d buffer is rejected here;
    # since this branch always raises, the general shape check below
    # behaves exactly as an elif would.
    if mono and y.ndim != 1:
        raise ParameterError('Invalid shape for monophonic audio: '
                             'ndim={:d}, shape={}'.format(y.ndim, y.shape))

    if y.ndim > 2 or y.ndim == 0:
        raise ParameterError('Audio must have shape (samples,) or (channels, samples). '
                             'Received shape={}'.format(y.shape))

    if not np.isfinite(y).all():
        raise ParameterError('Audio buffer is not finite everywhere')

    return True
|
def valid_audio(y, mono=True):
    '''Validate whether a variable contains valid, mono audio data.

    Parameters
    ----------
    y : np.ndarray
        The input data to validate
    mono : bool
        Whether or not to force monophonic audio

    Returns
    -------
    valid : bool
        True if all tests pass

    Raises
    ------
    ParameterError
        If `y` is not a floating-point `np.ndarray`, has an invalid
        number of dimensions for the requested channel layout, or
        contains non-finite values.
    '''
    if not isinstance(y, np.ndarray):
        raise ParameterError('data must be of type numpy.ndarray')

    if not np.issubdtype(y.dtype, np.floating):
        raise ParameterError('data must be floating-point')

    # When mono is forced, anything but a 1-d buffer is rejected here;
    # since this branch always raises, the general shape check below
    # behaves exactly as an elif would.
    if mono and y.ndim != 1:
        raise ParameterError('Invalid shape for monophonic audio: '
                             'ndim={:d}, shape={}'.format(y.ndim, y.shape))

    if y.ndim > 2 or y.ndim == 0:
        raise ParameterError('Audio must have shape (samples,) or (channels, samples). '
                             'Received shape={}'.format(y.shape))

    if not np.isfinite(y).all():
        raise ParameterError('Audio buffer is not finite everywhere')

    return True
|
[
"Validate",
"whether",
"a",
"variable",
"contains",
"valid",
"mono",
"audio",
"data",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L111-L172
|
[
"def",
"valid_audio",
"(",
"y",
",",
"mono",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"y",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ParameterError",
"(",
"'data must be of type numpy.ndarray'",
")",
"if",
"not",
"np",
".",
"issubdtype",
"(",
"y",
".",
"dtype",
",",
"np",
".",
"floating",
")",
":",
"raise",
"ParameterError",
"(",
"'data must be floating-point'",
")",
"if",
"mono",
"and",
"y",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ParameterError",
"(",
"'Invalid shape for monophonic audio: '",
"'ndim={:d}, shape={}'",
".",
"format",
"(",
"y",
".",
"ndim",
",",
"y",
".",
"shape",
")",
")",
"elif",
"y",
".",
"ndim",
">",
"2",
"or",
"y",
".",
"ndim",
"==",
"0",
":",
"raise",
"ParameterError",
"(",
"'Audio must have shape (samples,) or (channels, samples). '",
"'Received shape={}'",
".",
"format",
"(",
"y",
".",
"shape",
")",
")",
"if",
"not",
"np",
".",
"isfinite",
"(",
"y",
")",
".",
"all",
"(",
")",
":",
"raise",
"ParameterError",
"(",
"'Audio buffer is not finite everywhere'",
")",
"return",
"True"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
valid_int
|
Ensure that an input value is integer-typed.
This is primarily useful for ensuring integrable-valued
array indices.
Parameters
----------
x : number
A scalar value to be cast to int
cast : function [optional]
A function to modify `x` before casting.
Default: `np.floor`
Returns
-------
x_int : int
`x_int = int(cast(x))`
Raises
------
ParameterError
If `cast` is provided and is not callable.
|
librosa/util/utils.py
|
def valid_int(x, cast=None):
    '''Ensure that an input value is integer-typed.

    This is primarily useful for ensuring integer-valued
    array indices.

    Parameters
    ----------
    x : number
        A scalar value to be cast to int
    cast : function [optional]
        A function to modify `x` before casting.
        Default: `np.floor`

    Returns
    -------
    x_int : int
        `x_int = int(cast(x))`

    Raises
    ------
    ParameterError
        If `cast` is provided and is not callable.
    '''
    if cast is None:
        cast = np.floor

    # `callable` is a builtin in both Python 2 and Python 3.2+,
    # so the legacy `six.callable` shim is unnecessary here.
    if not callable(cast):
        raise ParameterError('cast parameter must be callable')

    return int(cast(x))
|
def valid_int(x, cast=None):
    '''Ensure that an input value is integer-typed.

    This is primarily useful for ensuring integer-valued
    array indices.

    Parameters
    ----------
    x : number
        A scalar value to be cast to int
    cast : function [optional]
        A function to modify `x` before casting.
        Default: `np.floor`

    Returns
    -------
    x_int : int
        `x_int = int(cast(x))`

    Raises
    ------
    ParameterError
        If `cast` is provided and is not callable.
    '''
    if cast is None:
        cast = np.floor

    # `callable` is a builtin in both Python 2 and Python 3.2+,
    # so the legacy `six.callable` shim is unnecessary here.
    if not callable(cast):
        raise ParameterError('cast parameter must be callable')

    return int(cast(x))
|
[
"Ensure",
"that",
"an",
"input",
"value",
"is",
"integer",
"-",
"typed",
".",
"This",
"is",
"primarily",
"useful",
"for",
"ensuring",
"integrable",
"-",
"valued",
"array",
"indices",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L175-L206
|
[
"def",
"valid_int",
"(",
"x",
",",
"cast",
"=",
"None",
")",
":",
"if",
"cast",
"is",
"None",
":",
"cast",
"=",
"np",
".",
"floor",
"if",
"not",
"six",
".",
"callable",
"(",
"cast",
")",
":",
"raise",
"ParameterError",
"(",
"'cast parameter must be callable'",
")",
"return",
"int",
"(",
"cast",
"(",
"x",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
valid_intervals
|
Ensure that an array is a valid representation of time intervals:
- intervals.ndim == 2
- intervals.shape[1] == 2
- intervals[i, 0] <= intervals[i, 1] for all i
Parameters
----------
intervals : np.ndarray [shape=(n, 2)]
set of time intervals
Returns
-------
valid : bool
True if `intervals` passes validation.
|
librosa/util/utils.py
|
def valid_intervals(intervals):
    '''Validate an array of time intervals.

    A valid interval array has shape (n, 2) with
    `intervals[i, 0] <= intervals[i, 1]` for every row `i`.

    Parameters
    ----------
    intervals : np.ndarray [shape=(n, 2)]
        set of time intervals

    Returns
    -------
    valid : bool
        True if `intervals` passes validation.

    Raises
    ------
    ParameterError
        If the array shape is wrong or any interval has negative duration.
    '''
    if intervals.ndim != 2 or intervals.shape[-1] != 2:
        raise ParameterError('intervals must have shape (n, 2)')

    starts = intervals[:, 0]
    ends = intervals[:, 1]
    if np.any(starts > ends):
        raise ParameterError('intervals={} must have non-negative durations'.format(intervals))

    return True
|
def valid_intervals(intervals):
    '''Validate an array of time intervals.

    A valid interval array has shape (n, 2) with
    `intervals[i, 0] <= intervals[i, 1]` for every row `i`.

    Parameters
    ----------
    intervals : np.ndarray [shape=(n, 2)]
        set of time intervals

    Returns
    -------
    valid : bool
        True if `intervals` passes validation.

    Raises
    ------
    ParameterError
        If the array shape is wrong or any interval has negative duration.
    '''
    if intervals.ndim != 2 or intervals.shape[-1] != 2:
        raise ParameterError('intervals must have shape (n, 2)')

    starts = intervals[:, 0]
    ends = intervals[:, 1]
    if np.any(starts > ends):
        raise ParameterError('intervals={} must have non-negative durations'.format(intervals))

    return True
|
[
"Ensure",
"that",
"an",
"array",
"is",
"a",
"valid",
"representation",
"of",
"time",
"intervals",
":"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L209-L233
|
[
"def",
"valid_intervals",
"(",
"intervals",
")",
":",
"if",
"intervals",
".",
"ndim",
"!=",
"2",
"or",
"intervals",
".",
"shape",
"[",
"-",
"1",
"]",
"!=",
"2",
":",
"raise",
"ParameterError",
"(",
"'intervals must have shape (n, 2)'",
")",
"if",
"np",
".",
"any",
"(",
"intervals",
"[",
":",
",",
"0",
"]",
">",
"intervals",
"[",
":",
",",
"1",
"]",
")",
":",
"raise",
"ParameterError",
"(",
"'intervals={} must have non-negative durations'",
".",
"format",
"(",
"intervals",
")",
")",
"return",
"True"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
pad_center
|
Wrapper for np.pad to automatically center an array prior to padding.
This is analogous to `str.center()`
Examples
--------
>>> # Generate a vector
>>> data = np.ones(5)
>>> librosa.util.pad_center(data, 10, mode='constant')
array([ 0., 0., 1., 1., 1., 1., 1., 0., 0., 0.])
>>> # Pad a matrix along its first dimension
>>> data = np.ones((3, 5))
>>> librosa.util.pad_center(data, 7, axis=0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # Or its second dimension
>>> librosa.util.pad_center(data, 7, axis=1)
array([[ 0., 1., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 1., 1., 0.]])
Parameters
----------
data : np.ndarray
Vector to be padded and centered
size : int >= len(data) [scalar]
Length to pad `data`
axis : int
Axis along which to pad and center the data
kwargs : additional keyword arguments
arguments passed to `np.pad()`
Returns
-------
data_padded : np.ndarray
`data` centered and padded to length `size` along the
specified axis
Raises
------
ParameterError
If `size < data.shape[axis]`
See Also
--------
numpy.pad
|
librosa/util/utils.py
|
def pad_center(data, size, axis=-1, **kwargs):
    '''Pad an array so its original content is centered along one axis.

    Analogous to `str.center()`: `data` is placed in the middle of a new
    array of length `size` along `axis`, padded on both sides (with zeros
    unless another `mode` is supplied).

    Parameters
    ----------
    data : np.ndarray
        Array to be padded and centered
    size : int >= data.shape[axis] [scalar]
        Target length along `axis`
    axis : int
        Axis along which to pad and center the data
    kwargs : additional keyword arguments
        arguments passed to `np.pad()` (default `mode='constant'`)

    Returns
    -------
    data_padded : np.ndarray
        `data` centered and padded to length `size` along the
        specified axis

    Raises
    ------
    ParameterError
        If `size < data.shape[axis]`

    See Also
    --------
    numpy.pad
    '''
    kwargs.setdefault('mode', 'constant')

    n = data.shape[axis]

    # Left side gets the floor of the slack; the right side absorbs the
    # remainder when (size - n) is odd.
    lpad = int((size - n) // 2)

    if lpad < 0:
        raise ParameterError(('Target size ({:d}) must be '
                              'at least input size ({:d})').format(size, n))

    widths = [(0, 0)] * data.ndim
    widths[axis] = (lpad, int(size - n - lpad))

    return np.pad(data, widths, **kwargs)
|
def pad_center(data, size, axis=-1, **kwargs):
    '''Pad an array so its original content is centered along one axis.

    Analogous to `str.center()`: `data` is placed in the middle of a new
    array of length `size` along `axis`, padded on both sides (with zeros
    unless another `mode` is supplied).

    Parameters
    ----------
    data : np.ndarray
        Array to be padded and centered
    size : int >= data.shape[axis] [scalar]
        Target length along `axis`
    axis : int
        Axis along which to pad and center the data
    kwargs : additional keyword arguments
        arguments passed to `np.pad()` (default `mode='constant'`)

    Returns
    -------
    data_padded : np.ndarray
        `data` centered and padded to length `size` along the
        specified axis

    Raises
    ------
    ParameterError
        If `size < data.shape[axis]`

    See Also
    --------
    numpy.pad
    '''
    kwargs.setdefault('mode', 'constant')

    n = data.shape[axis]

    # Left side gets the floor of the slack; the right side absorbs the
    # remainder when (size - n) is odd.
    lpad = int((size - n) // 2)

    if lpad < 0:
        raise ParameterError(('Target size ({:d}) must be '
                              'at least input size ({:d})').format(size, n))

    widths = [(0, 0)] * data.ndim
    widths[axis] = (lpad, int(size - n - lpad))

    return np.pad(data, widths, **kwargs)
|
[
"Wrapper",
"for",
"np",
".",
"pad",
"to",
"automatically",
"center",
"an",
"array",
"prior",
"to",
"padding",
".",
"This",
"is",
"analogous",
"to",
"str",
".",
"center",
"()"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L236-L306
|
[
"def",
"pad_center",
"(",
"data",
",",
"size",
",",
"axis",
"=",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'mode'",
",",
"'constant'",
")",
"n",
"=",
"data",
".",
"shape",
"[",
"axis",
"]",
"lpad",
"=",
"int",
"(",
"(",
"size",
"-",
"n",
")",
"//",
"2",
")",
"lengths",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
"*",
"data",
".",
"ndim",
"lengths",
"[",
"axis",
"]",
"=",
"(",
"lpad",
",",
"int",
"(",
"size",
"-",
"n",
"-",
"lpad",
")",
")",
"if",
"lpad",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"(",
"'Target size ({:d}) must be '",
"'at least input size ({:d})'",
")",
".",
"format",
"(",
"size",
",",
"n",
")",
")",
"return",
"np",
".",
"pad",
"(",
"data",
",",
"lengths",
",",
"*",
"*",
"kwargs",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
fix_length
|
Fix the length an array `data` to exactly `size`.
If `data.shape[axis] < n`, pad according to the provided kwargs.
By default, `data` is padded with trailing zeros.
Examples
--------
>>> y = np.arange(7)
>>> # Default: pad with zeros
>>> librosa.util.fix_length(y, 10)
array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> # Trim to a desired length
>>> librosa.util.fix_length(y, 5)
array([0, 1, 2, 3, 4])
>>> # Use edge-padding instead of zeros
>>> librosa.util.fix_length(y, 10, mode='edge')
array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6])
Parameters
----------
data : np.ndarray
array to be length-adjusted
size : int >= 0 [scalar]
desired length of the array
axis : int, <= data.ndim
axis along which to fix length
kwargs : additional keyword arguments
Parameters to `np.pad()`
Returns
-------
data_fixed : np.ndarray [shape=data.shape]
`data` either trimmed or padded to length `size`
along the specified axis.
See Also
--------
numpy.pad
|
librosa/util/utils.py
|
def fix_length(data, size, axis=-1, **kwargs):
'''Fix the length an array `data` to exactly `size`.
If `data.shape[axis] < n`, pad according to the provided kwargs.
By default, `data` is padded with trailing zeros.
Examples
--------
>>> y = np.arange(7)
>>> # Default: pad with zeros
>>> librosa.util.fix_length(y, 10)
array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> # Trim to a desired length
>>> librosa.util.fix_length(y, 5)
array([0, 1, 2, 3, 4])
>>> # Use edge-padding instead of zeros
>>> librosa.util.fix_length(y, 10, mode='edge')
array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6])
Parameters
----------
data : np.ndarray
array to be length-adjusted
size : int >= 0 [scalar]
desired length of the array
axis : int, <= data.ndim
axis along which to fix length
kwargs : additional keyword arguments
Parameters to `np.pad()`
Returns
-------
data_fixed : np.ndarray [shape=data.shape]
`data` either trimmed or padded to length `size`
along the specified axis.
See Also
--------
numpy.pad
'''
kwargs.setdefault('mode', 'constant')
n = data.shape[axis]
if n > size:
slices = [slice(None)] * data.ndim
slices[axis] = slice(0, size)
return data[tuple(slices)]
elif n < size:
lengths = [(0, 0)] * data.ndim
lengths[axis] = (0, size - n)
return np.pad(data, lengths, **kwargs)
return data
|
def fix_length(data, size, axis=-1, **kwargs):
'''Fix the length an array `data` to exactly `size`.
If `data.shape[axis] < n`, pad according to the provided kwargs.
By default, `data` is padded with trailing zeros.
Examples
--------
>>> y = np.arange(7)
>>> # Default: pad with zeros
>>> librosa.util.fix_length(y, 10)
array([0, 1, 2, 3, 4, 5, 6, 0, 0, 0])
>>> # Trim to a desired length
>>> librosa.util.fix_length(y, 5)
array([0, 1, 2, 3, 4])
>>> # Use edge-padding instead of zeros
>>> librosa.util.fix_length(y, 10, mode='edge')
array([0, 1, 2, 3, 4, 5, 6, 6, 6, 6])
Parameters
----------
data : np.ndarray
array to be length-adjusted
size : int >= 0 [scalar]
desired length of the array
axis : int, <= data.ndim
axis along which to fix length
kwargs : additional keyword arguments
Parameters to `np.pad()`
Returns
-------
data_fixed : np.ndarray [shape=data.shape]
`data` either trimmed or padded to length `size`
along the specified axis.
See Also
--------
numpy.pad
'''
kwargs.setdefault('mode', 'constant')
n = data.shape[axis]
if n > size:
slices = [slice(None)] * data.ndim
slices[axis] = slice(0, size)
return data[tuple(slices)]
elif n < size:
lengths = [(0, 0)] * data.ndim
lengths[axis] = (0, size - n)
return np.pad(data, lengths, **kwargs)
return data
|
[
"Fix",
"the",
"length",
"an",
"array",
"data",
"to",
"exactly",
"size",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L309-L367
|
[
"def",
"fix_length",
"(",
"data",
",",
"size",
",",
"axis",
"=",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
".",
"setdefault",
"(",
"'mode'",
",",
"'constant'",
")",
"n",
"=",
"data",
".",
"shape",
"[",
"axis",
"]",
"if",
"n",
">",
"size",
":",
"slices",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"data",
".",
"ndim",
"slices",
"[",
"axis",
"]",
"=",
"slice",
"(",
"0",
",",
"size",
")",
"return",
"data",
"[",
"tuple",
"(",
"slices",
")",
"]",
"elif",
"n",
"<",
"size",
":",
"lengths",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
"*",
"data",
".",
"ndim",
"lengths",
"[",
"axis",
"]",
"=",
"(",
"0",
",",
"size",
"-",
"n",
")",
"return",
"np",
".",
"pad",
"(",
"data",
",",
"lengths",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
fix_frames
|
Fix a list of frames to lie within [x_min, x_max]
Examples
--------
>>> # Generate a list of frame indices
>>> frames = np.arange(0, 1000.0, 50)
>>> frames
array([ 0., 50., 100., 150., 200., 250., 300., 350.,
400., 450., 500., 550., 600., 650., 700., 750.,
800., 850., 900., 950.])
>>> # Clip to span at most 250
>>> librosa.util.fix_frames(frames, x_max=250)
array([ 0, 50, 100, 150, 200, 250])
>>> # Or pad to span up to 2500
>>> librosa.util.fix_frames(frames, x_max=2500)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400,
450, 500, 550, 600, 650, 700, 750, 800, 850,
900, 950, 2500])
>>> librosa.util.fix_frames(frames, x_max=2500, pad=False)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500,
550, 600, 650, 700, 750, 800, 850, 900, 950])
>>> # Or starting away from zero
>>> frames = np.arange(200, 500, 33)
>>> frames
array([200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames, x_max=500)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497,
500])
Parameters
----------
frames : np.ndarray [shape=(n_frames,)]
List of non-negative frame indices
x_min : int >= 0 or None
Minimum allowed frame index
x_max : int >= 0 or None
Maximum allowed frame index
pad : boolean
If `True`, then `frames` is expanded to span the full range
`[x_min, x_max]`
Returns
-------
fixed_frames : np.ndarray [shape=(n_fixed_frames,), dtype=int]
Fixed frame indices, flattened and sorted
Raises
------
ParameterError
If `frames` contains negative values
|
librosa/util/utils.py
|
def fix_frames(frames, x_min=0, x_max=None, pad=True):
'''Fix a list of frames to lie within [x_min, x_max]
Examples
--------
>>> # Generate a list of frame indices
>>> frames = np.arange(0, 1000.0, 50)
>>> frames
array([ 0., 50., 100., 150., 200., 250., 300., 350.,
400., 450., 500., 550., 600., 650., 700., 750.,
800., 850., 900., 950.])
>>> # Clip to span at most 250
>>> librosa.util.fix_frames(frames, x_max=250)
array([ 0, 50, 100, 150, 200, 250])
>>> # Or pad to span up to 2500
>>> librosa.util.fix_frames(frames, x_max=2500)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400,
450, 500, 550, 600, 650, 700, 750, 800, 850,
900, 950, 2500])
>>> librosa.util.fix_frames(frames, x_max=2500, pad=False)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500,
550, 600, 650, 700, 750, 800, 850, 900, 950])
>>> # Or starting away from zero
>>> frames = np.arange(200, 500, 33)
>>> frames
array([200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames, x_max=500)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497,
500])
Parameters
----------
frames : np.ndarray [shape=(n_frames,)]
List of non-negative frame indices
x_min : int >= 0 or None
Minimum allowed frame index
x_max : int >= 0 or None
Maximum allowed frame index
pad : boolean
If `True`, then `frames` is expanded to span the full range
`[x_min, x_max]`
Returns
-------
fixed_frames : np.ndarray [shape=(n_fixed_frames,), dtype=int]
Fixed frame indices, flattened and sorted
Raises
------
ParameterError
If `frames` contains negative values
'''
frames = np.asarray(frames)
if np.any(frames < 0):
raise ParameterError('Negative frame index detected')
if pad and (x_min is not None or x_max is not None):
frames = np.clip(frames, x_min, x_max)
if pad:
pad_data = []
if x_min is not None:
pad_data.append(x_min)
if x_max is not None:
pad_data.append(x_max)
frames = np.concatenate((pad_data, frames))
if x_min is not None:
frames = frames[frames >= x_min]
if x_max is not None:
frames = frames[frames <= x_max]
return np.unique(frames).astype(int)
|
def fix_frames(frames, x_min=0, x_max=None, pad=True):
'''Fix a list of frames to lie within [x_min, x_max]
Examples
--------
>>> # Generate a list of frame indices
>>> frames = np.arange(0, 1000.0, 50)
>>> frames
array([ 0., 50., 100., 150., 200., 250., 300., 350.,
400., 450., 500., 550., 600., 650., 700., 750.,
800., 850., 900., 950.])
>>> # Clip to span at most 250
>>> librosa.util.fix_frames(frames, x_max=250)
array([ 0, 50, 100, 150, 200, 250])
>>> # Or pad to span up to 2500
>>> librosa.util.fix_frames(frames, x_max=2500)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400,
450, 500, 550, 600, 650, 700, 750, 800, 850,
900, 950, 2500])
>>> librosa.util.fix_frames(frames, x_max=2500, pad=False)
array([ 0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500,
550, 600, 650, 700, 750, 800, 850, 900, 950])
>>> # Or starting away from zero
>>> frames = np.arange(200, 500, 33)
>>> frames
array([200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497])
>>> librosa.util.fix_frames(frames, x_max=500)
array([ 0, 200, 233, 266, 299, 332, 365, 398, 431, 464, 497,
500])
Parameters
----------
frames : np.ndarray [shape=(n_frames,)]
List of non-negative frame indices
x_min : int >= 0 or None
Minimum allowed frame index
x_max : int >= 0 or None
Maximum allowed frame index
pad : boolean
If `True`, then `frames` is expanded to span the full range
`[x_min, x_max]`
Returns
-------
fixed_frames : np.ndarray [shape=(n_fixed_frames,), dtype=int]
Fixed frame indices, flattened and sorted
Raises
------
ParameterError
If `frames` contains negative values
'''
frames = np.asarray(frames)
if np.any(frames < 0):
raise ParameterError('Negative frame index detected')
if pad and (x_min is not None or x_max is not None):
frames = np.clip(frames, x_min, x_max)
if pad:
pad_data = []
if x_min is not None:
pad_data.append(x_min)
if x_max is not None:
pad_data.append(x_max)
frames = np.concatenate((pad_data, frames))
if x_min is not None:
frames = frames[frames >= x_min]
if x_max is not None:
frames = frames[frames <= x_max]
return np.unique(frames).astype(int)
|
[
"Fix",
"a",
"list",
"of",
"frames",
"to",
"lie",
"within",
"[",
"x_min",
"x_max",
"]"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L370-L452
|
[
"def",
"fix_frames",
"(",
"frames",
",",
"x_min",
"=",
"0",
",",
"x_max",
"=",
"None",
",",
"pad",
"=",
"True",
")",
":",
"frames",
"=",
"np",
".",
"asarray",
"(",
"frames",
")",
"if",
"np",
".",
"any",
"(",
"frames",
"<",
"0",
")",
":",
"raise",
"ParameterError",
"(",
"'Negative frame index detected'",
")",
"if",
"pad",
"and",
"(",
"x_min",
"is",
"not",
"None",
"or",
"x_max",
"is",
"not",
"None",
")",
":",
"frames",
"=",
"np",
".",
"clip",
"(",
"frames",
",",
"x_min",
",",
"x_max",
")",
"if",
"pad",
":",
"pad_data",
"=",
"[",
"]",
"if",
"x_min",
"is",
"not",
"None",
":",
"pad_data",
".",
"append",
"(",
"x_min",
")",
"if",
"x_max",
"is",
"not",
"None",
":",
"pad_data",
".",
"append",
"(",
"x_max",
")",
"frames",
"=",
"np",
".",
"concatenate",
"(",
"(",
"pad_data",
",",
"frames",
")",
")",
"if",
"x_min",
"is",
"not",
"None",
":",
"frames",
"=",
"frames",
"[",
"frames",
">=",
"x_min",
"]",
"if",
"x_max",
"is",
"not",
"None",
":",
"frames",
"=",
"frames",
"[",
"frames",
"<=",
"x_max",
"]",
"return",
"np",
".",
"unique",
"(",
"frames",
")",
".",
"astype",
"(",
"int",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
axis_sort
|
Sort an array along its rows or columns.
Examples
--------
Visualize NMF output for a spectrogram S
>>> # Sort the columns of W by peak frequency bin
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> W, H = librosa.decompose.decompose(S, n_components=32)
>>> W_sort = librosa.util.axis_sort(W)
Or sort by the lowest frequency bin
>>> W_sort = librosa.util.axis_sort(W, value=np.argmin)
Or sort the rows instead of the columns
>>> W_sort_rows = librosa.util.axis_sort(W, axis=0)
Get the sorting index also, and use it to permute the rows of H
>>> W_sort, idx = librosa.util.axis_sort(W, index=True)
>>> H_sort = H[idx, :]
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 2, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(W, ref=np.max),
... y_axis='log')
>>> plt.title('W')
>>> plt.subplot(2, 2, 2)
>>> librosa.display.specshow(H, x_axis='time')
>>> plt.title('H')
>>> plt.subplot(2, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(W_sort,
... ref=np.max),
... y_axis='log')
>>> plt.title('W sorted')
>>> plt.subplot(2, 2, 4)
>>> librosa.display.specshow(H_sort, x_axis='time')
>>> plt.title('H sorted')
>>> plt.tight_layout()
Parameters
----------
S : np.ndarray [shape=(d, n)]
Array to be sorted
axis : int [scalar]
The axis along which to compute the sorting values
- `axis=0` to sort rows by peak column index
- `axis=1` to sort columns by peak row index
index : boolean [scalar]
If true, returns the index array as well as the permuted data.
value : function
function to return the index corresponding to the sort order.
Default: `np.argmax`.
Returns
-------
S_sort : np.ndarray [shape=(d, n)]
`S` with the columns or rows permuted in sorting order
idx : np.ndarray (optional) [shape=(d,) or (n,)]
If `index == True`, the sorting index used to permute `S`.
Length of `idx` corresponds to the selected `axis`.
Raises
------
ParameterError
If `S` does not have exactly 2 dimensions (`S.ndim != 2`)
|
librosa/util/utils.py
|
def axis_sort(S, axis=-1, index=False, value=None):
'''Sort an array along its rows or columns.
Examples
--------
Visualize NMF output for a spectrogram S
>>> # Sort the columns of W by peak frequency bin
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> W, H = librosa.decompose.decompose(S, n_components=32)
>>> W_sort = librosa.util.axis_sort(W)
Or sort by the lowest frequency bin
>>> W_sort = librosa.util.axis_sort(W, value=np.argmin)
Or sort the rows instead of the columns
>>> W_sort_rows = librosa.util.axis_sort(W, axis=0)
Get the sorting index also, and use it to permute the rows of H
>>> W_sort, idx = librosa.util.axis_sort(W, index=True)
>>> H_sort = H[idx, :]
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 2, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(W, ref=np.max),
... y_axis='log')
>>> plt.title('W')
>>> plt.subplot(2, 2, 2)
>>> librosa.display.specshow(H, x_axis='time')
>>> plt.title('H')
>>> plt.subplot(2, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(W_sort,
... ref=np.max),
... y_axis='log')
>>> plt.title('W sorted')
>>> plt.subplot(2, 2, 4)
>>> librosa.display.specshow(H_sort, x_axis='time')
>>> plt.title('H sorted')
>>> plt.tight_layout()
Parameters
----------
S : np.ndarray [shape=(d, n)]
Array to be sorted
axis : int [scalar]
The axis along which to compute the sorting values
- `axis=0` to sort rows by peak column index
- `axis=1` to sort columns by peak row index
index : boolean [scalar]
If true, returns the index array as well as the permuted data.
value : function
function to return the index corresponding to the sort order.
Default: `np.argmax`.
Returns
-------
S_sort : np.ndarray [shape=(d, n)]
`S` with the columns or rows permuted in sorting order
idx : np.ndarray (optional) [shape=(d,) or (n,)]
If `index == True`, the sorting index used to permute `S`.
Length of `idx` corresponds to the selected `axis`.
Raises
------
ParameterError
If `S` does not have exactly 2 dimensions (`S.ndim != 2`)
'''
if value is None:
value = np.argmax
if S.ndim != 2:
raise ParameterError('axis_sort is only defined for 2D arrays')
bin_idx = value(S, axis=np.mod(1-axis, S.ndim))
idx = np.argsort(bin_idx)
sort_slice = [slice(None)] * S.ndim
sort_slice[axis] = idx
if index:
return S[tuple(sort_slice)], idx
else:
return S[tuple(sort_slice)]
|
def axis_sort(S, axis=-1, index=False, value=None):
'''Sort an array along its rows or columns.
Examples
--------
Visualize NMF output for a spectrogram S
>>> # Sort the columns of W by peak frequency bin
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> W, H = librosa.decompose.decompose(S, n_components=32)
>>> W_sort = librosa.util.axis_sort(W)
Or sort by the lowest frequency bin
>>> W_sort = librosa.util.axis_sort(W, value=np.argmin)
Or sort the rows instead of the columns
>>> W_sort_rows = librosa.util.axis_sort(W, axis=0)
Get the sorting index also, and use it to permute the rows of H
>>> W_sort, idx = librosa.util.axis_sort(W, index=True)
>>> H_sort = H[idx, :]
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 2, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(W, ref=np.max),
... y_axis='log')
>>> plt.title('W')
>>> plt.subplot(2, 2, 2)
>>> librosa.display.specshow(H, x_axis='time')
>>> plt.title('H')
>>> plt.subplot(2, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(W_sort,
... ref=np.max),
... y_axis='log')
>>> plt.title('W sorted')
>>> plt.subplot(2, 2, 4)
>>> librosa.display.specshow(H_sort, x_axis='time')
>>> plt.title('H sorted')
>>> plt.tight_layout()
Parameters
----------
S : np.ndarray [shape=(d, n)]
Array to be sorted
axis : int [scalar]
The axis along which to compute the sorting values
- `axis=0` to sort rows by peak column index
- `axis=1` to sort columns by peak row index
index : boolean [scalar]
If true, returns the index array as well as the permuted data.
value : function
function to return the index corresponding to the sort order.
Default: `np.argmax`.
Returns
-------
S_sort : np.ndarray [shape=(d, n)]
`S` with the columns or rows permuted in sorting order
idx : np.ndarray (optional) [shape=(d,) or (n,)]
If `index == True`, the sorting index used to permute `S`.
Length of `idx` corresponds to the selected `axis`.
Raises
------
ParameterError
If `S` does not have exactly 2 dimensions (`S.ndim != 2`)
'''
if value is None:
value = np.argmax
if S.ndim != 2:
raise ParameterError('axis_sort is only defined for 2D arrays')
bin_idx = value(S, axis=np.mod(1-axis, S.ndim))
idx = np.argsort(bin_idx)
sort_slice = [slice(None)] * S.ndim
sort_slice[axis] = idx
if index:
return S[tuple(sort_slice)], idx
else:
return S[tuple(sort_slice)]
|
[
"Sort",
"an",
"array",
"along",
"its",
"rows",
"or",
"columns",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L455-L549
|
[
"def",
"axis_sort",
"(",
"S",
",",
"axis",
"=",
"-",
"1",
",",
"index",
"=",
"False",
",",
"value",
"=",
"None",
")",
":",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"np",
".",
"argmax",
"if",
"S",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ParameterError",
"(",
"'axis_sort is only defined for 2D arrays'",
")",
"bin_idx",
"=",
"value",
"(",
"S",
",",
"axis",
"=",
"np",
".",
"mod",
"(",
"1",
"-",
"axis",
",",
"S",
".",
"ndim",
")",
")",
"idx",
"=",
"np",
".",
"argsort",
"(",
"bin_idx",
")",
"sort_slice",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"S",
".",
"ndim",
"sort_slice",
"[",
"axis",
"]",
"=",
"idx",
"if",
"index",
":",
"return",
"S",
"[",
"tuple",
"(",
"sort_slice",
")",
"]",
",",
"idx",
"else",
":",
"return",
"S",
"[",
"tuple",
"(",
"sort_slice",
")",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
normalize
|
Normalize an array along a chosen axis.
Given a norm (described below) and a target axis, the input
array is scaled so that
`norm(S, axis=axis) == 1`
For example, `axis=0` normalizes each column of a 2-d array
by aggregating over the rows (0-axis).
Similarly, `axis=1` normalizes each row of a 2-d array.
This function also supports thresholding small-norm slices:
any slice (i.e., row or column) with norm below a specified
`threshold` can be left un-normalized, set to all-zeros, or
filled with uniform non-zero values that normalize to 1.
Note: the semantics of this function differ from
`scipy.linalg.norm` in two ways: multi-dimensional arrays
are supported, but matrix-norms are not.
Parameters
----------
S : np.ndarray
The matrix to normalize
norm : {np.inf, -np.inf, 0, float > 0, None}
- `np.inf` : maximum absolute value
- `-np.inf` : mininum absolute value
- `0` : number of non-zeros (the support)
- float : corresponding l_p norm
See `scipy.linalg.norm` for details.
- None : no normalization is performed
axis : int [scalar]
Axis along which to compute the norm.
threshold : number > 0 [optional]
Only the columns (or rows) with norm at least `threshold` are
normalized.
By default, the threshold is determined from
the numerical precision of `S.dtype`.
fill : None or bool
If None, then columns (or rows) with norm below `threshold`
are left as is.
If False, then columns (rows) with norm below `threshold`
are set to 0.
If True, then columns (rows) with norm below `threshold`
are filled uniformly such that the corresponding norm is 1.
.. note:: `fill=True` is incompatible with `norm=0` because
no uniform vector exists with l0 "norm" equal to 1.
Returns
-------
S_norm : np.ndarray [shape=S.shape]
Normalized array
Raises
------
ParameterError
If `norm` is not among the valid types defined above
If `S` is not finite
If `fill=True` and `norm=0`
See Also
--------
scipy.linalg.norm
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct an example matrix
>>> S = np.vander(np.arange(-2.0, 2.0))
>>> S
array([[-8., 4., -2., 1.],
[-1., 1., -1., 1.],
[ 0., 0., 0., 1.],
[ 1., 1., 1., 1.]])
>>> # Max (l-infinity)-normalize the columns
>>> librosa.util.normalize(S)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # Max (l-infinity)-normalize the rows
>>> librosa.util.normalize(S, axis=1)
array([[-1. , 0.5 , -0.25 , 0.125],
[-1. , 1. , -1. , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 1. , 1. , 1. , 1. ]])
>>> # l1-normalize the columns
>>> librosa.util.normalize(S, norm=1)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
>>> # l2-normalize the columns
>>> librosa.util.normalize(S, norm=2)
array([[-0.985, 0.943, -0.816, 0.5 ],
[-0.123, 0.236, -0.408, 0.5 ],
[ 0. , 0. , 0. , 0.5 ],
[ 0.123, 0.236, 0.408, 0.5 ]])
>>> # Thresholding and filling
>>> S[:, -1] = 1e-308
>>> S
array([[ -8.000e+000, 4.000e+000, -2.000e+000,
1.000e-308],
[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.000e+000, 1.000e+000, 1.000e+000,
1.000e-308]])
>>> # By default, small-norm columns are left untouched
>>> librosa.util.normalize(S)
array([[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ -1.250e-001, 2.500e-001, -5.000e-001,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.250e-001, 2.500e-001, 5.000e-001,
1.000e-308]])
>>> # Small-norm columns can be zeroed out
>>> librosa.util.normalize(S, fill=False)
array([[-1. , 1. , -1. , 0. ],
[-0.125, 0.25 , -0.5 , 0. ],
[ 0. , 0. , 0. , 0. ],
[ 0.125, 0.25 , 0.5 , 0. ]])
>>> # Or set to constant with unit-norm
>>> librosa.util.normalize(S, fill=True)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # With an l1 norm instead of max-norm
>>> librosa.util.normalize(S, norm=1, fill=True)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
|
librosa/util/utils.py
|
def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
'''Normalize an array along a chosen axis.
Given a norm (described below) and a target axis, the input
array is scaled so that
`norm(S, axis=axis) == 1`
For example, `axis=0` normalizes each column of a 2-d array
by aggregating over the rows (0-axis).
Similarly, `axis=1` normalizes each row of a 2-d array.
This function also supports thresholding small-norm slices:
any slice (i.e., row or column) with norm below a specified
`threshold` can be left un-normalized, set to all-zeros, or
filled with uniform non-zero values that normalize to 1.
Note: the semantics of this function differ from
`scipy.linalg.norm` in two ways: multi-dimensional arrays
are supported, but matrix-norms are not.
Parameters
----------
S : np.ndarray
The matrix to normalize
norm : {np.inf, -np.inf, 0, float > 0, None}
- `np.inf` : maximum absolute value
- `-np.inf` : mininum absolute value
- `0` : number of non-zeros (the support)
- float : corresponding l_p norm
See `scipy.linalg.norm` for details.
- None : no normalization is performed
axis : int [scalar]
Axis along which to compute the norm.
threshold : number > 0 [optional]
Only the columns (or rows) with norm at least `threshold` are
normalized.
By default, the threshold is determined from
the numerical precision of `S.dtype`.
fill : None or bool
If None, then columns (or rows) with norm below `threshold`
are left as is.
If False, then columns (rows) with norm below `threshold`
are set to 0.
If True, then columns (rows) with norm below `threshold`
are filled uniformly such that the corresponding norm is 1.
.. note:: `fill=True` is incompatible with `norm=0` because
no uniform vector exists with l0 "norm" equal to 1.
Returns
-------
S_norm : np.ndarray [shape=S.shape]
Normalized array
Raises
------
ParameterError
If `norm` is not among the valid types defined above
If `S` is not finite
If `fill=True` and `norm=0`
See Also
--------
scipy.linalg.norm
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct an example matrix
>>> S = np.vander(np.arange(-2.0, 2.0))
>>> S
array([[-8., 4., -2., 1.],
[-1., 1., -1., 1.],
[ 0., 0., 0., 1.],
[ 1., 1., 1., 1.]])
>>> # Max (l-infinity)-normalize the columns
>>> librosa.util.normalize(S)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # Max (l-infinity)-normalize the rows
>>> librosa.util.normalize(S, axis=1)
array([[-1. , 0.5 , -0.25 , 0.125],
[-1. , 1. , -1. , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 1. , 1. , 1. , 1. ]])
>>> # l1-normalize the columns
>>> librosa.util.normalize(S, norm=1)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
>>> # l2-normalize the columns
>>> librosa.util.normalize(S, norm=2)
array([[-0.985, 0.943, -0.816, 0.5 ],
[-0.123, 0.236, -0.408, 0.5 ],
[ 0. , 0. , 0. , 0.5 ],
[ 0.123, 0.236, 0.408, 0.5 ]])
>>> # Thresholding and filling
>>> S[:, -1] = 1e-308
>>> S
array([[ -8.000e+000, 4.000e+000, -2.000e+000,
1.000e-308],
[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.000e+000, 1.000e+000, 1.000e+000,
1.000e-308]])
>>> # By default, small-norm columns are left untouched
>>> librosa.util.normalize(S)
array([[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ -1.250e-001, 2.500e-001, -5.000e-001,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.250e-001, 2.500e-001, 5.000e-001,
1.000e-308]])
>>> # Small-norm columns can be zeroed out
>>> librosa.util.normalize(S, fill=False)
array([[-1. , 1. , -1. , 0. ],
[-0.125, 0.25 , -0.5 , 0. ],
[ 0. , 0. , 0. , 0. ],
[ 0.125, 0.25 , 0.5 , 0. ]])
>>> # Or set to constant with unit-norm
>>> librosa.util.normalize(S, fill=True)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # With an l1 norm instead of max-norm
>>> librosa.util.normalize(S, norm=1, fill=True)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
'''
# Avoid div-by-zero
if threshold is None:
threshold = tiny(S)
elif threshold <= 0:
raise ParameterError('threshold={} must be strictly '
'positive'.format(threshold))
if fill not in [None, False, True]:
raise ParameterError('fill={} must be None or boolean'.format(fill))
if not np.all(np.isfinite(S)):
raise ParameterError('Input must be finite')
# All norms only depend on magnitude, let's do that first
mag = np.abs(S).astype(np.float)
# For max/min norms, filling with 1 works
fill_norm = 1
if norm == np.inf:
length = np.max(mag, axis=axis, keepdims=True)
elif norm == -np.inf:
length = np.min(mag, axis=axis, keepdims=True)
elif norm == 0:
if fill is True:
raise ParameterError('Cannot normalize with norm=0 and fill=True')
length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype)
elif np.issubdtype(type(norm), np.number) and norm > 0:
length = np.sum(mag**norm, axis=axis, keepdims=True)**(1./norm)
if axis is None:
fill_norm = mag.size**(-1./norm)
else:
fill_norm = mag.shape[axis]**(-1./norm)
elif norm is None:
return S
else:
raise ParameterError('Unsupported norm: {}'.format(repr(norm)))
# indices where norm is below the threshold
small_idx = length < threshold
Snorm = np.empty_like(S)
if fill is None:
# Leave small indices un-normalized
length[small_idx] = 1.0
Snorm[:] = S / length
elif fill:
# If we have a non-zero fill value, we locate those entries by
# doing a nan-divide.
# If S was finite, then length is finite (except for small positions)
length[small_idx] = np.nan
Snorm[:] = S / length
Snorm[np.isnan(Snorm)] = fill_norm
else:
# Set small values to zero by doing an inf-divide.
# This is safe (by IEEE-754) as long as S is finite.
length[small_idx] = np.inf
Snorm[:] = S / length
return Snorm
|
def normalize(S, norm=np.inf, axis=0, threshold=None, fill=None):
'''Normalize an array along a chosen axis.
Given a norm (described below) and a target axis, the input
array is scaled so that
`norm(S, axis=axis) == 1`
For example, `axis=0` normalizes each column of a 2-d array
by aggregating over the rows (0-axis).
Similarly, `axis=1` normalizes each row of a 2-d array.
This function also supports thresholding small-norm slices:
any slice (i.e., row or column) with norm below a specified
`threshold` can be left un-normalized, set to all-zeros, or
filled with uniform non-zero values that normalize to 1.
Note: the semantics of this function differ from
`scipy.linalg.norm` in two ways: multi-dimensional arrays
are supported, but matrix-norms are not.
Parameters
----------
S : np.ndarray
The matrix to normalize
norm : {np.inf, -np.inf, 0, float > 0, None}
- `np.inf` : maximum absolute value
- `-np.inf` : mininum absolute value
- `0` : number of non-zeros (the support)
- float : corresponding l_p norm
See `scipy.linalg.norm` for details.
- None : no normalization is performed
axis : int [scalar]
Axis along which to compute the norm.
threshold : number > 0 [optional]
Only the columns (or rows) with norm at least `threshold` are
normalized.
By default, the threshold is determined from
the numerical precision of `S.dtype`.
fill : None or bool
If None, then columns (or rows) with norm below `threshold`
are left as is.
If False, then columns (rows) with norm below `threshold`
are set to 0.
If True, then columns (rows) with norm below `threshold`
are filled uniformly such that the corresponding norm is 1.
.. note:: `fill=True` is incompatible with `norm=0` because
no uniform vector exists with l0 "norm" equal to 1.
Returns
-------
S_norm : np.ndarray [shape=S.shape]
Normalized array
Raises
------
ParameterError
If `norm` is not among the valid types defined above
If `S` is not finite
If `fill=True` and `norm=0`
See Also
--------
scipy.linalg.norm
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct an example matrix
>>> S = np.vander(np.arange(-2.0, 2.0))
>>> S
array([[-8., 4., -2., 1.],
[-1., 1., -1., 1.],
[ 0., 0., 0., 1.],
[ 1., 1., 1., 1.]])
>>> # Max (l-infinity)-normalize the columns
>>> librosa.util.normalize(S)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # Max (l-infinity)-normalize the rows
>>> librosa.util.normalize(S, axis=1)
array([[-1. , 0.5 , -0.25 , 0.125],
[-1. , 1. , -1. , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 1. , 1. , 1. , 1. ]])
>>> # l1-normalize the columns
>>> librosa.util.normalize(S, norm=1)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
>>> # l2-normalize the columns
>>> librosa.util.normalize(S, norm=2)
array([[-0.985, 0.943, -0.816, 0.5 ],
[-0.123, 0.236, -0.408, 0.5 ],
[ 0. , 0. , 0. , 0.5 ],
[ 0.123, 0.236, 0.408, 0.5 ]])
>>> # Thresholding and filling
>>> S[:, -1] = 1e-308
>>> S
array([[ -8.000e+000, 4.000e+000, -2.000e+000,
1.000e-308],
[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.000e+000, 1.000e+000, 1.000e+000,
1.000e-308]])
>>> # By default, small-norm columns are left untouched
>>> librosa.util.normalize(S)
array([[ -1.000e+000, 1.000e+000, -1.000e+000,
1.000e-308],
[ -1.250e-001, 2.500e-001, -5.000e-001,
1.000e-308],
[ 0.000e+000, 0.000e+000, 0.000e+000,
1.000e-308],
[ 1.250e-001, 2.500e-001, 5.000e-001,
1.000e-308]])
>>> # Small-norm columns can be zeroed out
>>> librosa.util.normalize(S, fill=False)
array([[-1. , 1. , -1. , 0. ],
[-0.125, 0.25 , -0.5 , 0. ],
[ 0. , 0. , 0. , 0. ],
[ 0.125, 0.25 , 0.5 , 0. ]])
>>> # Or set to constant with unit-norm
>>> librosa.util.normalize(S, fill=True)
array([[-1. , 1. , -1. , 1. ],
[-0.125, 0.25 , -0.5 , 1. ],
[ 0. , 0. , 0. , 1. ],
[ 0.125, 0.25 , 0.5 , 1. ]])
>>> # With an l1 norm instead of max-norm
>>> librosa.util.normalize(S, norm=1, fill=True)
array([[-0.8 , 0.667, -0.5 , 0.25 ],
[-0.1 , 0.167, -0.25 , 0.25 ],
[ 0. , 0. , 0. , 0.25 ],
[ 0.1 , 0.167, 0.25 , 0.25 ]])
'''
# Avoid div-by-zero
if threshold is None:
threshold = tiny(S)
elif threshold <= 0:
raise ParameterError('threshold={} must be strictly '
'positive'.format(threshold))
if fill not in [None, False, True]:
raise ParameterError('fill={} must be None or boolean'.format(fill))
if not np.all(np.isfinite(S)):
raise ParameterError('Input must be finite')
# All norms only depend on magnitude, let's do that first
mag = np.abs(S).astype(np.float)
# For max/min norms, filling with 1 works
fill_norm = 1
if norm == np.inf:
length = np.max(mag, axis=axis, keepdims=True)
elif norm == -np.inf:
length = np.min(mag, axis=axis, keepdims=True)
elif norm == 0:
if fill is True:
raise ParameterError('Cannot normalize with norm=0 and fill=True')
length = np.sum(mag > 0, axis=axis, keepdims=True, dtype=mag.dtype)
elif np.issubdtype(type(norm), np.number) and norm > 0:
length = np.sum(mag**norm, axis=axis, keepdims=True)**(1./norm)
if axis is None:
fill_norm = mag.size**(-1./norm)
else:
fill_norm = mag.shape[axis]**(-1./norm)
elif norm is None:
return S
else:
raise ParameterError('Unsupported norm: {}'.format(repr(norm)))
# indices where norm is below the threshold
small_idx = length < threshold
Snorm = np.empty_like(S)
if fill is None:
# Leave small indices un-normalized
length[small_idx] = 1.0
Snorm[:] = S / length
elif fill:
# If we have a non-zero fill value, we locate those entries by
# doing a nan-divide.
# If S was finite, then length is finite (except for small positions)
length[small_idx] = np.nan
Snorm[:] = S / length
Snorm[np.isnan(Snorm)] = fill_norm
else:
# Set small values to zero by doing an inf-divide.
# This is safe (by IEEE-754) as long as S is finite.
length[small_idx] = np.inf
Snorm[:] = S / length
return Snorm
|
[
"Normalize",
"an",
"array",
"along",
"a",
"chosen",
"axis",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L553-L777
|
[
"def",
"normalize",
"(",
"S",
",",
"norm",
"=",
"np",
".",
"inf",
",",
"axis",
"=",
"0",
",",
"threshold",
"=",
"None",
",",
"fill",
"=",
"None",
")",
":",
"# Avoid div-by-zero",
"if",
"threshold",
"is",
"None",
":",
"threshold",
"=",
"tiny",
"(",
"S",
")",
"elif",
"threshold",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'threshold={} must be strictly '",
"'positive'",
".",
"format",
"(",
"threshold",
")",
")",
"if",
"fill",
"not",
"in",
"[",
"None",
",",
"False",
",",
"True",
"]",
":",
"raise",
"ParameterError",
"(",
"'fill={} must be None or boolean'",
".",
"format",
"(",
"fill",
")",
")",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"isfinite",
"(",
"S",
")",
")",
":",
"raise",
"ParameterError",
"(",
"'Input must be finite'",
")",
"# All norms only depend on magnitude, let's do that first",
"mag",
"=",
"np",
".",
"abs",
"(",
"S",
")",
".",
"astype",
"(",
"np",
".",
"float",
")",
"# For max/min norms, filling with 1 works",
"fill_norm",
"=",
"1",
"if",
"norm",
"==",
"np",
".",
"inf",
":",
"length",
"=",
"np",
".",
"max",
"(",
"mag",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"elif",
"norm",
"==",
"-",
"np",
".",
"inf",
":",
"length",
"=",
"np",
".",
"min",
"(",
"mag",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"elif",
"norm",
"==",
"0",
":",
"if",
"fill",
"is",
"True",
":",
"raise",
"ParameterError",
"(",
"'Cannot normalize with norm=0 and fill=True'",
")",
"length",
"=",
"np",
".",
"sum",
"(",
"mag",
">",
"0",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
",",
"dtype",
"=",
"mag",
".",
"dtype",
")",
"elif",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"norm",
")",
",",
"np",
".",
"number",
")",
"and",
"norm",
">",
"0",
":",
"length",
"=",
"np",
".",
"sum",
"(",
"mag",
"**",
"norm",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"**",
"(",
"1.",
"/",
"norm",
")",
"if",
"axis",
"is",
"None",
":",
"fill_norm",
"=",
"mag",
".",
"size",
"**",
"(",
"-",
"1.",
"/",
"norm",
")",
"else",
":",
"fill_norm",
"=",
"mag",
".",
"shape",
"[",
"axis",
"]",
"**",
"(",
"-",
"1.",
"/",
"norm",
")",
"elif",
"norm",
"is",
"None",
":",
"return",
"S",
"else",
":",
"raise",
"ParameterError",
"(",
"'Unsupported norm: {}'",
".",
"format",
"(",
"repr",
"(",
"norm",
")",
")",
")",
"# indices where norm is below the threshold",
"small_idx",
"=",
"length",
"<",
"threshold",
"Snorm",
"=",
"np",
".",
"empty_like",
"(",
"S",
")",
"if",
"fill",
"is",
"None",
":",
"# Leave small indices un-normalized",
"length",
"[",
"small_idx",
"]",
"=",
"1.0",
"Snorm",
"[",
":",
"]",
"=",
"S",
"/",
"length",
"elif",
"fill",
":",
"# If we have a non-zero fill value, we locate those entries by",
"# doing a nan-divide.",
"# If S was finite, then length is finite (except for small positions)",
"length",
"[",
"small_idx",
"]",
"=",
"np",
".",
"nan",
"Snorm",
"[",
":",
"]",
"=",
"S",
"/",
"length",
"Snorm",
"[",
"np",
".",
"isnan",
"(",
"Snorm",
")",
"]",
"=",
"fill_norm",
"else",
":",
"# Set small values to zero by doing an inf-divide.",
"# This is safe (by IEEE-754) as long as S is finite.",
"length",
"[",
"small_idx",
"]",
"=",
"np",
".",
"inf",
"Snorm",
"[",
":",
"]",
"=",
"S",
"/",
"length",
"return",
"Snorm"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
localmax
|
Find local maxima in an array `x`.
An element `x[i]` is considered a local maximum if the following
conditions are met:
- `x[i] > x[i-1]`
- `x[i] >= x[i+1]`
Note that the first condition is strict, and that the first element
`x[0]` will never be considered as a local maximum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmax(x)
array([False, False, False, True, False, True, False, True], dtype=bool)
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmax(x, axis=0)
array([[False, False, False],
[ True, False, False],
[False, True, True]], dtype=bool)
>>> librosa.util.localmax(x, axis=1)
array([[False, False, True],
[False, False, True],
[False, False, True]], dtype=bool)
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local maximality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local maximality along `axis`
|
librosa/util/utils.py
|
def localmax(x, axis=0):
"""Find local maxima in an array `x`.
An element `x[i]` is considered a local maximum if the following
conditions are met:
- `x[i] > x[i-1]`
- `x[i] >= x[i+1]`
Note that the first condition is strict, and that the first element
`x[0]` will never be considered as a local maximum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmax(x)
array([False, False, False, True, False, True, False, True], dtype=bool)
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmax(x, axis=0)
array([[False, False, False],
[ True, False, False],
[False, True, True]], dtype=bool)
>>> librosa.util.localmax(x, axis=1)
array([[False, False, True],
[False, False, True],
[False, False, True]], dtype=bool)
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local maximality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local maximality along `axis`
"""
paddings = [(0, 0)] * x.ndim
paddings[axis] = (1, 1)
x_pad = np.pad(x, paddings, mode='edge')
inds1 = [slice(None)] * x.ndim
inds1[axis] = slice(0, -2)
inds2 = [slice(None)] * x.ndim
inds2[axis] = slice(2, x_pad.shape[axis])
return (x > x_pad[tuple(inds1)]) & (x >= x_pad[tuple(inds2)])
|
def localmax(x, axis=0):
"""Find local maxima in an array `x`.
An element `x[i]` is considered a local maximum if the following
conditions are met:
- `x[i] > x[i-1]`
- `x[i] >= x[i+1]`
Note that the first condition is strict, and that the first element
`x[0]` will never be considered as a local maximum.
Examples
--------
>>> x = np.array([1, 0, 1, 2, -1, 0, -2, 1])
>>> librosa.util.localmax(x)
array([False, False, False, True, False, True, False, True], dtype=bool)
>>> # Two-dimensional example
>>> x = np.array([[1,0,1], [2, -1, 0], [2, 1, 3]])
>>> librosa.util.localmax(x, axis=0)
array([[False, False, False],
[ True, False, False],
[False, True, True]], dtype=bool)
>>> librosa.util.localmax(x, axis=1)
array([[False, False, True],
[False, False, True],
[False, False, True]], dtype=bool)
Parameters
----------
x : np.ndarray [shape=(d1,d2,...)]
input vector or array
axis : int
axis along which to compute local maximality
Returns
-------
m : np.ndarray [shape=x.shape, dtype=bool]
indicator array of local maximality along `axis`
"""
paddings = [(0, 0)] * x.ndim
paddings[axis] = (1, 1)
x_pad = np.pad(x, paddings, mode='edge')
inds1 = [slice(None)] * x.ndim
inds1[axis] = slice(0, -2)
inds2 = [slice(None)] * x.ndim
inds2[axis] = slice(2, x_pad.shape[axis])
return (x > x_pad[tuple(inds1)]) & (x >= x_pad[tuple(inds2)])
|
[
"Find",
"local",
"maxima",
"in",
"an",
"array",
"x",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L780-L835
|
[
"def",
"localmax",
"(",
"x",
",",
"axis",
"=",
"0",
")",
":",
"paddings",
"=",
"[",
"(",
"0",
",",
"0",
")",
"]",
"*",
"x",
".",
"ndim",
"paddings",
"[",
"axis",
"]",
"=",
"(",
"1",
",",
"1",
")",
"x_pad",
"=",
"np",
".",
"pad",
"(",
"x",
",",
"paddings",
",",
"mode",
"=",
"'edge'",
")",
"inds1",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"x",
".",
"ndim",
"inds1",
"[",
"axis",
"]",
"=",
"slice",
"(",
"0",
",",
"-",
"2",
")",
"inds2",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"x",
".",
"ndim",
"inds2",
"[",
"axis",
"]",
"=",
"slice",
"(",
"2",
",",
"x_pad",
".",
"shape",
"[",
"axis",
"]",
")",
"return",
"(",
"x",
">",
"x_pad",
"[",
"tuple",
"(",
"inds1",
")",
"]",
")",
"&",
"(",
"x",
">=",
"x_pad",
"[",
"tuple",
"(",
"inds2",
")",
"]",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
peak_pick
|
Uses a flexible heuristic to pick peaks in a signal.
A sample n is selected as an peak if the corresponding x[n]
fulfills the following three conditions:
1. `x[n] == max(x[n - pre_max:n + post_max])`
2. `x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta`
3. `n - previous_n > wait`
where `previous_n` is the last sample picked as a peak (greedily).
This implementation is based on [1]_ and [2]_.
.. [1] Boeck, Sebastian, Florian Krebs, and Markus Schedl.
"Evaluating the Online Capabilities of Onset Detection Methods." ISMIR.
2012.
.. [2] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py
Parameters
----------
x : np.ndarray [shape=(n,)]
input signal to peak picks from
pre_max : int >= 0 [scalar]
number of samples before `n` over which max is computed
post_max : int >= 1 [scalar]
number of samples after `n` over which max is computed
pre_avg : int >= 0 [scalar]
number of samples before `n` over which mean is computed
post_avg : int >= 1 [scalar]
number of samples after `n` over which mean is computed
delta : float >= 0 [scalar]
threshold offset for mean
wait : int >= 0 [scalar]
number of samples to wait after picking a peak
Returns
-------
peaks : np.ndarray [shape=(n_peaks,), dtype=int]
indices of peaks in `x`
Raises
------
ParameterError
If any input lies outside its defined range
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... hop_length=512,
... aggregate=np.median)
>>> peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10)
>>> peaks
array([ 4, 23, 73, 102, 142, 162, 182, 211, 261, 301, 320,
331, 348, 368, 382, 396, 411, 431, 446, 461, 476, 491,
510, 525, 536, 555, 570, 590, 609, 625, 639])
>>> import matplotlib.pyplot as plt
>>> times = librosa.frames_to_time(np.arange(len(onset_env)),
... sr=sr, hop_length=512)
>>> plt.figure()
>>> ax = plt.subplot(2, 1, 2)
>>> D = librosa.stft(y)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.subplot(2, 1, 1, sharex=ax)
>>> plt.plot(times, onset_env, alpha=0.8, label='Onset strength')
>>> plt.vlines(times[peaks], 0,
... onset_env.max(), color='r', alpha=0.8,
... label='Selected peaks')
>>> plt.legend(frameon=True, framealpha=0.8)
>>> plt.axis('tight')
>>> plt.tight_layout()
|
librosa/util/utils.py
|
def peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
'''Uses a flexible heuristic to pick peaks in a signal.
A sample n is selected as an peak if the corresponding x[n]
fulfills the following three conditions:
1. `x[n] == max(x[n - pre_max:n + post_max])`
2. `x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta`
3. `n - previous_n > wait`
where `previous_n` is the last sample picked as a peak (greedily).
This implementation is based on [1]_ and [2]_.
.. [1] Boeck, Sebastian, Florian Krebs, and Markus Schedl.
"Evaluating the Online Capabilities of Onset Detection Methods." ISMIR.
2012.
.. [2] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py
Parameters
----------
x : np.ndarray [shape=(n,)]
input signal to peak picks from
pre_max : int >= 0 [scalar]
number of samples before `n` over which max is computed
post_max : int >= 1 [scalar]
number of samples after `n` over which max is computed
pre_avg : int >= 0 [scalar]
number of samples before `n` over which mean is computed
post_avg : int >= 1 [scalar]
number of samples after `n` over which mean is computed
delta : float >= 0 [scalar]
threshold offset for mean
wait : int >= 0 [scalar]
number of samples to wait after picking a peak
Returns
-------
peaks : np.ndarray [shape=(n_peaks,), dtype=int]
indices of peaks in `x`
Raises
------
ParameterError
If any input lies outside its defined range
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... hop_length=512,
... aggregate=np.median)
>>> peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10)
>>> peaks
array([ 4, 23, 73, 102, 142, 162, 182, 211, 261, 301, 320,
331, 348, 368, 382, 396, 411, 431, 446, 461, 476, 491,
510, 525, 536, 555, 570, 590, 609, 625, 639])
>>> import matplotlib.pyplot as plt
>>> times = librosa.frames_to_time(np.arange(len(onset_env)),
... sr=sr, hop_length=512)
>>> plt.figure()
>>> ax = plt.subplot(2, 1, 2)
>>> D = librosa.stft(y)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.subplot(2, 1, 1, sharex=ax)
>>> plt.plot(times, onset_env, alpha=0.8, label='Onset strength')
>>> plt.vlines(times[peaks], 0,
... onset_env.max(), color='r', alpha=0.8,
... label='Selected peaks')
>>> plt.legend(frameon=True, framealpha=0.8)
>>> plt.axis('tight')
>>> plt.tight_layout()
'''
if pre_max < 0:
raise ParameterError('pre_max must be non-negative')
if pre_avg < 0:
raise ParameterError('pre_avg must be non-negative')
if delta < 0:
raise ParameterError('delta must be non-negative')
if wait < 0:
raise ParameterError('wait must be non-negative')
if post_max <= 0:
raise ParameterError('post_max must be positive')
if post_avg <= 0:
raise ParameterError('post_avg must be positive')
if x.ndim != 1:
raise ParameterError('input array must be one-dimensional')
# Ensure valid index types
pre_max = valid_int(pre_max, cast=np.ceil)
post_max = valid_int(post_max, cast=np.ceil)
pre_avg = valid_int(pre_avg, cast=np.ceil)
post_avg = valid_int(post_avg, cast=np.ceil)
wait = valid_int(wait, cast=np.ceil)
# Get the maximum of the signal over a sliding window
max_length = pre_max + post_max
max_origin = np.ceil(0.5 * (pre_max - post_max))
# Using mode='constant' and cval=x.min() effectively truncates
# the sliding window at the boundaries
mov_max = scipy.ndimage.filters.maximum_filter1d(x, int(max_length),
mode='constant',
origin=int(max_origin),
cval=x.min())
# Get the mean of the signal over a sliding window
avg_length = pre_avg + post_avg
avg_origin = np.ceil(0.5 * (pre_avg - post_avg))
# Here, there is no mode which results in the behavior we want,
# so we'll correct below.
mov_avg = scipy.ndimage.filters.uniform_filter1d(x, int(avg_length),
mode='nearest',
origin=int(avg_origin))
# Correct sliding average at the beginning
n = 0
# Only need to correct in the range where the window needs to be truncated
while n - pre_avg < 0 and n < x.shape[0]:
# This just explicitly does mean(x[n - pre_avg:n + post_avg])
# with truncation
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start:n + post_avg])
n += 1
# Correct sliding average at the end
n = x.shape[0] - post_avg
# When post_avg > x.shape[0] (weird case), reset to 0
n = n if n > 0 else 0
while n < x.shape[0]:
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start:n + post_avg])
n += 1
# First mask out all entries not equal to the local max
detections = x * (x == mov_max)
# Then mask out all entries less than the thresholded average
detections = detections * (detections >= (mov_avg + delta))
# Initialize peaks array, to be filled greedily
peaks = []
# Remove onsets which are close together in time
last_onset = -np.inf
for i in np.nonzero(detections)[0]:
# Only report an onset if the "wait" samples was reported
if i > last_onset + wait:
peaks.append(i)
# Save last reported onset
last_onset = i
return np.array(peaks)
|
def peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
'''Uses a flexible heuristic to pick peaks in a signal.
A sample n is selected as an peak if the corresponding x[n]
fulfills the following three conditions:
1. `x[n] == max(x[n - pre_max:n + post_max])`
2. `x[n] >= mean(x[n - pre_avg:n + post_avg]) + delta`
3. `n - previous_n > wait`
where `previous_n` is the last sample picked as a peak (greedily).
This implementation is based on [1]_ and [2]_.
.. [1] Boeck, Sebastian, Florian Krebs, and Markus Schedl.
"Evaluating the Online Capabilities of Onset Detection Methods." ISMIR.
2012.
.. [2] https://github.com/CPJKU/onset_detection/blob/master/onset_program.py
Parameters
----------
x : np.ndarray [shape=(n,)]
input signal to peak picks from
pre_max : int >= 0 [scalar]
number of samples before `n` over which max is computed
post_max : int >= 1 [scalar]
number of samples after `n` over which max is computed
pre_avg : int >= 0 [scalar]
number of samples before `n` over which mean is computed
post_avg : int >= 1 [scalar]
number of samples after `n` over which mean is computed
delta : float >= 0 [scalar]
threshold offset for mean
wait : int >= 0 [scalar]
number of samples to wait after picking a peak
Returns
-------
peaks : np.ndarray [shape=(n_peaks,), dtype=int]
indices of peaks in `x`
Raises
------
ParameterError
If any input lies outside its defined range
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... hop_length=512,
... aggregate=np.median)
>>> peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10)
>>> peaks
array([ 4, 23, 73, 102, 142, 162, 182, 211, 261, 301, 320,
331, 348, 368, 382, 396, 411, 431, 446, 461, 476, 491,
510, 525, 536, 555, 570, 590, 609, 625, 639])
>>> import matplotlib.pyplot as plt
>>> times = librosa.frames_to_time(np.arange(len(onset_env)),
... sr=sr, hop_length=512)
>>> plt.figure()
>>> ax = plt.subplot(2, 1, 2)
>>> D = librosa.stft(y)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.subplot(2, 1, 1, sharex=ax)
>>> plt.plot(times, onset_env, alpha=0.8, label='Onset strength')
>>> plt.vlines(times[peaks], 0,
... onset_env.max(), color='r', alpha=0.8,
... label='Selected peaks')
>>> plt.legend(frameon=True, framealpha=0.8)
>>> plt.axis('tight')
>>> plt.tight_layout()
'''
if pre_max < 0:
raise ParameterError('pre_max must be non-negative')
if pre_avg < 0:
raise ParameterError('pre_avg must be non-negative')
if delta < 0:
raise ParameterError('delta must be non-negative')
if wait < 0:
raise ParameterError('wait must be non-negative')
if post_max <= 0:
raise ParameterError('post_max must be positive')
if post_avg <= 0:
raise ParameterError('post_avg must be positive')
if x.ndim != 1:
raise ParameterError('input array must be one-dimensional')
# Ensure valid index types
pre_max = valid_int(pre_max, cast=np.ceil)
post_max = valid_int(post_max, cast=np.ceil)
pre_avg = valid_int(pre_avg, cast=np.ceil)
post_avg = valid_int(post_avg, cast=np.ceil)
wait = valid_int(wait, cast=np.ceil)
# Get the maximum of the signal over a sliding window
max_length = pre_max + post_max
max_origin = np.ceil(0.5 * (pre_max - post_max))
# Using mode='constant' and cval=x.min() effectively truncates
# the sliding window at the boundaries
mov_max = scipy.ndimage.filters.maximum_filter1d(x, int(max_length),
mode='constant',
origin=int(max_origin),
cval=x.min())
# Get the mean of the signal over a sliding window
avg_length = pre_avg + post_avg
avg_origin = np.ceil(0.5 * (pre_avg - post_avg))
# Here, there is no mode which results in the behavior we want,
# so we'll correct below.
mov_avg = scipy.ndimage.filters.uniform_filter1d(x, int(avg_length),
mode='nearest',
origin=int(avg_origin))
# Correct sliding average at the beginning
n = 0
# Only need to correct in the range where the window needs to be truncated
while n - pre_avg < 0 and n < x.shape[0]:
# This just explicitly does mean(x[n - pre_avg:n + post_avg])
# with truncation
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start:n + post_avg])
n += 1
# Correct sliding average at the end
n = x.shape[0] - post_avg
# When post_avg > x.shape[0] (weird case), reset to 0
n = n if n > 0 else 0
while n < x.shape[0]:
start = n - pre_avg
start = start if start > 0 else 0
mov_avg[n] = np.mean(x[start:n + post_avg])
n += 1
# First mask out all entries not equal to the local max
detections = x * (x == mov_max)
# Then mask out all entries less than the thresholded average
detections = detections * (detections >= (mov_avg + delta))
# Initialize peaks array, to be filled greedily
peaks = []
# Remove onsets which are close together in time
last_onset = -np.inf
for i in np.nonzero(detections)[0]:
# Only report an onset if the "wait" samples was reported
if i > last_onset + wait:
peaks.append(i)
# Save last reported onset
last_onset = i
return np.array(peaks)
|
[
"Uses",
"a",
"flexible",
"heuristic",
"to",
"pick",
"peaks",
"in",
"a",
"signal",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L838-L1005
|
[
"def",
"peak_pick",
"(",
"x",
",",
"pre_max",
",",
"post_max",
",",
"pre_avg",
",",
"post_avg",
",",
"delta",
",",
"wait",
")",
":",
"if",
"pre_max",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"'pre_max must be non-negative'",
")",
"if",
"pre_avg",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"'pre_avg must be non-negative'",
")",
"if",
"delta",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"'delta must be non-negative'",
")",
"if",
"wait",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"'wait must be non-negative'",
")",
"if",
"post_max",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'post_max must be positive'",
")",
"if",
"post_avg",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'post_avg must be positive'",
")",
"if",
"x",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ParameterError",
"(",
"'input array must be one-dimensional'",
")",
"# Ensure valid index types",
"pre_max",
"=",
"valid_int",
"(",
"pre_max",
",",
"cast",
"=",
"np",
".",
"ceil",
")",
"post_max",
"=",
"valid_int",
"(",
"post_max",
",",
"cast",
"=",
"np",
".",
"ceil",
")",
"pre_avg",
"=",
"valid_int",
"(",
"pre_avg",
",",
"cast",
"=",
"np",
".",
"ceil",
")",
"post_avg",
"=",
"valid_int",
"(",
"post_avg",
",",
"cast",
"=",
"np",
".",
"ceil",
")",
"wait",
"=",
"valid_int",
"(",
"wait",
",",
"cast",
"=",
"np",
".",
"ceil",
")",
"# Get the maximum of the signal over a sliding window",
"max_length",
"=",
"pre_max",
"+",
"post_max",
"max_origin",
"=",
"np",
".",
"ceil",
"(",
"0.5",
"*",
"(",
"pre_max",
"-",
"post_max",
")",
")",
"# Using mode='constant' and cval=x.min() effectively truncates",
"# the sliding window at the boundaries",
"mov_max",
"=",
"scipy",
".",
"ndimage",
".",
"filters",
".",
"maximum_filter1d",
"(",
"x",
",",
"int",
"(",
"max_length",
")",
",",
"mode",
"=",
"'constant'",
",",
"origin",
"=",
"int",
"(",
"max_origin",
")",
",",
"cval",
"=",
"x",
".",
"min",
"(",
")",
")",
"# Get the mean of the signal over a sliding window",
"avg_length",
"=",
"pre_avg",
"+",
"post_avg",
"avg_origin",
"=",
"np",
".",
"ceil",
"(",
"0.5",
"*",
"(",
"pre_avg",
"-",
"post_avg",
")",
")",
"# Here, there is no mode which results in the behavior we want,",
"# so we'll correct below.",
"mov_avg",
"=",
"scipy",
".",
"ndimage",
".",
"filters",
".",
"uniform_filter1d",
"(",
"x",
",",
"int",
"(",
"avg_length",
")",
",",
"mode",
"=",
"'nearest'",
",",
"origin",
"=",
"int",
"(",
"avg_origin",
")",
")",
"# Correct sliding average at the beginning",
"n",
"=",
"0",
"# Only need to correct in the range where the window needs to be truncated",
"while",
"n",
"-",
"pre_avg",
"<",
"0",
"and",
"n",
"<",
"x",
".",
"shape",
"[",
"0",
"]",
":",
"# This just explicitly does mean(x[n - pre_avg:n + post_avg])",
"# with truncation",
"start",
"=",
"n",
"-",
"pre_avg",
"start",
"=",
"start",
"if",
"start",
">",
"0",
"else",
"0",
"mov_avg",
"[",
"n",
"]",
"=",
"np",
".",
"mean",
"(",
"x",
"[",
"start",
":",
"n",
"+",
"post_avg",
"]",
")",
"n",
"+=",
"1",
"# Correct sliding average at the end",
"n",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"-",
"post_avg",
"# When post_avg > x.shape[0] (weird case), reset to 0",
"n",
"=",
"n",
"if",
"n",
">",
"0",
"else",
"0",
"while",
"n",
"<",
"x",
".",
"shape",
"[",
"0",
"]",
":",
"start",
"=",
"n",
"-",
"pre_avg",
"start",
"=",
"start",
"if",
"start",
">",
"0",
"else",
"0",
"mov_avg",
"[",
"n",
"]",
"=",
"np",
".",
"mean",
"(",
"x",
"[",
"start",
":",
"n",
"+",
"post_avg",
"]",
")",
"n",
"+=",
"1",
"# First mask out all entries not equal to the local max",
"detections",
"=",
"x",
"*",
"(",
"x",
"==",
"mov_max",
")",
"# Then mask out all entries less than the thresholded average",
"detections",
"=",
"detections",
"*",
"(",
"detections",
">=",
"(",
"mov_avg",
"+",
"delta",
")",
")",
"# Initialize peaks array, to be filled greedily",
"peaks",
"=",
"[",
"]",
"# Remove onsets which are close together in time",
"last_onset",
"=",
"-",
"np",
".",
"inf",
"for",
"i",
"in",
"np",
".",
"nonzero",
"(",
"detections",
")",
"[",
"0",
"]",
":",
"# Only report an onset if the \"wait\" samples was reported",
"if",
"i",
">",
"last_onset",
"+",
"wait",
":",
"peaks",
".",
"append",
"(",
"i",
")",
"# Save last reported onset",
"last_onset",
"=",
"i",
"return",
"np",
".",
"array",
"(",
"peaks",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
sparsify_rows
|
Return a row-sparse matrix approximating the input `x`.
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of `x`
Returns
-------
x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape]
Row-sparsified approximation of `x`
If `x.ndim == 1`, then `x` is interpreted as a row vector,
and `x_sparse.shape == (1, len(x))`.
Raises
------
ParameterError
If `x.ndim > 2`
If `quantile` lies outside `[0, 1.0)`
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
|
librosa/util/utils.py
|
def sparsify_rows(x, quantile=0.01):
'''
Return a row-sparse matrix approximating the input `x`.
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of `x`
Returns
-------
x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape]
Row-sparsified approximation of `x`
If `x.ndim == 1`, then `x` is interpreted as a row vector,
and `x_sparse.shape == (1, len(x))`.
Raises
------
ParameterError
If `x.ndim > 2`
If `quantile` lies outside `[0, 1.0)`
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
'''
if x.ndim == 1:
x = x.reshape((1, -1))
elif x.ndim > 2:
raise ParameterError('Input must have 2 or fewer dimensions. '
'Provided x.shape={}.'.format(x.shape))
if not 0.0 <= quantile < 1:
raise ParameterError('Invalid quantile {:.2f}'.format(quantile))
x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)
mags = np.abs(x)
norms = np.sum(mags, axis=1, keepdims=True)
mag_sort = np.sort(mags, axis=1)
cumulative_mag = np.cumsum(mag_sort / norms, axis=1)
threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)
for i, j in enumerate(threshold_idx):
idx = np.where(mags[i] >= mag_sort[i, j])
x_sparse[i, idx] = x[i, idx]
return x_sparse.tocsr()
|
def sparsify_rows(x, quantile=0.01):
'''
Return a row-sparse matrix approximating the input `x`.
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of `x`
Returns
-------
x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape]
Row-sparsified approximation of `x`
If `x.ndim == 1`, then `x` is interpreted as a row vector,
and `x_sparse.shape == (1, len(x))`.
Raises
------
ParameterError
If `x.ndim > 2`
If `quantile` lies outside `[0, 1.0)`
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
'''
if x.ndim == 1:
x = x.reshape((1, -1))
elif x.ndim > 2:
raise ParameterError('Input must have 2 or fewer dimensions. '
'Provided x.shape={}.'.format(x.shape))
if not 0.0 <= quantile < 1:
raise ParameterError('Invalid quantile {:.2f}'.format(quantile))
x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)
mags = np.abs(x)
norms = np.sum(mags, axis=1, keepdims=True)
mag_sort = np.sort(mags, axis=1)
cumulative_mag = np.cumsum(mag_sort / norms, axis=1)
threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)
for i, j in enumerate(threshold_idx):
idx = np.where(mags[i] >= mag_sort[i, j])
x_sparse[i, idx] = x[i, idx]
return x_sparse.tocsr()
|
[
"Return",
"a",
"row",
"-",
"sparse",
"matrix",
"approximating",
"the",
"input",
"x",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1009-L1098
|
[
"def",
"sparsify_rows",
"(",
"x",
",",
"quantile",
"=",
"0.01",
")",
":",
"if",
"x",
".",
"ndim",
"==",
"1",
":",
"x",
"=",
"x",
".",
"reshape",
"(",
"(",
"1",
",",
"-",
"1",
")",
")",
"elif",
"x",
".",
"ndim",
">",
"2",
":",
"raise",
"ParameterError",
"(",
"'Input must have 2 or fewer dimensions. '",
"'Provided x.shape={}.'",
".",
"format",
"(",
"x",
".",
"shape",
")",
")",
"if",
"not",
"0.0",
"<=",
"quantile",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"'Invalid quantile {:.2f}'",
".",
"format",
"(",
"quantile",
")",
")",
"x_sparse",
"=",
"scipy",
".",
"sparse",
".",
"lil_matrix",
"(",
"x",
".",
"shape",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"mags",
"=",
"np",
".",
"abs",
"(",
"x",
")",
"norms",
"=",
"np",
".",
"sum",
"(",
"mags",
",",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"mag_sort",
"=",
"np",
".",
"sort",
"(",
"mags",
",",
"axis",
"=",
"1",
")",
"cumulative_mag",
"=",
"np",
".",
"cumsum",
"(",
"mag_sort",
"/",
"norms",
",",
"axis",
"=",
"1",
")",
"threshold_idx",
"=",
"np",
".",
"argmin",
"(",
"cumulative_mag",
"<",
"quantile",
",",
"axis",
"=",
"1",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"threshold_idx",
")",
":",
"idx",
"=",
"np",
".",
"where",
"(",
"mags",
"[",
"i",
"]",
">=",
"mag_sort",
"[",
"i",
",",
"j",
"]",
")",
"x_sparse",
"[",
"i",
",",
"idx",
"]",
"=",
"x",
"[",
"i",
",",
"idx",
"]",
"return",
"x_sparse",
".",
"tocsr",
"(",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
roll_sparse
|
Sparse matrix roll
This operation is equivalent to ``numpy.roll``, but operates on sparse matrices.
Parameters
----------
x : scipy.sparse.spmatrix or np.ndarray
The sparse matrix input
shift : int
The number of positions to roll the specified axis
axis : (0, 1, -1)
The axis along which to roll.
Returns
-------
x_rolled : same type as `x`
The rolled matrix, with the same format as `x`
See Also
--------
numpy.roll
Examples
--------
>>> # Generate a random sparse binary matrix
>>> X = scipy.sparse.lil_matrix(np.random.randint(0, 2, size=(5,5)))
>>> X_roll = roll_sparse(X, 2, axis=0) # Roll by 2 on the first axis
>>> X_dense_r = roll_sparse(X.toarray(), 2, axis=0) # Equivalent dense roll
>>> np.allclose(X_roll, X_dense_r.toarray())
True
|
librosa/util/utils.py
|
def roll_sparse(x, shift, axis=0):
'''Sparse matrix roll
This operation is equivalent to ``numpy.roll``, but operates on sparse matrices.
Parameters
----------
x : scipy.sparse.spmatrix or np.ndarray
The sparse matrix input
shift : int
The number of positions to roll the specified axis
axis : (0, 1, -1)
The axis along which to roll.
Returns
-------
x_rolled : same type as `x`
The rolled matrix, with the same format as `x`
See Also
--------
numpy.roll
Examples
--------
>>> # Generate a random sparse binary matrix
>>> X = scipy.sparse.lil_matrix(np.random.randint(0, 2, size=(5,5)))
>>> X_roll = roll_sparse(X, 2, axis=0) # Roll by 2 on the first axis
>>> X_dense_r = roll_sparse(X.toarray(), 2, axis=0) # Equivalent dense roll
>>> np.allclose(X_roll, X_dense_r.toarray())
True
'''
if not scipy.sparse.isspmatrix(x):
return np.roll(x, shift, axis=axis)
# shift-mod-length lets us have shift > x.shape[axis]
if axis not in [0, 1, -1]:
raise ParameterError('axis must be one of (0, 1, -1)')
shift = np.mod(shift, x.shape[axis])
if shift == 0:
return x.copy()
fmt = x.format
if axis == 0:
x = x.tocsc()
elif axis in (-1, 1):
x = x.tocsr()
# lil matrix to start
x_r = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)
idx_in = [slice(None)] * x.ndim
idx_out = [slice(None)] * x_r.ndim
idx_in[axis] = slice(0, -shift)
idx_out[axis] = slice(shift, None)
x_r[tuple(idx_out)] = x[tuple(idx_in)]
idx_out[axis] = slice(0, shift)
idx_in[axis] = slice(-shift, None)
x_r[tuple(idx_out)] = x[tuple(idx_in)]
return x_r.asformat(fmt)
|
def roll_sparse(x, shift, axis=0):
'''Sparse matrix roll
This operation is equivalent to ``numpy.roll``, but operates on sparse matrices.
Parameters
----------
x : scipy.sparse.spmatrix or np.ndarray
The sparse matrix input
shift : int
The number of positions to roll the specified axis
axis : (0, 1, -1)
The axis along which to roll.
Returns
-------
x_rolled : same type as `x`
The rolled matrix, with the same format as `x`
See Also
--------
numpy.roll
Examples
--------
>>> # Generate a random sparse binary matrix
>>> X = scipy.sparse.lil_matrix(np.random.randint(0, 2, size=(5,5)))
>>> X_roll = roll_sparse(X, 2, axis=0) # Roll by 2 on the first axis
>>> X_dense_r = roll_sparse(X.toarray(), 2, axis=0) # Equivalent dense roll
>>> np.allclose(X_roll, X_dense_r.toarray())
True
'''
if not scipy.sparse.isspmatrix(x):
return np.roll(x, shift, axis=axis)
# shift-mod-length lets us have shift > x.shape[axis]
if axis not in [0, 1, -1]:
raise ParameterError('axis must be one of (0, 1, -1)')
shift = np.mod(shift, x.shape[axis])
if shift == 0:
return x.copy()
fmt = x.format
if axis == 0:
x = x.tocsc()
elif axis in (-1, 1):
x = x.tocsr()
# lil matrix to start
x_r = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)
idx_in = [slice(None)] * x.ndim
idx_out = [slice(None)] * x_r.ndim
idx_in[axis] = slice(0, -shift)
idx_out[axis] = slice(shift, None)
x_r[tuple(idx_out)] = x[tuple(idx_in)]
idx_out[axis] = slice(0, shift)
idx_in[axis] = slice(-shift, None)
x_r[tuple(idx_out)] = x[tuple(idx_in)]
return x_r.asformat(fmt)
|
[
"Sparse",
"matrix",
"roll"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1101-L1167
|
[
"def",
"roll_sparse",
"(",
"x",
",",
"shift",
",",
"axis",
"=",
"0",
")",
":",
"if",
"not",
"scipy",
".",
"sparse",
".",
"isspmatrix",
"(",
"x",
")",
":",
"return",
"np",
".",
"roll",
"(",
"x",
",",
"shift",
",",
"axis",
"=",
"axis",
")",
"# shift-mod-length lets us have shift > x.shape[axis]",
"if",
"axis",
"not",
"in",
"[",
"0",
",",
"1",
",",
"-",
"1",
"]",
":",
"raise",
"ParameterError",
"(",
"'axis must be one of (0, 1, -1)'",
")",
"shift",
"=",
"np",
".",
"mod",
"(",
"shift",
",",
"x",
".",
"shape",
"[",
"axis",
"]",
")",
"if",
"shift",
"==",
"0",
":",
"return",
"x",
".",
"copy",
"(",
")",
"fmt",
"=",
"x",
".",
"format",
"if",
"axis",
"==",
"0",
":",
"x",
"=",
"x",
".",
"tocsc",
"(",
")",
"elif",
"axis",
"in",
"(",
"-",
"1",
",",
"1",
")",
":",
"x",
"=",
"x",
".",
"tocsr",
"(",
")",
"# lil matrix to start",
"x_r",
"=",
"scipy",
".",
"sparse",
".",
"lil_matrix",
"(",
"x",
".",
"shape",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"idx_in",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"x",
".",
"ndim",
"idx_out",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"x_r",
".",
"ndim",
"idx_in",
"[",
"axis",
"]",
"=",
"slice",
"(",
"0",
",",
"-",
"shift",
")",
"idx_out",
"[",
"axis",
"]",
"=",
"slice",
"(",
"shift",
",",
"None",
")",
"x_r",
"[",
"tuple",
"(",
"idx_out",
")",
"]",
"=",
"x",
"[",
"tuple",
"(",
"idx_in",
")",
"]",
"idx_out",
"[",
"axis",
"]",
"=",
"slice",
"(",
"0",
",",
"shift",
")",
"idx_in",
"[",
"axis",
"]",
"=",
"slice",
"(",
"-",
"shift",
",",
"None",
")",
"x_r",
"[",
"tuple",
"(",
"idx_out",
")",
"]",
"=",
"x",
"[",
"tuple",
"(",
"idx_in",
")",
"]",
"return",
"x_r",
".",
"asformat",
"(",
"fmt",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
buf_to_float
|
Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
See Also
--------
buf_to_float
Parameters
----------
x : np.ndarray [dtype=int]
The integer-valued data buffer
n_bytes : int [1, 2, 4]
The number of bytes per sample in `x`
dtype : numeric type
The target output type (default: 32-bit float)
Returns
-------
x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
|
librosa/util/utils.py
|
def buf_to_float(x, n_bytes=2, dtype=np.float32):
"""Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
See Also
--------
buf_to_float
Parameters
----------
x : np.ndarray [dtype=int]
The integer-valued data buffer
n_bytes : int [1, 2, 4]
The number of bytes per sample in `x`
dtype : numeric type
The target output type (default: 32-bit float)
Returns
-------
x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
"""
# Invert the scale of the data
scale = 1./float(1 << ((8 * n_bytes) - 1))
# Construct the format string
fmt = '<i{:d}'.format(n_bytes)
# Rescale and format the data buffer
return scale * np.frombuffer(x, fmt).astype(dtype)
|
def buf_to_float(x, n_bytes=2, dtype=np.float32):
"""Convert an integer buffer to floating point values.
This is primarily useful when loading integer-valued wav data
into numpy arrays.
See Also
--------
buf_to_float
Parameters
----------
x : np.ndarray [dtype=int]
The integer-valued data buffer
n_bytes : int [1, 2, 4]
The number of bytes per sample in `x`
dtype : numeric type
The target output type (default: 32-bit float)
Returns
-------
x_float : np.ndarray [dtype=float]
The input data buffer cast to floating point
"""
# Invert the scale of the data
scale = 1./float(1 << ((8 * n_bytes) - 1))
# Construct the format string
fmt = '<i{:d}'.format(n_bytes)
# Rescale and format the data buffer
return scale * np.frombuffer(x, fmt).astype(dtype)
|
[
"Convert",
"an",
"integer",
"buffer",
"to",
"floating",
"point",
"values",
".",
"This",
"is",
"primarily",
"useful",
"when",
"loading",
"integer",
"-",
"valued",
"wav",
"data",
"into",
"numpy",
"arrays",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1170-L1203
|
[
"def",
"buf_to_float",
"(",
"x",
",",
"n_bytes",
"=",
"2",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"# Invert the scale of the data",
"scale",
"=",
"1.",
"/",
"float",
"(",
"1",
"<<",
"(",
"(",
"8",
"*",
"n_bytes",
")",
"-",
"1",
")",
")",
"# Construct the format string",
"fmt",
"=",
"'<i{:d}'",
".",
"format",
"(",
"n_bytes",
")",
"# Rescale and format the data buffer",
"return",
"scale",
"*",
"np",
".",
"frombuffer",
"(",
"x",
",",
"fmt",
")",
".",
"astype",
"(",
"dtype",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
index_to_slice
|
Generate a slice array from an index array.
Parameters
----------
idx : list-like
Array of index boundaries
idx_min : None or int
idx_max : None or int
Minimum and maximum allowed indices
step : None or int
Step size for each slice. If `None`, then the default
step of 1 is used.
pad : boolean
If `True`, pad `idx` to span the range `idx_min:idx_max`.
Returns
-------
slices : list of slice
``slices[i] = slice(idx[i], idx[i+1], step)``
Additional slice objects may be added at the beginning or end,
depending on whether ``pad==True`` and the supplied values for
`idx_min` and `idx_max`.
See Also
--------
fix_frames
Examples
--------
>>> # Generate slices from spaced indices
>>> librosa.util.index_to_slice(np.arange(20, 100, 15))
[slice(20, 35, None), slice(35, 50, None), slice(50, 65, None), slice(65, 80, None),
slice(80, 95, None)]
>>> # Pad to span the range (0, 100)
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100)
[slice(0, 20, None), slice(20, 35, None), slice(35, 50, None), slice(50, 65, None),
slice(65, 80, None), slice(80, 95, None), slice(95, 100, None)]
>>> # Use a step of 5 for each slice
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100, step=5)
[slice(0, 20, 5), slice(20, 35, 5), slice(35, 50, 5), slice(50, 65, 5), slice(65, 80, 5),
slice(80, 95, 5), slice(95, 100, 5)]
|
librosa/util/utils.py
|
def index_to_slice(idx, idx_min=None, idx_max=None, step=None, pad=True):
'''Generate a slice array from an index array.
Parameters
----------
idx : list-like
Array of index boundaries
idx_min : None or int
idx_max : None or int
Minimum and maximum allowed indices
step : None or int
Step size for each slice. If `None`, then the default
step of 1 is used.
pad : boolean
If `True`, pad `idx` to span the range `idx_min:idx_max`.
Returns
-------
slices : list of slice
``slices[i] = slice(idx[i], idx[i+1], step)``
Additional slice objects may be added at the beginning or end,
depending on whether ``pad==True`` and the supplied values for
`idx_min` and `idx_max`.
See Also
--------
fix_frames
Examples
--------
>>> # Generate slices from spaced indices
>>> librosa.util.index_to_slice(np.arange(20, 100, 15))
[slice(20, 35, None), slice(35, 50, None), slice(50, 65, None), slice(65, 80, None),
slice(80, 95, None)]
>>> # Pad to span the range (0, 100)
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100)
[slice(0, 20, None), slice(20, 35, None), slice(35, 50, None), slice(50, 65, None),
slice(65, 80, None), slice(80, 95, None), slice(95, 100, None)]
>>> # Use a step of 5 for each slice
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100, step=5)
[slice(0, 20, 5), slice(20, 35, 5), slice(35, 50, 5), slice(50, 65, 5), slice(65, 80, 5),
slice(80, 95, 5), slice(95, 100, 5)]
'''
# First, normalize the index set
idx_fixed = fix_frames(idx, idx_min, idx_max, pad=pad)
# Now convert the indices to slices
return [slice(start, end, step) for (start, end) in zip(idx_fixed, idx_fixed[1:])]
|
def index_to_slice(idx, idx_min=None, idx_max=None, step=None, pad=True):
'''Generate a slice array from an index array.
Parameters
----------
idx : list-like
Array of index boundaries
idx_min : None or int
idx_max : None or int
Minimum and maximum allowed indices
step : None or int
Step size for each slice. If `None`, then the default
step of 1 is used.
pad : boolean
If `True`, pad `idx` to span the range `idx_min:idx_max`.
Returns
-------
slices : list of slice
``slices[i] = slice(idx[i], idx[i+1], step)``
Additional slice objects may be added at the beginning or end,
depending on whether ``pad==True`` and the supplied values for
`idx_min` and `idx_max`.
See Also
--------
fix_frames
Examples
--------
>>> # Generate slices from spaced indices
>>> librosa.util.index_to_slice(np.arange(20, 100, 15))
[slice(20, 35, None), slice(35, 50, None), slice(50, 65, None), slice(65, 80, None),
slice(80, 95, None)]
>>> # Pad to span the range (0, 100)
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100)
[slice(0, 20, None), slice(20, 35, None), slice(35, 50, None), slice(50, 65, None),
slice(65, 80, None), slice(80, 95, None), slice(95, 100, None)]
>>> # Use a step of 5 for each slice
>>> librosa.util.index_to_slice(np.arange(20, 100, 15),
... idx_min=0, idx_max=100, step=5)
[slice(0, 20, 5), slice(20, 35, 5), slice(35, 50, 5), slice(50, 65, 5), slice(65, 80, 5),
slice(80, 95, 5), slice(95, 100, 5)]
'''
# First, normalize the index set
idx_fixed = fix_frames(idx, idx_min, idx_max, pad=pad)
# Now convert the indices to slices
return [slice(start, end, step) for (start, end) in zip(idx_fixed, idx_fixed[1:])]
|
[
"Generate",
"a",
"slice",
"array",
"from",
"an",
"index",
"array",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1206-L1259
|
[
"def",
"index_to_slice",
"(",
"idx",
",",
"idx_min",
"=",
"None",
",",
"idx_max",
"=",
"None",
",",
"step",
"=",
"None",
",",
"pad",
"=",
"True",
")",
":",
"# First, normalize the index set",
"idx_fixed",
"=",
"fix_frames",
"(",
"idx",
",",
"idx_min",
",",
"idx_max",
",",
"pad",
"=",
"pad",
")",
"# Now convert the indices to slices",
"return",
"[",
"slice",
"(",
"start",
",",
"end",
",",
"step",
")",
"for",
"(",
"start",
",",
"end",
")",
"in",
"zip",
"(",
"idx_fixed",
",",
"idx_fixed",
"[",
"1",
":",
"]",
")",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
sync
|
Synchronous aggregation of a multi-dimensional array between boundaries
.. note::
In order to ensure total coverage, boundary points may be added
to `idx`.
If synchronizing a feature matrix against beat tracker output, ensure
that frame index numbers are properly aligned and use the same hop length.
Parameters
----------
data : np.ndarray
multi-dimensional array of features
idx : iterable of ints or slices
Either an ordered array of boundary indices, or
an iterable collection of slice objects.
aggregate : function
aggregation function (default: `np.mean`)
pad : boolean
If `True`, `idx` is padded to span the full range `[0, data.shape[axis]]`
axis : int
The axis along which to aggregate data
Returns
-------
data_sync : ndarray
`data_sync` will have the same dimension as `data`, except that the `axis`
coordinate will be reduced according to `idx`.
For example, a 2-dimensional `data` with `axis=-1` should satisfy
`data_sync[:, i] = aggregate(data[:, idx[i-1]:idx[i]], axis=-1)`
Raises
------
ParameterError
If the index set is not of consistent type (all slices or all integers)
Notes
-----
This function caches at level 40.
Examples
--------
Beat-synchronous CQT spectra
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> C = np.abs(librosa.cqt(y=y, sr=sr))
>>> beats = librosa.util.fix_frames(beats, x_max=C.shape[1])
By default, use mean aggregation
>>> C_avg = librosa.util.sync(C, beats)
Use median-aggregation instead of mean
>>> C_med = librosa.util.sync(C, beats,
... aggregate=np.median)
Or sub-beat synchronization
>>> sub_beats = librosa.segment.subsegment(C, beats)
>>> sub_beats = librosa.util.fix_frames(sub_beats, x_max=C.shape[1])
>>> C_med_sub = librosa.util.sync(C, sub_beats, aggregate=np.median)
Plot the results
>>> import matplotlib.pyplot as plt
>>> beat_t = librosa.frames_to_time(beats, sr=sr)
>>> subbeat_t = librosa.frames_to_time(sub_beats, sr=sr)
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... x_axis='time')
>>> plt.title('CQT power, shape={}'.format(C.shape))
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med,
... ref=np.max),
... x_coords=beat_t, x_axis='time')
>>> plt.title('Beat synchronous CQT power, '
... 'shape={}'.format(C_med.shape))
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med_sub,
... ref=np.max),
... x_coords=subbeat_t, x_axis='time')
>>> plt.title('Sub-beat synchronous CQT power, '
... 'shape={}'.format(C_med_sub.shape))
>>> plt.tight_layout()
|
librosa/util/utils.py
|
def sync(data, idx, aggregate=None, pad=True, axis=-1):
"""Synchronous aggregation of a multi-dimensional array between boundaries
.. note::
In order to ensure total coverage, boundary points may be added
to `idx`.
If synchronizing a feature matrix against beat tracker output, ensure
that frame index numbers are properly aligned and use the same hop length.
Parameters
----------
data : np.ndarray
multi-dimensional array of features
idx : iterable of ints or slices
Either an ordered array of boundary indices, or
an iterable collection of slice objects.
aggregate : function
aggregation function (default: `np.mean`)
pad : boolean
If `True`, `idx` is padded to span the full range `[0, data.shape[axis]]`
axis : int
The axis along which to aggregate data
Returns
-------
data_sync : ndarray
`data_sync` will have the same dimension as `data`, except that the `axis`
coordinate will be reduced according to `idx`.
For example, a 2-dimensional `data` with `axis=-1` should satisfy
`data_sync[:, i] = aggregate(data[:, idx[i-1]:idx[i]], axis=-1)`
Raises
------
ParameterError
If the index set is not of consistent type (all slices or all integers)
Notes
-----
This function caches at level 40.
Examples
--------
Beat-synchronous CQT spectra
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> C = np.abs(librosa.cqt(y=y, sr=sr))
>>> beats = librosa.util.fix_frames(beats, x_max=C.shape[1])
By default, use mean aggregation
>>> C_avg = librosa.util.sync(C, beats)
Use median-aggregation instead of mean
>>> C_med = librosa.util.sync(C, beats,
... aggregate=np.median)
Or sub-beat synchronization
>>> sub_beats = librosa.segment.subsegment(C, beats)
>>> sub_beats = librosa.util.fix_frames(sub_beats, x_max=C.shape[1])
>>> C_med_sub = librosa.util.sync(C, sub_beats, aggregate=np.median)
Plot the results
>>> import matplotlib.pyplot as plt
>>> beat_t = librosa.frames_to_time(beats, sr=sr)
>>> subbeat_t = librosa.frames_to_time(sub_beats, sr=sr)
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... x_axis='time')
>>> plt.title('CQT power, shape={}'.format(C.shape))
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med,
... ref=np.max),
... x_coords=beat_t, x_axis='time')
>>> plt.title('Beat synchronous CQT power, '
... 'shape={}'.format(C_med.shape))
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med_sub,
... ref=np.max),
... x_coords=subbeat_t, x_axis='time')
>>> plt.title('Sub-beat synchronous CQT power, '
... 'shape={}'.format(C_med_sub.shape))
>>> plt.tight_layout()
"""
if aggregate is None:
aggregate = np.mean
shape = list(data.shape)
if np.all([isinstance(_, slice) for _ in idx]):
slices = idx
elif np.all([np.issubdtype(type(_), np.integer) for _ in idx]):
slices = index_to_slice(np.asarray(idx), 0, shape[axis], pad=pad)
else:
raise ParameterError('Invalid index set: {}'.format(idx))
agg_shape = list(shape)
agg_shape[axis] = len(slices)
data_agg = np.empty(agg_shape, order='F' if np.isfortran(data) else 'C', dtype=data.dtype)
idx_in = [slice(None)] * data.ndim
idx_agg = [slice(None)] * data_agg.ndim
for (i, segment) in enumerate(slices):
idx_in[axis] = segment
idx_agg[axis] = i
data_agg[tuple(idx_agg)] = aggregate(data[tuple(idx_in)], axis=axis)
return data_agg
|
def sync(data, idx, aggregate=None, pad=True, axis=-1):
"""Synchronous aggregation of a multi-dimensional array between boundaries
.. note::
In order to ensure total coverage, boundary points may be added
to `idx`.
If synchronizing a feature matrix against beat tracker output, ensure
that frame index numbers are properly aligned and use the same hop length.
Parameters
----------
data : np.ndarray
multi-dimensional array of features
idx : iterable of ints or slices
Either an ordered array of boundary indices, or
an iterable collection of slice objects.
aggregate : function
aggregation function (default: `np.mean`)
pad : boolean
If `True`, `idx` is padded to span the full range `[0, data.shape[axis]]`
axis : int
The axis along which to aggregate data
Returns
-------
data_sync : ndarray
`data_sync` will have the same dimension as `data`, except that the `axis`
coordinate will be reduced according to `idx`.
For example, a 2-dimensional `data` with `axis=-1` should satisfy
`data_sync[:, i] = aggregate(data[:, idx[i-1]:idx[i]], axis=-1)`
Raises
------
ParameterError
If the index set is not of consistent type (all slices or all integers)
Notes
-----
This function caches at level 40.
Examples
--------
Beat-synchronous CQT spectra
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> C = np.abs(librosa.cqt(y=y, sr=sr))
>>> beats = librosa.util.fix_frames(beats, x_max=C.shape[1])
By default, use mean aggregation
>>> C_avg = librosa.util.sync(C, beats)
Use median-aggregation instead of mean
>>> C_med = librosa.util.sync(C, beats,
... aggregate=np.median)
Or sub-beat synchronization
>>> sub_beats = librosa.segment.subsegment(C, beats)
>>> sub_beats = librosa.util.fix_frames(sub_beats, x_max=C.shape[1])
>>> C_med_sub = librosa.util.sync(C, sub_beats, aggregate=np.median)
Plot the results
>>> import matplotlib.pyplot as plt
>>> beat_t = librosa.frames_to_time(beats, sr=sr)
>>> subbeat_t = librosa.frames_to_time(sub_beats, sr=sr)
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... x_axis='time')
>>> plt.title('CQT power, shape={}'.format(C.shape))
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med,
... ref=np.max),
... x_coords=beat_t, x_axis='time')
>>> plt.title('Beat synchronous CQT power, '
... 'shape={}'.format(C_med.shape))
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(C_med_sub,
... ref=np.max),
... x_coords=subbeat_t, x_axis='time')
>>> plt.title('Sub-beat synchronous CQT power, '
... 'shape={}'.format(C_med_sub.shape))
>>> plt.tight_layout()
"""
if aggregate is None:
aggregate = np.mean
shape = list(data.shape)
if np.all([isinstance(_, slice) for _ in idx]):
slices = idx
elif np.all([np.issubdtype(type(_), np.integer) for _ in idx]):
slices = index_to_slice(np.asarray(idx), 0, shape[axis], pad=pad)
else:
raise ParameterError('Invalid index set: {}'.format(idx))
agg_shape = list(shape)
agg_shape[axis] = len(slices)
data_agg = np.empty(agg_shape, order='F' if np.isfortran(data) else 'C', dtype=data.dtype)
idx_in = [slice(None)] * data.ndim
idx_agg = [slice(None)] * data_agg.ndim
for (i, segment) in enumerate(slices):
idx_in[axis] = segment
idx_agg[axis] = i
data_agg[tuple(idx_agg)] = aggregate(data[tuple(idx_in)], axis=axis)
return data_agg
|
[
"Synchronous",
"aggregation",
"of",
"a",
"multi",
"-",
"dimensional",
"array",
"between",
"boundaries"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1263-L1388
|
[
"def",
"sync",
"(",
"data",
",",
"idx",
",",
"aggregate",
"=",
"None",
",",
"pad",
"=",
"True",
",",
"axis",
"=",
"-",
"1",
")",
":",
"if",
"aggregate",
"is",
"None",
":",
"aggregate",
"=",
"np",
".",
"mean",
"shape",
"=",
"list",
"(",
"data",
".",
"shape",
")",
"if",
"np",
".",
"all",
"(",
"[",
"isinstance",
"(",
"_",
",",
"slice",
")",
"for",
"_",
"in",
"idx",
"]",
")",
":",
"slices",
"=",
"idx",
"elif",
"np",
".",
"all",
"(",
"[",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"_",
")",
",",
"np",
".",
"integer",
")",
"for",
"_",
"in",
"idx",
"]",
")",
":",
"slices",
"=",
"index_to_slice",
"(",
"np",
".",
"asarray",
"(",
"idx",
")",
",",
"0",
",",
"shape",
"[",
"axis",
"]",
",",
"pad",
"=",
"pad",
")",
"else",
":",
"raise",
"ParameterError",
"(",
"'Invalid index set: {}'",
".",
"format",
"(",
"idx",
")",
")",
"agg_shape",
"=",
"list",
"(",
"shape",
")",
"agg_shape",
"[",
"axis",
"]",
"=",
"len",
"(",
"slices",
")",
"data_agg",
"=",
"np",
".",
"empty",
"(",
"agg_shape",
",",
"order",
"=",
"'F'",
"if",
"np",
".",
"isfortran",
"(",
"data",
")",
"else",
"'C'",
",",
"dtype",
"=",
"data",
".",
"dtype",
")",
"idx_in",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"data",
".",
"ndim",
"idx_agg",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"data_agg",
".",
"ndim",
"for",
"(",
"i",
",",
"segment",
")",
"in",
"enumerate",
"(",
"slices",
")",
":",
"idx_in",
"[",
"axis",
"]",
"=",
"segment",
"idx_agg",
"[",
"axis",
"]",
"=",
"i",
"data_agg",
"[",
"tuple",
"(",
"idx_agg",
")",
"]",
"=",
"aggregate",
"(",
"data",
"[",
"tuple",
"(",
"idx_in",
")",
"]",
",",
"axis",
"=",
"axis",
")",
"return",
"data_agg"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
softmask
|
Robustly compute a softmask operation.
`M = X**power / (X**power + X_ref**power)`
Parameters
----------
X : np.ndarray
The (non-negative) input array corresponding to the positive mask elements
X_ref : np.ndarray
The (non-negative) array of reference or background elements.
Must have the same shape as `X`.
power : number > 0 or np.inf
If finite, returns the soft mask computed in a numerically stable way
If infinite, returns a hard (binary) mask equivalent to `X > X_ref`.
Note: for hard masks, ties are always broken in favor of `X_ref` (`mask=0`).
split_zeros : bool
If `True`, entries where `X` and X`_ref` are both small (close to 0)
will receive mask values of 0.5.
Otherwise, the mask is set to 0 for these entries.
Returns
-------
mask : np.ndarray, shape=`X.shape`
The output mask array
Raises
------
ParameterError
If `X` and `X_ref` have different shapes.
If `X` or `X_ref` are negative anywhere
If `power <= 0`
Examples
--------
>>> X = 2 * np.ones((3, 3))
>>> X_ref = np.vander(np.arange(3.0))
>>> X
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]])
>>> X_ref
array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]])
>>> librosa.util.softmask(X, X_ref, power=1)
array([[ 1. , 1. , 0.667],
[ 0.667, 0.667, 0.667],
[ 0.333, 0.5 , 0.667]])
>>> librosa.util.softmask(X_ref, X, power=1)
array([[ 0. , 0. , 0.333],
[ 0.333, 0.333, 0.333],
[ 0.667, 0.5 , 0.333]])
>>> librosa.util.softmask(X, X_ref, power=2)
array([[ 1. , 1. , 0.8],
[ 0.8, 0.8, 0.8],
[ 0.2, 0.5, 0.8]])
>>> librosa.util.softmask(X, X_ref, power=4)
array([[ 1. , 1. , 0.941],
[ 0.941, 0.941, 0.941],
[ 0.059, 0.5 , 0.941]])
>>> librosa.util.softmask(X, X_ref, power=100)
array([[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 7.889e-31, 5.000e-01, 1.000e+00]])
>>> librosa.util.softmask(X, X_ref, power=np.inf)
array([[ True, True, True],
[ True, True, True],
[False, False, True]], dtype=bool)
|
librosa/util/utils.py
|
def softmask(X, X_ref, power=1, split_zeros=False):
    '''Robustly compute a softmask operation.

    `M = X**power / (X**power + X_ref**power)`

    Parameters
    ----------
    X : np.ndarray
        The (non-negative) input array corresponding to the positive mask elements
    X_ref : np.ndarray
        The (non-negative) array of reference or background elements.
        Must have the same shape as `X`.
    power : number > 0 or np.inf
        If finite, returns the soft mask computed in a numerically stable way.
        If infinite, returns a hard (binary) mask equivalent to `X > X_ref`.
        Note: for hard masks, ties are always broken in favor of `X_ref` (`mask=0`).
    split_zeros : bool
        If `True`, entries where `X` and `X_ref` are both small (close to 0)
        will receive mask values of 0.5.
        Otherwise, the mask is set to 0 for these entries.

    Returns
    -------
    mask : np.ndarray, shape=`X.shape`
        The output mask array

    Raises
    ------
    ParameterError
        If `X` and `X_ref` have different shapes,
        if `X` or `X_ref` are negative anywhere,
        or if `power <= 0`.
    '''
    # Validate inputs up front so failures are explicit.
    if X.shape != X_ref.shape:
        raise ParameterError('Shape mismatch: {}!={}'.format(X.shape,
                                                             X_ref.shape))

    if np.any(X < 0) or np.any(X_ref < 0):
        raise ParameterError('X and X_ref must be non-negative')

    if power <= 0:
        raise ParameterError('power must be strictly positive')

    # Integer inputs cannot represent a fractional mask; promote to float32.
    dtype = X.dtype
    if not np.issubdtype(dtype, np.floating):
        dtype = np.float32

    # Rescale both inputs by their element-wise maximum so the ratios
    # below lie in [0, 1] and exponentiation cannot overflow.
    Z = np.maximum(X, X_ref).astype(dtype)
    bad_idx = (Z < np.finfo(dtype).tiny)
    Z[bad_idx] = 1

    if np.isfinite(power):
        # Soft mask: X**p / (X**p + X_ref**p), computed on rescaled values.
        mask = (X / Z)**power
        ref_mask = (X_ref / Z)**power
        good_idx = ~bad_idx
        mask[good_idx] /= mask[good_idx] + ref_mask[good_idx]
        # Where both inputs are (near-)zero, split the mask or zero it out.
        mask[bad_idx] = 0.5 if split_zeros else 0.0
    else:
        # Hard (binary) mask; ties are resolved in favor of X_ref.
        mask = X > X_ref

    return mask
|
def softmask(X, X_ref, power=1, split_zeros=False):
'''Robustly compute a softmask operation.
`M = X**power / (X**power + X_ref**power)`
Parameters
----------
X : np.ndarray
The (non-negative) input array corresponding to the positive mask elements
X_ref : np.ndarray
The (non-negative) array of reference or background elements.
Must have the same shape as `X`.
power : number > 0 or np.inf
If finite, returns the soft mask computed in a numerically stable way
If infinite, returns a hard (binary) mask equivalent to `X > X_ref`.
Note: for hard masks, ties are always broken in favor of `X_ref` (`mask=0`).
split_zeros : bool
If `True`, entries where `X` and X`_ref` are both small (close to 0)
will receive mask values of 0.5.
Otherwise, the mask is set to 0 for these entries.
Returns
-------
mask : np.ndarray, shape=`X.shape`
The output mask array
Raises
------
ParameterError
If `X` and `X_ref` have different shapes.
If `X` or `X_ref` are negative anywhere
If `power <= 0`
Examples
--------
>>> X = 2 * np.ones((3, 3))
>>> X_ref = np.vander(np.arange(3.0))
>>> X
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]])
>>> X_ref
array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]])
>>> librosa.util.softmask(X, X_ref, power=1)
array([[ 1. , 1. , 0.667],
[ 0.667, 0.667, 0.667],
[ 0.333, 0.5 , 0.667]])
>>> librosa.util.softmask(X_ref, X, power=1)
array([[ 0. , 0. , 0.333],
[ 0.333, 0.333, 0.333],
[ 0.667, 0.5 , 0.333]])
>>> librosa.util.softmask(X, X_ref, power=2)
array([[ 1. , 1. , 0.8],
[ 0.8, 0.8, 0.8],
[ 0.2, 0.5, 0.8]])
>>> librosa.util.softmask(X, X_ref, power=4)
array([[ 1. , 1. , 0.941],
[ 0.941, 0.941, 0.941],
[ 0.059, 0.5 , 0.941]])
>>> librosa.util.softmask(X, X_ref, power=100)
array([[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 1.000e+00, 1.000e+00, 1.000e+00],
[ 7.889e-31, 5.000e-01, 1.000e+00]])
>>> librosa.util.softmask(X, X_ref, power=np.inf)
array([[ True, True, True],
[ True, True, True],
[False, False, True]], dtype=bool)
'''
if X.shape != X_ref.shape:
raise ParameterError('Shape mismatch: {}!={}'.format(X.shape,
X_ref.shape))
if np.any(X < 0) or np.any(X_ref < 0):
raise ParameterError('X and X_ref must be non-negative')
if power <= 0:
raise ParameterError('power must be strictly positive')
# We're working with ints, cast to float.
dtype = X.dtype
if not np.issubdtype(dtype, np.floating):
dtype = np.float32
# Re-scale the input arrays relative to the larger value
Z = np.maximum(X, X_ref).astype(dtype)
bad_idx = (Z < np.finfo(dtype).tiny)
Z[bad_idx] = 1
# For finite power, compute the softmask
if np.isfinite(power):
mask = (X / Z)**power
ref_mask = (X_ref / Z)**power
good_idx = ~bad_idx
mask[good_idx] /= mask[good_idx] + ref_mask[good_idx]
# Wherever energy is below energy in both inputs, split the mask
if split_zeros:
mask[bad_idx] = 0.5
else:
mask[bad_idx] = 0.0
else:
# Otherwise, compute the hard mask
mask = X > X_ref
return mask
|
[
"Robustly",
"compute",
"a",
"softmask",
"operation",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1391-L1507
|
[
"def",
"softmask",
"(",
"X",
",",
"X_ref",
",",
"power",
"=",
"1",
",",
"split_zeros",
"=",
"False",
")",
":",
"if",
"X",
".",
"shape",
"!=",
"X_ref",
".",
"shape",
":",
"raise",
"ParameterError",
"(",
"'Shape mismatch: {}!={}'",
".",
"format",
"(",
"X",
".",
"shape",
",",
"X_ref",
".",
"shape",
")",
")",
"if",
"np",
".",
"any",
"(",
"X",
"<",
"0",
")",
"or",
"np",
".",
"any",
"(",
"X_ref",
"<",
"0",
")",
":",
"raise",
"ParameterError",
"(",
"'X and X_ref must be non-negative'",
")",
"if",
"power",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'power must be strictly positive'",
")",
"# We're working with ints, cast to float.",
"dtype",
"=",
"X",
".",
"dtype",
"if",
"not",
"np",
".",
"issubdtype",
"(",
"dtype",
",",
"np",
".",
"floating",
")",
":",
"dtype",
"=",
"np",
".",
"float32",
"# Re-scale the input arrays relative to the larger value",
"Z",
"=",
"np",
".",
"maximum",
"(",
"X",
",",
"X_ref",
")",
".",
"astype",
"(",
"dtype",
")",
"bad_idx",
"=",
"(",
"Z",
"<",
"np",
".",
"finfo",
"(",
"dtype",
")",
".",
"tiny",
")",
"Z",
"[",
"bad_idx",
"]",
"=",
"1",
"# For finite power, compute the softmask",
"if",
"np",
".",
"isfinite",
"(",
"power",
")",
":",
"mask",
"=",
"(",
"X",
"/",
"Z",
")",
"**",
"power",
"ref_mask",
"=",
"(",
"X_ref",
"/",
"Z",
")",
"**",
"power",
"good_idx",
"=",
"~",
"bad_idx",
"mask",
"[",
"good_idx",
"]",
"/=",
"mask",
"[",
"good_idx",
"]",
"+",
"ref_mask",
"[",
"good_idx",
"]",
"# Wherever energy is below energy in both inputs, split the mask",
"if",
"split_zeros",
":",
"mask",
"[",
"bad_idx",
"]",
"=",
"0.5",
"else",
":",
"mask",
"[",
"bad_idx",
"]",
"=",
"0.0",
"else",
":",
"# Otherwise, compute the hard mask",
"mask",
"=",
"X",
">",
"X_ref",
"return",
"mask"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
tiny
|
Compute the tiny-value corresponding to an input's data type.
This is the smallest "usable" number representable in `x`'s
data type (e.g., float32).
This is primarily useful for determining a threshold for
numerical underflow in division or multiplication operations.
Parameters
----------
x : number or np.ndarray
The array to compute the tiny-value for.
All that matters here is `x.dtype`.
Returns
-------
tiny_value : float
The smallest positive usable number for the type of `x`.
If `x` is integer-typed, then the tiny value for `np.float32`
is returned instead.
See Also
--------
numpy.finfo
Examples
--------
For a standard double-precision floating point number:
>>> librosa.util.tiny(1.0)
2.2250738585072014e-308
Or explicitly as double-precision
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float64))
2.2250738585072014e-308
Or complex numbers
>>> librosa.util.tiny(1j)
2.2250738585072014e-308
Single-precision floating point:
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float32))
1.1754944e-38
Integer
>>> librosa.util.tiny(5)
1.1754944e-38
|
librosa/util/utils.py
|
def tiny(x):
    '''Compute the tiny-value corresponding to an input's data type.

    This is the smallest "usable" number representable in `x`'s
    data type (e.g., float32), primarily useful as a threshold for
    numerical underflow in division or multiplication.

    Parameters
    ----------
    x : number or np.ndarray
        The array to compute the tiny-value for.
        All that matters here is `x.dtype`.

    Returns
    -------
    tiny_value : float
        The smallest positive usable number for the type of `x`.
        If `x` is integer-typed, then the tiny value for `np.float32`
        is returned instead.

    See Also
    --------
    numpy.finfo
    '''
    # View the input as an array so scalars also expose a dtype.
    x = np.asarray(x)

    # Only real and complex floating types have a meaningful "tiny";
    # everything else (e.g. integers) falls back to float32's tiny.
    is_float = np.issubdtype(x.dtype, np.floating)
    is_complex = np.issubdtype(x.dtype, np.complexfloating)
    dtype = x.dtype if (is_float or is_complex) else np.float32

    return np.finfo(dtype).tiny
|
def tiny(x):
'''Compute the tiny-value corresponding to an input's data type.
This is the smallest "usable" number representable in `x`'s
data type (e.g., float32).
This is primarily useful for determining a threshold for
numerical underflow in division or multiplication operations.
Parameters
----------
x : number or np.ndarray
The array to compute the tiny-value for.
All that matters here is `x.dtype`.
Returns
-------
tiny_value : float
The smallest positive usable number for the type of `x`.
If `x` is integer-typed, then the tiny value for `np.float32`
is returned instead.
See Also
--------
numpy.finfo
Examples
--------
For a standard double-precision floating point number:
>>> librosa.util.tiny(1.0)
2.2250738585072014e-308
Or explicitly as double-precision
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float64))
2.2250738585072014e-308
Or complex numbers
>>> librosa.util.tiny(1j)
2.2250738585072014e-308
Single-precision floating point:
>>> librosa.util.tiny(np.asarray(1e-5, dtype=np.float32))
1.1754944e-38
Integer
>>> librosa.util.tiny(5)
1.1754944e-38
'''
# Make sure we have an array view
x = np.asarray(x)
# Only floating types generate a tiny
if np.issubdtype(x.dtype, np.floating) or np.issubdtype(x.dtype, np.complexfloating):
dtype = x.dtype
else:
dtype = np.float32
return np.finfo(dtype).tiny
|
[
"Compute",
"the",
"tiny",
"-",
"value",
"corresponding",
"to",
"an",
"input",
"s",
"data",
"type",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1510-L1574
|
[
"def",
"tiny",
"(",
"x",
")",
":",
"# Make sure we have an array view",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"# Only floating types generate a tiny",
"if",
"np",
".",
"issubdtype",
"(",
"x",
".",
"dtype",
",",
"np",
".",
"floating",
")",
"or",
"np",
".",
"issubdtype",
"(",
"x",
".",
"dtype",
",",
"np",
".",
"complexfloating",
")",
":",
"dtype",
"=",
"x",
".",
"dtype",
"else",
":",
"dtype",
"=",
"np",
".",
"float32",
"return",
"np",
".",
"finfo",
"(",
"dtype",
")",
".",
"tiny"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
fill_off_diagonal
|
Sets all cells of a matrix to a given ``value``
if they lie outside a constraint region.
In this case, the constraint region is the
Sakoe-Chiba band which runs with a fixed ``radius``
along the main diagonal.
When ``x.shape[0] != x.shape[1]``, the radius will be
expanded so that ``x[-1, -1] = 1`` always.
``x`` will be modified in place.
Parameters
----------
x : np.ndarray [shape=(N, M)]
Input matrix, will be modified in place.
radius : float
The band radius (1/2 of the width) will be
``int(radius*min(x.shape))``.
value : int
``x[n, m] = value`` when ``(n, m)`` lies outside the band.
Examples
--------
>>> x = np.ones((8, 8))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> x = np.ones((8, 12))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
|
librosa/util/utils.py
|
def fill_off_diagonal(x, radius, value=0):
    """Sets all cells of a matrix to a given ``value``
    if they lie outside a constraint region.

    In this case, the constraint region is the
    Sakoe-Chiba band which runs with a fixed ``radius``
    along the main diagonal.

    When ``x.shape[0] != x.shape[1]``, the radius will be
    expanded so that ``x[-1, -1] = 1`` always.

    ``x`` will be modified in place.

    Parameters
    ----------
    x : np.ndarray [shape=(N, M)]
        Input matrix, will be modified in place.
    radius : float
        The band radius (1/2 of the width) will be
        ``int(radius*min(x.shape))``.
    value : int
        ``x[n, m] = value`` when ``(n, m)`` lies outside the band.
    """
    nx, ny = x.shape

    # Convert the proportional radius into an index count.
    # Cast to int: np.round returns a float, and the *_indices_from
    # helpers require an integer diagonal offset ``k``.
    radius = int(np.round(radius * np.min(x.shape)))

    # For non-square matrices, widen the band along the longer axis
    # so the far corner x[-1, -1] always stays inside the band.
    offset = abs(nx - ny)

    if nx < ny:
        idx_u = np.triu_indices_from(x, k=radius + offset)
        idx_l = np.tril_indices_from(x, k=-radius)
    else:
        idx_u = np.triu_indices_from(x, k=radius)
        idx_l = np.tril_indices_from(x, k=-radius - offset)

    # Overwrite everything outside the band, in place.
    x[idx_u] = value
    x[idx_l] = value
|
def fill_off_diagonal(x, radius, value=0):
"""Sets all cells of a matrix to a given ``value``
if they lie outside a constraint region.
In this case, the constraint region is the
Sakoe-Chiba band which runs with a fixed ``radius``
along the main diagonal.
When ``x.shape[0] != x.shape[1]``, the radius will be
expanded so that ``x[-1, -1] = 1`` always.
``x`` will be modified in place.
Parameters
----------
x : np.ndarray [shape=(N, M)]
Input matrix, will be modified in place.
radius : float
The band radius (1/2 of the width) will be
``int(radius*min(x.shape))``.
value : int
``x[n, m] = value`` when ``(n, m)`` lies outside the band.
Examples
--------
>>> x = np.ones((8, 8))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> x = np.ones((8, 12))
>>> librosa.util.fill_off_diagonal(x, 0.25)
>>> x
array([[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
"""
nx, ny = x.shape
# Calculate the radius in indices, rather than proportion
radius = np.round(radius * np.min(x.shape))
nx, ny = x.shape
offset = np.abs((x.shape[0] - x.shape[1]))
if nx < ny:
idx_u = np.triu_indices_from(x, k=radius + offset)
idx_l = np.tril_indices_from(x, k=-radius)
else:
idx_u = np.triu_indices_from(x, k=radius)
idx_l = np.tril_indices_from(x, k=-radius - offset)
# modify input matrix
x[idx_u] = value
x[idx_l] = value
|
[
"Sets",
"all",
"cells",
"of",
"a",
"matrix",
"to",
"a",
"given",
"value",
"if",
"they",
"lie",
"outside",
"a",
"constraint",
"region",
".",
"In",
"this",
"case",
"the",
"constraint",
"region",
"is",
"the",
"Sakoe",
"-",
"Chiba",
"band",
"which",
"runs",
"with",
"a",
"fixed",
"radius",
"along",
"the",
"main",
"diagonal",
".",
"When",
"x",
".",
"shape",
"[",
"0",
"]",
"!",
"=",
"x",
".",
"shape",
"[",
"1",
"]",
"the",
"radius",
"will",
"be",
"expanded",
"so",
"that",
"x",
"[",
"-",
"1",
"-",
"1",
"]",
"=",
"1",
"always",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L1577-L1640
|
[
"def",
"fill_off_diagonal",
"(",
"x",
",",
"radius",
",",
"value",
"=",
"0",
")",
":",
"nx",
",",
"ny",
"=",
"x",
".",
"shape",
"# Calculate the radius in indices, rather than proportion",
"radius",
"=",
"np",
".",
"round",
"(",
"radius",
"*",
"np",
".",
"min",
"(",
"x",
".",
"shape",
")",
")",
"nx",
",",
"ny",
"=",
"x",
".",
"shape",
"offset",
"=",
"np",
".",
"abs",
"(",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
"-",
"x",
".",
"shape",
"[",
"1",
"]",
")",
")",
"if",
"nx",
"<",
"ny",
":",
"idx_u",
"=",
"np",
".",
"triu_indices_from",
"(",
"x",
",",
"k",
"=",
"radius",
"+",
"offset",
")",
"idx_l",
"=",
"np",
".",
"tril_indices_from",
"(",
"x",
",",
"k",
"=",
"-",
"radius",
")",
"else",
":",
"idx_u",
"=",
"np",
".",
"triu_indices_from",
"(",
"x",
",",
"k",
"=",
"radius",
")",
"idx_l",
"=",
"np",
".",
"tril_indices_from",
"(",
"x",
",",
"k",
"=",
"-",
"radius",
"-",
"offset",
")",
"# modify input matrix",
"x",
"[",
"idx_u",
"]",
"=",
"value",
"x",
"[",
"idx_l",
"]",
"=",
"value"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
frames2video
|
Read the frame images from a directory and join them as a video
Args:
frame_dir (str): The directory containing video frames.
video_file (str): Output filename.
fps (float): FPS of the output video.
fourcc (str): Fourcc of the output video, this should be compatible
with the output file type.
filename_tmpl (str): Filename template with the index as the variable.
start (int): Starting frame index.
end (int): Ending frame index.
show_progress (bool): Whether to show a progress bar.
|
mmcv/video/io.py
|
def frames2video(frame_dir,
                 video_file,
                 fps=30,
                 fourcc='XVID',
                 filename_tmpl='{:06d}.jpg',
                 start=0,
                 end=0,
                 show_progress=True):
    """Read the frame images from a directory and join them as a video

    Args:
        frame_dir (str): The directory containing video frames.
        video_file (str): Output filename.
        fps (float): FPS of the output video.
        fourcc (str): Fourcc of the output video, this should be compatible
            with the output file type.
        filename_tmpl (str): Filename template with the index as the variable.
        start (int): Starting frame index.
        end (int): Ending frame index.
        show_progress (bool): Whether to show a progress bar.
    """
    if end == 0:
        # Default: write every frame image found in the directory.
        ext = filename_tmpl.split('.')[-1]
        end = len([name for name in scandir(frame_dir, ext)])
    first_file = osp.join(frame_dir, filename_tmpl.format(start))
    check_file_exist(first_file, 'The start frame not found: ' + first_file)

    # The output video inherits its resolution from the first frame.
    img = cv2.imread(first_file)
    height, width = img.shape[:2]
    resolution = (width, height)
    vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps,
                              resolution)

    def write_frame(file_idx):
        # Read one frame image from disk and append it to the video.
        filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
        img = cv2.imread(filename)
        vwriter.write(img)

    if show_progress:
        track_progress(write_frame, range(start, end))
    else:
        # Fix: reuse write_frame instead of duplicating its read/write body.
        for i in range(start, end):
            write_frame(i)
    vwriter.release()
|
def frames2video(frame_dir,
video_file,
fps=30,
fourcc='XVID',
filename_tmpl='{:06d}.jpg',
start=0,
end=0,
show_progress=True):
"""Read the frame images from a directory and join them as a video
Args:
frame_dir (str): The directory containing video frames.
video_file (str): Output filename.
fps (float): FPS of the output video.
fourcc (str): Fourcc of the output video, this should be compatible
with the output file type.
filename_tmpl (str): Filename template with the index as the variable.
start (int): Starting frame index.
end (int): Ending frame index.
show_progress (bool): Whether to show a progress bar.
"""
if end == 0:
ext = filename_tmpl.split('.')[-1]
end = len([name for name in scandir(frame_dir, ext)])
first_file = osp.join(frame_dir, filename_tmpl.format(start))
check_file_exist(first_file, 'The start frame not found: ' + first_file)
img = cv2.imread(first_file)
height, width = img.shape[:2]
resolution = (width, height)
vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps,
resolution)
def write_frame(file_idx):
filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
img = cv2.imread(filename)
vwriter.write(img)
if show_progress:
track_progress(write_frame, range(start, end))
else:
for i in range(start, end):
filename = osp.join(frame_dir, filename_tmpl.format(i))
img = cv2.imread(filename)
vwriter.write(img)
vwriter.release()
|
[
"Read",
"the",
"frame",
"images",
"from",
"a",
"directory",
"and",
"join",
"them",
"as",
"a",
"video"
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/video/io.py#L288-L332
|
[
"def",
"frames2video",
"(",
"frame_dir",
",",
"video_file",
",",
"fps",
"=",
"30",
",",
"fourcc",
"=",
"'XVID'",
",",
"filename_tmpl",
"=",
"'{:06d}.jpg'",
",",
"start",
"=",
"0",
",",
"end",
"=",
"0",
",",
"show_progress",
"=",
"True",
")",
":",
"if",
"end",
"==",
"0",
":",
"ext",
"=",
"filename_tmpl",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"end",
"=",
"len",
"(",
"[",
"name",
"for",
"name",
"in",
"scandir",
"(",
"frame_dir",
",",
"ext",
")",
"]",
")",
"first_file",
"=",
"osp",
".",
"join",
"(",
"frame_dir",
",",
"filename_tmpl",
".",
"format",
"(",
"start",
")",
")",
"check_file_exist",
"(",
"first_file",
",",
"'The start frame not found: '",
"+",
"first_file",
")",
"img",
"=",
"cv2",
".",
"imread",
"(",
"first_file",
")",
"height",
",",
"width",
"=",
"img",
".",
"shape",
"[",
":",
"2",
"]",
"resolution",
"=",
"(",
"width",
",",
"height",
")",
"vwriter",
"=",
"cv2",
".",
"VideoWriter",
"(",
"video_file",
",",
"VideoWriter_fourcc",
"(",
"*",
"fourcc",
")",
",",
"fps",
",",
"resolution",
")",
"def",
"write_frame",
"(",
"file_idx",
")",
":",
"filename",
"=",
"osp",
".",
"join",
"(",
"frame_dir",
",",
"filename_tmpl",
".",
"format",
"(",
"file_idx",
")",
")",
"img",
"=",
"cv2",
".",
"imread",
"(",
"filename",
")",
"vwriter",
".",
"write",
"(",
"img",
")",
"if",
"show_progress",
":",
"track_progress",
"(",
"write_frame",
",",
"range",
"(",
"start",
",",
"end",
")",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"start",
",",
"end",
")",
":",
"filename",
"=",
"osp",
".",
"join",
"(",
"frame_dir",
",",
"filename_tmpl",
".",
"format",
"(",
"i",
")",
")",
"img",
"=",
"cv2",
".",
"imread",
"(",
"filename",
")",
"vwriter",
".",
"write",
"(",
"img",
")",
"vwriter",
".",
"release",
"(",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
VideoReader.read
|
Read the next frame.
If the next frame have been decoded before and in the cache, then
return it directly, otherwise decode, cache and return it.
Returns:
ndarray or None: Return the frame if successful, otherwise None.
|
mmcv/video/io.py
|
def read(self):
    """Read the next frame.

    If the next frame has been decoded before and is in the cache, it is
    returned directly; otherwise it is decoded, cached and returned.

    Returns:
        ndarray or None: Return the frame if successful, otherwise None.
    """
    if self._cache:
        cached = self._cache.get(self._position)
        if cached is not None:
            ret, img = True, cached
        else:
            # Cache miss: make sure the underlying capture is positioned
            # at the expected frame before decoding.
            if self._position != self._get_real_position():
                self._set_real_position(self._position)
            ret, img = self._vcap.read()
            if ret:
                self._cache.put(self._position, img)
    else:
        ret, img = self._vcap.read()
    if ret:
        self._position += 1
    return img
|
def read(self):
"""Read the next frame.
If the next frame have been decoded before and in the cache, then
return it directly, otherwise decode, cache and return it.
Returns:
ndarray or None: Return the frame if successful, otherwise None.
"""
# pos = self._position
if self._cache:
img = self._cache.get(self._position)
if img is not None:
ret = True
else:
if self._position != self._get_real_position():
self._set_real_position(self._position)
ret, img = self._vcap.read()
if ret:
self._cache.put(self._position, img)
else:
ret, img = self._vcap.read()
if ret:
self._position += 1
return img
|
[
"Read",
"the",
"next",
"frame",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/video/io.py#L142-L166
|
[
"def",
"read",
"(",
"self",
")",
":",
"# pos = self._position",
"if",
"self",
".",
"_cache",
":",
"img",
"=",
"self",
".",
"_cache",
".",
"get",
"(",
"self",
".",
"_position",
")",
"if",
"img",
"is",
"not",
"None",
":",
"ret",
"=",
"True",
"else",
":",
"if",
"self",
".",
"_position",
"!=",
"self",
".",
"_get_real_position",
"(",
")",
":",
"self",
".",
"_set_real_position",
"(",
"self",
".",
"_position",
")",
"ret",
",",
"img",
"=",
"self",
".",
"_vcap",
".",
"read",
"(",
")",
"if",
"ret",
":",
"self",
".",
"_cache",
".",
"put",
"(",
"self",
".",
"_position",
",",
"img",
")",
"else",
":",
"ret",
",",
"img",
"=",
"self",
".",
"_vcap",
".",
"read",
"(",
")",
"if",
"ret",
":",
"self",
".",
"_position",
"+=",
"1",
"return",
"img"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
VideoReader.get_frame
|
Get frame by index.
Args:
frame_id (int): Index of the expected frame, 0-based.
Returns:
ndarray or None: Return the frame if successful, otherwise None.
|
mmcv/video/io.py
|
def get_frame(self, frame_id):
    """Get frame by index.

    Args:
        frame_id (int): Index of the expected frame, 0-based.

    Returns:
        ndarray or None: Return the frame if successful, otherwise None.
    """
    if not 0 <= frame_id < self._frame_cnt:
        raise IndexError(
            '"frame_id" must be between 0 and {}'.format(self._frame_cnt -
                                                         1))
    if frame_id == self._position:
        # Sequential access: delegate to the ordinary read path.
        return self.read()
    if self._cache:
        cached = self._cache.get(frame_id)
        if cached is not None:
            self._position = frame_id + 1
            return cached
    # Random access: seek, decode, and (when caching) store the frame.
    self._set_real_position(frame_id)
    ret, img = self._vcap.read()
    if ret:
        if self._cache:
            self._cache.put(self._position, img)
        self._position += 1
    return img
|
def get_frame(self, frame_id):
"""Get frame by index.
Args:
frame_id (int): Index of the expected frame, 0-based.
Returns:
ndarray or None: Return the frame if successful, otherwise None.
"""
if frame_id < 0 or frame_id >= self._frame_cnt:
raise IndexError(
'"frame_id" must be between 0 and {}'.format(self._frame_cnt -
1))
if frame_id == self._position:
return self.read()
if self._cache:
img = self._cache.get(frame_id)
if img is not None:
self._position = frame_id + 1
return img
self._set_real_position(frame_id)
ret, img = self._vcap.read()
if ret:
if self._cache:
self._cache.put(self._position, img)
self._position += 1
return img
|
[
"Get",
"frame",
"by",
"index",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/video/io.py#L168-L194
|
[
"def",
"get_frame",
"(",
"self",
",",
"frame_id",
")",
":",
"if",
"frame_id",
"<",
"0",
"or",
"frame_id",
">=",
"self",
".",
"_frame_cnt",
":",
"raise",
"IndexError",
"(",
"'\"frame_id\" must be between 0 and {}'",
".",
"format",
"(",
"self",
".",
"_frame_cnt",
"-",
"1",
")",
")",
"if",
"frame_id",
"==",
"self",
".",
"_position",
":",
"return",
"self",
".",
"read",
"(",
")",
"if",
"self",
".",
"_cache",
":",
"img",
"=",
"self",
".",
"_cache",
".",
"get",
"(",
"frame_id",
")",
"if",
"img",
"is",
"not",
"None",
":",
"self",
".",
"_position",
"=",
"frame_id",
"+",
"1",
"return",
"img",
"self",
".",
"_set_real_position",
"(",
"frame_id",
")",
"ret",
",",
"img",
"=",
"self",
".",
"_vcap",
".",
"read",
"(",
")",
"if",
"ret",
":",
"if",
"self",
".",
"_cache",
":",
"self",
".",
"_cache",
".",
"put",
"(",
"self",
".",
"_position",
",",
"img",
")",
"self",
".",
"_position",
"+=",
"1",
"return",
"img"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
VideoReader.cvt2frames
|
Convert a video to frame images
Args:
frame_dir (str): Output directory to store all the frame images.
file_start (int): Filenames will start from the specified number.
filename_tmpl (str): Filename template with the index as the
placeholder.
start (int): The starting frame index.
max_num (int): Maximum number of frames to be written.
show_progress (bool): Whether to show a progress bar.
|
mmcv/video/io.py
|
def cvt2frames(self,
               frame_dir,
               file_start=0,
               filename_tmpl='{:06d}.jpg',
               start=0,
               max_num=0,
               show_progress=True):
    """Convert a video to frame images

    Args:
        frame_dir (str): Output directory to store all the frame images.
        file_start (int): Filenames will start from the specified number.
        filename_tmpl (str): Filename template with the index as the
            placeholder.
        start (int): The starting frame index.
        max_num (int): Maximum number of frames to be written.
        show_progress (bool): Whether to show a progress bar.

    Raises:
        ValueError: If ``start`` is not less than the total frame count.
    """
    mkdir_or_exist(frame_dir)
    # max_num == 0 means "all remaining frames from `start` onward".
    if max_num == 0:
        task_num = self.frame_cnt - start
    else:
        task_num = min(self.frame_cnt - start, max_num)
    if task_num <= 0:
        raise ValueError('start must be less than total frame number')
    if start > 0:
        # Seek the underlying capture to the requested starting frame.
        self._set_real_position(start)

    def write_frame(file_idx):
        # Decode the next frame and write it as an image file.
        img = self.read()
        filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
        cv2.imwrite(filename, img)
    if show_progress:
        track_progress(write_frame, range(file_start,
                                          file_start + task_num))
    else:
        # NOTE(review): unlike write_frame above, this branch stops early
        # when read() returns None (e.g. decode failure) — the progress
        # branch would pass None to cv2.imwrite instead. Confirm intended.
        for i in range(task_num):
            img = self.read()
            if img is None:
                break
            filename = osp.join(frame_dir,
                                filename_tmpl.format(i + file_start))
            cv2.imwrite(filename, img)
|
def cvt2frames(self,
frame_dir,
file_start=0,
filename_tmpl='{:06d}.jpg',
start=0,
max_num=0,
show_progress=True):
"""Convert a video to frame images
Args:
frame_dir (str): Output directory to store all the frame images.
file_start (int): Filenames will start from the specified number.
filename_tmpl (str): Filename template with the index as the
placeholder.
start (int): The starting frame index.
max_num (int): Maximum number of frames to be written.
show_progress (bool): Whether to show a progress bar.
"""
mkdir_or_exist(frame_dir)
if max_num == 0:
task_num = self.frame_cnt - start
else:
task_num = min(self.frame_cnt - start, max_num)
if task_num <= 0:
raise ValueError('start must be less than total frame number')
if start > 0:
self._set_real_position(start)
def write_frame(file_idx):
img = self.read()
filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
cv2.imwrite(filename, img)
if show_progress:
track_progress(write_frame, range(file_start,
file_start + task_num))
else:
for i in range(task_num):
img = self.read()
if img is None:
break
filename = osp.join(frame_dir,
filename_tmpl.format(i + file_start))
cv2.imwrite(filename, img)
|
[
"Convert",
"a",
"video",
"to",
"frame",
"images"
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/video/io.py#L207-L250
|
[
"def",
"cvt2frames",
"(",
"self",
",",
"frame_dir",
",",
"file_start",
"=",
"0",
",",
"filename_tmpl",
"=",
"'{:06d}.jpg'",
",",
"start",
"=",
"0",
",",
"max_num",
"=",
"0",
",",
"show_progress",
"=",
"True",
")",
":",
"mkdir_or_exist",
"(",
"frame_dir",
")",
"if",
"max_num",
"==",
"0",
":",
"task_num",
"=",
"self",
".",
"frame_cnt",
"-",
"start",
"else",
":",
"task_num",
"=",
"min",
"(",
"self",
".",
"frame_cnt",
"-",
"start",
",",
"max_num",
")",
"if",
"task_num",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'start must be less than total frame number'",
")",
"if",
"start",
">",
"0",
":",
"self",
".",
"_set_real_position",
"(",
"start",
")",
"def",
"write_frame",
"(",
"file_idx",
")",
":",
"img",
"=",
"self",
".",
"read",
"(",
")",
"filename",
"=",
"osp",
".",
"join",
"(",
"frame_dir",
",",
"filename_tmpl",
".",
"format",
"(",
"file_idx",
")",
")",
"cv2",
".",
"imwrite",
"(",
"filename",
",",
"img",
")",
"if",
"show_progress",
":",
"track_progress",
"(",
"write_frame",
",",
"range",
"(",
"file_start",
",",
"file_start",
"+",
"task_num",
")",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"task_num",
")",
":",
"img",
"=",
"self",
".",
"read",
"(",
")",
"if",
"img",
"is",
"None",
":",
"break",
"filename",
"=",
"osp",
".",
"join",
"(",
"frame_dir",
",",
"filename_tmpl",
".",
"format",
"(",
"i",
"+",
"file_start",
")",
")",
"cv2",
".",
"imwrite",
"(",
"filename",
",",
"img",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
track_progress
|
Track the progress of tasks execution with a progress bar.
Tasks are done with a simple for-loop.
Args:
func (callable): The function to be applied to each task.
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
bar_width (int): Width of progress bar.
Returns:
list: The task results.
|
mmcv/utils/progressbar.py
|
def track_progress(func, tasks, bar_width=50, **kwargs):
"""Track the progress of tasks execution with a progress bar.
Tasks are done with a simple for-loop.
Args:
func (callable): The function to be applied to each task.
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
bar_width (int): Width of progress bar.
Returns:
list: The task results.
"""
if isinstance(tasks, tuple):
assert len(tasks) == 2
assert isinstance(tasks[0], collections_abc.Iterable)
assert isinstance(tasks[1], int)
task_num = tasks[1]
tasks = tasks[0]
elif isinstance(tasks, collections_abc.Iterable):
task_num = len(tasks)
else:
raise TypeError(
'"tasks" must be an iterable object or a (iterator, int) tuple')
prog_bar = ProgressBar(task_num, bar_width)
results = []
for task in tasks:
results.append(func(task, **kwargs))
prog_bar.update()
sys.stdout.write('\n')
return results
|
def track_progress(func, tasks, bar_width=50, **kwargs):
"""Track the progress of tasks execution with a progress bar.
Tasks are done with a simple for-loop.
Args:
func (callable): The function to be applied to each task.
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
bar_width (int): Width of progress bar.
Returns:
list: The task results.
"""
if isinstance(tasks, tuple):
assert len(tasks) == 2
assert isinstance(tasks[0], collections_abc.Iterable)
assert isinstance(tasks[1], int)
task_num = tasks[1]
tasks = tasks[0]
elif isinstance(tasks, collections_abc.Iterable):
task_num = len(tasks)
else:
raise TypeError(
'"tasks" must be an iterable object or a (iterator, int) tuple')
prog_bar = ProgressBar(task_num, bar_width)
results = []
for task in tasks:
results.append(func(task, **kwargs))
prog_bar.update()
sys.stdout.write('\n')
return results
|
[
"Track",
"the",
"progress",
"of",
"tasks",
"execution",
"with",
"a",
"progress",
"bar",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/utils/progressbar.py#L63-L94
|
[
"def",
"track_progress",
"(",
"func",
",",
"tasks",
",",
"bar_width",
"=",
"50",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"tasks",
",",
"tuple",
")",
":",
"assert",
"len",
"(",
"tasks",
")",
"==",
"2",
"assert",
"isinstance",
"(",
"tasks",
"[",
"0",
"]",
",",
"collections_abc",
".",
"Iterable",
")",
"assert",
"isinstance",
"(",
"tasks",
"[",
"1",
"]",
",",
"int",
")",
"task_num",
"=",
"tasks",
"[",
"1",
"]",
"tasks",
"=",
"tasks",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"tasks",
",",
"collections_abc",
".",
"Iterable",
")",
":",
"task_num",
"=",
"len",
"(",
"tasks",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'\"tasks\" must be an iterable object or a (iterator, int) tuple'",
")",
"prog_bar",
"=",
"ProgressBar",
"(",
"task_num",
",",
"bar_width",
")",
"results",
"=",
"[",
"]",
"for",
"task",
"in",
"tasks",
":",
"results",
".",
"append",
"(",
"func",
"(",
"task",
",",
"*",
"*",
"kwargs",
")",
")",
"prog_bar",
".",
"update",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n'",
")",
"return",
"results"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
track_parallel_progress
|
Track the progress of parallel task execution with a progress bar.
The built-in :mod:`multiprocessing` module is used for process pools and
tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`.
Args:
func (callable): The function to be applied to each task.
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
nproc (int): Process (worker) number.
initializer (None or callable): Refer to :class:`multiprocessing.Pool`
for details.
initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for
details.
chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
bar_width (int): Width of progress bar.
skip_first (bool): Whether to skip the first sample for each worker
when estimating fps, since the initialization step may takes
longer.
keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
:func:`Pool.imap_unordered` is used.
Returns:
list: The task results.
|
mmcv/utils/progressbar.py
|
def track_parallel_progress(func,
tasks,
nproc,
initializer=None,
initargs=None,
bar_width=50,
chunksize=1,
skip_first=False,
keep_order=True):
"""Track the progress of parallel task execution with a progress bar.
The built-in :mod:`multiprocessing` module is used for process pools and
tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`.
Args:
func (callable): The function to be applied to each task.
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
nproc (int): Process (worker) number.
initializer (None or callable): Refer to :class:`multiprocessing.Pool`
for details.
initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for
details.
chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
bar_width (int): Width of progress bar.
skip_first (bool): Whether to skip the first sample for each worker
when estimating fps, since the initialization step may takes
longer.
keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
:func:`Pool.imap_unordered` is used.
Returns:
list: The task results.
"""
if isinstance(tasks, tuple):
assert len(tasks) == 2
assert isinstance(tasks[0], collections_abc.Iterable)
assert isinstance(tasks[1], int)
task_num = tasks[1]
tasks = tasks[0]
elif isinstance(tasks, collections_abc.Iterable):
task_num = len(tasks)
else:
raise TypeError(
'"tasks" must be an iterable object or a (iterator, int) tuple')
pool = init_pool(nproc, initializer, initargs)
start = not skip_first
task_num -= nproc * chunksize * int(skip_first)
prog_bar = ProgressBar(task_num, bar_width, start)
results = []
if keep_order:
gen = pool.imap(func, tasks, chunksize)
else:
gen = pool.imap_unordered(func, tasks, chunksize)
for result in gen:
results.append(result)
if skip_first:
if len(results) < nproc * chunksize:
continue
elif len(results) == nproc * chunksize:
prog_bar.start()
continue
prog_bar.update()
sys.stdout.write('\n')
pool.close()
pool.join()
return results
|
def track_parallel_progress(func,
tasks,
nproc,
initializer=None,
initargs=None,
bar_width=50,
chunksize=1,
skip_first=False,
keep_order=True):
"""Track the progress of parallel task execution with a progress bar.
The built-in :mod:`multiprocessing` module is used for process pools and
tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`.
Args:
func (callable): The function to be applied to each task.
tasks (list or tuple[Iterable, int]): A list of tasks or
(tasks, total num).
nproc (int): Process (worker) number.
initializer (None or callable): Refer to :class:`multiprocessing.Pool`
for details.
initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for
details.
chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
bar_width (int): Width of progress bar.
skip_first (bool): Whether to skip the first sample for each worker
when estimating fps, since the initialization step may takes
longer.
keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
:func:`Pool.imap_unordered` is used.
Returns:
list: The task results.
"""
if isinstance(tasks, tuple):
assert len(tasks) == 2
assert isinstance(tasks[0], collections_abc.Iterable)
assert isinstance(tasks[1], int)
task_num = tasks[1]
tasks = tasks[0]
elif isinstance(tasks, collections_abc.Iterable):
task_num = len(tasks)
else:
raise TypeError(
'"tasks" must be an iterable object or a (iterator, int) tuple')
pool = init_pool(nproc, initializer, initargs)
start = not skip_first
task_num -= nproc * chunksize * int(skip_first)
prog_bar = ProgressBar(task_num, bar_width, start)
results = []
if keep_order:
gen = pool.imap(func, tasks, chunksize)
else:
gen = pool.imap_unordered(func, tasks, chunksize)
for result in gen:
results.append(result)
if skip_first:
if len(results) < nproc * chunksize:
continue
elif len(results) == nproc * chunksize:
prog_bar.start()
continue
prog_bar.update()
sys.stdout.write('\n')
pool.close()
pool.join()
return results
|
[
"Track",
"the",
"progress",
"of",
"parallel",
"task",
"execution",
"with",
"a",
"progress",
"bar",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/utils/progressbar.py#L108-L174
|
[
"def",
"track_parallel_progress",
"(",
"func",
",",
"tasks",
",",
"nproc",
",",
"initializer",
"=",
"None",
",",
"initargs",
"=",
"None",
",",
"bar_width",
"=",
"50",
",",
"chunksize",
"=",
"1",
",",
"skip_first",
"=",
"False",
",",
"keep_order",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"tasks",
",",
"tuple",
")",
":",
"assert",
"len",
"(",
"tasks",
")",
"==",
"2",
"assert",
"isinstance",
"(",
"tasks",
"[",
"0",
"]",
",",
"collections_abc",
".",
"Iterable",
")",
"assert",
"isinstance",
"(",
"tasks",
"[",
"1",
"]",
",",
"int",
")",
"task_num",
"=",
"tasks",
"[",
"1",
"]",
"tasks",
"=",
"tasks",
"[",
"0",
"]",
"elif",
"isinstance",
"(",
"tasks",
",",
"collections_abc",
".",
"Iterable",
")",
":",
"task_num",
"=",
"len",
"(",
"tasks",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'\"tasks\" must be an iterable object or a (iterator, int) tuple'",
")",
"pool",
"=",
"init_pool",
"(",
"nproc",
",",
"initializer",
",",
"initargs",
")",
"start",
"=",
"not",
"skip_first",
"task_num",
"-=",
"nproc",
"*",
"chunksize",
"*",
"int",
"(",
"skip_first",
")",
"prog_bar",
"=",
"ProgressBar",
"(",
"task_num",
",",
"bar_width",
",",
"start",
")",
"results",
"=",
"[",
"]",
"if",
"keep_order",
":",
"gen",
"=",
"pool",
".",
"imap",
"(",
"func",
",",
"tasks",
",",
"chunksize",
")",
"else",
":",
"gen",
"=",
"pool",
".",
"imap_unordered",
"(",
"func",
",",
"tasks",
",",
"chunksize",
")",
"for",
"result",
"in",
"gen",
":",
"results",
".",
"append",
"(",
"result",
")",
"if",
"skip_first",
":",
"if",
"len",
"(",
"results",
")",
"<",
"nproc",
"*",
"chunksize",
":",
"continue",
"elif",
"len",
"(",
"results",
")",
"==",
"nproc",
"*",
"chunksize",
":",
"prog_bar",
".",
"start",
"(",
")",
"continue",
"prog_bar",
".",
"update",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n'",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"return",
"results"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
imflip
|
Flip an image horizontally or vertically.
Args:
img (ndarray): Image to be flipped.
direction (str): The flip direction, either "horizontal" or "vertical".
Returns:
ndarray: The flipped image.
|
mmcv/image/transforms/geometry.py
|
def imflip(img, direction='horizontal'):
"""Flip an image horizontally or vertically.
Args:
img (ndarray): Image to be flipped.
direction (str): The flip direction, either "horizontal" or "vertical".
Returns:
ndarray: The flipped image.
"""
assert direction in ['horizontal', 'vertical']
if direction == 'horizontal':
return np.flip(img, axis=1)
else:
return np.flip(img, axis=0)
|
def imflip(img, direction='horizontal'):
"""Flip an image horizontally or vertically.
Args:
img (ndarray): Image to be flipped.
direction (str): The flip direction, either "horizontal" or "vertical".
Returns:
ndarray: The flipped image.
"""
assert direction in ['horizontal', 'vertical']
if direction == 'horizontal':
return np.flip(img, axis=1)
else:
return np.flip(img, axis=0)
|
[
"Flip",
"an",
"image",
"horizontally",
"or",
"vertically",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/geometry.py#L7-L21
|
[
"def",
"imflip",
"(",
"img",
",",
"direction",
"=",
"'horizontal'",
")",
":",
"assert",
"direction",
"in",
"[",
"'horizontal'",
",",
"'vertical'",
"]",
"if",
"direction",
"==",
"'horizontal'",
":",
"return",
"np",
".",
"flip",
"(",
"img",
",",
"axis",
"=",
"1",
")",
"else",
":",
"return",
"np",
".",
"flip",
"(",
"img",
",",
"axis",
"=",
"0",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
imrotate
|
Rotate an image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees, positive values mean
clockwise rotation.
center (tuple): Center of the rotation in the source image, by default
it is the center of the image.
scale (float): Isotropic scale factor.
border_value (int): Border value.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image.
Returns:
ndarray: The rotated image.
|
mmcv/image/transforms/geometry.py
|
def imrotate(img,
angle,
center=None,
scale=1.0,
border_value=0,
auto_bound=False):
"""Rotate an image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees, positive values mean
clockwise rotation.
center (tuple): Center of the rotation in the source image, by default
it is the center of the image.
scale (float): Isotropic scale factor.
border_value (int): Border value.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image.
Returns:
ndarray: The rotated image.
"""
if center is not None and auto_bound:
raise ValueError('`auto_bound` conflicts with `center`')
h, w = img.shape[:2]
if center is None:
center = ((w - 1) * 0.5, (h - 1) * 0.5)
assert isinstance(center, tuple)
matrix = cv2.getRotationMatrix2D(center, -angle, scale)
if auto_bound:
cos = np.abs(matrix[0, 0])
sin = np.abs(matrix[0, 1])
new_w = h * sin + w * cos
new_h = h * cos + w * sin
matrix[0, 2] += (new_w - w) * 0.5
matrix[1, 2] += (new_h - h) * 0.5
w = int(np.round(new_w))
h = int(np.round(new_h))
rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)
return rotated
|
def imrotate(img,
angle,
center=None,
scale=1.0,
border_value=0,
auto_bound=False):
"""Rotate an image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees, positive values mean
clockwise rotation.
center (tuple): Center of the rotation in the source image, by default
it is the center of the image.
scale (float): Isotropic scale factor.
border_value (int): Border value.
auto_bound (bool): Whether to adjust the image size to cover the whole
rotated image.
Returns:
ndarray: The rotated image.
"""
if center is not None and auto_bound:
raise ValueError('`auto_bound` conflicts with `center`')
h, w = img.shape[:2]
if center is None:
center = ((w - 1) * 0.5, (h - 1) * 0.5)
assert isinstance(center, tuple)
matrix = cv2.getRotationMatrix2D(center, -angle, scale)
if auto_bound:
cos = np.abs(matrix[0, 0])
sin = np.abs(matrix[0, 1])
new_w = h * sin + w * cos
new_h = h * cos + w * sin
matrix[0, 2] += (new_w - w) * 0.5
matrix[1, 2] += (new_h - h) * 0.5
w = int(np.round(new_w))
h = int(np.round(new_h))
rotated = cv2.warpAffine(img, matrix, (w, h), borderValue=border_value)
return rotated
|
[
"Rotate",
"an",
"image",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/geometry.py#L24-L64
|
[
"def",
"imrotate",
"(",
"img",
",",
"angle",
",",
"center",
"=",
"None",
",",
"scale",
"=",
"1.0",
",",
"border_value",
"=",
"0",
",",
"auto_bound",
"=",
"False",
")",
":",
"if",
"center",
"is",
"not",
"None",
"and",
"auto_bound",
":",
"raise",
"ValueError",
"(",
"'`auto_bound` conflicts with `center`'",
")",
"h",
",",
"w",
"=",
"img",
".",
"shape",
"[",
":",
"2",
"]",
"if",
"center",
"is",
"None",
":",
"center",
"=",
"(",
"(",
"w",
"-",
"1",
")",
"*",
"0.5",
",",
"(",
"h",
"-",
"1",
")",
"*",
"0.5",
")",
"assert",
"isinstance",
"(",
"center",
",",
"tuple",
")",
"matrix",
"=",
"cv2",
".",
"getRotationMatrix2D",
"(",
"center",
",",
"-",
"angle",
",",
"scale",
")",
"if",
"auto_bound",
":",
"cos",
"=",
"np",
".",
"abs",
"(",
"matrix",
"[",
"0",
",",
"0",
"]",
")",
"sin",
"=",
"np",
".",
"abs",
"(",
"matrix",
"[",
"0",
",",
"1",
"]",
")",
"new_w",
"=",
"h",
"*",
"sin",
"+",
"w",
"*",
"cos",
"new_h",
"=",
"h",
"*",
"cos",
"+",
"w",
"*",
"sin",
"matrix",
"[",
"0",
",",
"2",
"]",
"+=",
"(",
"new_w",
"-",
"w",
")",
"*",
"0.5",
"matrix",
"[",
"1",
",",
"2",
"]",
"+=",
"(",
"new_h",
"-",
"h",
")",
"*",
"0.5",
"w",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"new_w",
")",
")",
"h",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"new_h",
")",
")",
"rotated",
"=",
"cv2",
".",
"warpAffine",
"(",
"img",
",",
"matrix",
",",
"(",
"w",
",",
"h",
")",
",",
"borderValue",
"=",
"border_value",
")",
"return",
"rotated"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
bbox_clip
|
Clip bboxes to fit the image shape.
Args:
bboxes (ndarray): Shape (..., 4*k)
img_shape (tuple): (height, width) of the image.
Returns:
ndarray: Clipped bboxes.
|
mmcv/image/transforms/geometry.py
|
def bbox_clip(bboxes, img_shape):
"""Clip bboxes to fit the image shape.
Args:
bboxes (ndarray): Shape (..., 4*k)
img_shape (tuple): (height, width) of the image.
Returns:
ndarray: Clipped bboxes.
"""
assert bboxes.shape[-1] % 4 == 0
clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)
clipped_bboxes[..., 0::2] = np.maximum(
np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0)
clipped_bboxes[..., 1::2] = np.maximum(
np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0)
return clipped_bboxes
|
def bbox_clip(bboxes, img_shape):
"""Clip bboxes to fit the image shape.
Args:
bboxes (ndarray): Shape (..., 4*k)
img_shape (tuple): (height, width) of the image.
Returns:
ndarray: Clipped bboxes.
"""
assert bboxes.shape[-1] % 4 == 0
clipped_bboxes = np.empty_like(bboxes, dtype=bboxes.dtype)
clipped_bboxes[..., 0::2] = np.maximum(
np.minimum(bboxes[..., 0::2], img_shape[1] - 1), 0)
clipped_bboxes[..., 1::2] = np.maximum(
np.minimum(bboxes[..., 1::2], img_shape[0] - 1), 0)
return clipped_bboxes
|
[
"Clip",
"bboxes",
"to",
"fit",
"the",
"image",
"shape",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/geometry.py#L67-L83
|
[
"def",
"bbox_clip",
"(",
"bboxes",
",",
"img_shape",
")",
":",
"assert",
"bboxes",
".",
"shape",
"[",
"-",
"1",
"]",
"%",
"4",
"==",
"0",
"clipped_bboxes",
"=",
"np",
".",
"empty_like",
"(",
"bboxes",
",",
"dtype",
"=",
"bboxes",
".",
"dtype",
")",
"clipped_bboxes",
"[",
"...",
",",
"0",
":",
":",
"2",
"]",
"=",
"np",
".",
"maximum",
"(",
"np",
".",
"minimum",
"(",
"bboxes",
"[",
"...",
",",
"0",
":",
":",
"2",
"]",
",",
"img_shape",
"[",
"1",
"]",
"-",
"1",
")",
",",
"0",
")",
"clipped_bboxes",
"[",
"...",
",",
"1",
":",
":",
"2",
"]",
"=",
"np",
".",
"maximum",
"(",
"np",
".",
"minimum",
"(",
"bboxes",
"[",
"...",
",",
"1",
":",
":",
"2",
"]",
",",
"img_shape",
"[",
"0",
"]",
"-",
"1",
")",
",",
"0",
")",
"return",
"clipped_bboxes"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
bbox_scaling
|
Scaling bboxes w.r.t the box center.
Args:
bboxes (ndarray): Shape(..., 4).
scale (float): Scaling factor.
clip_shape (tuple, optional): If specified, bboxes that exceed the
boundary will be clipped according to the given shape (h, w).
Returns:
ndarray: Scaled bboxes.
|
mmcv/image/transforms/geometry.py
|
def bbox_scaling(bboxes, scale, clip_shape=None):
"""Scaling bboxes w.r.t the box center.
Args:
bboxes (ndarray): Shape(..., 4).
scale (float): Scaling factor.
clip_shape (tuple, optional): If specified, bboxes that exceed the
boundary will be clipped according to the given shape (h, w).
Returns:
ndarray: Scaled bboxes.
"""
if float(scale) == 1.0:
scaled_bboxes = bboxes.copy()
else:
w = bboxes[..., 2] - bboxes[..., 0] + 1
h = bboxes[..., 3] - bboxes[..., 1] + 1
dw = (w * (scale - 1)) * 0.5
dh = (h * (scale - 1)) * 0.5
scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
if clip_shape is not None:
return bbox_clip(scaled_bboxes, clip_shape)
else:
return scaled_bboxes
|
def bbox_scaling(bboxes, scale, clip_shape=None):
"""Scaling bboxes w.r.t the box center.
Args:
bboxes (ndarray): Shape(..., 4).
scale (float): Scaling factor.
clip_shape (tuple, optional): If specified, bboxes that exceed the
boundary will be clipped according to the given shape (h, w).
Returns:
ndarray: Scaled bboxes.
"""
if float(scale) == 1.0:
scaled_bboxes = bboxes.copy()
else:
w = bboxes[..., 2] - bboxes[..., 0] + 1
h = bboxes[..., 3] - bboxes[..., 1] + 1
dw = (w * (scale - 1)) * 0.5
dh = (h * (scale - 1)) * 0.5
scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
if clip_shape is not None:
return bbox_clip(scaled_bboxes, clip_shape)
else:
return scaled_bboxes
|
[
"Scaling",
"bboxes",
"w",
".",
"r",
".",
"t",
"the",
"box",
"center",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/geometry.py#L86-L109
|
[
"def",
"bbox_scaling",
"(",
"bboxes",
",",
"scale",
",",
"clip_shape",
"=",
"None",
")",
":",
"if",
"float",
"(",
"scale",
")",
"==",
"1.0",
":",
"scaled_bboxes",
"=",
"bboxes",
".",
"copy",
"(",
")",
"else",
":",
"w",
"=",
"bboxes",
"[",
"...",
",",
"2",
"]",
"-",
"bboxes",
"[",
"...",
",",
"0",
"]",
"+",
"1",
"h",
"=",
"bboxes",
"[",
"...",
",",
"3",
"]",
"-",
"bboxes",
"[",
"...",
",",
"1",
"]",
"+",
"1",
"dw",
"=",
"(",
"w",
"*",
"(",
"scale",
"-",
"1",
")",
")",
"*",
"0.5",
"dh",
"=",
"(",
"h",
"*",
"(",
"scale",
"-",
"1",
")",
")",
"*",
"0.5",
"scaled_bboxes",
"=",
"bboxes",
"+",
"np",
".",
"stack",
"(",
"(",
"-",
"dw",
",",
"-",
"dh",
",",
"dw",
",",
"dh",
")",
",",
"axis",
"=",
"-",
"1",
")",
"if",
"clip_shape",
"is",
"not",
"None",
":",
"return",
"bbox_clip",
"(",
"scaled_bboxes",
",",
"clip_shape",
")",
"else",
":",
"return",
"scaled_bboxes"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
imcrop
|
Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
scale (float, optional): Scale ratio of bboxes, the default value
1.0 means no padding.
pad_fill (number or list): Value to be filled for padding, None for
no padding.
Returns:
list or ndarray: The cropped image patches.
|
mmcv/image/transforms/geometry.py
|
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
"""Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
scale (float, optional): Scale ratio of bboxes, the default value
1.0 means no padding.
pad_fill (number or list): Value to be filled for padding, None for
no padding.
Returns:
list or ndarray: The cropped image patches.
"""
chn = 1 if img.ndim == 2 else img.shape[2]
if pad_fill is not None:
if isinstance(pad_fill, (int, float)):
pad_fill = [pad_fill for _ in range(chn)]
assert len(pad_fill) == chn
_bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
patches = []
for i in range(clipped_bbox.shape[0]):
x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
if pad_fill is None:
patch = img[y1:y2 + 1, x1:x2 + 1, ...]
else:
_x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
if chn == 2:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
else:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
patch = np.array(
pad_fill, dtype=img.dtype) * np.ones(
patch_shape, dtype=img.dtype)
x_start = 0 if _x1 >= 0 else -_x1
y_start = 0 if _y1 >= 0 else -_y1
w = x2 - x1 + 1
h = y2 - y1 + 1
patch[y_start:y_start + h, x_start:x_start +
w, ...] = img[y1:y1 + h, x1:x1 + w, ...]
patches.append(patch)
if bboxes.ndim == 1:
return patches[0]
else:
return patches
|
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
"""Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
scale (float, optional): Scale ratio of bboxes, the default value
1.0 means no padding.
pad_fill (number or list): Value to be filled for padding, None for
no padding.
Returns:
list or ndarray: The cropped image patches.
"""
chn = 1 if img.ndim == 2 else img.shape[2]
if pad_fill is not None:
if isinstance(pad_fill, (int, float)):
pad_fill = [pad_fill for _ in range(chn)]
assert len(pad_fill) == chn
_bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
patches = []
for i in range(clipped_bbox.shape[0]):
x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
if pad_fill is None:
patch = img[y1:y2 + 1, x1:x2 + 1, ...]
else:
_x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
if chn == 2:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
else:
patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
patch = np.array(
pad_fill, dtype=img.dtype) * np.ones(
patch_shape, dtype=img.dtype)
x_start = 0 if _x1 >= 0 else -_x1
y_start = 0 if _y1 >= 0 else -_y1
w = x2 - x1 + 1
h = y2 - y1 + 1
patch[y_start:y_start + h, x_start:x_start +
w, ...] = img[y1:y1 + h, x1:x1 + w, ...]
patches.append(patch)
if bboxes.ndim == 1:
return patches[0]
else:
return patches
|
[
"Crop",
"image",
"patches",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/geometry.py#L112-L163
|
[
"def",
"imcrop",
"(",
"img",
",",
"bboxes",
",",
"scale",
"=",
"1.0",
",",
"pad_fill",
"=",
"None",
")",
":",
"chn",
"=",
"1",
"if",
"img",
".",
"ndim",
"==",
"2",
"else",
"img",
".",
"shape",
"[",
"2",
"]",
"if",
"pad_fill",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"pad_fill",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"pad_fill",
"=",
"[",
"pad_fill",
"for",
"_",
"in",
"range",
"(",
"chn",
")",
"]",
"assert",
"len",
"(",
"pad_fill",
")",
"==",
"chn",
"_bboxes",
"=",
"bboxes",
"[",
"None",
",",
"...",
"]",
"if",
"bboxes",
".",
"ndim",
"==",
"1",
"else",
"bboxes",
"scaled_bboxes",
"=",
"bbox_scaling",
"(",
"_bboxes",
",",
"scale",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"clipped_bbox",
"=",
"bbox_clip",
"(",
"scaled_bboxes",
",",
"img",
".",
"shape",
")",
"patches",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"clipped_bbox",
".",
"shape",
"[",
"0",
"]",
")",
":",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
"=",
"tuple",
"(",
"clipped_bbox",
"[",
"i",
",",
":",
"]",
")",
"if",
"pad_fill",
"is",
"None",
":",
"patch",
"=",
"img",
"[",
"y1",
":",
"y2",
"+",
"1",
",",
"x1",
":",
"x2",
"+",
"1",
",",
"...",
"]",
"else",
":",
"_x1",
",",
"_y1",
",",
"_x2",
",",
"_y2",
"=",
"tuple",
"(",
"scaled_bboxes",
"[",
"i",
",",
":",
"]",
")",
"if",
"chn",
"==",
"2",
":",
"patch_shape",
"=",
"(",
"_y2",
"-",
"_y1",
"+",
"1",
",",
"_x2",
"-",
"_x1",
"+",
"1",
")",
"else",
":",
"patch_shape",
"=",
"(",
"_y2",
"-",
"_y1",
"+",
"1",
",",
"_x2",
"-",
"_x1",
"+",
"1",
",",
"chn",
")",
"patch",
"=",
"np",
".",
"array",
"(",
"pad_fill",
",",
"dtype",
"=",
"img",
".",
"dtype",
")",
"*",
"np",
".",
"ones",
"(",
"patch_shape",
",",
"dtype",
"=",
"img",
".",
"dtype",
")",
"x_start",
"=",
"0",
"if",
"_x1",
">=",
"0",
"else",
"-",
"_x1",
"y_start",
"=",
"0",
"if",
"_y1",
">=",
"0",
"else",
"-",
"_y1",
"w",
"=",
"x2",
"-",
"x1",
"+",
"1",
"h",
"=",
"y2",
"-",
"y1",
"+",
"1",
"patch",
"[",
"y_start",
":",
"y_start",
"+",
"h",
",",
"x_start",
":",
"x_start",
"+",
"w",
",",
"...",
"]",
"=",
"img",
"[",
"y1",
":",
"y1",
"+",
"h",
",",
"x1",
":",
"x1",
"+",
"w",
",",
"...",
"]",
"patches",
".",
"append",
"(",
"patch",
")",
"if",
"bboxes",
".",
"ndim",
"==",
"1",
":",
"return",
"patches",
"[",
"0",
"]",
"else",
":",
"return",
"patches"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
impad
|
Pad an image to a certain shape.
Args:
img (ndarray): Image to be padded.
shape (tuple): Expected padding shape.
pad_val (number or sequence): Values to be filled in padding areas.
Returns:
ndarray: The padded image.
|
mmcv/image/transforms/geometry.py
|
def impad(img, shape, pad_val=0):
"""Pad an image to a certain shape.
Args:
img (ndarray): Image to be padded.
shape (tuple): Expected padding shape.
pad_val (number or sequence): Values to be filled in padding areas.
Returns:
ndarray: The padded image.
"""
if not isinstance(pad_val, (int, float)):
assert len(pad_val) == img.shape[-1]
if len(shape) < len(img.shape):
shape = shape + (img.shape[-1], )
assert len(shape) == len(img.shape)
for i in range(len(shape) - 1):
assert shape[i] >= img.shape[i]
pad = np.empty(shape, dtype=img.dtype)
pad[...] = pad_val
pad[:img.shape[0], :img.shape[1], ...] = img
return pad
|
def impad(img, shape, pad_val=0):
"""Pad an image to a certain shape.
Args:
img (ndarray): Image to be padded.
shape (tuple): Expected padding shape.
pad_val (number or sequence): Values to be filled in padding areas.
Returns:
ndarray: The padded image.
"""
if not isinstance(pad_val, (int, float)):
assert len(pad_val) == img.shape[-1]
if len(shape) < len(img.shape):
shape = shape + (img.shape[-1], )
assert len(shape) == len(img.shape)
for i in range(len(shape) - 1):
assert shape[i] >= img.shape[i]
pad = np.empty(shape, dtype=img.dtype)
pad[...] = pad_val
pad[:img.shape[0], :img.shape[1], ...] = img
return pad
|
[
"Pad",
"an",
"image",
"to",
"a",
"certain",
"shape",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/geometry.py#L166-L187
|
[
"def",
"impad",
"(",
"img",
",",
"shape",
",",
"pad_val",
"=",
"0",
")",
":",
"if",
"not",
"isinstance",
"(",
"pad_val",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"assert",
"len",
"(",
"pad_val",
")",
"==",
"img",
".",
"shape",
"[",
"-",
"1",
"]",
"if",
"len",
"(",
"shape",
")",
"<",
"len",
"(",
"img",
".",
"shape",
")",
":",
"shape",
"=",
"shape",
"+",
"(",
"img",
".",
"shape",
"[",
"-",
"1",
"]",
",",
")",
"assert",
"len",
"(",
"shape",
")",
"==",
"len",
"(",
"img",
".",
"shape",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"shape",
")",
"-",
"1",
")",
":",
"assert",
"shape",
"[",
"i",
"]",
">=",
"img",
".",
"shape",
"[",
"i",
"]",
"pad",
"=",
"np",
".",
"empty",
"(",
"shape",
",",
"dtype",
"=",
"img",
".",
"dtype",
")",
"pad",
"[",
"...",
"]",
"=",
"pad_val",
"pad",
"[",
":",
"img",
".",
"shape",
"[",
"0",
"]",
",",
":",
"img",
".",
"shape",
"[",
"1",
"]",
",",
"...",
"]",
"=",
"img",
"return",
"pad"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
impad_to_multiple
|
Pad an image to ensure each edge to be multiple to some number.
Args:
img (ndarray): Image to be padded.
divisor (int): Padded image edges will be multiple to divisor.
pad_val (number or sequence): Same as :func:`impad`.
Returns:
ndarray: The padded image.
|
mmcv/image/transforms/geometry.py
|
def impad_to_multiple(img, divisor, pad_val=0):
    """Pad an image so that each spatial edge becomes a multiple of a number.

    Args:
        img (ndarray): Image to be padded.
        divisor (int): Padded image edges will be multiple to divisor.
        pad_val (number or sequence): Same as :func:`impad`.

    Returns:
        ndarray: The padded image.
    """
    # Round each spatial dimension up to the next multiple of ``divisor``
    # using integer ceiling division, then delegate the actual padding.
    target_h = ((img.shape[0] + divisor - 1) // divisor) * divisor
    target_w = ((img.shape[1] + divisor - 1) // divisor) * divisor
    return impad(img, (target_h, target_w), pad_val)
|
def impad_to_multiple(img, divisor, pad_val=0):
"""Pad an image to ensure each edge to be multiple to some number.
Args:
img (ndarray): Image to be padded.
divisor (int): Padded image edges will be multiple to divisor.
pad_val (number or sequence): Same as :func:`impad`.
Returns:
ndarray: The padded image.
"""
pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
return impad(img, (pad_h, pad_w), pad_val)
|
[
"Pad",
"an",
"image",
"to",
"ensure",
"each",
"edge",
"to",
"be",
"multiple",
"to",
"some",
"number",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/geometry.py#L190-L203
|
[
"def",
"impad_to_multiple",
"(",
"img",
",",
"divisor",
",",
"pad_val",
"=",
"0",
")",
":",
"pad_h",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"img",
".",
"shape",
"[",
"0",
"]",
"/",
"divisor",
")",
")",
"*",
"divisor",
"pad_w",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"img",
".",
"shape",
"[",
"1",
"]",
"/",
"divisor",
")",
")",
"*",
"divisor",
"return",
"impad",
"(",
"img",
",",
"(",
"pad_h",
",",
"pad_w",
")",
",",
"pad_val",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
_scale_size
|
Rescale a size by a ratio.
Args:
size (tuple): w, h.
scale (float): Scaling factor.
Returns:
tuple[int]: scaled size.
|
mmcv/image/transforms/resize.py
|
def _scale_size(size, scale):
"""Rescale a size by a ratio.
Args:
size (tuple): w, h.
scale (float): Scaling factor.
Returns:
tuple[int]: scaled size.
"""
w, h = size
return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)
|
def _scale_size(size, scale):
"""Rescale a size by a ratio.
Args:
size (tuple): w, h.
scale (float): Scaling factor.
Returns:
tuple[int]: scaled size.
"""
w, h = size
return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5)
|
[
"Rescale",
"a",
"size",
"by",
"a",
"ratio",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/resize.py#L6-L17
|
[
"def",
"_scale_size",
"(",
"size",
",",
"scale",
")",
":",
"w",
",",
"h",
"=",
"size",
"return",
"int",
"(",
"w",
"*",
"float",
"(",
"scale",
")",
"+",
"0.5",
")",
",",
"int",
"(",
"h",
"*",
"float",
"(",
"scale",
")",
"+",
"0.5",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
imresize
|
Resize image to a given size.
Args:
img (ndarray): The input image.
size (tuple): Target (w, h).
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos".
Returns:
tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
`resized_img`.
|
mmcv/image/transforms/resize.py
|
def imresize(img, size, return_scale=False, interpolation='bilinear'):
    """Resize image to a given size.

    Args:
        img (ndarray): The input image.
        size (tuple): Target (w, h).
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Interpolation method, accepted values are
            "nearest", "bilinear", "bicubic", "area", "lanczos".

    Returns:
        tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    src_h, src_w = img.shape[:2]
    resized_img = cv2.resize(
        img, size, interpolation=interp_codes[interpolation])
    if return_scale:
        # Scales are computed against the original image dimensions;
        # note ``size`` is (w, h) while ``img.shape`` is (h, w, ...).
        return resized_img, size[0] / src_w, size[1] / src_h
    return resized_img
|
def imresize(img, size, return_scale=False, interpolation='bilinear'):
"""Resize image to a given size.
Args:
img (ndarray): The input image.
size (tuple): Target (w, h).
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos".
Returns:
tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
`resized_img`.
"""
h, w = img.shape[:2]
resized_img = cv2.resize(
img, size, interpolation=interp_codes[interpolation])
if not return_scale:
return resized_img
else:
w_scale = size[0] / w
h_scale = size[1] / h
return resized_img, w_scale, h_scale
|
[
"Resize",
"image",
"to",
"a",
"given",
"size",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/resize.py#L29-L51
|
[
"def",
"imresize",
"(",
"img",
",",
"size",
",",
"return_scale",
"=",
"False",
",",
"interpolation",
"=",
"'bilinear'",
")",
":",
"h",
",",
"w",
"=",
"img",
".",
"shape",
"[",
":",
"2",
"]",
"resized_img",
"=",
"cv2",
".",
"resize",
"(",
"img",
",",
"size",
",",
"interpolation",
"=",
"interp_codes",
"[",
"interpolation",
"]",
")",
"if",
"not",
"return_scale",
":",
"return",
"resized_img",
"else",
":",
"w_scale",
"=",
"size",
"[",
"0",
"]",
"/",
"w",
"h_scale",
"=",
"size",
"[",
"1",
"]",
"/",
"h",
"return",
"resized_img",
",",
"w_scale",
",",
"h_scale"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
imresize_like
|
Resize image to the same size of a given image.
Args:
img (ndarray): The input image.
dst_img (ndarray): The target image.
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Same as :func:`resize`.
Returns:
tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
`resized_img`.
|
mmcv/image/transforms/resize.py
|
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):
    """Resize an image to match the spatial size of another image.

    Args:
        img (ndarray): The input image.
        dst_img (ndarray): The target image.
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Same as :func:`resize`.

    Returns:
        tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    # ``dst_img.shape`` is (h, w, ...) but :func:`imresize` takes (w, h).
    target_h, target_w = dst_img.shape[:2]
    return imresize(img, (target_w, target_h), return_scale, interpolation)
|
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'):
"""Resize image to the same size of a given image.
Args:
img (ndarray): The input image.
dst_img (ndarray): The target image.
return_scale (bool): Whether to return `w_scale` and `h_scale`.
interpolation (str): Same as :func:`resize`.
Returns:
tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
`resized_img`.
"""
h, w = dst_img.shape[:2]
return imresize(img, (w, h), return_scale, interpolation)
|
[
"Resize",
"image",
"to",
"the",
"same",
"size",
"of",
"a",
"given",
"image",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/resize.py#L54-L68
|
[
"def",
"imresize_like",
"(",
"img",
",",
"dst_img",
",",
"return_scale",
"=",
"False",
",",
"interpolation",
"=",
"'bilinear'",
")",
":",
"h",
",",
"w",
"=",
"dst_img",
".",
"shape",
"[",
":",
"2",
"]",
"return",
"imresize",
"(",
"img",
",",
"(",
"w",
",",
"h",
")",
",",
"return_scale",
",",
"interpolation",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
imrescale
|
Resize image while keeping the aspect ratio.
Args:
img (ndarray): The input image.
scale (float or tuple[int]): The scaling factor or maximum size.
If it is a float number, then the image will be rescaled by this
factor, else if it is a tuple of 2 integers, then the image will
be rescaled as large as possible within the scale.
return_scale (bool): Whether to return the scaling factor besides the
rescaled image.
interpolation (str): Same as :func:`resize`.
Returns:
ndarray: The rescaled image.
|
mmcv/image/transforms/resize.py
|
def imrescale(img, scale, return_scale=False, interpolation='bilinear'):
    """Resize image while keeping the aspect ratio.

    Args:
        img (ndarray): The input image.
        scale (float or tuple[int]): The scaling factor or maximum size.
            If it is a float number, then the image will be rescaled by this
            factor, else if it is a tuple of 2 integers, then the image will
            be rescaled as large as possible within the scale.
        return_scale (bool): Whether to return the scaling factor besides the
            rescaled image.
        interpolation (str): Same as :func:`resize`.

    Returns:
        ndarray: The rescaled image.
    """
    h, w = img.shape[:2]
    if isinstance(scale, (float, int)):
        if scale <= 0:
            raise ValueError(
                'Invalid scale {}, must be positive.'.format(scale))
        scale_factor = scale
    elif isinstance(scale, tuple):
        # Fit the image inside the (long, short) bounds: pick the factor
        # that keeps both edges within the given maximum size.
        long_edge, short_edge = max(scale), min(scale)
        scale_factor = min(long_edge / max(h, w), short_edge / min(h, w))
    else:
        raise TypeError(
            'Scale must be a number or tuple of int, but got {}'.format(
                type(scale)))
    rescaled_img = imresize(
        img, _scale_size((w, h), scale_factor), interpolation=interpolation)
    return (rescaled_img, scale_factor) if return_scale else rescaled_img
|
def imrescale(img, scale, return_scale=False, interpolation='bilinear'):
"""Resize image while keeping the aspect ratio.
Args:
img (ndarray): The input image.
scale (float or tuple[int]): The scaling factor or maximum size.
If it is a float number, then the image will be rescaled by this
factor, else if it is a tuple of 2 integers, then the image will
be rescaled as large as possible within the scale.
return_scale (bool): Whether to return the scaling factor besides the
rescaled image.
interpolation (str): Same as :func:`resize`.
Returns:
ndarray: The rescaled image.
"""
h, w = img.shape[:2]
if isinstance(scale, (float, int)):
if scale <= 0:
raise ValueError(
'Invalid scale {}, must be positive.'.format(scale))
scale_factor = scale
elif isinstance(scale, tuple):
max_long_edge = max(scale)
max_short_edge = min(scale)
scale_factor = min(max_long_edge / max(h, w),
max_short_edge / min(h, w))
else:
raise TypeError(
'Scale must be a number or tuple of int, but got {}'.format(
type(scale)))
new_size = _scale_size((w, h), scale_factor)
rescaled_img = imresize(img, new_size, interpolation=interpolation)
if return_scale:
return rescaled_img, scale_factor
else:
return rescaled_img
|
[
"Resize",
"image",
"while",
"keeping",
"the",
"aspect",
"ratio",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/image/transforms/resize.py#L71-L107
|
[
"def",
"imrescale",
"(",
"img",
",",
"scale",
",",
"return_scale",
"=",
"False",
",",
"interpolation",
"=",
"'bilinear'",
")",
":",
"h",
",",
"w",
"=",
"img",
".",
"shape",
"[",
":",
"2",
"]",
"if",
"isinstance",
"(",
"scale",
",",
"(",
"float",
",",
"int",
")",
")",
":",
"if",
"scale",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"'Invalid scale {}, must be positive.'",
".",
"format",
"(",
"scale",
")",
")",
"scale_factor",
"=",
"scale",
"elif",
"isinstance",
"(",
"scale",
",",
"tuple",
")",
":",
"max_long_edge",
"=",
"max",
"(",
"scale",
")",
"max_short_edge",
"=",
"min",
"(",
"scale",
")",
"scale_factor",
"=",
"min",
"(",
"max_long_edge",
"/",
"max",
"(",
"h",
",",
"w",
")",
",",
"max_short_edge",
"/",
"min",
"(",
"h",
",",
"w",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Scale must be a number or tuple of int, but got {}'",
".",
"format",
"(",
"type",
"(",
"scale",
")",
")",
")",
"new_size",
"=",
"_scale_size",
"(",
"(",
"w",
",",
"h",
")",
",",
"scale_factor",
")",
"rescaled_img",
"=",
"imresize",
"(",
"img",
",",
"new_size",
",",
"interpolation",
"=",
"interpolation",
")",
"if",
"return_scale",
":",
"return",
"rescaled_img",
",",
"scale_factor",
"else",
":",
"return",
"rescaled_img"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
load
|
Load data from json/yaml/pickle files.
This method provides a unified api for loading data from serialized files.
Args:
file (str or file-like object): Filename or a file-like object.
file_format (str, optional): If not specified, the file format will be
inferred from the file extension, otherwise use the specified one.
Currently supported formats include "json", "yaml/yml" and
"pickle/pkl".
Returns:
The content from the file.
|
mmcv/fileio/io.py
|
def load(file, file_format=None, **kwargs):
    """Load data from json/yaml/pickle files.

    This method provides a unified api for loading data from serialized files.

    Args:
        file (str or file-like object): Filename or a file-like object.
        file_format (str, optional): If not specified, the file format will be
            inferred from the file extension, otherwise use the specified one.
            Currently supported formats include "json", "yaml/yml" and
            "pickle/pkl".

    Returns:
        The content from the file.
    """
    if file_format is None and is_str(file):
        # Infer the format from the filename extension.
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError('Unsupported format: {}'.format(file_format))
    handler = file_handlers[file_format]
    if is_str(file):
        return handler.load_from_path(file, **kwargs)
    if hasattr(file, 'read'):
        return handler.load_from_fileobj(file, **kwargs)
    raise TypeError('"file" must be a filepath str or a file-object')
|
def load(file, file_format=None, **kwargs):
"""Load data from json/yaml/pickle files.
This method provides a unified api for loading data from serialized files.
Args:
file (str or file-like object): Filename or a file-like object.
file_format (str, optional): If not specified, the file format will be
inferred from the file extension, otherwise use the specified one.
Currently supported formats include "json", "yaml/yml" and
"pickle/pkl".
Returns:
The content from the file.
"""
if file_format is None and is_str(file):
file_format = file.split('.')[-1]
if file_format not in file_handlers:
raise TypeError('Unsupported format: {}'.format(file_format))
handler = file_handlers[file_format]
if is_str(file):
obj = handler.load_from_path(file, **kwargs)
elif hasattr(file, 'read'):
obj = handler.load_from_fileobj(file, **kwargs)
else:
raise TypeError('"file" must be a filepath str or a file-object')
return obj
|
[
"Load",
"data",
"from",
"json",
"/",
"yaml",
"/",
"pickle",
"files",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/fileio/io.py#L13-L40
|
[
"def",
"load",
"(",
"file",
",",
"file_format",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"file_format",
"is",
"None",
"and",
"is_str",
"(",
"file",
")",
":",
"file_format",
"=",
"file",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"file_format",
"not",
"in",
"file_handlers",
":",
"raise",
"TypeError",
"(",
"'Unsupported format: {}'",
".",
"format",
"(",
"file_format",
")",
")",
"handler",
"=",
"file_handlers",
"[",
"file_format",
"]",
"if",
"is_str",
"(",
"file",
")",
":",
"obj",
"=",
"handler",
".",
"load_from_path",
"(",
"file",
",",
"*",
"*",
"kwargs",
")",
"elif",
"hasattr",
"(",
"file",
",",
"'read'",
")",
":",
"obj",
"=",
"handler",
".",
"load_from_fileobj",
"(",
"file",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'\"file\" must be a filepath str or a file-object'",
")",
"return",
"obj"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
dump
|
Dump data to json/yaml/pickle strings or files.
This method provides a unified api for dumping data as strings or to files,
and also supports custom arguments for each file format.
Args:
obj (any): The python object to be dumped.
file (str or file-like object, optional): If not specified, then the
object is dump to a str, otherwise to a file specified by the
filename or file-like object.
file_format (str, optional): Same as :func:`load`.
Returns:
bool: True for success, False otherwise.
|
mmcv/fileio/io.py
|
def dump(obj, file=None, file_format=None, **kwargs):
    """Dump data to json/yaml/pickle strings or files.

    This method provides a unified api for dumping data as strings or to
    files, and also supports custom arguments for each file format.

    Args:
        obj (any): The python object to be dumped.
        file (str or file-like object, optional): If not specified, then the
            object is dump to a str, otherwise to a file specified by the
            filename or file-like object.
        file_format (str, optional): Same as :func:`load`.

    Returns:
        str or None: The serialized string when ``file`` is None, otherwise
            None (the data is written to ``file``).
    """
    if file_format is None:
        if is_str(file):
            # Infer the format from the filename extension.
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError(
                'file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError('Unsupported format: {}'.format(file_format))
    handler = file_handlers[file_format]
    if file is None:
        return handler.dump_to_str(obj, **kwargs)
    if is_str(file):
        handler.dump_to_path(obj, file, **kwargs)
    elif hasattr(file, 'write'):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
|
def dump(obj, file=None, file_format=None, **kwargs):
"""Dump data to json/yaml/pickle strings or files.
This method provides a unified api for dumping data as strings or to files,
and also supports custom arguments for each file format.
Args:
obj (any): The python object to be dumped.
file (str or file-like object, optional): If not specified, then the
object is dump to a str, otherwise to a file specified by the
filename or file-like object.
file_format (str, optional): Same as :func:`load`.
Returns:
bool: True for success, False otherwise.
"""
if file_format is None:
if is_str(file):
file_format = file.split('.')[-1]
elif file is None:
raise ValueError(
'file_format must be specified since file is None')
if file_format not in file_handlers:
raise TypeError('Unsupported format: {}'.format(file_format))
handler = file_handlers[file_format]
if file is None:
return handler.dump_to_str(obj, **kwargs)
elif is_str(file):
handler.dump_to_path(obj, file, **kwargs)
elif hasattr(file, 'write'):
handler.dump_to_fileobj(obj, file, **kwargs)
else:
raise TypeError('"file" must be a filename str or a file-object')
|
[
"Dump",
"data",
"to",
"json",
"/",
"yaml",
"/",
"pickle",
"strings",
"or",
"files",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/fileio/io.py#L43-L76
|
[
"def",
"dump",
"(",
"obj",
",",
"file",
"=",
"None",
",",
"file_format",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"file_format",
"is",
"None",
":",
"if",
"is_str",
"(",
"file",
")",
":",
"file_format",
"=",
"file",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"elif",
"file",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'file_format must be specified since file is None'",
")",
"if",
"file_format",
"not",
"in",
"file_handlers",
":",
"raise",
"TypeError",
"(",
"'Unsupported format: {}'",
".",
"format",
"(",
"file_format",
")",
")",
"handler",
"=",
"file_handlers",
"[",
"file_format",
"]",
"if",
"file",
"is",
"None",
":",
"return",
"handler",
".",
"dump_to_str",
"(",
"obj",
",",
"*",
"*",
"kwargs",
")",
"elif",
"is_str",
"(",
"file",
")",
":",
"handler",
".",
"dump_to_path",
"(",
"obj",
",",
"file",
",",
"*",
"*",
"kwargs",
")",
"elif",
"hasattr",
"(",
"file",
",",
"'write'",
")",
":",
"handler",
".",
"dump_to_fileobj",
"(",
"obj",
",",
"file",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'\"file\" must be a filename str or a file-object'",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
_register_handler
|
Register a handler for some file extensions.
Args:
handler (:obj:`BaseFileHandler`): Handler to be registered.
file_formats (str or list[str]): File formats to be handled by this
handler.
|
mmcv/fileio/io.py
|
def _register_handler(handler, file_formats):
    """Register a handler for some file extensions.

    Args:
        handler (:obj:`BaseFileHandler`): Handler to be registered.
        file_formats (str or list[str]): File formats to be handled by this
            handler.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError(
            'handler must be a child of BaseFileHandler, not {}'.format(
                type(handler)))
    # Normalize a single extension string into a one-element list.
    formats = [file_formats] if isinstance(file_formats, str) else file_formats
    if not is_list_of(formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    file_handlers.update({fmt: handler for fmt in formats})
|
def _register_handler(handler, file_formats):
"""Register a handler for some file extensions.
Args:
handler (:obj:`BaseFileHandler`): Handler to be registered.
file_formats (str or list[str]): File formats to be handled by this
handler.
"""
if not isinstance(handler, BaseFileHandler):
raise TypeError(
'handler must be a child of BaseFileHandler, not {}'.format(
type(handler)))
if isinstance(file_formats, str):
file_formats = [file_formats]
if not is_list_of(file_formats, str):
raise TypeError('file_formats must be a str or a list of str')
for ext in file_formats:
file_handlers[ext] = handler
|
[
"Register",
"a",
"handler",
"for",
"some",
"file",
"extensions",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/fileio/io.py#L79-L96
|
[
"def",
"_register_handler",
"(",
"handler",
",",
"file_formats",
")",
":",
"if",
"not",
"isinstance",
"(",
"handler",
",",
"BaseFileHandler",
")",
":",
"raise",
"TypeError",
"(",
"'handler must be a child of BaseFileHandler, not {}'",
".",
"format",
"(",
"type",
"(",
"handler",
")",
")",
")",
"if",
"isinstance",
"(",
"file_formats",
",",
"str",
")",
":",
"file_formats",
"=",
"[",
"file_formats",
"]",
"if",
"not",
"is_list_of",
"(",
"file_formats",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"'file_formats must be a str or a list of str'",
")",
"for",
"ext",
"in",
"file_formats",
":",
"file_handlers",
"[",
"ext",
"]",
"=",
"handler"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
get_priority
|
Get priority value.
Args:
priority (int or str or :obj:`Priority`): Priority.
Returns:
int: The priority value.
|
mmcv/runner/priority.py
|
def get_priority(priority):
    """Get priority value.

    Args:
        priority (int or str or :obj:`Priority`): Priority.

    Returns:
        int: The priority value.
    """
    if isinstance(priority, int):
        if not 0 <= priority <= 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    if isinstance(priority, Priority):
        return priority.value
    if isinstance(priority, str):
        # Enum members are upper-case; accept case-insensitive names.
        return Priority[priority.upper()].value
    raise TypeError('priority must be an integer or Priority enum value')
|
def get_priority(priority):
"""Get priority value.
Args:
priority (int or str or :obj:`Priority`): Priority.
Returns:
int: The priority value.
"""
if isinstance(priority, int):
if priority < 0 or priority > 100:
raise ValueError('priority must be between 0 and 100')
return priority
elif isinstance(priority, Priority):
return priority.value
elif isinstance(priority, str):
return Priority[priority.upper()].value
else:
raise TypeError('priority must be an integer or Priority enum value')
|
[
"Get",
"priority",
"value",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/runner/priority.py#L35-L53
|
[
"def",
"get_priority",
"(",
"priority",
")",
":",
"if",
"isinstance",
"(",
"priority",
",",
"int",
")",
":",
"if",
"priority",
"<",
"0",
"or",
"priority",
">",
"100",
":",
"raise",
"ValueError",
"(",
"'priority must be between 0 and 100'",
")",
"return",
"priority",
"elif",
"isinstance",
"(",
"priority",
",",
"Priority",
")",
":",
"return",
"priority",
".",
"value",
"elif",
"isinstance",
"(",
"priority",
",",
"str",
")",
":",
"return",
"Priority",
"[",
"priority",
".",
"upper",
"(",
")",
"]",
".",
"value",
"else",
":",
"raise",
"TypeError",
"(",
"'priority must be an integer or Priority enum value'",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
quantize
|
Quantize an array of (-inf, inf) to [0, levels-1].
Args:
arr (ndarray): Input array.
min_val (scalar): Minimum value to be clipped.
max_val (scalar): Maximum value to be clipped.
levels (int): Quantization levels.
dtype (np.type): The type of the quantized array.
Returns:
tuple: Quantized array.
|
mmcv/arraymisc/quantization.py
|
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
    """Quantize an array of (-inf, inf) to [0, levels-1].

    Args:
        arr (ndarray): Input array.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the quantized array.

    Returns:
        ndarray: Quantized array of ``dtype`` with values in [0, levels-1].
    """
    if not (isinstance(levels, int) and levels > 1):
        raise ValueError(
            'levels must be a positive integer, but got {}'.format(levels))
    if min_val >= max_val:
        raise ValueError(
            'min_val ({}) must be smaller than max_val ({})'.format(
                min_val, max_val))
    # Clip into [min_val, max_val] and shift so the range starts at zero.
    shifted = np.clip(arr, min_val, max_val) - min_val
    bins = np.floor(levels * shifted / (max_val - min_val)).astype(dtype)
    # The exact maximum would fall into bin ``levels``; fold it back into
    # the top bin so the output stays within [0, levels-1].
    return np.minimum(bins, levels - 1)
|
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
"""Quantize an array of (-inf, inf) to [0, levels-1].
Args:
arr (ndarray): Input array.
min_val (scalar): Minimum value to be clipped.
max_val (scalar): Maximum value to be clipped.
levels (int): Quantization levels.
dtype (np.type): The type of the quantized array.
Returns:
tuple: Quantized array.
"""
if not (isinstance(levels, int) and levels > 1):
raise ValueError(
'levels must be a positive integer, but got {}'.format(levels))
if min_val >= max_val:
raise ValueError(
'min_val ({}) must be smaller than max_val ({})'.format(
min_val, max_val))
arr = np.clip(arr, min_val, max_val) - min_val
quantized_arr = np.minimum(
np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1)
return quantized_arr
|
[
"Quantize",
"an",
"array",
"of",
"(",
"-",
"inf",
"inf",
")",
"to",
"[",
"0",
"levels",
"-",
"1",
"]",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/arraymisc/quantization.py#L4-L29
|
[
"def",
"quantize",
"(",
"arr",
",",
"min_val",
",",
"max_val",
",",
"levels",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"levels",
",",
"int",
")",
"and",
"levels",
">",
"1",
")",
":",
"raise",
"ValueError",
"(",
"'levels must be a positive integer, but got {}'",
".",
"format",
"(",
"levels",
")",
")",
"if",
"min_val",
">=",
"max_val",
":",
"raise",
"ValueError",
"(",
"'min_val ({}) must be smaller than max_val ({})'",
".",
"format",
"(",
"min_val",
",",
"max_val",
")",
")",
"arr",
"=",
"np",
".",
"clip",
"(",
"arr",
",",
"min_val",
",",
"max_val",
")",
"-",
"min_val",
"quantized_arr",
"=",
"np",
".",
"minimum",
"(",
"np",
".",
"floor",
"(",
"levels",
"*",
"arr",
"/",
"(",
"max_val",
"-",
"min_val",
")",
")",
".",
"astype",
"(",
"dtype",
")",
",",
"levels",
"-",
"1",
")",
"return",
"quantized_arr"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
dequantize
|
Dequantize an array.
Args:
arr (ndarray): Input array.
min_val (scalar): Minimum value to be clipped.
max_val (scalar): Maximum value to be clipped.
levels (int): Quantization levels.
dtype (np.type): The type of the dequantized array.
Returns:
tuple: Dequantized array.
|
mmcv/arraymisc/quantization.py
|
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
    """Dequantize an array.

    Args:
        arr (ndarray): Input array.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the dequantized array.

    Returns:
        ndarray: Dequantized array of ``dtype``.
    """
    if not (isinstance(levels, int) and levels > 1):
        raise ValueError(
            'levels must be a positive integer, but got {}'.format(levels))
    if min_val >= max_val:
        raise ValueError(
            'min_val ({}) must be smaller than max_val ({})'.format(
                min_val, max_val))
    # Map each bin index to its bin center (+0.5), then rescale the result
    # back into the [min_val, max_val] range.
    return (arr + 0.5).astype(dtype) * (max_val - min_val) / levels + min_val
|
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
"""Dequantize an array.
Args:
arr (ndarray): Input array.
min_val (scalar): Minimum value to be clipped.
max_val (scalar): Maximum value to be clipped.
levels (int): Quantization levels.
dtype (np.type): The type of the dequantized array.
Returns:
tuple: Dequantized array.
"""
if not (isinstance(levels, int) and levels > 1):
raise ValueError(
'levels must be a positive integer, but got {}'.format(levels))
if min_val >= max_val:
raise ValueError(
'min_val ({}) must be smaller than max_val ({})'.format(
min_val, max_val))
dequantized_arr = (arr + 0.5).astype(dtype) * (
max_val - min_val) / levels + min_val
return dequantized_arr
|
[
"Dequantize",
"an",
"array",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/arraymisc/quantization.py#L32-L56
|
[
"def",
"dequantize",
"(",
"arr",
",",
"min_val",
",",
"max_val",
",",
"levels",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"levels",
",",
"int",
")",
"and",
"levels",
">",
"1",
")",
":",
"raise",
"ValueError",
"(",
"'levels must be a positive integer, but got {}'",
".",
"format",
"(",
"levels",
")",
")",
"if",
"min_val",
">=",
"max_val",
":",
"raise",
"ValueError",
"(",
"'min_val ({}) must be smaller than max_val ({})'",
".",
"format",
"(",
"min_val",
",",
"max_val",
")",
")",
"dequantized_arr",
"=",
"(",
"arr",
"+",
"0.5",
")",
".",
"astype",
"(",
"dtype",
")",
"*",
"(",
"max_val",
"-",
"min_val",
")",
"/",
"levels",
"+",
"min_val",
"return",
"dequantized_arr"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
Config.auto_argparser
|
Generate argparser from config file automatically (experimental)
|
mmcv/utils/config.py
|
def auto_argparser(description=None):
    """Generate argparser from config file automatically (experimental)

    A bootstrap parser first extracts only the config path from the command
    line; the loaded config is then used to populate the full parser.
    """
    bootstrap = ArgumentParser(description=description)
    bootstrap.add_argument('config', help='config file path')
    # parse_known_args tolerates the config-derived options that the
    # bootstrap parser does not know about yet.
    cfg = Config.from_file(bootstrap.parse_known_args()[0].config)
    parser = ArgumentParser(description=description)
    parser.add_argument('config', help='config file path')
    add_args(parser, cfg)
    return parser, cfg
|
def auto_argparser(description=None):
"""Generate argparser from config file automatically (experimental)
"""
partial_parser = ArgumentParser(description=description)
partial_parser.add_argument('config', help='config file path')
cfg_file = partial_parser.parse_known_args()[0].config
cfg = Config.from_file(cfg_file)
parser = ArgumentParser(description=description)
parser.add_argument('config', help='config file path')
add_args(parser, cfg)
return parser, cfg
|
[
"Generate",
"argparser",
"from",
"config",
"file",
"automatically",
"(",
"experimental",
")"
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/utils/config.py#L100-L110
|
[
"def",
"auto_argparser",
"(",
"description",
"=",
"None",
")",
":",
"partial_parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"partial_parser",
".",
"add_argument",
"(",
"'config'",
",",
"help",
"=",
"'config file path'",
")",
"cfg_file",
"=",
"partial_parser",
".",
"parse_known_args",
"(",
")",
"[",
"0",
"]",
".",
"config",
"cfg",
"=",
"Config",
".",
"from_file",
"(",
"cfg_file",
")",
"parser",
"=",
"ArgumentParser",
"(",
"description",
"=",
"description",
")",
"parser",
".",
"add_argument",
"(",
"'config'",
",",
"help",
"=",
"'config file path'",
")",
"add_args",
"(",
"parser",
",",
"cfg",
")",
"return",
"parser",
",",
"cfg"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
test
|
collate
|
Puts each data field into a tensor/DataContainer with outer dimension
batch size.
Extend default_collate to add support for
:type:`~mmcv.parallel.DataContainer`. There are 3 cases.
1. cpu_only = True, e.g., meta data
2. cpu_only = False, stack = True, e.g., images tensors
3. cpu_only = False, stack = False, e.g., gt bboxes
|
mmcv/parallel/collate.py
|
def collate(batch, samples_per_gpu=1):
"""Puts each data field into a tensor/DataContainer with outer dimension
batch size.
Extend default_collate to add support for
:type:`~mmcv.parallel.DataContainer`. There are 3 cases.
1. cpu_only = True, e.g., meta data
2. cpu_only = False, stack = True, e.g., images tensors
3. cpu_only = False, stack = False, e.g., gt bboxes
"""
if not isinstance(batch, collections.Sequence):
raise TypeError("{} is not supported.".format(batch.dtype))
if isinstance(batch[0], DataContainer):
assert len(batch) % samples_per_gpu == 0
stacked = []
if batch[0].cpu_only:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(
stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
elif batch[0].stack:
for i in range(0, len(batch), samples_per_gpu):
assert isinstance(batch[i].data, torch.Tensor)
# TODO: handle tensors other than 3d
assert batch[i].dim() == 3
c, h, w = batch[i].size()
for sample in batch[i:i + samples_per_gpu]:
assert c == sample.size(0)
h = max(h, sample.size(1))
w = max(w, sample.size(2))
padded_samples = [
F.pad(
sample.data,
(0, w - sample.size(2), 0, h - sample.size(1)),
value=sample.padding_value)
for sample in batch[i:i + samples_per_gpu]
]
stacked.append(default_collate(padded_samples))
else:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [collate(samples, samples_per_gpu) for samples in transposed]
elif isinstance(batch[0], collections.Mapping):
return {
key: collate([d[key] for d in batch], samples_per_gpu)
for key in batch[0]
}
else:
return default_collate(batch)
|
def collate(batch, samples_per_gpu=1):
"""Puts each data field into a tensor/DataContainer with outer dimension
batch size.
Extend default_collate to add support for
:type:`~mmcv.parallel.DataContainer`. There are 3 cases.
1. cpu_only = True, e.g., meta data
2. cpu_only = False, stack = True, e.g., images tensors
3. cpu_only = False, stack = False, e.g., gt bboxes
"""
if not isinstance(batch, collections.Sequence):
raise TypeError("{} is not supported.".format(batch.dtype))
if isinstance(batch[0], DataContainer):
assert len(batch) % samples_per_gpu == 0
stacked = []
if batch[0].cpu_only:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(
stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
elif batch[0].stack:
for i in range(0, len(batch), samples_per_gpu):
assert isinstance(batch[i].data, torch.Tensor)
# TODO: handle tensors other than 3d
assert batch[i].dim() == 3
c, h, w = batch[i].size()
for sample in batch[i:i + samples_per_gpu]:
assert c == sample.size(0)
h = max(h, sample.size(1))
w = max(w, sample.size(2))
padded_samples = [
F.pad(
sample.data,
(0, w - sample.size(2), 0, h - sample.size(1)),
value=sample.padding_value)
for sample in batch[i:i + samples_per_gpu]
]
stacked.append(default_collate(padded_samples))
else:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [collate(samples, samples_per_gpu) for samples in transposed]
elif isinstance(batch[0], collections.Mapping):
return {
key: collate([d[key] for d in batch], samples_per_gpu)
for key in batch[0]
}
else:
return default_collate(batch)
|
[
"Puts",
"each",
"data",
"field",
"into",
"a",
"tensor",
"/",
"DataContainer",
"with",
"outer",
"dimension",
"batch",
"size",
"."
] |
open-mmlab/mmcv
|
python
|
https://github.com/open-mmlab/mmcv/blob/0d77f61450aab4dde8b8585a577cc496acb95d7f/mmcv/parallel/collate.py#L10-L66
|
[
"def",
"collate",
"(",
"batch",
",",
"samples_per_gpu",
"=",
"1",
")",
":",
"if",
"not",
"isinstance",
"(",
"batch",
",",
"collections",
".",
"Sequence",
")",
":",
"raise",
"TypeError",
"(",
"\"{} is not supported.\"",
".",
"format",
"(",
"batch",
".",
"dtype",
")",
")",
"if",
"isinstance",
"(",
"batch",
"[",
"0",
"]",
",",
"DataContainer",
")",
":",
"assert",
"len",
"(",
"batch",
")",
"%",
"samples_per_gpu",
"==",
"0",
"stacked",
"=",
"[",
"]",
"if",
"batch",
"[",
"0",
"]",
".",
"cpu_only",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"batch",
")",
",",
"samples_per_gpu",
")",
":",
"stacked",
".",
"append",
"(",
"[",
"sample",
".",
"data",
"for",
"sample",
"in",
"batch",
"[",
"i",
":",
"i",
"+",
"samples_per_gpu",
"]",
"]",
")",
"return",
"DataContainer",
"(",
"stacked",
",",
"batch",
"[",
"0",
"]",
".",
"stack",
",",
"batch",
"[",
"0",
"]",
".",
"padding_value",
",",
"cpu_only",
"=",
"True",
")",
"elif",
"batch",
"[",
"0",
"]",
".",
"stack",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"batch",
")",
",",
"samples_per_gpu",
")",
":",
"assert",
"isinstance",
"(",
"batch",
"[",
"i",
"]",
".",
"data",
",",
"torch",
".",
"Tensor",
")",
"# TODO: handle tensors other than 3d",
"assert",
"batch",
"[",
"i",
"]",
".",
"dim",
"(",
")",
"==",
"3",
"c",
",",
"h",
",",
"w",
"=",
"batch",
"[",
"i",
"]",
".",
"size",
"(",
")",
"for",
"sample",
"in",
"batch",
"[",
"i",
":",
"i",
"+",
"samples_per_gpu",
"]",
":",
"assert",
"c",
"==",
"sample",
".",
"size",
"(",
"0",
")",
"h",
"=",
"max",
"(",
"h",
",",
"sample",
".",
"size",
"(",
"1",
")",
")",
"w",
"=",
"max",
"(",
"w",
",",
"sample",
".",
"size",
"(",
"2",
")",
")",
"padded_samples",
"=",
"[",
"F",
".",
"pad",
"(",
"sample",
".",
"data",
",",
"(",
"0",
",",
"w",
"-",
"sample",
".",
"size",
"(",
"2",
")",
",",
"0",
",",
"h",
"-",
"sample",
".",
"size",
"(",
"1",
")",
")",
",",
"value",
"=",
"sample",
".",
"padding_value",
")",
"for",
"sample",
"in",
"batch",
"[",
"i",
":",
"i",
"+",
"samples_per_gpu",
"]",
"]",
"stacked",
".",
"append",
"(",
"default_collate",
"(",
"padded_samples",
")",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"batch",
")",
",",
"samples_per_gpu",
")",
":",
"stacked",
".",
"append",
"(",
"[",
"sample",
".",
"data",
"for",
"sample",
"in",
"batch",
"[",
"i",
":",
"i",
"+",
"samples_per_gpu",
"]",
"]",
")",
"return",
"DataContainer",
"(",
"stacked",
",",
"batch",
"[",
"0",
"]",
".",
"stack",
",",
"batch",
"[",
"0",
"]",
".",
"padding_value",
")",
"elif",
"isinstance",
"(",
"batch",
"[",
"0",
"]",
",",
"collections",
".",
"Sequence",
")",
":",
"transposed",
"=",
"zip",
"(",
"*",
"batch",
")",
"return",
"[",
"collate",
"(",
"samples",
",",
"samples_per_gpu",
")",
"for",
"samples",
"in",
"transposed",
"]",
"elif",
"isinstance",
"(",
"batch",
"[",
"0",
"]",
",",
"collections",
".",
"Mapping",
")",
":",
"return",
"{",
"key",
":",
"collate",
"(",
"[",
"d",
"[",
"key",
"]",
"for",
"d",
"in",
"batch",
"]",
",",
"samples_per_gpu",
")",
"for",
"key",
"in",
"batch",
"[",
"0",
"]",
"}",
"else",
":",
"return",
"default_collate",
"(",
"batch",
")"
] |
0d77f61450aab4dde8b8585a577cc496acb95d7f
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.