Dataset schema (column name, feature type, and observed value-length range):

| column | type | length / stats |
|---|---|---|
| partition | string (3 classes) | |
| func_name | string | 1 - 134 |
| docstring | string | 1 - 46.9k |
| path | string | 4 - 223 |
| original_string | string | 75 - 104k |
| code | string | 75 - 104k |
| docstring_tokens | list | 1 - 1.97k |
| repo | string | 7 - 55 |
| language | string (1 class) | |
| url | string | 87 - 315 |
| code_tokens | list | 19 - 28.4k |
| sha | string | 40 |
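The rows that follow are individual function records; a minimal sketch of loading such a dump for inspection, assuming it has been exported as a local JSON-lines file (the file name below is a placeholder, not the dataset's actual location):

```python
from datasets import load_dataset  # Hugging Face `datasets` library

# Placeholder path: substitute the real export of this table.
ds = load_dataset("json", data_files={"test": "time_frequency_functions.test.jsonl"})["test"]

print(ds.column_names)                     # should list the 12 columns above
row = ds[0]
print(row["partition"], row["func_name"])  # e.g. "test", "time_to_frames"
```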
test
|
time_to_frames
|
Converts time stamps into STFT frames.
Parameters
----------
times : np.ndarray [shape=(n,)]
time (in seconds) or vector of time values
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `- n_fft / 2`
to counteract windowing effects in STFT.
.. note:: This may result in negative frame indices.
Returns
-------
frames : np.ndarray [shape=(n,), dtype=int]
Frame numbers corresponding to the given times:
`frames[i] = floor( times[i] * sr / hop_length )`
See Also
--------
frames_to_time : convert frame indices to time values
time_to_samples : convert time values to sample indices
Examples
--------
Get the frame numbers for every 100ms
>>> librosa.time_to_frames(np.arange(0, 1, 0.1),
... sr=22050, hop_length=512)
array([ 0, 4, 8, 12, 17, 21, 25, 30, 34, 38])
|
librosa/core/time_frequency.py
|
def time_to_frames(times, sr=22050, hop_length=512, n_fft=None):
"""Converts time stamps into STFT frames.
Parameters
----------
times : np.ndarray [shape=(n,)]
time (in seconds) or vector of time values
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `- n_fft / 2`
to counteract windowing effects in STFT.
.. note:: This may result in negative frame indices.
Returns
-------
frames : np.ndarray [shape=(n,), dtype=int]
Frame numbers corresponding to the given times:
`frames[i] = floor( times[i] * sr / hop_length )`
See Also
--------
frames_to_time : convert frame indices to time values
time_to_samples : convert time values to sample indices
Examples
--------
Get the frame numbers for every 100ms
>>> librosa.time_to_frames(np.arange(0, 1, 0.1),
... sr=22050, hop_length=512)
array([ 0, 4, 8, 12, 17, 21, 25, 30, 34, 38])
"""
samples = time_to_samples(times, sr=sr)
return samples_to_frames(samples, hop_length=hop_length, n_fft=n_fft)
|
def time_to_frames(times, sr=22050, hop_length=512, n_fft=None):
"""Converts time stamps into STFT frames.
Parameters
----------
times : np.ndarray [shape=(n,)]
time (in seconds) or vector of time values
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `- n_fft / 2`
to counteract windowing effects in STFT.
.. note:: This may result in negative frame indices.
Returns
-------
frames : np.ndarray [shape=(n,), dtype=int]
Frame numbers corresponding to the given times:
`frames[i] = floor( times[i] * sr / hop_length )`
See Also
--------
frames_to_time : convert frame indices to time values
time_to_samples : convert time values to sample indices
Examples
--------
Get the frame numbers for every 100ms
>>> librosa.time_to_frames(np.arange(0, 1, 0.1),
... sr=22050, hop_length=512)
array([ 0, 4, 8, 12, 17, 21, 25, 30, 34, 38])
"""
samples = time_to_samples(times, sr=sr)
return samples_to_frames(samples, hop_length=hop_length, n_fft=n_fft)
|
[
"Converts",
"time",
"stamps",
"into",
"STFT",
"frames",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L165-L209
|
[
"def",
"time_to_frames",
"(",
"times",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"n_fft",
"=",
"None",
")",
":",
"samples",
"=",
"time_to_samples",
"(",
"times",
",",
"sr",
"=",
"sr",
")",
"return",
"samples_to_frames",
"(",
"samples",
",",
"hop_length",
"=",
"hop_length",
",",
"n_fft",
"=",
"n_fft",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
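As a sanity check of the documented conversion (`frames[i] = floor(times[i] * sr / hop_length)`, with an optional `n_fft // 2` offset), here is a minimal NumPy sketch; it mirrors the docstring's formula rather than the librosa implementation itself:

```python
import numpy as np

def time_to_frames_sketch(times, sr=22050, hop_length=512, n_fft=None):
    # times -> sample indices -> frame indices, as described in the docstring
    samples = (np.asanyarray(times) * sr).astype(int)
    offset = n_fft // 2 if n_fft is not None else 0
    return np.floor_divide(samples - offset, hop_length)

print(time_to_frames_sketch(np.arange(0, 1, 0.1)))
# [ 0  4  8 12 17 21 25 30 34 38], matching the doctest above
```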
test
|
note_to_midi
|
Convert one or more spelled notes to MIDI number(s).
Notes may be spelled out with optional accidentals or octave numbers.
The leading note name is case-insensitive.
Sharps are indicated with ``#``, flats may be indicated with ``!`` or ``b``.
Parameters
----------
note : str or iterable of str
One or more note names.
round_midi : bool
- If `True`, round cent deviations to the nearest note
- Otherwise, allow for fractional midi notes
Returns
-------
midi : float or np.array
Midi note numbers corresponding to inputs.
Raises
------
ParameterError
If the input is not in valid note format
See Also
--------
midi_to_note
note_to_hz
Examples
--------
>>> librosa.note_to_midi('C')
12
>>> librosa.note_to_midi('C#3')
49
>>> librosa.note_to_midi('f4')
65
>>> librosa.note_to_midi('Bb-1')
10
>>> librosa.note_to_midi('A!8')
116
>>> # Lists of notes also work
>>> librosa.note_to_midi(['C', 'E', 'G'])
array([12, 16, 19])
|
librosa/core/time_frequency.py
|
def note_to_midi(note, round_midi=True):
'''Convert one or more spelled notes to MIDI number(s).
Notes may be spelled out with optional accidentals or octave numbers.
The leading note name is case-insensitive.
Sharps are indicated with ``#``, flats may be indicated with ``!`` or ``b``.
Parameters
----------
note : str or iterable of str
One or more note names.
round_midi : bool
- If `True`, round cent deviations to the nearest note
- Otherwise, allow for fractional midi notes
Returns
-------
midi : float or np.array
Midi note numbers corresponding to inputs.
Raises
------
ParameterError
If the input is not in valid note format
See Also
--------
midi_to_note
note_to_hz
Examples
--------
>>> librosa.note_to_midi('C')
12
>>> librosa.note_to_midi('C#3')
49
>>> librosa.note_to_midi('f4')
65
>>> librosa.note_to_midi('Bb-1')
10
>>> librosa.note_to_midi('A!8')
116
>>> # Lists of notes also work
>>> librosa.note_to_midi(['C', 'E', 'G'])
array([12, 16, 19])
'''
if not isinstance(note, six.string_types):
return np.array([note_to_midi(n, round_midi=round_midi) for n in note])
pitch_map = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}
acc_map = {'#': 1, '': 0, 'b': -1, '!': -1}
match = re.match(r'^(?P<note>[A-Ga-g])'
r'(?P<accidental>[#b!]*)'
r'(?P<octave>[+-]?\d+)?'
r'(?P<cents>[+-]\d+)?$',
note)
if not match:
raise ParameterError('Improper note format: {:s}'.format(note))
pitch = match.group('note').upper()
offset = np.sum([acc_map[o] for o in match.group('accidental')])
octave = match.group('octave')
cents = match.group('cents')
if not octave:
octave = 0
else:
octave = int(octave)
if not cents:
cents = 0
else:
cents = int(cents) * 1e-2
note_value = 12 * (octave + 1) + pitch_map[pitch] + offset + cents
if round_midi:
note_value = int(np.round(note_value))
return note_value
|
def note_to_midi(note, round_midi=True):
'''Convert one or more spelled notes to MIDI number(s).
Notes may be spelled out with optional accidentals or octave numbers.
The leading note name is case-insensitive.
Sharps are indicated with ``#``, flats may be indicated with ``!`` or ``b``.
Parameters
----------
note : str or iterable of str
One or more note names.
round_midi : bool
- If `True`, round cent deviations to the nearest note
- Otherwise, allow for fractional midi notes
Returns
-------
midi : float or np.array
Midi note numbers corresponding to inputs.
Raises
------
ParameterError
If the input is not in valid note format
See Also
--------
midi_to_note
note_to_hz
Examples
--------
>>> librosa.note_to_midi('C')
12
>>> librosa.note_to_midi('C#3')
49
>>> librosa.note_to_midi('f4')
65
>>> librosa.note_to_midi('Bb-1')
10
>>> librosa.note_to_midi('A!8')
116
>>> # Lists of notes also work
>>> librosa.note_to_midi(['C', 'E', 'G'])
array([12, 16, 19])
'''
if not isinstance(note, six.string_types):
return np.array([note_to_midi(n, round_midi=round_midi) for n in note])
pitch_map = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}
acc_map = {'#': 1, '': 0, 'b': -1, '!': -1}
match = re.match(r'^(?P<note>[A-Ga-g])'
r'(?P<accidental>[#b!]*)'
r'(?P<octave>[+-]?\d+)?'
r'(?P<cents>[+-]\d+)?$',
note)
if not match:
raise ParameterError('Improper note format: {:s}'.format(note))
pitch = match.group('note').upper()
offset = np.sum([acc_map[o] for o in match.group('accidental')])
octave = match.group('octave')
cents = match.group('cents')
if not octave:
octave = 0
else:
octave = int(octave)
if not cents:
cents = 0
else:
cents = int(cents) * 1e-2
note_value = 12 * (octave + 1) + pitch_map[pitch] + offset + cents
if round_midi:
note_value = int(np.round(note_value))
return note_value
|
[
"Convert",
"one",
"or",
"more",
"spelled",
"notes",
"to",
"MIDI",
"number",
"(",
"s",
")",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L319-L404
|
[
"def",
"note_to_midi",
"(",
"note",
",",
"round_midi",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"note",
",",
"six",
".",
"string_types",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"note_to_midi",
"(",
"n",
",",
"round_midi",
"=",
"round_midi",
")",
"for",
"n",
"in",
"note",
"]",
")",
"pitch_map",
"=",
"{",
"'C'",
":",
"0",
",",
"'D'",
":",
"2",
",",
"'E'",
":",
"4",
",",
"'F'",
":",
"5",
",",
"'G'",
":",
"7",
",",
"'A'",
":",
"9",
",",
"'B'",
":",
"11",
"}",
"acc_map",
"=",
"{",
"'#'",
":",
"1",
",",
"''",
":",
"0",
",",
"'b'",
":",
"-",
"1",
",",
"'!'",
":",
"-",
"1",
"}",
"match",
"=",
"re",
".",
"match",
"(",
"r'^(?P<note>[A-Ga-g])'",
"r'(?P<accidental>[#b!]*)'",
"r'(?P<octave>[+-]?\\d+)?'",
"r'(?P<cents>[+-]\\d+)?$'",
",",
"note",
")",
"if",
"not",
"match",
":",
"raise",
"ParameterError",
"(",
"'Improper note format: {:s}'",
".",
"format",
"(",
"note",
")",
")",
"pitch",
"=",
"match",
".",
"group",
"(",
"'note'",
")",
".",
"upper",
"(",
")",
"offset",
"=",
"np",
".",
"sum",
"(",
"[",
"acc_map",
"[",
"o",
"]",
"for",
"o",
"in",
"match",
".",
"group",
"(",
"'accidental'",
")",
"]",
")",
"octave",
"=",
"match",
".",
"group",
"(",
"'octave'",
")",
"cents",
"=",
"match",
".",
"group",
"(",
"'cents'",
")",
"if",
"not",
"octave",
":",
"octave",
"=",
"0",
"else",
":",
"octave",
"=",
"int",
"(",
"octave",
")",
"if",
"not",
"cents",
":",
"cents",
"=",
"0",
"else",
":",
"cents",
"=",
"int",
"(",
"cents",
")",
"*",
"1e-2",
"note_value",
"=",
"12",
"*",
"(",
"octave",
"+",
"1",
")",
"+",
"pitch_map",
"[",
"pitch",
"]",
"+",
"offset",
"+",
"cents",
"if",
"round_midi",
":",
"note_value",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"note_value",
")",
")",
"return",
"note_value"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
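The spelled-note arithmetic in the docstring boils down to a pitch-class lookup plus octave and accidental offsets; a minimal sketch of just that arithmetic (illustrative, without the regex parsing or cents handling):

```python
# MIDI = 12 * (octave + 1) + pitch_class + accidental_offset
PITCH_CLASS = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}
ACCIDENTAL = {'': 0, '#': 1, 'b': -1, '!': -1}

def spelled_to_midi(letter, accidental='', octave=0):
    return 12 * (octave + 1) + PITCH_CLASS[letter] + ACCIDENTAL[accidental]

print(spelled_to_midi('C', '#', 3))   # 49, as in note_to_midi('C#3')
print(spelled_to_midi('B', 'b', -1))  # 10, as in note_to_midi('Bb-1')
```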
test
|
midi_to_note
|
Convert one or more MIDI numbers to note strings.
MIDI numbers will be rounded to the nearest integer.
Notes will be of the format 'C0', 'C#0', 'D0', ...
Examples
--------
>>> librosa.midi_to_note(0)
'C-1'
>>> librosa.midi_to_note(37)
'C#2'
>>> librosa.midi_to_note(-2)
'A#-2'
>>> librosa.midi_to_note(104.7)
'A7'
>>> librosa.midi_to_note(104.7, cents=True)
'A7-30'
>>> librosa.midi_to_note(list(range(12, 24)))
['C0', 'C#0', 'D0', 'D#0', 'E0', 'F0', 'F#0', 'G0', 'G#0', 'A0', 'A#0', 'B0']
Parameters
----------
midi : int or iterable of int
Midi numbers to convert.
octave: bool
If True, include the octave number
cents: bool
If true, cent markers will be appended for fractional notes.
E.g., `midi_to_note(69.3, cents=True)` == `A4+30`
Returns
-------
notes : str or iterable of str
Strings describing each midi note.
Raises
------
ParameterError
if `cents` is True and `octave` is False
See Also
--------
midi_to_hz
note_to_midi
hz_to_note
|
librosa/core/time_frequency.py
|
def midi_to_note(midi, octave=True, cents=False):
'''Convert one or more MIDI numbers to note strings.
MIDI numbers will be rounded to the nearest integer.
Notes will be of the format 'C0', 'C#0', 'D0', ...
Examples
--------
>>> librosa.midi_to_note(0)
'C-1'
>>> librosa.midi_to_note(37)
'C#2'
>>> librosa.midi_to_note(-2)
'A#-2'
>>> librosa.midi_to_note(104.7)
'A7'
>>> librosa.midi_to_note(104.7, cents=True)
'A7-30'
>>> librosa.midi_to_note(list(range(12, 24)))
['C0', 'C#0', 'D0', 'D#0', 'E0', 'F0', 'F#0', 'G0', 'G#0', 'A0', 'A#0', 'B0']
Parameters
----------
midi : int or iterable of int
Midi numbers to convert.
octave: bool
If True, include the octave number
cents: bool
If true, cent markers will be appended for fractional notes.
E.g., `midi_to_note(69.3, cents=True)` == `A4+30`
Returns
-------
notes : str or iterable of str
Strings describing each midi note.
Raises
------
ParameterError
if `cents` is True and `octave` is False
See Also
--------
midi_to_hz
note_to_midi
hz_to_note
'''
if cents and not octave:
raise ParameterError('Cannot encode cents without octave information.')
if not np.isscalar(midi):
return [midi_to_note(x, octave=octave, cents=cents) for x in midi]
note_map = ['C', 'C#', 'D', 'D#',
'E', 'F', 'F#', 'G',
'G#', 'A', 'A#', 'B']
note_num = int(np.round(midi))
note_cents = int(100 * np.around(midi - note_num, 2))
note = note_map[note_num % 12]
if octave:
note = '{:s}{:0d}'.format(note, int(note_num / 12) - 1)
if cents:
note = '{:s}{:+02d}'.format(note, note_cents)
return note
|
def midi_to_note(midi, octave=True, cents=False):
'''Convert one or more MIDI numbers to note strings.
MIDI numbers will be rounded to the nearest integer.
Notes will be of the format 'C0', 'C#0', 'D0', ...
Examples
--------
>>> librosa.midi_to_note(0)
'C-1'
>>> librosa.midi_to_note(37)
'C#2'
>>> librosa.midi_to_note(-2)
'A#-2'
>>> librosa.midi_to_note(104.7)
'A7'
>>> librosa.midi_to_note(104.7, cents=True)
'A7-30'
>>> librosa.midi_to_note(list(range(12, 24)))
['C0', 'C#0', 'D0', 'D#0', 'E0', 'F0', 'F#0', 'G0', 'G#0', 'A0', 'A#0', 'B0']
Parameters
----------
midi : int or iterable of int
Midi numbers to convert.
octave: bool
If True, include the octave number
cents: bool
If true, cent markers will be appended for fractional notes.
E.g., `midi_to_note(69.3, cents=True)` == `A4+30`
Returns
-------
notes : str or iterable of str
Strings describing each midi note.
Raises
------
ParameterError
if `cents` is True and `octave` is False
See Also
--------
midi_to_hz
note_to_midi
hz_to_note
'''
if cents and not octave:
raise ParameterError('Cannot encode cents without octave information.')
if not np.isscalar(midi):
return [midi_to_note(x, octave=octave, cents=cents) for x in midi]
note_map = ['C', 'C#', 'D', 'D#',
'E', 'F', 'F#', 'G',
'G#', 'A', 'A#', 'B']
note_num = int(np.round(midi))
note_cents = int(100 * np.around(midi - note_num, 2))
note = note_map[note_num % 12]
if octave:
note = '{:s}{:0d}'.format(note, int(note_num / 12) - 1)
if cents:
note = '{:s}{:+02d}'.format(note, note_cents)
return note
|
[
"Convert",
"one",
"or",
"more",
"MIDI",
"numbers",
"to",
"note",
"strings",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L407-L478
|
[
"def",
"midi_to_note",
"(",
"midi",
",",
"octave",
"=",
"True",
",",
"cents",
"=",
"False",
")",
":",
"if",
"cents",
"and",
"not",
"octave",
":",
"raise",
"ParameterError",
"(",
"'Cannot encode cents without octave information.'",
")",
"if",
"not",
"np",
".",
"isscalar",
"(",
"midi",
")",
":",
"return",
"[",
"midi_to_note",
"(",
"x",
",",
"octave",
"=",
"octave",
",",
"cents",
"=",
"cents",
")",
"for",
"x",
"in",
"midi",
"]",
"note_map",
"=",
"[",
"'C'",
",",
"'C#'",
",",
"'D'",
",",
"'D#'",
",",
"'E'",
",",
"'F'",
",",
"'F#'",
",",
"'G'",
",",
"'G#'",
",",
"'A'",
",",
"'A#'",
",",
"'B'",
"]",
"note_num",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"midi",
")",
")",
"note_cents",
"=",
"int",
"(",
"100",
"*",
"np",
".",
"around",
"(",
"midi",
"-",
"note_num",
",",
"2",
")",
")",
"note",
"=",
"note_map",
"[",
"note_num",
"%",
"12",
"]",
"if",
"octave",
":",
"note",
"=",
"'{:s}{:0d}'",
".",
"format",
"(",
"note",
",",
"int",
"(",
"note_num",
"/",
"12",
")",
"-",
"1",
")",
"if",
"cents",
":",
"note",
"=",
"'{:s}{:+02d}'",
".",
"format",
"(",
"note",
",",
"note_cents",
")",
"return",
"note"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
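A compact sketch of the inverse mapping described above (it mirrors the documented behaviour; it is not the librosa function):

```python
import numpy as np

NOTE_MAP = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']

def midi_to_note_sketch(midi, cents=False):
    n = int(np.round(midi))
    name = '{}{}'.format(NOTE_MAP[n % 12], n // 12 - 1)
    if cents:
        name += '{:+03d}'.format(int(100 * np.around(midi - n, 2)))
    return name

print(midi_to_note_sketch(37))                 # 'C#2'
print(midi_to_note_sketch(104.7, cents=True))  # 'A7-30'
```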
test
|
hz_to_mel
|
Convert Hz to Mels
Examples
--------
>>> librosa.hz_to_mel(60)
0.9
>>> librosa.hz_to_mel([110, 220, 440])
array([ 1.65, 3.3 , 6.6 ])
Parameters
----------
frequencies : number or np.ndarray [shape=(n,)] , float
scalar or array of frequencies
htk : bool
use HTK formula instead of Slaney
Returns
-------
mels : number or np.ndarray [shape=(n,)]
input frequencies in Mels
See Also
--------
mel_to_hz
|
librosa/core/time_frequency.py
|
def hz_to_mel(frequencies, htk=False):
"""Convert Hz to Mels
Examples
--------
>>> librosa.hz_to_mel(60)
0.9
>>> librosa.hz_to_mel([110, 220, 440])
array([ 1.65, 3.3 , 6.6 ])
Parameters
----------
frequencies : number or np.ndarray [shape=(n,)] , float
scalar or array of frequencies
htk : bool
use HTK formula instead of Slaney
Returns
-------
mels : number or np.ndarray [shape=(n,)]
input frequencies in Mels
See Also
--------
mel_to_hz
"""
frequencies = np.asanyarray(frequencies)
if htk:
return 2595.0 * np.log10(1.0 + frequencies / 700.0)
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (frequencies - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
if frequencies.ndim:
# If we have array data, vectorize
log_t = (frequencies >= min_log_hz)
mels[log_t] = min_log_mel + np.log(frequencies[log_t]/min_log_hz) / logstep
elif frequencies >= min_log_hz:
# If we have scalar data, check directly
mels = min_log_mel + np.log(frequencies / min_log_hz) / logstep
return mels
|
def hz_to_mel(frequencies, htk=False):
"""Convert Hz to Mels
Examples
--------
>>> librosa.hz_to_mel(60)
0.9
>>> librosa.hz_to_mel([110, 220, 440])
array([ 1.65, 3.3 , 6.6 ])
Parameters
----------
frequencies : number or np.ndarray [shape=(n,)] , float
scalar or array of frequencies
htk : bool
use HTK formula instead of Slaney
Returns
-------
mels : number or np.ndarray [shape=(n,)]
input frequencies in Mels
See Also
--------
mel_to_hz
"""
frequencies = np.asanyarray(frequencies)
if htk:
return 2595.0 * np.log10(1.0 + frequencies / 700.0)
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (frequencies - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
if frequencies.ndim:
# If we have array data, vectorize
log_t = (frequencies >= min_log_hz)
mels[log_t] = min_log_mel + np.log(frequencies[log_t]/min_log_hz) / logstep
elif frequencies >= min_log_hz:
# If we have scalar data, check directly
mels = min_log_mel + np.log(frequencies / min_log_hz) / logstep
return mels
|
[
"Convert",
"Hz",
"to",
"Mels"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L591-L643
|
[
"def",
"hz_to_mel",
"(",
"frequencies",
",",
"htk",
"=",
"False",
")",
":",
"frequencies",
"=",
"np",
".",
"asanyarray",
"(",
"frequencies",
")",
"if",
"htk",
":",
"return",
"2595.0",
"*",
"np",
".",
"log10",
"(",
"1.0",
"+",
"frequencies",
"/",
"700.0",
")",
"# Fill in the linear part",
"f_min",
"=",
"0.0",
"f_sp",
"=",
"200.0",
"/",
"3",
"mels",
"=",
"(",
"frequencies",
"-",
"f_min",
")",
"/",
"f_sp",
"# Fill in the log-scale part",
"min_log_hz",
"=",
"1000.0",
"# beginning of log region (Hz)",
"min_log_mel",
"=",
"(",
"min_log_hz",
"-",
"f_min",
")",
"/",
"f_sp",
"# same (Mels)",
"logstep",
"=",
"np",
".",
"log",
"(",
"6.4",
")",
"/",
"27.0",
"# step size for log region",
"if",
"frequencies",
".",
"ndim",
":",
"# If we have array data, vectorize",
"log_t",
"=",
"(",
"frequencies",
">=",
"min_log_hz",
")",
"mels",
"[",
"log_t",
"]",
"=",
"min_log_mel",
"+",
"np",
".",
"log",
"(",
"frequencies",
"[",
"log_t",
"]",
"/",
"min_log_hz",
")",
"/",
"logstep",
"elif",
"frequencies",
">=",
"min_log_hz",
":",
"# If we have scalar data, heck directly",
"mels",
"=",
"min_log_mel",
"+",
"np",
".",
"log",
"(",
"frequencies",
"/",
"min_log_hz",
")",
"/",
"logstep",
"return",
"mels"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
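Both documented Hz-to-mel conventions can be written out directly; a minimal sketch (illustrative, not the library code):

```python
import numpy as np

def hz_to_mel_htk(f):
    return 2595.0 * np.log10(1.0 + np.asanyarray(f) / 700.0)

def hz_to_mel_slaney(f):
    f = np.asanyarray(f, dtype=float)
    f_sp = 200.0 / 3                   # linear region: 200/3 Hz per mel
    log_step = np.log(6.4) / 27.0      # log region starts at 1000 Hz (= mel 15)
    linear = f / f_sp
    logged = 15.0 + np.log(np.maximum(f, 1e-12) / 1000.0) / log_step
    return np.where(f >= 1000.0, logged, linear)

print(hz_to_mel_slaney([110, 220, 440]))  # ~[1.65, 3.3, 6.6], as in the doctest
```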
test
|
mel_to_hz
|
Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
|
librosa/core/time_frequency.py
|
def mel_to_hz(mels, htk=False):
"""Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
"""
mels = np.asanyarray(mels)
if htk:
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
if mels.ndim:
# If we have vector data, vectorize
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
elif mels >= min_log_mel:
# If we have scalar data, check directly
freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))
return freqs
|
def mel_to_hz(mels, htk=False):
"""Convert mel bin numbers to frequencies
Examples
--------
>>> librosa.mel_to_hz(3)
200.
>>> librosa.mel_to_hz([1,2,3,4,5])
array([ 66.667, 133.333, 200. , 266.667, 333.333])
Parameters
----------
mels : np.ndarray [shape=(n,)], float
mel bins to convert
htk : bool
use HTK formula instead of Slaney
Returns
-------
frequencies : np.ndarray [shape=(n,)]
input mels in Hz
See Also
--------
hz_to_mel
"""
mels = np.asanyarray(mels)
if htk:
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
if mels.ndim:
# If we have vector data, vectorize
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
elif mels >= min_log_mel:
# If we have scalar data, check directly
freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))
return freqs
|
[
"Convert",
"mel",
"bin",
"numbers",
"to",
"frequencies"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L646-L697
|
[
"def",
"mel_to_hz",
"(",
"mels",
",",
"htk",
"=",
"False",
")",
":",
"mels",
"=",
"np",
".",
"asanyarray",
"(",
"mels",
")",
"if",
"htk",
":",
"return",
"700.0",
"*",
"(",
"10.0",
"**",
"(",
"mels",
"/",
"2595.0",
")",
"-",
"1.0",
")",
"# Fill in the linear scale",
"f_min",
"=",
"0.0",
"f_sp",
"=",
"200.0",
"/",
"3",
"freqs",
"=",
"f_min",
"+",
"f_sp",
"*",
"mels",
"# And now the nonlinear scale",
"min_log_hz",
"=",
"1000.0",
"# beginning of log region (Hz)",
"min_log_mel",
"=",
"(",
"min_log_hz",
"-",
"f_min",
")",
"/",
"f_sp",
"# same (Mels)",
"logstep",
"=",
"np",
".",
"log",
"(",
"6.4",
")",
"/",
"27.0",
"# step size for log region",
"if",
"mels",
".",
"ndim",
":",
"# If we have vector data, vectorize",
"log_t",
"=",
"(",
"mels",
">=",
"min_log_mel",
")",
"freqs",
"[",
"log_t",
"]",
"=",
"min_log_hz",
"*",
"np",
".",
"exp",
"(",
"logstep",
"*",
"(",
"mels",
"[",
"log_t",
"]",
"-",
"min_log_mel",
")",
")",
"elif",
"mels",
">=",
"min_log_mel",
":",
"# If we have scalar data, check directly",
"freqs",
"=",
"min_log_hz",
"*",
"np",
".",
"exp",
"(",
"logstep",
"*",
"(",
"mels",
"-",
"min_log_mel",
")",
")",
"return",
"freqs"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
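Since `mel_to_hz` inverts `hz_to_mel`, a quick round-trip check is a useful sanity test; the sketch below uses the HTK formula pair for brevity:

```python
import numpy as np

def hz_to_mel_htk(f):
    return 2595.0 * np.log10(1.0 + np.asanyarray(f) / 700.0)

def mel_to_hz_htk(m):
    return 700.0 * (10.0 ** (np.asanyarray(m) / 2595.0) - 1.0)

freqs = np.array([110.0, 440.0, 4000.0])
print(np.allclose(mel_to_hz_htk(hz_to_mel_htk(freqs)), freqs))  # True
```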
test
|
hz_to_octs
|
Convert frequencies (Hz) to (fractional) octave numbers.
Examples
--------
>>> librosa.hz_to_octs(440.0)
4.
>>> librosa.hz_to_octs([32, 64, 128, 256])
array([ 0.219, 1.219, 2.219, 3.219])
Parameters
----------
frequencies : number >0 or np.ndarray [shape=(n,)] or float
scalar or vector of frequencies
A440 : float
frequency of A440 (in Hz)
Returns
-------
octaves : number or np.ndarray [shape=(n,)]
octave number for each frequency
See Also
--------
octs_to_hz
|
librosa/core/time_frequency.py
|
def hz_to_octs(frequencies, A440=440.0):
"""Convert frequencies (Hz) to (fractional) octave numbers.
Examples
--------
>>> librosa.hz_to_octs(440.0)
4.
>>> librosa.hz_to_octs([32, 64, 128, 256])
array([ 0.219, 1.219, 2.219, 3.219])
Parameters
----------
frequencies : number >0 or np.ndarray [shape=(n,)] or float
scalar or vector of frequencies
A440 : float
frequency of A440 (in Hz)
Returns
-------
octaves : number or np.ndarray [shape=(n,)]
octave number for each frequency
See Also
--------
octs_to_hz
"""
return np.log2(np.asanyarray(frequencies) / (float(A440) / 16))
|
def hz_to_octs(frequencies, A440=440.0):
"""Convert frequencies (Hz) to (fractional) octave numbers.
Examples
--------
>>> librosa.hz_to_octs(440.0)
4.
>>> librosa.hz_to_octs([32, 64, 128, 256])
array([ 0.219, 1.219, 2.219, 3.219])
Parameters
----------
frequencies : number >0 or np.ndarray [shape=(n,)] or float
scalar or vector of frequencies
A440 : float
frequency of A440 (in Hz)
Returns
-------
octaves : number or np.ndarray [shape=(n,)]
octave number for each frequency
See Also
--------
octs_to_hz
"""
return np.log2(np.asanyarray(frequencies) / (float(A440) / 16))
|
[
"Convert",
"frequencies",
"(",
"Hz",
")",
"to",
"(",
"fractional",
")",
"octave",
"numbers",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L700-L726
|
[
"def",
"hz_to_octs",
"(",
"frequencies",
",",
"A440",
"=",
"440.0",
")",
":",
"return",
"np",
".",
"log2",
"(",
"np",
".",
"asanyarray",
"(",
"frequencies",
")",
"/",
"(",
"float",
"(",
"A440",
")",
"/",
"16",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
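The whole conversion is a single expression; a minimal sketch reproducing the doctest values:

```python
import numpy as np

def hz_to_octs_sketch(frequencies, A440=440.0):
    # Octaves measured relative to A440 / 16 = 27.5 Hz
    return np.log2(np.asanyarray(frequencies) / (A440 / 16.0))

print(hz_to_octs_sketch(440.0))               # 4.0
print(hz_to_octs_sketch([32, 64, 128, 256]))  # ~[0.219, 1.219, 2.219, 3.219]
```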
test
|
fft_frequencies
|
Alternative implementation of `np.fft.fftfreq`
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
n_fft : int > 0 [scalar]
FFT window size
Returns
-------
freqs : np.ndarray [shape=(1 + n_fft/2,)]
Frequencies `(0, sr/n_fft, 2*sr/n_fft, ..., sr/2)`
Examples
--------
>>> librosa.fft_frequencies(sr=22050, n_fft=16)
array([ 0. , 1378.125, 2756.25 , 4134.375,
5512.5 , 6890.625, 8268.75 , 9646.875, 11025. ])
|
librosa/core/time_frequency.py
|
def fft_frequencies(sr=22050, n_fft=2048):
'''Alternative implementation of `np.fft.fftfreq`
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
n_fft : int > 0 [scalar]
FFT window size
Returns
-------
freqs : np.ndarray [shape=(1 + n_fft/2,)]
Frequencies `(0, sr/n_fft, 2*sr/n_fft, ..., sr/2)`
Examples
--------
>>> librosa.fft_frequencies(sr=22050, n_fft=16)
array([ 0. , 1378.125, 2756.25 , 4134.375,
5512.5 , 6890.625, 8268.75 , 9646.875, 11025. ])
'''
return np.linspace(0,
float(sr) / 2,
int(1 + n_fft//2),
endpoint=True)
|
def fft_frequencies(sr=22050, n_fft=2048):
'''Alternative implementation of `np.fft.fftfreq`
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
n_fft : int > 0 [scalar]
FFT window size
Returns
-------
freqs : np.ndarray [shape=(1 + n_fft/2,)]
Frequencies `(0, sr/n_fft, 2*sr/n_fft, ..., sr/2)`
Examples
--------
>>> librosa.fft_frequencies(sr=22050, n_fft=16)
array([ 0. , 1378.125, 2756.25 , 4134.375,
5512.5 , 6890.625, 8268.75 , 9646.875, 11025. ])
'''
return np.linspace(0,
float(sr) / 2,
int(1 + n_fft//2),
endpoint=True)
|
[
"Alternative",
"implementation",
"of",
"np",
".",
"fft",
".",
"fftfreq"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L760-L789
|
[
"def",
"fft_frequencies",
"(",
"sr",
"=",
"22050",
",",
"n_fft",
"=",
"2048",
")",
":",
"return",
"np",
".",
"linspace",
"(",
"0",
",",
"float",
"(",
"sr",
")",
"/",
"2",
",",
"int",
"(",
"1",
"+",
"n_fft",
"//",
"2",
")",
",",
"endpoint",
"=",
"True",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
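The grid described here coincides with `np.fft.rfftfreq` for the same `n_fft` and sampling rate; a short check:

```python
import numpy as np

sr, n_fft = 22050, 16
via_linspace = np.linspace(0, sr / 2, 1 + n_fft // 2, endpoint=True)
via_rfftfreq = np.fft.rfftfreq(n_fft, d=1.0 / sr)

print(np.allclose(via_linspace, via_rfftfreq))  # True
print(via_linspace)  # [0., 1378.125, ..., 11025.], as in the doctest
```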
test
|
cqt_frequencies
|
Compute the center frequencies of Constant-Q bins.
Examples
--------
>>> # Get the CQT frequencies for 24 notes, starting at C2
>>> librosa.cqt_frequencies(24, fmin=librosa.note_to_hz('C2'))
array([ 65.406, 69.296, 73.416, 77.782, 82.407, 87.307,
92.499, 97.999, 103.826, 110. , 116.541, 123.471,
130.813, 138.591, 146.832, 155.563, 164.814, 174.614,
184.997, 195.998, 207.652, 220. , 233.082, 246.942])
Parameters
----------
n_bins : int > 0 [scalar]
Number of constant-Q bins
fmin : float > 0 [scalar]
Minimum frequency
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)`
Deviation from A440 tuning in fractional bins (cents)
Returns
-------
frequencies : np.ndarray [shape=(n_bins,)]
Center frequency for each CQT bin
|
librosa/core/time_frequency.py
|
def cqt_frequencies(n_bins, fmin, bins_per_octave=12, tuning=0.0):
"""Compute the center frequencies of Constant-Q bins.
Examples
--------
>>> # Get the CQT frequencies for 24 notes, starting at C2
>>> librosa.cqt_frequencies(24, fmin=librosa.note_to_hz('C2'))
array([ 65.406, 69.296, 73.416, 77.782, 82.407, 87.307,
92.499, 97.999, 103.826, 110. , 116.541, 123.471,
130.813, 138.591, 146.832, 155.563, 164.814, 174.614,
184.997, 195.998, 207.652, 220. , 233.082, 246.942])
Parameters
----------
n_bins : int > 0 [scalar]
Number of constant-Q bins
fmin : float > 0 [scalar]
Minimum frequency
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)`
Deviation from A440 tuning in fractional bins (cents)
Returns
-------
frequencies : np.ndarray [shape=(n_bins,)]
Center frequency for each CQT bin
"""
correction = 2.0**(float(tuning) / bins_per_octave)
frequencies = 2.0**(np.arange(0, n_bins, dtype=float) / bins_per_octave)
return correction * fmin * frequencies
|
def cqt_frequencies(n_bins, fmin, bins_per_octave=12, tuning=0.0):
"""Compute the center frequencies of Constant-Q bins.
Examples
--------
>>> # Get the CQT frequencies for 24 notes, starting at C2
>>> librosa.cqt_frequencies(24, fmin=librosa.note_to_hz('C2'))
array([ 65.406, 69.296, 73.416, 77.782, 82.407, 87.307,
92.499, 97.999, 103.826, 110. , 116.541, 123.471,
130.813, 138.591, 146.832, 155.563, 164.814, 174.614,
184.997, 195.998, 207.652, 220. , 233.082, 246.942])
Parameters
----------
n_bins : int > 0 [scalar]
Number of constant-Q bins
fmin : float > 0 [scalar]
Minimum frequency
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)`
Deviation from A440 tuning in fractional bins (cents)
Returns
-------
frequencies : np.ndarray [shape=(n_bins,)]
Center frequency for each CQT bin
"""
correction = 2.0**(float(tuning) / bins_per_octave)
frequencies = 2.0**(np.arange(0, n_bins, dtype=float) / bins_per_octave)
return correction * fmin * frequencies
|
[
"Compute",
"the",
"center",
"frequencies",
"of",
"Constant",
"-",
"Q",
"bins",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L792-L827
|
[
"def",
"cqt_frequencies",
"(",
"n_bins",
",",
"fmin",
",",
"bins_per_octave",
"=",
"12",
",",
"tuning",
"=",
"0.0",
")",
":",
"correction",
"=",
"2.0",
"**",
"(",
"float",
"(",
"tuning",
")",
"/",
"bins_per_octave",
")",
"frequencies",
"=",
"2.0",
"**",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"n_bins",
",",
"dtype",
"=",
"float",
")",
"/",
"bins_per_octave",
")",
"return",
"correction",
"*",
"fmin",
"*",
"frequencies"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
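The two-step computation (a tuning correction times a geometric bin spacing) collapses to `f_k = fmin * 2**((k + tuning) / bins_per_octave)`; a minimal sketch:

```python
import numpy as np

def cqt_frequencies_sketch(n_bins, fmin, bins_per_octave=12, tuning=0.0):
    return fmin * 2.0 ** ((np.arange(n_bins) + tuning) / bins_per_octave)

print(cqt_frequencies_sketch(4, 65.406))  # ~[65.406, 69.296, 73.416, 77.782]
```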
test
|
mel_frequencies
|
Compute an array of acoustic frequencies tuned to the mel scale.
The mel scale is a quasi-logarithmic function of acoustic frequency
designed such that perceptually similar pitch intervals (e.g. octaves)
appear equal in width over the full hearing range.
Because the definition of the mel scale is conditioned by a finite number
of subjective psychoacoustical experiments, several implementations coexist
in the audio signal processing literature [1]_. By default, librosa replicates
the behavior of the well-established MATLAB Auditory Toolbox of Slaney [2]_.
According to this default implementation, the conversion from Hertz to mel is
linear below 1 kHz and logarithmic above 1 kHz. Another available implementation
replicates the Hidden Markov Toolkit [3]_ (HTK) according to the following formula:
`mel = 2595.0 * np.log10(1.0 + f / 700.0).`
The choice of implementation is determined by the `htk` keyword argument: setting
`htk=False` leads to the Auditory Toolbox implementation, whereas setting `htk=True`
leads to the HTK implementation.
.. [1] Umesh, S., Cohen, L., & Nelson, D. Fitting the mel scale.
In Proc. International Conference on Acoustics, Speech, and Signal Processing
(ICASSP), vol. 1, pp. 217-220, 1998.
.. [2] Slaney, M. Auditory Toolbox: A MATLAB Toolbox for Auditory
Modeling Work. Technical Report, version 2, Interval Research Corporation, 1998.
.. [3] Young, S., Evermann, G., Gales, M., Hain, T., Kershaw, D., Liu, X.,
Moore, G., Odell, J., Ollason, D., Povey, D., Valtchev, V., & Woodland, P.
The HTK book, version 3.4. Cambridge University, March 2009.
See Also
--------
hz_to_mel
mel_to_hz
librosa.feature.melspectrogram
librosa.feature.mfcc
Parameters
----------
n_mels : int > 0 [scalar]
Number of mel bins.
fmin : float >= 0 [scalar]
Minimum frequency (Hz).
fmax : float >= 0 [scalar]
Maximum frequency (Hz).
htk : bool
If True, use HTK formula to convert Hz to mel.
Otherwise (False), use Slaney's Auditory Toolbox.
Returns
-------
bin_frequencies : ndarray [shape=(n_mels,)]
Vector of n_mels frequencies in Hz which are uniformly spaced on the Mel
axis.
Examples
--------
>>> librosa.mel_frequencies(n_mels=40)
array([ 0. , 85.317, 170.635, 255.952,
341.269, 426.586, 511.904, 597.221,
682.538, 767.855, 853.173, 938.49 ,
1024.856, 1119.114, 1222.042, 1334.436,
1457.167, 1591.187, 1737.532, 1897.337,
2071.84 , 2262.393, 2470.47 , 2697.686,
2945.799, 3216.731, 3512.582, 3835.643,
4188.417, 4573.636, 4994.285, 5453.621,
5955.205, 6502.92 , 7101.009, 7754.107,
8467.272, 9246.028, 10096.408, 11025. ])
|
librosa/core/time_frequency.py
|
def mel_frequencies(n_mels=128, fmin=0.0, fmax=11025.0, htk=False):
"""Compute an array of acoustic frequencies tuned to the mel scale.
The mel scale is a quasi-logarithmic function of acoustic frequency
designed such that perceptually similar pitch intervals (e.g. octaves)
appear equal in width over the full hearing range.
Because the definition of the mel scale is conditioned by a finite number
of subjective psychoacoustical experiments, several implementations coexist
in the audio signal processing literature [1]_. By default, librosa replicates
the behavior of the well-established MATLAB Auditory Toolbox of Slaney [2]_.
According to this default implementation, the conversion from Hertz to mel is
linear below 1 kHz and logarithmic above 1 kHz. Another available implementation
replicates the Hidden Markov Toolkit [3]_ (HTK) according to the following formula:
`mel = 2595.0 * np.log10(1.0 + f / 700.0).`
The choice of implementation is determined by the `htk` keyword argument: setting
`htk=False` leads to the Auditory Toolbox implementation, whereas setting `htk=True`
leads to the HTK implementation.
.. [1] Umesh, S., Cohen, L., & Nelson, D. Fitting the mel scale.
In Proc. International Conference on Acoustics, Speech, and Signal Processing
(ICASSP), vol. 1, pp. 217-220, 1998.
.. [2] Slaney, M. Auditory Toolbox: A MATLAB Toolbox for Auditory
Modeling Work. Technical Report, version 2, Interval Research Corporation, 1998.
.. [3] Young, S., Evermann, G., Gales, M., Hain, T., Kershaw, D., Liu, X.,
Moore, G., Odell, J., Ollason, D., Povey, D., Valtchev, V., & Woodland, P.
The HTK book, version 3.4. Cambridge University, March 2009.
See Also
--------
hz_to_mel
mel_to_hz
librosa.feature.melspectrogram
librosa.feature.mfcc
Parameters
----------
n_mels : int > 0 [scalar]
Number of mel bins.
fmin : float >= 0 [scalar]
Minimum frequency (Hz).
fmax : float >= 0 [scalar]
Maximum frequency (Hz).
htk : bool
If True, use HTK formula to convert Hz to mel.
Otherwise (False), use Slaney's Auditory Toolbox.
Returns
-------
bin_frequencies : ndarray [shape=(n_mels,)]
Vector of n_mels frequencies in Hz which are uniformly spaced on the Mel
axis.
Examples
--------
>>> librosa.mel_frequencies(n_mels=40)
array([ 0. , 85.317, 170.635, 255.952,
341.269, 426.586, 511.904, 597.221,
682.538, 767.855, 853.173, 938.49 ,
1024.856, 1119.114, 1222.042, 1334.436,
1457.167, 1591.187, 1737.532, 1897.337,
2071.84 , 2262.393, 2470.47 , 2697.686,
2945.799, 3216.731, 3512.582, 3835.643,
4188.417, 4573.636, 4994.285, 5453.621,
5955.205, 6502.92 , 7101.009, 7754.107,
8467.272, 9246.028, 10096.408, 11025. ])
"""
# 'Center freqs' of mel bands - uniformly spaced between limits
min_mel = hz_to_mel(fmin, htk=htk)
max_mel = hz_to_mel(fmax, htk=htk)
mels = np.linspace(min_mel, max_mel, n_mels)
return mel_to_hz(mels, htk=htk)
|
def mel_frequencies(n_mels=128, fmin=0.0, fmax=11025.0, htk=False):
"""Compute an array of acoustic frequencies tuned to the mel scale.
The mel scale is a quasi-logarithmic function of acoustic frequency
designed such that perceptually similar pitch intervals (e.g. octaves)
appear equal in width over the full hearing range.
Because the definition of the mel scale is conditioned by a finite number
of subjective psychoacoustical experiments, several implementations coexist
in the audio signal processing literature [1]_. By default, librosa replicates
the behavior of the well-established MATLAB Auditory Toolbox of Slaney [2]_.
According to this default implementation, the conversion from Hertz to mel is
linear below 1 kHz and logarithmic above 1 kHz. Another available implementation
replicates the Hidden Markov Toolkit [3]_ (HTK) according to the following formula:
`mel = 2595.0 * np.log10(1.0 + f / 700.0).`
The choice of implementation is determined by the `htk` keyword argument: setting
`htk=False` leads to the Auditory Toolbox implementation, whereas setting `htk=True`
leads to the HTK implementation.
.. [1] Umesh, S., Cohen, L., & Nelson, D. Fitting the mel scale.
In Proc. International Conference on Acoustics, Speech, and Signal Processing
(ICASSP), vol. 1, pp. 217-220, 1998.
.. [2] Slaney, M. Auditory Toolbox: A MATLAB Toolbox for Auditory
Modeling Work. Technical Report, version 2, Interval Research Corporation, 1998.
.. [3] Young, S., Evermann, G., Gales, M., Hain, T., Kershaw, D., Liu, X.,
Moore, G., Odell, J., Ollason, D., Povey, D., Valtchev, V., & Woodland, P.
The HTK book, version 3.4. Cambridge University, March 2009.
See Also
--------
hz_to_mel
mel_to_hz
librosa.feature.melspectrogram
librosa.feature.mfcc
Parameters
----------
n_mels : int > 0 [scalar]
Number of mel bins.
fmin : float >= 0 [scalar]
Minimum frequency (Hz).
fmax : float >= 0 [scalar]
Maximum frequency (Hz).
htk : bool
If True, use HTK formula to convert Hz to mel.
Otherwise (False), use Slaney's Auditory Toolbox.
Returns
-------
bin_frequencies : ndarray [shape=(n_mels,)]
Vector of n_mels frequencies in Hz which are uniformly spaced on the Mel
axis.
Examples
--------
>>> librosa.mel_frequencies(n_mels=40)
array([ 0. , 85.317, 170.635, 255.952,
341.269, 426.586, 511.904, 597.221,
682.538, 767.855, 853.173, 938.49 ,
1024.856, 1119.114, 1222.042, 1334.436,
1457.167, 1591.187, 1737.532, 1897.337,
2071.84 , 2262.393, 2470.47 , 2697.686,
2945.799, 3216.731, 3512.582, 3835.643,
4188.417, 4573.636, 4994.285, 5453.621,
5955.205, 6502.92 , 7101.009, 7754.107,
8467.272, 9246.028, 10096.408, 11025. ])
"""
# 'Center freqs' of mel bands - uniformly spaced between limits
min_mel = hz_to_mel(fmin, htk=htk)
max_mel = hz_to_mel(fmax, htk=htk)
mels = np.linspace(min_mel, max_mel, n_mels)
return mel_to_hz(mels, htk=htk)
|
[
"Compute",
"an",
"array",
"of",
"acoustic",
"frequencies",
"tuned",
"to",
"the",
"mel",
"scale",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L830-L914
|
[
"def",
"mel_frequencies",
"(",
"n_mels",
"=",
"128",
",",
"fmin",
"=",
"0.0",
",",
"fmax",
"=",
"11025.0",
",",
"htk",
"=",
"False",
")",
":",
"# 'Center freqs' of mel bands - uniformly spaced between limits",
"min_mel",
"=",
"hz_to_mel",
"(",
"fmin",
",",
"htk",
"=",
"htk",
")",
"max_mel",
"=",
"hz_to_mel",
"(",
"fmax",
",",
"htk",
"=",
"htk",
")",
"mels",
"=",
"np",
".",
"linspace",
"(",
"min_mel",
",",
"max_mel",
",",
"n_mels",
")",
"return",
"mel_to_hz",
"(",
"mels",
",",
"htk",
"=",
"htk",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
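The construction is simply "space points evenly in mel, then map back to Hz". A minimal sketch using the HTK formula pair (the `htk=True` path; the Slaney path works the same way):

```python
import numpy as np

def mel_frequencies_htk(n_mels=40, fmin=0.0, fmax=11025.0):
    to_mel = lambda f: 2595.0 * np.log10(1.0 + f / 700.0)
    to_hz = lambda m: 700.0 * (10.0 ** (m / 2595.0) - 1.0)
    mels = np.linspace(to_mel(fmin), to_mel(fmax), n_mels)  # uniform in mel
    return to_hz(mels)                                      # back to Hz

freqs = mel_frequencies_htk()
print(freqs[0], freqs[-1])  # ~0.0 and ~11025.0: the endpoints are preserved
```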
test
|
tempo_frequencies
|
Compute the frequencies (in beats-per-minute) corresponding
to an onset auto-correlation or tempogram matrix.
Parameters
----------
n_bins : int > 0
The number of lag bins
hop_length : int > 0
The number of samples between each bin
sr : number > 0
The audio sampling rate
Returns
-------
bin_frequencies : ndarray [shape=(n_bins,)]
vector of bin frequencies measured in BPM.
.. note:: `bin_frequencies[0] = +np.inf` corresponds to 0-lag
Examples
--------
Get the tempo frequencies corresponding to a 384-bin (8-second) tempogram
>>> librosa.tempo_frequencies(384)
array([ inf, 2583.984, 1291.992, ..., 6.782,
6.764, 6.747])
|
librosa/core/time_frequency.py
|
def tempo_frequencies(n_bins, hop_length=512, sr=22050):
'''Compute the frequencies (in beats-per-minute) corresponding
to an onset auto-correlation or tempogram matrix.
Parameters
----------
n_bins : int > 0
The number of lag bins
hop_length : int > 0
The number of samples between each bin
sr : number > 0
The audio sampling rate
Returns
-------
bin_frequencies : ndarray [shape=(n_bins,)]
vector of bin frequencies measured in BPM.
.. note:: `bin_frequencies[0] = +np.inf` corresponds to 0-lag
Examples
--------
Get the tempo frequencies corresponding to a 384-bin (8-second) tempogram
>>> librosa.tempo_frequencies(384)
array([ inf, 2583.984, 1291.992, ..., 6.782,
6.764, 6.747])
'''
bin_frequencies = np.zeros(int(n_bins), dtype=np.float)
bin_frequencies[0] = np.inf
bin_frequencies[1:] = 60.0 * sr / (hop_length * np.arange(1.0, n_bins))
return bin_frequencies
|
def tempo_frequencies(n_bins, hop_length=512, sr=22050):
'''Compute the frequencies (in beats-per-minute) corresponding
to an onset auto-correlation or tempogram matrix.
Parameters
----------
n_bins : int > 0
The number of lag bins
hop_length : int > 0
The number of samples between each bin
sr : number > 0
The audio sampling rate
Returns
-------
bin_frequencies : ndarray [shape=(n_bins,)]
vector of bin frequencies measured in BPM.
.. note:: `bin_frequencies[0] = +np.inf` corresponds to 0-lag
Examples
--------
Get the tempo frequencies corresponding to a 384-bin (8-second) tempogram
>>> librosa.tempo_frequencies(384)
array([ inf, 2583.984, 1291.992, ..., 6.782,
6.764, 6.747])
'''
bin_frequencies = np.zeros(int(n_bins), dtype=np.float)
bin_frequencies[0] = np.inf
bin_frequencies[1:] = 60.0 * sr / (hop_length * np.arange(1.0, n_bins))
return bin_frequencies
|
[
"Compute",
"the",
"frequencies",
"(",
"in",
"beats",
"-",
"per",
"-",
"minute",
")",
"corresponding",
"to",
"an",
"onset",
"auto",
"-",
"correlation",
"or",
"tempogram",
"matrix",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L917-L953
|
[
"def",
"tempo_frequencies",
"(",
"n_bins",
",",
"hop_length",
"=",
"512",
",",
"sr",
"=",
"22050",
")",
":",
"bin_frequencies",
"=",
"np",
".",
"zeros",
"(",
"int",
"(",
"n_bins",
")",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"bin_frequencies",
"[",
"0",
"]",
"=",
"np",
".",
"inf",
"bin_frequencies",
"[",
"1",
":",
"]",
"=",
"60.0",
"*",
"sr",
"/",
"(",
"hop_length",
"*",
"np",
".",
"arange",
"(",
"1.0",
",",
"n_bins",
")",
")",
"return",
"bin_frequencies"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
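Lag bin `k` corresponds to `60 * sr / (hop_length * k)` BPM. A minimal sketch; it uses the builtin `float` dtype because the `np.float` alias in the quoted code was removed in NumPy 1.24:

```python
import numpy as np

def tempo_frequencies_sketch(n_bins, hop_length=512, sr=22050):
    bpm = np.zeros(n_bins, dtype=float)
    bpm[0] = np.inf  # lag 0 has no defined tempo
    bpm[1:] = 60.0 * sr / (hop_length * np.arange(1, n_bins))
    return bpm

print(tempo_frequencies_sketch(4))  # [inf, 2583.984..., 1291.992..., 861.328...]
```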
test
|
A_weighting
|
Compute the A-weighting of a set of frequencies.
Parameters
----------
frequencies : scalar or np.ndarray [shape=(n,)]
One or more frequencies (in Hz)
min_db : float [scalar] or None
Clip weights below this threshold.
If `None`, no clipping is performed.
Returns
-------
A_weighting : scalar or np.ndarray [shape=(n,)]
`A_weighting[i]` is the A-weighting of `frequencies[i]`
See Also
--------
perceptual_weighting
Examples
--------
Get the A-weighting for CQT frequencies
>>> import matplotlib.pyplot as plt
>>> freqs = librosa.cqt_frequencies(108, librosa.note_to_hz('C1'))
>>> aw = librosa.A_weighting(freqs)
>>> plt.plot(freqs, aw)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Weighting (log10)')
>>> plt.title('A-Weighting of CQT frequencies')
|
librosa/core/time_frequency.py
|
def A_weighting(frequencies, min_db=-80.0): # pylint: disable=invalid-name
'''Compute the A-weighting of a set of frequencies.
Parameters
----------
frequencies : scalar or np.ndarray [shape=(n,)]
One or more frequencies (in Hz)
min_db : float [scalar] or None
Clip weights below this threshold.
If `None`, no clipping is performed.
Returns
-------
A_weighting : scalar or np.ndarray [shape=(n,)]
`A_weighting[i]` is the A-weighting of `frequencies[i]`
See Also
--------
perceptual_weighting
Examples
--------
Get the A-weighting for CQT frequencies
>>> import matplotlib.pyplot as plt
>>> freqs = librosa.cqt_frequencies(108, librosa.note_to_hz('C1'))
>>> aw = librosa.A_weighting(freqs)
>>> plt.plot(freqs, aw)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Weighting (log10)')
>>> plt.title('A-Weighting of CQT frequencies')
'''
# Vectorize to make our lives easier
frequencies = np.asanyarray(frequencies)
# Pre-compute squared frequency
f_sq = frequencies**2.0
const = np.array([12200, 20.6, 107.7, 737.9])**2.0
weights = 2.0 + 20.0 * (np.log10(const[0]) + 4 * np.log10(frequencies)
- np.log10(f_sq + const[0])
- np.log10(f_sq + const[1])
- 0.5 * np.log10(f_sq + const[2])
- 0.5 * np.log10(f_sq + const[3]))
if min_db is not None:
weights = np.maximum(min_db, weights)
return weights
|
def A_weighting(frequencies, min_db=-80.0): # pylint: disable=invalid-name
'''Compute the A-weighting of a set of frequencies.
Parameters
----------
frequencies : scalar or np.ndarray [shape=(n,)]
One or more frequencies (in Hz)
min_db : float [scalar] or None
Clip weights below this threshold.
If `None`, no clipping is performed.
Returns
-------
A_weighting : scalar or np.ndarray [shape=(n,)]
`A_weighting[i]` is the A-weighting of `frequencies[i]`
See Also
--------
perceptual_weighting
Examples
--------
Get the A-weighting for CQT frequencies
>>> import matplotlib.pyplot as plt
>>> freqs = librosa.cqt_frequencies(108, librosa.note_to_hz('C1'))
>>> aw = librosa.A_weighting(freqs)
>>> plt.plot(freqs, aw)
>>> plt.xlabel('Frequency (Hz)')
>>> plt.ylabel('Weighting (log10)')
>>> plt.title('A-Weighting of CQT frequencies')
'''
# Vectorize to make our lives easier
frequencies = np.asanyarray(frequencies)
# Pre-compute squared frequency
f_sq = frequencies**2.0
const = np.array([12200, 20.6, 107.7, 737.9])**2.0
weights = 2.0 + 20.0 * (np.log10(const[0]) + 4 * np.log10(frequencies)
- np.log10(f_sq + const[0])
- np.log10(f_sq + const[1])
- 0.5 * np.log10(f_sq + const[2])
- 0.5 * np.log10(f_sq + const[3]))
if min_db is not None:
weights = np.maximum(min_db, weights)
return weights
|
[
"Compute",
"the",
"A",
"-",
"weighting",
"of",
"a",
"set",
"of",
"frequencies",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L957-L1011
|
[
"def",
"A_weighting",
"(",
"frequencies",
",",
"min_db",
"=",
"-",
"80.0",
")",
":",
"# pylint: disable=invalid-name",
"# Vectorize to make our lives easier",
"frequencies",
"=",
"np",
".",
"asanyarray",
"(",
"frequencies",
")",
"# Pre-compute squared frequency",
"f_sq",
"=",
"frequencies",
"**",
"2.0",
"const",
"=",
"np",
".",
"array",
"(",
"[",
"12200",
",",
"20.6",
",",
"107.7",
",",
"737.9",
"]",
")",
"**",
"2.0",
"weights",
"=",
"2.0",
"+",
"20.0",
"*",
"(",
"np",
".",
"log10",
"(",
"const",
"[",
"0",
"]",
")",
"+",
"4",
"*",
"np",
".",
"log10",
"(",
"frequencies",
")",
"-",
"np",
".",
"log10",
"(",
"f_sq",
"+",
"const",
"[",
"0",
"]",
")",
"-",
"np",
".",
"log10",
"(",
"f_sq",
"+",
"const",
"[",
"1",
"]",
")",
"-",
"0.5",
"*",
"np",
".",
"log10",
"(",
"f_sq",
"+",
"const",
"[",
"2",
"]",
")",
"-",
"0.5",
"*",
"np",
".",
"log10",
"(",
"f_sq",
"+",
"const",
"[",
"3",
"]",
")",
")",
"if",
"min_db",
"is",
"not",
"None",
":",
"weights",
"=",
"np",
".",
"maximum",
"(",
"min_db",
",",
"weights",
")",
"return",
"weights"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
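The weighting expression above is the standard A-weighting curve in dB, normalized to roughly 0 dB at 1 kHz; written out directly as a sketch:

```python
import numpy as np

def a_weighting_sketch(f, min_db=-80.0):
    f = np.asanyarray(f, dtype=float)
    f2 = f ** 2
    c = np.array([12200.0, 20.6, 107.7, 737.9]) ** 2
    w = 2.0 + 20.0 * (np.log10(c[0]) + 4 * np.log10(f)
                      - np.log10(f2 + c[0]) - np.log10(f2 + c[1])
                      - 0.5 * np.log10(f2 + c[2]) - 0.5 * np.log10(f2 + c[3]))
    return np.maximum(min_db, w) if min_db is not None else w

print(np.round(a_weighting_sketch(1000.0), 3))  # ~0.0 dB at 1 kHz
```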
test
|
times_like
|
Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
|
librosa/core/time_frequency.py
|
def times_like(X, sr=22050, hop_length=512, n_fft=None, axis=-1):
"""Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
"""
samples = samples_like(X, hop_length=hop_length, n_fft=n_fft, axis=axis)
return samples_to_time(samples, sr=sr)
|
def times_like(X, sr=22050, hop_length=512, n_fft=None, axis=-1):
"""Return an array of time values to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
sr : number > 0 [scalar]
audio sampling rate
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
times : np.ndarray [shape=(n,)]
ndarray of times (in seconds) corresponding to each frame of X.
See Also
--------
samples_like : Return an array of sample indices to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> times = librosa.times_like(X)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
Provide a scalar input:
>>> n_frames = 2647
>>> times = librosa.times_like(n_frames)
>>> times
array([ 0.00000000e+00, 2.32199546e-02, 4.64399093e-02, ...,
6.13935601e+01, 6.14167800e+01, 6.14400000e+01])
"""
samples = samples_like(X, hop_length=hop_length, n_fft=n_fft, axis=axis)
return samples_to_time(samples, sr=sr)
|
[
"Return",
"an",
"array",
"of",
"time",
"values",
"to",
"match",
"the",
"time",
"axis",
"from",
"a",
"feature",
"matrix",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L1014-L1067
|
[
"def",
"times_like",
"(",
"X",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"n_fft",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
")",
":",
"samples",
"=",
"samples_like",
"(",
"X",
",",
"hop_length",
"=",
"hop_length",
",",
"n_fft",
"=",
"n_fft",
",",
"axis",
"=",
"axis",
")",
"return",
"samples_to_time",
"(",
"samples",
",",
"sr",
"=",
"sr",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
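With the default centered framing, frame `i` maps to time `i * hop_length / sr`; a minimal sketch reproducing the doctest endpoints:

```python
import numpy as np

n_frames, sr, hop_length = 2647, 22050, 512
times = np.arange(n_frames) * hop_length / sr

print(times[:3])  # ~[0.0, 0.0232, 0.0464]
print(times[-1])  # 61.44 seconds, as in the doctest
```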
test
|
samples_like
|
Return an array of sample indices to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
samples : np.ndarray [shape=(n,)]
ndarray of sample indices corresponding to each frame of X.
See Also
--------
times_like : Return an array of time values to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> samples = librosa.samples_like(X)
>>> samples
array([ 0, 512, 1024, ..., 1353728, 1354240, 1354752])
Provide a scalar input:
>>> n_frames = 2647
>>> samples = librosa.samples_like(n_frames)
>>> samples
array([ 0, 512, 1024, ..., 1353728, 1354240, 1354752])
|
librosa/core/time_frequency.py
|
def samples_like(X, hop_length=512, n_fft=None, axis=-1):
"""Return an array of sample indices to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
samples : np.ndarray [shape=(n,)]
ndarray of sample indices corresponding to each frame of X.
See Also
--------
times_like : Return an array of time values to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> samples = librosa.samples_like(X)
>>> samples
array([ 0, 512, 1024, ..., 1353728, 1354240, 1354752])
Provide a scalar input:
>>> n_frames = 2647
>>> samples = librosa.samples_like(n_frames)
>>> samples
array([ 0, 512, 1024, ..., 1353728, 1354240, 1354752])
"""
if np.isscalar(X):
frames = np.arange(X)
else:
frames = np.arange(X.shape[axis])
return frames_to_samples(frames, hop_length=hop_length, n_fft=n_fft)
|
def samples_like(X, hop_length=512, n_fft=None, axis=-1):
"""Return an array of sample indices to match the time axis from a feature matrix.
Parameters
----------
X : np.ndarray or scalar
- If ndarray, X is a feature matrix, e.g. STFT, chromagram, or mel spectrogram.
- If scalar, X represents the number of frames.
hop_length : int > 0 [scalar]
number of samples between successive frames
n_fft : None or int > 0 [scalar]
Optional: length of the FFT window.
If given, time conversion will include an offset of `n_fft / 2`
to counteract windowing effects when using a non-centered STFT.
axis : int [scalar]
The axis representing the time axis of X.
By default, the last axis (-1) is taken.
Returns
-------
samples : np.ndarray [shape=(n,)]
ndarray of sample indices corresponding to each frame of X.
See Also
--------
times_like : Return an array of time values to match the time axis from a feature matrix.
Examples
--------
Provide a feature matrix input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> X = librosa.stft(y)
>>> samples = librosa.samples_like(X)
>>> samples
array([ 0, 512, 1024, ..., 1353728, 1354240, 1354752])
Provide a scalar input:
>>> n_frames = 2647
>>> samples = librosa.samples_like(n_frames)
>>> samples
array([ 0, 512, 1024, ..., 1353728, 1354240, 1354752])
"""
if np.isscalar(X):
frames = np.arange(X)
else:
frames = np.arange(X.shape[axis])
return frames_to_samples(frames, hop_length=hop_length, n_fft=n_fft)
|
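Illustrative aside (not part of the record above): the arithmetic behind `samples_like` for a scalar input is just `frame * hop_length`; per the docstring, passing `n_fft` adds an offset of `n_fft / 2` to every index. A minimal sketch, with the offset variant included only to show the described shift:

import numpy as np

hop_length = 512
n_frames = 2647

# Default case: the sample index of frame i is i * hop_length.
samples = np.arange(n_frames) * hop_length
print(samples[:3], samples[-1])     # [   0  512 1024] 1354752

# With n_fft given, the docstring describes an extra offset of n_fft / 2.
n_fft = 2048
samples_offset = np.arange(n_frames) * hop_length + n_fft // 2
print(samples_offset[:3])           # [1024 1536 2048]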
[
"Return",
"an",
"array",
"of",
"sample",
"indices",
"to",
"match",
"the",
"time",
"axis",
"from",
"a",
"feature",
"matrix",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/time_frequency.py#L1070-L1121
|
[
"def",
"samples_like",
"(",
"X",
",",
"hop_length",
"=",
"512",
",",
"n_fft",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
")",
":",
"if",
"np",
".",
"isscalar",
"(",
"X",
")",
":",
"frames",
"=",
"np",
".",
"arange",
"(",
"X",
")",
"else",
":",
"frames",
"=",
"np",
".",
"arange",
"(",
"X",
".",
"shape",
"[",
"axis",
"]",
")",
"return",
"frames_to_samples",
"(",
"frames",
",",
"hop_length",
"=",
"hop_length",
",",
"n_fft",
"=",
"n_fft",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
cqt
|
Compute the constant-Q transform of an audio signal.
This implementation is based on the recursive sub-sampling method
described by [1]_.
.. [1] Schoerkhuber, Christian, and Anssi Klapuri.
"Constant-Q transform toolbox for music processing."
7th Sound and Music Computing Conference, Barcelona, Spain. 2010.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
        If `True`, scale the CQT response by the square root of the length of
        each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to
`norm=None` in FFT.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
res_type : string [optional]
The resampling mode for recursive downsampling.
By default, `cqt` will adaptively select a resampling mode
which trades off accuracy at high frequencies for efficiency at low frequencies.
You can override this by specifying a resampling mode as supported by
`librosa.core.resample`. For example, `res_type='fft'` will use a high-quality,
but potentially slow FFT-based down-sampling, while `res_type='polyphase'` will
use a fast, but potentially inaccurate down-sampling.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.complex or np.float]
        Constant-Q value for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
See Also
--------
librosa.core.resample
librosa.util.normalize
Notes
-----
This function caches at level 20.
Examples
--------
Generate and plot a constant-Q power spectrum
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr))
>>> librosa.display.specshow(librosa.amplitude_to_db(C, ref=np.max),
... sr=sr, x_axis='time', y_axis='cqt_note')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrum')
>>> plt.tight_layout()
Limit the frequency range
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),
... n_bins=60))
>>> C
array([[ 8.827e-04, 9.293e-04, ..., 3.133e-07, 2.942e-07],
[ 1.076e-03, 1.068e-03, ..., 1.153e-06, 1.148e-06],
...,
[ 1.042e-07, 4.087e-07, ..., 1.612e-07, 1.928e-07],
[ 2.363e-07, 5.329e-07, ..., 1.294e-07, 1.611e-07]])
Using a higher frequency resolution
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),
... n_bins=60 * 2, bins_per_octave=12 * 2))
>>> C
array([[ 1.536e-05, 5.848e-05, ..., 3.241e-07, 2.453e-07],
[ 1.856e-03, 1.854e-03, ..., 2.397e-08, 3.549e-08],
...,
[ 2.034e-07, 4.245e-07, ..., 6.213e-08, 1.463e-07],
[ 4.896e-08, 5.407e-07, ..., 9.176e-08, 1.051e-07]])
|
librosa/core/constantq.py
|
def cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
bins_per_octave=12, tuning=0.0, filter_scale=1,
norm=1, sparsity=0.01, window='hann',
scale=True, pad_mode='reflect', res_type=None):
'''Compute the constant-Q transform of an audio signal.
This implementation is based on the recursive sub-sampling method
described by [1]_.
.. [1] Schoerkhuber, Christian, and Anssi Klapuri.
"Constant-Q transform toolbox for music processing."
7th Sound and Music Computing Conference, Barcelona, Spain. 2010.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
        If `True`, scale the CQT response by the square root of the length of
        each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to
`norm=None` in FFT.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
res_type : string [optional]
The resampling mode for recursive downsampling.
By default, `cqt` will adaptively select a resampling mode
which trades off accuracy at high frequencies for efficiency at low frequencies.
You can override this by specifying a resampling mode as supported by
`librosa.core.resample`. For example, `res_type='fft'` will use a high-quality,
but potentially slow FFT-based down-sampling, while `res_type='polyphase'` will
use a fast, but potentially inaccurate down-sampling.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.complex or np.float]
        Constant-Q value for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
See Also
--------
librosa.core.resample
librosa.util.normalize
Notes
-----
This function caches at level 20.
Examples
--------
Generate and plot a constant-Q power spectrum
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr))
>>> librosa.display.specshow(librosa.amplitude_to_db(C, ref=np.max),
... sr=sr, x_axis='time', y_axis='cqt_note')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrum')
>>> plt.tight_layout()
Limit the frequency range
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),
... n_bins=60))
>>> C
array([[ 8.827e-04, 9.293e-04, ..., 3.133e-07, 2.942e-07],
[ 1.076e-03, 1.068e-03, ..., 1.153e-06, 1.148e-06],
...,
[ 1.042e-07, 4.087e-07, ..., 1.612e-07, 1.928e-07],
[ 2.363e-07, 5.329e-07, ..., 1.294e-07, 1.611e-07]])
Using a higher frequency resolution
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),
... n_bins=60 * 2, bins_per_octave=12 * 2))
>>> C
array([[ 1.536e-05, 5.848e-05, ..., 3.241e-07, 2.453e-07],
[ 1.856e-03, 1.854e-03, ..., 2.397e-08, 3.549e-08],
...,
[ 2.034e-07, 4.245e-07, ..., 6.213e-08, 1.463e-07],
[ 4.896e-08, 5.407e-07, ..., 9.176e-08, 1.051e-07]])
'''
# How many octaves are we dealing with?
n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
n_filters = min(bins_per_octave, n_bins)
len_orig = len(y)
if fmin is None:
# C1 by default
fmin = note_to_hz('C1')
if tuning is None:
tuning = estimate_tuning(y=y, sr=sr)
# First thing, get the freqs of the top octave
freqs = cqt_frequencies(n_bins, fmin,
bins_per_octave=bins_per_octave, tuning=tuning)[-bins_per_octave:]
fmin_t = np.min(freqs)
fmax_t = np.max(freqs)
# Determine required resampling quality
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
filter_cutoff = fmax_t * (1 + 0.5 * filters.window_bandwidth(window) / Q)
nyquist = sr / 2.0
auto_resample = False
if not res_type:
auto_resample = True
if filter_cutoff < audio.BW_FASTEST * nyquist:
res_type = 'kaiser_fast'
else:
res_type = 'kaiser_best'
y, sr, hop_length = __early_downsample(y, sr, hop_length,
res_type,
n_octaves,
nyquist, filter_cutoff, scale)
cqt_resp = []
if auto_resample and res_type != 'kaiser_fast':
# Do the top octave before resampling to allow for fast resampling
fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin_t,
n_filters,
bins_per_octave,
tuning,
filter_scale,
norm,
sparsity,
window=window)
# Compute the CQT filter response and append it to the stack
cqt_resp.append(__cqt_response(y, n_fft, hop_length, fft_basis, pad_mode))
fmin_t /= 2
fmax_t /= 2
n_octaves -= 1
filter_cutoff = fmax_t * (1 + 0.5 * filters.window_bandwidth(window) / Q)
res_type = 'kaiser_fast'
# Make sure our hop is long enough to support the bottom octave
num_twos = __num_two_factors(hop_length)
if num_twos < n_octaves - 1:
raise ParameterError('hop_length must be a positive integer '
'multiple of 2^{0:d} for {1:d}-octave CQT'
.format(n_octaves - 1, n_octaves))
# Now do the recursive bit
fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin_t,
n_filters,
bins_per_octave,
tuning,
filter_scale,
norm,
sparsity,
window=window)
my_y, my_sr, my_hop = y, sr, hop_length
# Iterate down the octaves
for i in range(n_octaves):
# Resample (except first time)
if i > 0:
if len(my_y) < 2:
raise ParameterError('Input signal length={} is too short for '
'{:d}-octave CQT'.format(len_orig,
n_octaves))
my_y = audio.resample(my_y, 2, 1,
res_type=res_type,
scale=True)
            # Then re-scale the filters to compensate for downsampling
fft_basis[:] *= np.sqrt(2)
my_sr /= 2.0
my_hop //= 2
# Compute the cqt filter response and append to the stack
cqt_resp.append(__cqt_response(my_y, n_fft, my_hop, fft_basis, pad_mode))
C = __trim_stack(cqt_resp, n_bins)
if scale:
lengths = filters.constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
filter_scale=filter_scale)
C /= np.sqrt(lengths[:, np.newaxis])
return C
|
def cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
bins_per_octave=12, tuning=0.0, filter_scale=1,
norm=1, sparsity=0.01, window='hann',
scale=True, pad_mode='reflect', res_type=None):
'''Compute the constant-Q transform of an audio signal.
This implementation is based on the recursive sub-sampling method
described by [1]_.
.. [1] Schoerkhuber, Christian, and Anssi Klapuri.
"Constant-Q transform toolbox for music processing."
7th Sound and Music Computing Conference, Barcelona, Spain. 2010.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
        If `True`, scale the CQT response by the square root of the length of
        each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to
`norm=None` in FFT.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
res_type : string [optional]
The resampling mode for recursive downsampling.
By default, `cqt` will adaptively select a resampling mode
which trades off accuracy at high frequencies for efficiency at low frequencies.
You can override this by specifying a resampling mode as supported by
`librosa.core.resample`. For example, `res_type='fft'` will use a high-quality,
but potentially slow FFT-based down-sampling, while `res_type='polyphase'` will
use a fast, but potentially inaccurate down-sampling.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.complex or np.float]
        Constant-Q value for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
See Also
--------
librosa.core.resample
librosa.util.normalize
Notes
-----
This function caches at level 20.
Examples
--------
Generate and plot a constant-Q power spectrum
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr))
>>> librosa.display.specshow(librosa.amplitude_to_db(C, ref=np.max),
... sr=sr, x_axis='time', y_axis='cqt_note')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrum')
>>> plt.tight_layout()
Limit the frequency range
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),
... n_bins=60))
>>> C
array([[ 8.827e-04, 9.293e-04, ..., 3.133e-07, 2.942e-07],
[ 1.076e-03, 1.068e-03, ..., 1.153e-06, 1.148e-06],
...,
[ 1.042e-07, 4.087e-07, ..., 1.612e-07, 1.928e-07],
[ 2.363e-07, 5.329e-07, ..., 1.294e-07, 1.611e-07]])
Using a higher frequency resolution
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('C2'),
... n_bins=60 * 2, bins_per_octave=12 * 2))
>>> C
array([[ 1.536e-05, 5.848e-05, ..., 3.241e-07, 2.453e-07],
[ 1.856e-03, 1.854e-03, ..., 2.397e-08, 3.549e-08],
...,
[ 2.034e-07, 4.245e-07, ..., 6.213e-08, 1.463e-07],
[ 4.896e-08, 5.407e-07, ..., 9.176e-08, 1.051e-07]])
'''
# How many octaves are we dealing with?
n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
n_filters = min(bins_per_octave, n_bins)
len_orig = len(y)
if fmin is None:
# C1 by default
fmin = note_to_hz('C1')
if tuning is None:
tuning = estimate_tuning(y=y, sr=sr)
# First thing, get the freqs of the top octave
freqs = cqt_frequencies(n_bins, fmin,
bins_per_octave=bins_per_octave, tuning=tuning)[-bins_per_octave:]
fmin_t = np.min(freqs)
fmax_t = np.max(freqs)
# Determine required resampling quality
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
filter_cutoff = fmax_t * (1 + 0.5 * filters.window_bandwidth(window) / Q)
nyquist = sr / 2.0
auto_resample = False
if not res_type:
auto_resample = True
if filter_cutoff < audio.BW_FASTEST * nyquist:
res_type = 'kaiser_fast'
else:
res_type = 'kaiser_best'
y, sr, hop_length = __early_downsample(y, sr, hop_length,
res_type,
n_octaves,
nyquist, filter_cutoff, scale)
cqt_resp = []
if auto_resample and res_type != 'kaiser_fast':
# Do the top octave before resampling to allow for fast resampling
fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin_t,
n_filters,
bins_per_octave,
tuning,
filter_scale,
norm,
sparsity,
window=window)
# Compute the CQT filter response and append it to the stack
cqt_resp.append(__cqt_response(y, n_fft, hop_length, fft_basis, pad_mode))
fmin_t /= 2
fmax_t /= 2
n_octaves -= 1
filter_cutoff = fmax_t * (1 + 0.5 * filters.window_bandwidth(window) / Q)
res_type = 'kaiser_fast'
# Make sure our hop is long enough to support the bottom octave
num_twos = __num_two_factors(hop_length)
if num_twos < n_octaves - 1:
raise ParameterError('hop_length must be a positive integer '
'multiple of 2^{0:d} for {1:d}-octave CQT'
.format(n_octaves - 1, n_octaves))
# Now do the recursive bit
fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin_t,
n_filters,
bins_per_octave,
tuning,
filter_scale,
norm,
sparsity,
window=window)
my_y, my_sr, my_hop = y, sr, hop_length
# Iterate down the octaves
for i in range(n_octaves):
# Resample (except first time)
if i > 0:
if len(my_y) < 2:
raise ParameterError('Input signal length={} is too short for '
'{:d}-octave CQT'.format(len_orig,
n_octaves))
my_y = audio.resample(my_y, 2, 1,
res_type=res_type,
scale=True)
            # Then re-scale the filters to compensate for downsampling
fft_basis[:] *= np.sqrt(2)
my_sr /= 2.0
my_hop //= 2
# Compute the cqt filter response and append to the stack
cqt_resp.append(__cqt_response(my_y, n_fft, my_hop, fft_basis, pad_mode))
C = __trim_stack(cqt_resp, n_bins)
if scale:
lengths = filters.constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
filter_scale=filter_scale)
C /= np.sqrt(lengths[:, np.newaxis])
return C
|
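Illustrative aside (not from the source): the recursion above requires `hop_length` to carry at least `n_octaves - 1` factors of two, where `n_octaves = ceil(n_bins / bins_per_octave)`; otherwise the `ParameterError` shown in the code is raised. A hypothetical helper that restates that check:

import numpy as np

def check_cqt_hop(hop_length, n_bins=84, bins_per_octave=12):
    """Restate the hop-length divisibility check used inside cqt()."""
    n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
    required = 2 ** (n_octaves - 1)
    return n_octaves, required, hop_length % required == 0

# Defaults: 84 bins at 12 bins/octave -> 7 octaves, so hop_length must be a
# multiple of 2**6 = 64; the default hop of 512 passes.
print(check_cqt_hop(512))   # (7, 64, True)
print(check_cqt_hop(96))    # (7, 64, False): fails the stated requirement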
[
"Compute",
"the",
"constant",
"-",
"Q",
"transform",
"of",
"an",
"audio",
"signal",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L24-L278
|
[
"def",
"cqt",
"(",
"y",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"fmin",
"=",
"None",
",",
"n_bins",
"=",
"84",
",",
"bins_per_octave",
"=",
"12",
",",
"tuning",
"=",
"0.0",
",",
"filter_scale",
"=",
"1",
",",
"norm",
"=",
"1",
",",
"sparsity",
"=",
"0.01",
",",
"window",
"=",
"'hann'",
",",
"scale",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"res_type",
"=",
"None",
")",
":",
"# How many octaves are we dealing with?",
"n_octaves",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"float",
"(",
"n_bins",
")",
"/",
"bins_per_octave",
")",
")",
"n_filters",
"=",
"min",
"(",
"bins_per_octave",
",",
"n_bins",
")",
"len_orig",
"=",
"len",
"(",
"y",
")",
"if",
"fmin",
"is",
"None",
":",
"# C1 by default",
"fmin",
"=",
"note_to_hz",
"(",
"'C1'",
")",
"if",
"tuning",
"is",
"None",
":",
"tuning",
"=",
"estimate_tuning",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
")",
"# First thing, get the freqs of the top octave",
"freqs",
"=",
"cqt_frequencies",
"(",
"n_bins",
",",
"fmin",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
")",
"[",
"-",
"bins_per_octave",
":",
"]",
"fmin_t",
"=",
"np",
".",
"min",
"(",
"freqs",
")",
"fmax_t",
"=",
"np",
".",
"max",
"(",
"freqs",
")",
"# Determine required resampling quality",
"Q",
"=",
"float",
"(",
"filter_scale",
")",
"/",
"(",
"2.0",
"**",
"(",
"1.",
"/",
"bins_per_octave",
")",
"-",
"1",
")",
"filter_cutoff",
"=",
"fmax_t",
"*",
"(",
"1",
"+",
"0.5",
"*",
"filters",
".",
"window_bandwidth",
"(",
"window",
")",
"/",
"Q",
")",
"nyquist",
"=",
"sr",
"/",
"2.0",
"auto_resample",
"=",
"False",
"if",
"not",
"res_type",
":",
"auto_resample",
"=",
"True",
"if",
"filter_cutoff",
"<",
"audio",
".",
"BW_FASTEST",
"*",
"nyquist",
":",
"res_type",
"=",
"'kaiser_fast'",
"else",
":",
"res_type",
"=",
"'kaiser_best'",
"y",
",",
"sr",
",",
"hop_length",
"=",
"__early_downsample",
"(",
"y",
",",
"sr",
",",
"hop_length",
",",
"res_type",
",",
"n_octaves",
",",
"nyquist",
",",
"filter_cutoff",
",",
"scale",
")",
"cqt_resp",
"=",
"[",
"]",
"if",
"auto_resample",
"and",
"res_type",
"!=",
"'kaiser_fast'",
":",
"# Do the top octave before resampling to allow for fast resampling",
"fft_basis",
",",
"n_fft",
",",
"_",
"=",
"__cqt_filter_fft",
"(",
"sr",
",",
"fmin_t",
",",
"n_filters",
",",
"bins_per_octave",
",",
"tuning",
",",
"filter_scale",
",",
"norm",
",",
"sparsity",
",",
"window",
"=",
"window",
")",
"# Compute the CQT filter response and append it to the stack",
"cqt_resp",
".",
"append",
"(",
"__cqt_response",
"(",
"y",
",",
"n_fft",
",",
"hop_length",
",",
"fft_basis",
",",
"pad_mode",
")",
")",
"fmin_t",
"/=",
"2",
"fmax_t",
"/=",
"2",
"n_octaves",
"-=",
"1",
"filter_cutoff",
"=",
"fmax_t",
"*",
"(",
"1",
"+",
"0.5",
"*",
"filters",
".",
"window_bandwidth",
"(",
"window",
")",
"/",
"Q",
")",
"res_type",
"=",
"'kaiser_fast'",
"# Make sure our hop is long enough to support the bottom octave",
"num_twos",
"=",
"__num_two_factors",
"(",
"hop_length",
")",
"if",
"num_twos",
"<",
"n_octaves",
"-",
"1",
":",
"raise",
"ParameterError",
"(",
"'hop_length must be a positive integer '",
"'multiple of 2^{0:d} for {1:d}-octave CQT'",
".",
"format",
"(",
"n_octaves",
"-",
"1",
",",
"n_octaves",
")",
")",
"# Now do the recursive bit",
"fft_basis",
",",
"n_fft",
",",
"_",
"=",
"__cqt_filter_fft",
"(",
"sr",
",",
"fmin_t",
",",
"n_filters",
",",
"bins_per_octave",
",",
"tuning",
",",
"filter_scale",
",",
"norm",
",",
"sparsity",
",",
"window",
"=",
"window",
")",
"my_y",
",",
"my_sr",
",",
"my_hop",
"=",
"y",
",",
"sr",
",",
"hop_length",
"# Iterate down the octaves",
"for",
"i",
"in",
"range",
"(",
"n_octaves",
")",
":",
"# Resample (except first time)",
"if",
"i",
">",
"0",
":",
"if",
"len",
"(",
"my_y",
")",
"<",
"2",
":",
"raise",
"ParameterError",
"(",
"'Input signal length={} is too short for '",
"'{:d}-octave CQT'",
".",
"format",
"(",
"len_orig",
",",
"n_octaves",
")",
")",
"my_y",
"=",
"audio",
".",
"resample",
"(",
"my_y",
",",
"2",
",",
"1",
",",
"res_type",
"=",
"res_type",
",",
"scale",
"=",
"True",
")",
"# The re-scale the filters to compensate for downsampling",
"fft_basis",
"[",
":",
"]",
"*=",
"np",
".",
"sqrt",
"(",
"2",
")",
"my_sr",
"/=",
"2.0",
"my_hop",
"//=",
"2",
"# Compute the cqt filter response and append to the stack",
"cqt_resp",
".",
"append",
"(",
"__cqt_response",
"(",
"my_y",
",",
"n_fft",
",",
"my_hop",
",",
"fft_basis",
",",
"pad_mode",
")",
")",
"C",
"=",
"__trim_stack",
"(",
"cqt_resp",
",",
"n_bins",
")",
"if",
"scale",
":",
"lengths",
"=",
"filters",
".",
"constant_q_lengths",
"(",
"sr",
",",
"fmin",
",",
"n_bins",
"=",
"n_bins",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
",",
"window",
"=",
"window",
",",
"filter_scale",
"=",
"filter_scale",
")",
"C",
"/=",
"np",
".",
"sqrt",
"(",
"lengths",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"return",
"C"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
hybrid_cqt
|
Compute the hybrid constant-Q transform of an audio signal.
Here, the hybrid CQT uses the pseudo CQT for higher frequencies where
the hop_length is longer than half the filter length and the full CQT
for lower frequencies.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
        Filter scale factor. Larger values use longer windows.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
res_type : string
Resampling mode. See `librosa.core.cqt` for details.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]
Constant-Q energy for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
See Also
--------
cqt
pseudo_cqt
Notes
-----
This function caches at level 20.
|
librosa/core/constantq.py
|
def hybrid_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
bins_per_octave=12, tuning=0.0, filter_scale=1,
norm=1, sparsity=0.01, window='hann', scale=True,
pad_mode='reflect', res_type=None):
'''Compute the hybrid constant-Q transform of an audio signal.
Here, the hybrid CQT uses the pseudo CQT for higher frequencies where
the hop_length is longer than half the filter length and the full CQT
for lower frequencies.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
        Filter scale factor. Larger values use longer windows.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
res_type : string
Resampling mode. See `librosa.core.cqt` for details.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]
Constant-Q energy for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
See Also
--------
cqt
pseudo_cqt
Notes
-----
This function caches at level 20.
'''
if fmin is None:
# C1 by default
fmin = note_to_hz('C1')
if tuning is None:
tuning = estimate_tuning(y=y, sr=sr)
# Get all CQT frequencies
freqs = cqt_frequencies(n_bins, fmin,
bins_per_octave=bins_per_octave,
tuning=tuning)
# Compute the length of each constant-Q basis function
lengths = filters.constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
window=window)
# Determine which filters to use with Pseudo CQT
# These are the ones that fit within 2 hop lengths after padding
pseudo_filters = 2.0**np.ceil(np.log2(lengths)) < 2 * hop_length
n_bins_pseudo = int(np.sum(pseudo_filters))
n_bins_full = n_bins - n_bins_pseudo
cqt_resp = []
if n_bins_pseudo > 0:
fmin_pseudo = np.min(freqs[pseudo_filters])
cqt_resp.append(pseudo_cqt(y, sr,
hop_length=hop_length,
fmin=fmin_pseudo,
n_bins=n_bins_pseudo,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
norm=norm,
sparsity=sparsity,
window=window,
scale=scale,
pad_mode=pad_mode))
if n_bins_full > 0:
cqt_resp.append(np.abs(cqt(y, sr,
hop_length=hop_length,
fmin=fmin,
n_bins=n_bins_full,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
norm=norm,
sparsity=sparsity,
window=window,
scale=scale,
pad_mode=pad_mode,
res_type=res_type)))
return __trim_stack(cqt_resp, n_bins)
|
def hybrid_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
bins_per_octave=12, tuning=0.0, filter_scale=1,
norm=1, sparsity=0.01, window='hann', scale=True,
pad_mode='reflect', res_type=None):
'''Compute the hybrid constant-Q transform of an audio signal.
Here, the hybrid CQT uses the pseudo CQT for higher frequencies where
the hop_length is longer than half the filter length and the full CQT
for lower frequencies.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
        Filter scale factor. Larger values use longer windows.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
res_type : string
Resampling mode. See `librosa.core.cqt` for details.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]
Constant-Q energy for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
See Also
--------
cqt
pseudo_cqt
Notes
-----
This function caches at level 20.
'''
if fmin is None:
# C1 by default
fmin = note_to_hz('C1')
if tuning is None:
tuning = estimate_tuning(y=y, sr=sr)
# Get all CQT frequencies
freqs = cqt_frequencies(n_bins, fmin,
bins_per_octave=bins_per_octave,
tuning=tuning)
# Compute the length of each constant-Q basis function
lengths = filters.constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
window=window)
# Determine which filters to use with Pseudo CQT
# These are the ones that fit within 2 hop lengths after padding
pseudo_filters = 2.0**np.ceil(np.log2(lengths)) < 2 * hop_length
n_bins_pseudo = int(np.sum(pseudo_filters))
n_bins_full = n_bins - n_bins_pseudo
cqt_resp = []
if n_bins_pseudo > 0:
fmin_pseudo = np.min(freqs[pseudo_filters])
cqt_resp.append(pseudo_cqt(y, sr,
hop_length=hop_length,
fmin=fmin_pseudo,
n_bins=n_bins_pseudo,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
norm=norm,
sparsity=sparsity,
window=window,
scale=scale,
pad_mode=pad_mode))
if n_bins_full > 0:
cqt_resp.append(np.abs(cqt(y, sr,
hop_length=hop_length,
fmin=fmin,
n_bins=n_bins_full,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
norm=norm,
sparsity=sparsity,
window=window,
scale=scale,
pad_mode=pad_mode,
res_type=res_type)))
return __trim_stack(cqt_resp, n_bins)
|
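Illustrative aside (not from the source): the bin split in `hybrid_cqt` follows the rule visible in the code above, routing a bin to the pseudo CQT when the power-of-two padding of its filter length fits inside two hop lengths. A sketch that recomputes the split for the default settings; the exact counts depend on the window, tuning, and filter_scale:

import numpy as np
import librosa

sr = 22050
hop_length = 512
n_bins = 84
bins_per_octave = 12
fmin = librosa.note_to_hz('C1')

# Length (in samples) of each constant-Q basis filter, as in hybrid_cqt().
lengths = librosa.filters.constant_q_lengths(sr, fmin, n_bins=n_bins,
                                             bins_per_octave=bins_per_octave)

# A bin uses the pseudo CQT when its zero-padded filter fits in two hops.
pseudo = 2.0 ** np.ceil(np.log2(lengths)) < 2 * hop_length
print('pseudo-CQT bins:', int(pseudo.sum()),
      '| full-CQT bins:', int(n_bins - pseudo.sum()))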
[
"Compute",
"the",
"hybrid",
"constant",
"-",
"Q",
"transform",
"of",
"an",
"audio",
"signal",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L282-L422
|
[
"def",
"hybrid_cqt",
"(",
"y",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"fmin",
"=",
"None",
",",
"n_bins",
"=",
"84",
",",
"bins_per_octave",
"=",
"12",
",",
"tuning",
"=",
"0.0",
",",
"filter_scale",
"=",
"1",
",",
"norm",
"=",
"1",
",",
"sparsity",
"=",
"0.01",
",",
"window",
"=",
"'hann'",
",",
"scale",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"res_type",
"=",
"None",
")",
":",
"if",
"fmin",
"is",
"None",
":",
"# C1 by default",
"fmin",
"=",
"note_to_hz",
"(",
"'C1'",
")",
"if",
"tuning",
"is",
"None",
":",
"tuning",
"=",
"estimate_tuning",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
")",
"# Get all CQT frequencies",
"freqs",
"=",
"cqt_frequencies",
"(",
"n_bins",
",",
"fmin",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
")",
"# Compute the length of each constant-Q basis function",
"lengths",
"=",
"filters",
".",
"constant_q_lengths",
"(",
"sr",
",",
"fmin",
",",
"n_bins",
"=",
"n_bins",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
",",
"filter_scale",
"=",
"filter_scale",
",",
"window",
"=",
"window",
")",
"# Determine which filters to use with Pseudo CQT",
"# These are the ones that fit within 2 hop lengths after padding",
"pseudo_filters",
"=",
"2.0",
"**",
"np",
".",
"ceil",
"(",
"np",
".",
"log2",
"(",
"lengths",
")",
")",
"<",
"2",
"*",
"hop_length",
"n_bins_pseudo",
"=",
"int",
"(",
"np",
".",
"sum",
"(",
"pseudo_filters",
")",
")",
"n_bins_full",
"=",
"n_bins",
"-",
"n_bins_pseudo",
"cqt_resp",
"=",
"[",
"]",
"if",
"n_bins_pseudo",
">",
"0",
":",
"fmin_pseudo",
"=",
"np",
".",
"min",
"(",
"freqs",
"[",
"pseudo_filters",
"]",
")",
"cqt_resp",
".",
"append",
"(",
"pseudo_cqt",
"(",
"y",
",",
"sr",
",",
"hop_length",
"=",
"hop_length",
",",
"fmin",
"=",
"fmin_pseudo",
",",
"n_bins",
"=",
"n_bins_pseudo",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
",",
"filter_scale",
"=",
"filter_scale",
",",
"norm",
"=",
"norm",
",",
"sparsity",
"=",
"sparsity",
",",
"window",
"=",
"window",
",",
"scale",
"=",
"scale",
",",
"pad_mode",
"=",
"pad_mode",
")",
")",
"if",
"n_bins_full",
">",
"0",
":",
"cqt_resp",
".",
"append",
"(",
"np",
".",
"abs",
"(",
"cqt",
"(",
"y",
",",
"sr",
",",
"hop_length",
"=",
"hop_length",
",",
"fmin",
"=",
"fmin",
",",
"n_bins",
"=",
"n_bins_full",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
",",
"filter_scale",
"=",
"filter_scale",
",",
"norm",
"=",
"norm",
",",
"sparsity",
"=",
"sparsity",
",",
"window",
"=",
"window",
",",
"scale",
"=",
"scale",
",",
"pad_mode",
"=",
"pad_mode",
",",
"res_type",
"=",
"res_type",
")",
")",
")",
"return",
"__trim_stack",
"(",
"cqt_resp",
",",
"n_bins",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
pseudo_cqt
|
Compute the pseudo constant-Q transform of an audio signal.
This uses a single fft size that is the smallest power of 2 that is greater
than or equal to the max of:
1. The longest CQT filter
2. 2x the hop_length
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
        Filter scale factor. Larger values use longer windows.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]
Pseudo Constant-Q energy for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
Notes
-----
This function caches at level 20.
|
librosa/core/constantq.py
|
def pseudo_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
bins_per_octave=12, tuning=0.0, filter_scale=1,
norm=1, sparsity=0.01, window='hann', scale=True,
pad_mode='reflect'):
'''Compute the pseudo constant-Q transform of an audio signal.
This uses a single fft size that is the smallest power of 2 that is greater
than or equal to the max of:
1. The longest CQT filter
2. 2x the hop_length
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
        Filter scale factor. Larger values use longer windows.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]
Pseudo Constant-Q energy for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
Notes
-----
This function caches at level 20.
'''
if fmin is None:
# C1 by default
fmin = note_to_hz('C1')
if tuning is None:
tuning = estimate_tuning(y=y, sr=sr)
fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin, n_bins,
bins_per_octave,
tuning, filter_scale,
norm, sparsity,
hop_length=hop_length,
window=window)
fft_basis = np.abs(fft_basis)
# Compute the magnitude STFT with Hann window
D = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length, pad_mode=pad_mode))
# Project onto the pseudo-cqt basis
C = fft_basis.dot(D)
if scale:
C /= np.sqrt(n_fft)
else:
lengths = filters.constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
filter_scale=filter_scale)
C *= np.sqrt(lengths[:, np.newaxis] / n_fft)
return C
|
def pseudo_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84,
bins_per_octave=12, tuning=0.0, filter_scale=1,
norm=1, sparsity=0.01, window='hann', scale=True,
pad_mode='reflect'):
'''Compute the pseudo constant-Q transform of an audio signal.
This uses a single fft size that is the smallest power of 2 that is greater
than or equal to the max of:
1. The longest CQT filter
2. 2x the hop_length
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
hop_length : int > 0 [scalar]
number of samples between successive CQT columns.
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
n_bins : int > 0 [scalar]
Number of frequency bins, starting at `fmin`
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : None or float in `[-0.5, 0.5)`
Tuning offset in fractions of a bin (cents).
If `None`, tuning will be automatically estimated from the signal.
filter_scale : float > 0
        Filter scale factor. Larger values use longer windows.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
pad_mode : string
Padding mode for centered frame analysis.
See also: `librosa.core.stft` and `np.pad`.
Returns
-------
CQT : np.ndarray [shape=(n_bins, t), dtype=np.float]
Pseudo Constant-Q energy for each frequency at each time.
Raises
------
ParameterError
If `hop_length` is not an integer multiple of
`2**(n_bins / bins_per_octave)`
Or if `y` is too short to support the frequency range of the CQT.
Notes
-----
This function caches at level 20.
'''
if fmin is None:
# C1 by default
fmin = note_to_hz('C1')
if tuning is None:
tuning = estimate_tuning(y=y, sr=sr)
fft_basis, n_fft, _ = __cqt_filter_fft(sr, fmin, n_bins,
bins_per_octave,
tuning, filter_scale,
norm, sparsity,
hop_length=hop_length,
window=window)
fft_basis = np.abs(fft_basis)
# Compute the magnitude STFT with Hann window
D = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length, pad_mode=pad_mode))
# Project onto the pseudo-cqt basis
C = fft_basis.dot(D)
if scale:
C /= np.sqrt(n_fft)
else:
lengths = filters.constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
filter_scale=filter_scale)
C *= np.sqrt(lengths[:, np.newaxis] / n_fft)
return C
|
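Illustrative aside (not from the source): the FFT-size rule stated in the pseudo_cqt docstring picks the smallest power of two covering both the longest constant-Q filter and twice the hop length. A sketch of that rule under the default settings; treat it as the docstring's description rather than an exact re-implementation of the internal helper:

import numpy as np
import librosa

sr = 22050
hop_length = 512
fmin = librosa.note_to_hz('C1')

# Longest constant-Q filter for the default 84-bin, 12-bins-per-octave setup.
lengths = librosa.filters.constant_q_lengths(sr, fmin, n_bins=84,
                                             bins_per_octave=12)
max_len = lengths.max()

# Smallest power of two >= max(longest filter, 2 * hop_length).
n_fft = int(2.0 ** np.ceil(np.log2(max(max_len, 2 * hop_length))))
print(max_len, n_fft)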
[
"Compute",
"the",
"pseudo",
"constant",
"-",
"Q",
"transform",
"of",
"an",
"audio",
"signal",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L426-L534
|
[
"def",
"pseudo_cqt",
"(",
"y",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"fmin",
"=",
"None",
",",
"n_bins",
"=",
"84",
",",
"bins_per_octave",
"=",
"12",
",",
"tuning",
"=",
"0.0",
",",
"filter_scale",
"=",
"1",
",",
"norm",
"=",
"1",
",",
"sparsity",
"=",
"0.01",
",",
"window",
"=",
"'hann'",
",",
"scale",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
")",
":",
"if",
"fmin",
"is",
"None",
":",
"# C1 by default",
"fmin",
"=",
"note_to_hz",
"(",
"'C1'",
")",
"if",
"tuning",
"is",
"None",
":",
"tuning",
"=",
"estimate_tuning",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
")",
"fft_basis",
",",
"n_fft",
",",
"_",
"=",
"__cqt_filter_fft",
"(",
"sr",
",",
"fmin",
",",
"n_bins",
",",
"bins_per_octave",
",",
"tuning",
",",
"filter_scale",
",",
"norm",
",",
"sparsity",
",",
"hop_length",
"=",
"hop_length",
",",
"window",
"=",
"window",
")",
"fft_basis",
"=",
"np",
".",
"abs",
"(",
"fft_basis",
")",
"# Compute the magnitude STFT with Hann window",
"D",
"=",
"np",
".",
"abs",
"(",
"stft",
"(",
"y",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"pad_mode",
"=",
"pad_mode",
")",
")",
"# Project onto the pseudo-cqt basis",
"C",
"=",
"fft_basis",
".",
"dot",
"(",
"D",
")",
"if",
"scale",
":",
"C",
"/=",
"np",
".",
"sqrt",
"(",
"n_fft",
")",
"else",
":",
"lengths",
"=",
"filters",
".",
"constant_q_lengths",
"(",
"sr",
",",
"fmin",
",",
"n_bins",
"=",
"n_bins",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
",",
"window",
"=",
"window",
",",
"filter_scale",
"=",
"filter_scale",
")",
"C",
"*=",
"np",
".",
"sqrt",
"(",
"lengths",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"/",
"n_fft",
")",
"return",
"C"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
icqt
|
Compute the inverse constant-Q transform.
Given a constant-Q transform representation `C` of an audio signal `y`,
this function produces an approximation `y_hat`.
Parameters
----------
C : np.ndarray, [shape=(n_bins, n_frames)]
Constant-Q representation as produced by `core.cqt`
hop_length : int > 0 [scalar]
number of samples between successive frames
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
tuning : float in `[-0.5, 0.5)` [scalar]
Tuning offset in fractions of a bin (cents).
filter_scale : float > 0 [scalar]
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
        If `True`, scale the CQT response by the square root of the length
        of each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to `norm=None`
in FFT.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
amin : float or None [DEPRECATED]
.. note:: This parameter is deprecated in 0.7.0 and will be removed in 0.8.0.
res_type : string
Resampling mode. By default, this uses `fft` mode for high-quality
reconstruction, but this may be slow depending on your signal duration.
See `librosa.resample` for supported modes.
Returns
-------
y : np.ndarray, [shape=(n_samples), dtype=np.float]
Audio time-series reconstructed from the CQT representation.
See Also
--------
cqt
core.resample
Notes
-----
This function caches at level 40.
Examples
--------
Using default parameters
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> C = librosa.cqt(y=y, sr=sr)
>>> y_hat = librosa.icqt(C=C, sr=sr)
Or with a different hop length and frequency resolution:
>>> hop_length = 256
>>> bins_per_octave = 12 * 3
>>> C = librosa.cqt(y=y, sr=sr, hop_length=256, n_bins=7*bins_per_octave,
... bins_per_octave=bins_per_octave)
>>> y_hat = librosa.icqt(C=C, sr=sr, hop_length=hop_length,
... bins_per_octave=bins_per_octave)
|
librosa/core/constantq.py
|
def icqt(C, sr=22050, hop_length=512, fmin=None, bins_per_octave=12,
tuning=0.0, filter_scale=1, norm=1, sparsity=0.01, window='hann',
scale=True, length=None, amin=util.Deprecated(), res_type='fft'):
'''Compute the inverse constant-Q transform.
Given a constant-Q transform representation `C` of an audio signal `y`,
this function produces an approximation `y_hat`.
Parameters
----------
C : np.ndarray, [shape=(n_bins, n_frames)]
Constant-Q representation as produced by `core.cqt`
hop_length : int > 0 [scalar]
number of samples between successive frames
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
tuning : float in `[-0.5, 0.5)` [scalar]
Tuning offset in fractions of a bin (cents).
filter_scale : float > 0 [scalar]
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
        If `True`, scale the CQT response by the square root of the length
        of each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to `norm=None`
in FFT.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
amin : float or None [DEPRECATED]
.. note:: This parameter is deprecated in 0.7.0 and will be removed in 0.8.0.
res_type : string
Resampling mode. By default, this uses `fft` mode for high-quality
reconstruction, but this may be slow depending on your signal duration.
See `librosa.resample` for supported modes.
Returns
-------
y : np.ndarray, [shape=(n_samples), dtype=np.float]
Audio time-series reconstructed from the CQT representation.
See Also
--------
cqt
core.resample
Notes
-----
This function caches at level 40.
Examples
--------
Using default parameters
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> C = librosa.cqt(y=y, sr=sr)
>>> y_hat = librosa.icqt(C=C, sr=sr)
Or with a different hop length and frequency resolution:
>>> hop_length = 256
>>> bins_per_octave = 12 * 3
>>> C = librosa.cqt(y=y, sr=sr, hop_length=256, n_bins=7*bins_per_octave,
... bins_per_octave=bins_per_octave)
>>> y_hat = librosa.icqt(C=C, sr=sr, hop_length=hop_length,
... bins_per_octave=bins_per_octave)
'''
if fmin is None:
fmin = note_to_hz('C1')
# Get the top octave of frequencies
n_bins = len(C)
freqs = cqt_frequencies(n_bins, fmin,
bins_per_octave=bins_per_octave,
tuning=tuning)[-bins_per_octave:]
n_filters = min(n_bins, bins_per_octave)
fft_basis, n_fft, lengths = __cqt_filter_fft(sr, np.min(freqs),
n_filters,
bins_per_octave,
tuning,
filter_scale,
norm,
sparsity=sparsity,
window=window)
if hop_length > min(lengths):
warnings.warn('hop_length={} exceeds minimum CQT filter length={:.3f}.\n'
'This will probably cause unpleasant acoustic artifacts. '
'Consider decreasing your hop length or increasing the frequency resolution of your CQT.'.format(hop_length, min(lengths)))
# The basis gets renormalized by the effective window length above;
# This step undoes that
fft_basis = fft_basis.todense() * n_fft / lengths[:, np.newaxis]
# This step conjugate-transposes the filter
inv_basis = fft_basis.H
# How many octaves do we have?
n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
y = None
for octave in range(n_octaves - 1, -1, -1):
slice_ = slice(-(octave+1) * bins_per_octave - 1,
-(octave) * bins_per_octave - 1)
# Slice this octave
C_oct = C[slice_]
inv_oct = inv_basis[:, -C_oct.shape[0]:]
oct_hop = hop_length // 2**octave
# Apply energy corrections
if scale:
C_scale = np.sqrt(lengths[-C_oct.shape[0]:, np.newaxis]) / n_fft
else:
C_scale = lengths[-C_oct.shape[0]:, np.newaxis] * np.sqrt(2**octave) / n_fft
# Inverse-project the basis for each octave
D_oct = inv_oct.dot(C_oct / C_scale)
# Inverse-STFT that response
y_oct = istft(D_oct, window='ones', hop_length=oct_hop)
# Up-sample that octave
if y is None:
y = y_oct
else:
# Up-sample the previous buffer and add in the new one
# Scipy-resampling is fast here, since it's a power-of-two relation
y = audio.resample(y, 1, 2, scale=True, res_type=res_type, fix=False)
y[:len(y_oct)] += y_oct
if length:
y = util.fix_length(y, length)
return y
|
def icqt(C, sr=22050, hop_length=512, fmin=None, bins_per_octave=12,
tuning=0.0, filter_scale=1, norm=1, sparsity=0.01, window='hann',
scale=True, length=None, amin=util.Deprecated(), res_type='fft'):
'''Compute the inverse constant-Q transform.
Given a constant-Q transform representation `C` of an audio signal `y`,
this function produces an approximation `y_hat`.
Parameters
----------
C : np.ndarray, [shape=(n_bins, n_frames)]
Constant-Q representation as produced by `core.cqt`
hop_length : int > 0 [scalar]
number of samples between successive frames
fmin : float > 0 [scalar]
Minimum frequency. Defaults to C1 ~= 32.70 Hz
tuning : float in `[-0.5, 0.5)` [scalar]
Tuning offset in fractions of a bin (cents).
filter_scale : float > 0 [scalar]
Filter scale factor. Small values (<1) use shorter windows
for improved time resolution.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See `librosa.util.normalize`.
sparsity : float in [0, 1)
Sparsify the CQT basis by discarding up to `sparsity`
fraction of the energy in each basis.
Set `sparsity=0` to disable sparsification.
window : str, tuple, number, or function
Window specification for the basis filters.
See `filters.get_window` for details.
scale : bool
        If `True`, scale the CQT response by the square root of the length
        of each channel's filter. This is analogous to `norm='ortho'` in FFT.
If `False`, do not scale the CQT. This is analogous to `norm=None`
in FFT.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
amin : float or None [DEPRECATED]
.. note:: This parameter is deprecated in 0.7.0 and will be removed in 0.8.0.
res_type : string
Resampling mode. By default, this uses `fft` mode for high-quality
reconstruction, but this may be slow depending on your signal duration.
See `librosa.resample` for supported modes.
Returns
-------
y : np.ndarray, [shape=(n_samples), dtype=np.float]
Audio time-series reconstructed from the CQT representation.
See Also
--------
cqt
core.resample
Notes
-----
This function caches at level 40.
Examples
--------
Using default parameters
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> C = librosa.cqt(y=y, sr=sr)
>>> y_hat = librosa.icqt(C=C, sr=sr)
Or with a different hop length and frequency resolution:
>>> hop_length = 256
>>> bins_per_octave = 12 * 3
>>> C = librosa.cqt(y=y, sr=sr, hop_length=256, n_bins=7*bins_per_octave,
... bins_per_octave=bins_per_octave)
>>> y_hat = librosa.icqt(C=C, sr=sr, hop_length=hop_length,
... bins_per_octave=bins_per_octave)
'''
if fmin is None:
fmin = note_to_hz('C1')
# Get the top octave of frequencies
n_bins = len(C)
freqs = cqt_frequencies(n_bins, fmin,
bins_per_octave=bins_per_octave,
tuning=tuning)[-bins_per_octave:]
n_filters = min(n_bins, bins_per_octave)
fft_basis, n_fft, lengths = __cqt_filter_fft(sr, np.min(freqs),
n_filters,
bins_per_octave,
tuning,
filter_scale,
norm,
sparsity=sparsity,
window=window)
if hop_length > min(lengths):
warnings.warn('hop_length={} exceeds minimum CQT filter length={:.3f}.\n'
'This will probably cause unpleasant acoustic artifacts. '
'Consider decreasing your hop length or increasing the frequency resolution of your CQT.'.format(hop_length, min(lengths)))
# The basis gets renormalized by the effective window length above;
# This step undoes that
fft_basis = fft_basis.todense() * n_fft / lengths[:, np.newaxis]
# This step conjugate-transposes the filter
inv_basis = fft_basis.H
# How many octaves do we have?
n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
y = None
for octave in range(n_octaves - 1, -1, -1):
slice_ = slice(-(octave+1) * bins_per_octave - 1,
-(octave) * bins_per_octave - 1)
# Slice this octave
C_oct = C[slice_]
inv_oct = inv_basis[:, -C_oct.shape[0]:]
oct_hop = hop_length // 2**octave
# Apply energy corrections
if scale:
C_scale = np.sqrt(lengths[-C_oct.shape[0]:, np.newaxis]) / n_fft
else:
C_scale = lengths[-C_oct.shape[0]:, np.newaxis] * np.sqrt(2**octave) / n_fft
# Inverse-project the basis for each octave
D_oct = inv_oct.dot(C_oct / C_scale)
# Inverse-STFT that response
y_oct = istft(D_oct, window='ones', hop_length=oct_hop)
# Up-sample that octave
if y is None:
y = y_oct
else:
# Up-sample the previous buffer and add in the new one
# Scipy-resampling is fast here, since it's a power-of-two relation
y = audio.resample(y, 1, 2, scale=True, res_type=res_type, fix=False)
y[:len(y_oct)] += y_oct
if length:
y = util.fix_length(y, length)
return y
|
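Illustrative aside (not from the source): a round-trip usage sketch following the icqt docstring examples. Reconstruction is an approximation, so the error is only expected to be small, not zero:

import numpy as np
import librosa

# A short excerpt keeps the example quick.
y, sr = librosa.load(librosa.util.example_audio_file(), duration=5.0)

C = librosa.cqt(y=y, sr=sr)
y_hat = librosa.icqt(C=C, sr=sr, length=len(y))

# icqt approximates the original signal rather than inverting it exactly.
print('mean absolute reconstruction error:', np.mean(np.abs(y - y_hat)))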
[
"Compute",
"the",
"inverse",
"constant",
"-",
"Q",
"transform",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L538-L703
|
[
"def",
"icqt",
"(",
"C",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"fmin",
"=",
"None",
",",
"bins_per_octave",
"=",
"12",
",",
"tuning",
"=",
"0.0",
",",
"filter_scale",
"=",
"1",
",",
"norm",
"=",
"1",
",",
"sparsity",
"=",
"0.01",
",",
"window",
"=",
"'hann'",
",",
"scale",
"=",
"True",
",",
"length",
"=",
"None",
",",
"amin",
"=",
"util",
".",
"Deprecated",
"(",
")",
",",
"res_type",
"=",
"'fft'",
")",
":",
"if",
"fmin",
"is",
"None",
":",
"fmin",
"=",
"note_to_hz",
"(",
"'C1'",
")",
"# Get the top octave of frequencies",
"n_bins",
"=",
"len",
"(",
"C",
")",
"freqs",
"=",
"cqt_frequencies",
"(",
"n_bins",
",",
"fmin",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
")",
"[",
"-",
"bins_per_octave",
":",
"]",
"n_filters",
"=",
"min",
"(",
"n_bins",
",",
"bins_per_octave",
")",
"fft_basis",
",",
"n_fft",
",",
"lengths",
"=",
"__cqt_filter_fft",
"(",
"sr",
",",
"np",
".",
"min",
"(",
"freqs",
")",
",",
"n_filters",
",",
"bins_per_octave",
",",
"tuning",
",",
"filter_scale",
",",
"norm",
",",
"sparsity",
"=",
"sparsity",
",",
"window",
"=",
"window",
")",
"if",
"hop_length",
">",
"min",
"(",
"lengths",
")",
":",
"warnings",
".",
"warn",
"(",
"'hop_length={} exceeds minimum CQT filter length={:.3f}.\\n'",
"'This will probably cause unpleasant acoustic artifacts. '",
"'Consider decreasing your hop length or increasing the frequency resolution of your CQT.'",
".",
"format",
"(",
"hop_length",
",",
"min",
"(",
"lengths",
")",
")",
")",
"# The basis gets renormalized by the effective window length above;",
"# This step undoes that",
"fft_basis",
"=",
"fft_basis",
".",
"todense",
"(",
")",
"*",
"n_fft",
"/",
"lengths",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"# This step conjugate-transposes the filter",
"inv_basis",
"=",
"fft_basis",
".",
"H",
"# How many octaves do we have?",
"n_octaves",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"float",
"(",
"n_bins",
")",
"/",
"bins_per_octave",
")",
")",
"y",
"=",
"None",
"for",
"octave",
"in",
"range",
"(",
"n_octaves",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"slice_",
"=",
"slice",
"(",
"-",
"(",
"octave",
"+",
"1",
")",
"*",
"bins_per_octave",
"-",
"1",
",",
"-",
"(",
"octave",
")",
"*",
"bins_per_octave",
"-",
"1",
")",
"# Slice this octave",
"C_oct",
"=",
"C",
"[",
"slice_",
"]",
"inv_oct",
"=",
"inv_basis",
"[",
":",
",",
"-",
"C_oct",
".",
"shape",
"[",
"0",
"]",
":",
"]",
"oct_hop",
"=",
"hop_length",
"//",
"2",
"**",
"octave",
"# Apply energy corrections",
"if",
"scale",
":",
"C_scale",
"=",
"np",
".",
"sqrt",
"(",
"lengths",
"[",
"-",
"C_oct",
".",
"shape",
"[",
"0",
"]",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"/",
"n_fft",
"else",
":",
"C_scale",
"=",
"lengths",
"[",
"-",
"C_oct",
".",
"shape",
"[",
"0",
"]",
":",
",",
"np",
".",
"newaxis",
"]",
"*",
"np",
".",
"sqrt",
"(",
"2",
"**",
"octave",
")",
"/",
"n_fft",
"# Inverse-project the basis for each octave",
"D_oct",
"=",
"inv_oct",
".",
"dot",
"(",
"C_oct",
"/",
"C_scale",
")",
"# Inverse-STFT that response",
"y_oct",
"=",
"istft",
"(",
"D_oct",
",",
"window",
"=",
"'ones'",
",",
"hop_length",
"=",
"oct_hop",
")",
"# Up-sample that octave",
"if",
"y",
"is",
"None",
":",
"y",
"=",
"y_oct",
"else",
":",
"# Up-sample the previous buffer and add in the new one",
"# Scipy-resampling is fast here, since it's a power-of-two relation",
"y",
"=",
"audio",
".",
"resample",
"(",
"y",
",",
"1",
",",
"2",
",",
"scale",
"=",
"True",
",",
"res_type",
"=",
"res_type",
",",
"fix",
"=",
"False",
")",
"y",
"[",
":",
"len",
"(",
"y_oct",
")",
"]",
"+=",
"y_oct",
"if",
"length",
":",
"y",
"=",
"util",
".",
"fix_length",
"(",
"y",
",",
"length",
")",
"return",
"y"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__cqt_filter_fft
|
Generate the frequency domain constant-Q filter basis.
|
librosa/core/constantq.py
|
def __cqt_filter_fft(sr, fmin, n_bins, bins_per_octave, tuning,
filter_scale, norm, sparsity, hop_length=None,
window='hann'):
'''Generate the frequency domain constant-Q filter basis.'''
basis, lengths = filters.constant_q(sr,
fmin=fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
norm=norm,
pad_fft=True,
window=window)
# Filters are padded up to the nearest integral power of 2
n_fft = basis.shape[1]
if (hop_length is not None and
n_fft < 2.0**(1 + np.ceil(np.log2(hop_length)))):
n_fft = int(2.0 ** (1 + np.ceil(np.log2(hop_length))))
# re-normalize bases with respect to the FFT window length
basis *= lengths[:, np.newaxis] / float(n_fft)
# FFT and retain only the non-negative frequencies
fft = get_fftlib()
fft_basis = fft.fft(basis, n=n_fft, axis=1)[:, :(n_fft // 2)+1]
# sparsify the basis
fft_basis = util.sparsify_rows(fft_basis, quantile=sparsity)
return fft_basis, n_fft, lengths
|
def __cqt_filter_fft(sr, fmin, n_bins, bins_per_octave, tuning,
filter_scale, norm, sparsity, hop_length=None,
window='hann'):
'''Generate the frequency domain constant-Q filter basis.'''
basis, lengths = filters.constant_q(sr,
fmin=fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
filter_scale=filter_scale,
norm=norm,
pad_fft=True,
window=window)
# Filters are padded up to the nearest integral power of 2
n_fft = basis.shape[1]
if (hop_length is not None and
n_fft < 2.0**(1 + np.ceil(np.log2(hop_length)))):
n_fft = int(2.0 ** (1 + np.ceil(np.log2(hop_length))))
# re-normalize bases with respect to the FFT window length
basis *= lengths[:, np.newaxis] / float(n_fft)
# FFT and retain only the non-negative frequencies
fft = get_fftlib()
fft_basis = fft.fft(basis, n=n_fft, axis=1)[:, :(n_fft // 2)+1]
# sparsify the basis
fft_basis = util.sparsify_rows(fft_basis, quantile=sparsity)
return fft_basis, n_fft, lengths
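Since this helper is module-private, the quantities it returns are easiest to inspect by rebuilding them from the public `librosa.filters.constant_q` API. The sketch below is a rough approximation: the bin counts are arbitrary, the re-normalization and half-spectrum slicing mirror the steps in the code above, and sparsification is omitted.

import numpy as np
import librosa

sr = 22050
fmin = librosa.note_to_hz('C1')

# Time-domain constant-Q filters, zero-padded to a power of two (pad_fft=True).
basis, lengths = librosa.filters.constant_q(sr, fmin=fmin, n_bins=12,
                                            bins_per_octave=12, pad_fft=True)
n_fft = basis.shape[1]

# Re-normalize by effective filter length, then keep the non-negative frequencies.
basis = basis * (lengths[:, np.newaxis] / float(n_fft))
fft_basis = np.fft.fft(basis, n=n_fft, axis=1)[:, :(n_fft // 2) + 1]
print(fft_basis.shape)   # (n_bins, n_fft // 2 + 1)

The `util.sparsify_rows` step in the real helper only zeroes out small-magnitude entries so that the later dot products can run on a sparse matrix; it does not change the structure of the basis.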
|
[
"Generate",
"the",
"frequency",
"domain",
"constant",
"-",
"Q",
"filter",
"basis",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L707-L740
|
[
"def",
"__cqt_filter_fft",
"(",
"sr",
",",
"fmin",
",",
"n_bins",
",",
"bins_per_octave",
",",
"tuning",
",",
"filter_scale",
",",
"norm",
",",
"sparsity",
",",
"hop_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
")",
":",
"basis",
",",
"lengths",
"=",
"filters",
".",
"constant_q",
"(",
"sr",
",",
"fmin",
"=",
"fmin",
",",
"n_bins",
"=",
"n_bins",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
",",
"filter_scale",
"=",
"filter_scale",
",",
"norm",
"=",
"norm",
",",
"pad_fft",
"=",
"True",
",",
"window",
"=",
"window",
")",
"# Filters are padded up to the nearest integral power of 2",
"n_fft",
"=",
"basis",
".",
"shape",
"[",
"1",
"]",
"if",
"(",
"hop_length",
"is",
"not",
"None",
"and",
"n_fft",
"<",
"2.0",
"**",
"(",
"1",
"+",
"np",
".",
"ceil",
"(",
"np",
".",
"log2",
"(",
"hop_length",
")",
")",
")",
")",
":",
"n_fft",
"=",
"int",
"(",
"2.0",
"**",
"(",
"1",
"+",
"np",
".",
"ceil",
"(",
"np",
".",
"log2",
"(",
"hop_length",
")",
")",
")",
")",
"# re-normalize bases with respect to the FFT window length",
"basis",
"*=",
"lengths",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"/",
"float",
"(",
"n_fft",
")",
"# FFT and retain only the non-negative frequencies",
"fft",
"=",
"get_fftlib",
"(",
")",
"fft_basis",
"=",
"fft",
".",
"fft",
"(",
"basis",
",",
"n",
"=",
"n_fft",
",",
"axis",
"=",
"1",
")",
"[",
":",
",",
":",
"(",
"n_fft",
"//",
"2",
")",
"+",
"1",
"]",
"# sparsify the basis",
"fft_basis",
"=",
"util",
".",
"sparsify_rows",
"(",
"fft_basis",
",",
"quantile",
"=",
"sparsity",
")",
"return",
"fft_basis",
",",
"n_fft",
",",
"lengths"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__trim_stack
|
Helper function to trim and stack a collection of CQT responses
|
librosa/core/constantq.py
|
def __trim_stack(cqt_resp, n_bins):
'''Helper function to trim and stack a collection of CQT responses'''
# cleanup any framing errors at the boundaries
max_col = min(x.shape[1] for x in cqt_resp)
cqt_resp = np.vstack([x[:, :max_col] for x in cqt_resp][::-1])
# Finally, clip out any bottom frequencies that we don't really want
# Transpose magic here to ensure column-contiguity
return np.ascontiguousarray(cqt_resp[-n_bins:].T).T
|
def __trim_stack(cqt_resp, n_bins):
'''Helper function to trim and stack a collection of CQT responses'''
# cleanup any framing errors at the boundaries
max_col = min(x.shape[1] for x in cqt_resp)
cqt_resp = np.vstack([x[:, :max_col] for x in cqt_resp][::-1])
# Finally, clip out any bottom frequencies that we don't really want
# Transpose magic here to ensure column-contiguity
return np.ascontiguousarray(cqt_resp[-n_bins:].T).T
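The trim-and-stack behaviour is simple enough to reproduce on toy arrays; here is a small numpy-only sketch (the shapes are made up for illustration).

import numpy as np

# Fake per-octave responses with slightly different frame counts.
cqt_resp = [np.ones((12, 10)), 2 * np.ones((12, 11)), 3 * np.ones((12, 9))]

# Trim every block to the shortest frame count, reverse the list, and stack.
max_col = min(x.shape[1] for x in cqt_resp)
stacked = np.vstack([x[:, :max_col] for x in cqt_resp][::-1])

# Keep only the last n_bins rows, as the helper does.
n_bins = 30
print(stacked[-n_bins:].shape)   # (30, 9)

The trailing `np.ascontiguousarray(...T).T` in the helper only affects memory layout (it makes the result column-contiguous); the values are unchanged.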
|
[
"Helper",
"function",
"to",
"trim",
"and",
"stack",
"a",
"collection",
"of",
"CQT",
"responses"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L743-L753
|
[
"def",
"__trim_stack",
"(",
"cqt_resp",
",",
"n_bins",
")",
":",
"# cleanup any framing errors at the boundaries",
"max_col",
"=",
"min",
"(",
"x",
".",
"shape",
"[",
"1",
"]",
"for",
"x",
"in",
"cqt_resp",
")",
"cqt_resp",
"=",
"np",
".",
"vstack",
"(",
"[",
"x",
"[",
":",
",",
":",
"max_col",
"]",
"for",
"x",
"in",
"cqt_resp",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
"# Finally, clip out any bottom frequencies that we don't really want",
"# Transpose magic here to ensure column-contiguity",
"return",
"np",
".",
"ascontiguousarray",
"(",
"cqt_resp",
"[",
"-",
"n_bins",
":",
"]",
".",
"T",
")",
".",
"T"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__cqt_response
|
Compute the filter response with a target STFT hop.
|
librosa/core/constantq.py
|
def __cqt_response(y, n_fft, hop_length, fft_basis, mode):
'''Compute the filter response with a target STFT hop.'''
# Compute the STFT matrix
D = stft(y, n_fft=n_fft, hop_length=hop_length,
window='ones',
pad_mode=mode)
# And filter response energy
return fft_basis.dot(D)
|
def __cqt_response(y, n_fft, hop_length, fft_basis, mode):
'''Compute the filter response with a target STFT hop.'''
# Compute the STFT matrix
D = stft(y, n_fft=n_fft, hop_length=hop_length,
window='ones',
pad_mode=mode)
# And filter response energy
return fft_basis.dot(D)
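A public-API sketch of the same operation: an STFT taken with a flat window, projected through a frequency-domain basis via a dot product. The random basis here is purely illustrative; in the real pipeline it comes from `__cqt_filter_fft` and is sparse.

import numpy as np
import librosa

y, sr = librosa.load(librosa.util.example_audio_file(), duration=2)
n_fft, hop_length = 512, 128

# STFT with a flat ('ones') window, as the helper uses internally.
D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, window='ones')

# Illustrative dense basis: one row per output bin, one column per FFT bin.
rng = np.random.RandomState(0)
fft_basis = rng.randn(12, n_fft // 2 + 1)

response = fft_basis.dot(D)
print(response.shape)   # (12, n_frames)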
|
[
"Compute",
"the",
"filter",
"response",
"with",
"a",
"target",
"STFT",
"hop",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L756-L765
|
[
"def",
"__cqt_response",
"(",
"y",
",",
"n_fft",
",",
"hop_length",
",",
"fft_basis",
",",
"mode",
")",
":",
"# Compute the STFT matrix",
"D",
"=",
"stft",
"(",
"y",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"window",
"=",
"'ones'",
",",
"pad_mode",
"=",
"mode",
")",
"# And filter response energy",
"return",
"fft_basis",
".",
"dot",
"(",
"D",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__early_downsample_count
|
Compute the number of early downsampling operations
|
librosa/core/constantq.py
|
def __early_downsample_count(nyquist, filter_cutoff, hop_length, n_octaves):
'''Compute the number of early downsampling operations'''
downsample_count1 = max(0, int(np.ceil(np.log2(audio.BW_FASTEST * nyquist /
filter_cutoff)) - 1) - 1)
num_twos = __num_two_factors(hop_length)
downsample_count2 = max(0, num_twos - n_octaves + 1)
return min(downsample_count1, downsample_count2)
|
def __early_downsample_count(nyquist, filter_cutoff, hop_length, n_octaves):
'''Compute the number of early downsampling operations'''
downsample_count1 = max(0, int(np.ceil(np.log2(audio.BW_FASTEST * nyquist /
filter_cutoff)) - 1) - 1)
num_twos = __num_two_factors(hop_length)
downsample_count2 = max(0, num_twos - n_octaves + 1)
return min(downsample_count1, downsample_count2)
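The two limits combined here are easier to read with concrete numbers. The standalone sketch below mirrors the formula above; `BW_FASTEST` is taken to be roughly 0.85 (the approximate bandwidth of the 'kaiser_fast' resampling filter), and the cutoff frequency is an illustrative assumption.

import numpy as np

def num_two_factors(x):
    # Count how many times 2 divides x (mirrors the private __num_two_factors).
    count = 0
    while x > 0 and x % 2 == 0:
        x //= 2
        count += 1
    return count

sr, hop_length, n_octaves = 22050, 512, 7
nyquist = sr / 2.0
filter_cutoff = 2000.0   # illustrative cutoff of the lowest-octave filters
BW_FASTEST = 0.85        # approximate bandwidth of the 'kaiser_fast' filter

# Limit 1: how many halvings fit before the filters hit the resampler bandwidth.
count1 = max(0, int(np.ceil(np.log2(BW_FASTEST * nyquist / filter_cutoff)) - 1) - 1)
# Limit 2: keep enough factors of two in hop_length for the octave recursion.
count2 = max(0, num_two_factors(hop_length) - n_octaves + 1)
print(min(count1, count2))   # 1 for these numbers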
|
[
"Compute",
"the",
"number",
"of",
"early",
"downsampling",
"operations"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L768-L777
|
[
"def",
"__early_downsample_count",
"(",
"nyquist",
",",
"filter_cutoff",
",",
"hop_length",
",",
"n_octaves",
")",
":",
"downsample_count1",
"=",
"max",
"(",
"0",
",",
"int",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"log2",
"(",
"audio",
".",
"BW_FASTEST",
"*",
"nyquist",
"/",
"filter_cutoff",
")",
")",
"-",
"1",
")",
"-",
"1",
")",
"num_twos",
"=",
"__num_two_factors",
"(",
"hop_length",
")",
"downsample_count2",
"=",
"max",
"(",
"0",
",",
"num_twos",
"-",
"n_octaves",
"+",
"1",
")",
"return",
"min",
"(",
"downsample_count1",
",",
"downsample_count2",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__early_downsample
|
Perform early downsampling on an audio signal, if it applies.
|
librosa/core/constantq.py
|
def __early_downsample(y, sr, hop_length, res_type, n_octaves,
nyquist, filter_cutoff, scale):
'''Perform early downsampling on an audio signal, if it applies.'''
downsample_count = __early_downsample_count(nyquist, filter_cutoff,
hop_length, n_octaves)
if downsample_count > 0 and res_type == 'kaiser_fast':
downsample_factor = 2**(downsample_count)
hop_length //= downsample_factor
if len(y) < downsample_factor:
raise ParameterError('Input signal length={:d} is too short for '
'{:d}-octave CQT'.format(len(y), n_octaves))
new_sr = sr / float(downsample_factor)
y = audio.resample(y, sr, new_sr,
res_type=res_type,
scale=True)
# If we're not going to length-scale after CQT, we
# need to compensate for the downsampling factor here
if not scale:
y *= np.sqrt(downsample_factor)
sr = new_sr
return y, sr, hop_length
|
def __early_downsample(y, sr, hop_length, res_type, n_octaves,
nyquist, filter_cutoff, scale):
'''Perform early downsampling on an audio signal, if it applies.'''
downsample_count = __early_downsample_count(nyquist, filter_cutoff,
hop_length, n_octaves)
if downsample_count > 0 and res_type == 'kaiser_fast':
downsample_factor = 2**(downsample_count)
hop_length //= downsample_factor
if len(y) < downsample_factor:
raise ParameterError('Input signal length={:d} is too short for '
'{:d}-octave CQT'.format(len(y), n_octaves))
new_sr = sr / float(downsample_factor)
y = audio.resample(y, sr, new_sr,
res_type=res_type,
scale=True)
# If we're not going to length-scale after CQT, we
# need to compensate for the downsampling factor here
if not scale:
y *= np.sqrt(downsample_factor)
sr = new_sr
return y, sr, hop_length
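A standalone sketch of the same bookkeeping using the public resampler, assuming the `librosa.resample` signature at the pinned commit (positional sample rates). The downsample count is hard-coded for illustration rather than derived from a filter cutoff, and the sqrt energy compensation only applies in the `scale=False` branch above.

import numpy as np
import librosa

y, sr = librosa.load(librosa.util.example_audio_file(), duration=2)
hop_length = 512

downsample_count = 2                 # pretend the cutoff analysis allowed 2 stages
downsample_factor = 2 ** downsample_count

new_sr = sr / float(downsample_factor)
y_ds = librosa.resample(y, sr, new_sr, res_type='kaiser_fast', scale=True)

# hop_length shrinks by the same factor so frame positions stay aligned.
hop_length //= downsample_factor
print(len(y), len(y_ds), hop_length)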
|
[
"Perform",
"early",
"downsampling",
"on",
"an",
"audio",
"signal",
"if",
"it",
"applies",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/constantq.py#L780-L808
|
[
"def",
"__early_downsample",
"(",
"y",
",",
"sr",
",",
"hop_length",
",",
"res_type",
",",
"n_octaves",
",",
"nyquist",
",",
"filter_cutoff",
",",
"scale",
")",
":",
"downsample_count",
"=",
"__early_downsample_count",
"(",
"nyquist",
",",
"filter_cutoff",
",",
"hop_length",
",",
"n_octaves",
")",
"if",
"downsample_count",
">",
"0",
"and",
"res_type",
"==",
"'kaiser_fast'",
":",
"downsample_factor",
"=",
"2",
"**",
"(",
"downsample_count",
")",
"hop_length",
"//=",
"downsample_factor",
"if",
"len",
"(",
"y",
")",
"<",
"downsample_factor",
":",
"raise",
"ParameterError",
"(",
"'Input signal length={:d} is too short for '",
"'{:d}-octave CQT'",
".",
"format",
"(",
"len",
"(",
"y",
")",
",",
"n_octaves",
")",
")",
"new_sr",
"=",
"sr",
"/",
"float",
"(",
"downsample_factor",
")",
"y",
"=",
"audio",
".",
"resample",
"(",
"y",
",",
"sr",
",",
"new_sr",
",",
"res_type",
"=",
"res_type",
",",
"scale",
"=",
"True",
")",
"# If we're not going to length-scale after CQT, we",
"# need to compensate for the downsampling factor here",
"if",
"not",
"scale",
":",
"y",
"*=",
"np",
".",
"sqrt",
"(",
"downsample_factor",
")",
"sr",
"=",
"new_sr",
"return",
"y",
",",
"sr",
",",
"hop_length"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
delta
|
r'''Compute delta features: local estimate of the derivative
of the input data along the selected axis.
Delta features are computed using Savitzky-Golay filtering.
Parameters
----------
data : np.ndarray
the input data matrix (eg, spectrogram)
width : int, positive, odd [scalar]
Number of frames over which to compute the delta features.
Cannot exceed the length of `data` along the specified axis.
If `mode='interp'`, then `width` must not exceed `data.shape[axis]`.
order : int > 0 [scalar]
the order of the difference operator.
1 for first derivative, 2 for second, etc.
axis : int [scalar]
the axis along which to compute deltas.
Default is -1 (columns).
mode : str, {'interp', 'nearest', 'mirror', 'constant', 'wrap'}
Padding mode for estimating differences at the boundaries.
kwargs : additional keyword arguments
See `scipy.signal.savgol_filter`
Returns
-------
delta_data : np.ndarray [shape=(d, t)]
delta matrix of `data` at specified order
Notes
-----
This function caches at level 40.
See Also
--------
scipy.signal.savgol_filter
Examples
--------
Compute MFCC deltas, delta-deltas
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
>>> mfcc_delta = librosa.feature.delta(mfcc)
>>> mfcc_delta
array([[ 1.666e+01, 1.666e+01, ..., 1.869e-15, 1.869e-15],
[ 1.784e+01, 1.784e+01, ..., 6.085e-31, 6.085e-31],
...,
[ 7.262e-01, 7.262e-01, ..., 9.259e-31, 9.259e-31],
[ 6.578e-01, 6.578e-01, ..., 7.597e-31, 7.597e-31]])
>>> mfcc_delta2 = librosa.feature.delta(mfcc, order=2)
>>> mfcc_delta2
array([[ -1.703e+01, -1.703e+01, ..., 3.834e-14, 3.834e-14],
[ -1.108e+01, -1.108e+01, ..., -1.068e-30, -1.068e-30],
...,
[ 4.075e-01, 4.075e-01, ..., -1.565e-30, -1.565e-30],
[ 1.676e-01, 1.676e-01, ..., -2.104e-30, -2.104e-30]])
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(mfcc)
>>> plt.title('MFCC')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(mfcc_delta)
>>> plt.title(r'MFCC-$\Delta$')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(mfcc_delta2, x_axis='time')
>>> plt.title(r'MFCC-$\Delta^2$')
>>> plt.colorbar()
>>> plt.tight_layout()
|
librosa/feature/utils.py
|
def delta(data, width=9, order=1, axis=-1, mode='interp', **kwargs):
r'''Compute delta features: local estimate of the derivative
of the input data along the selected axis.
Delta features are computed using Savitzky-Golay filtering.
Parameters
----------
data : np.ndarray
the input data matrix (eg, spectrogram)
width : int, positive, odd [scalar]
Number of frames over which to compute the delta features.
Cannot exceed the length of `data` along the specified axis.
If `mode='interp'`, then `width` must not exceed `data.shape[axis]`.
order : int > 0 [scalar]
the order of the difference operator.
1 for first derivative, 2 for second, etc.
axis : int [scalar]
the axis along which to compute deltas.
Default is -1 (columns).
mode : str, {'interp', 'nearest', 'mirror', 'constant', 'wrap'}
Padding mode for estimating differences at the boundaries.
kwargs : additional keyword arguments
See `scipy.signal.savgol_filter`
Returns
-------
delta_data : np.ndarray [shape=(d, t)]
delta matrix of `data` at specified order
Notes
-----
This function caches at level 40.
See Also
--------
scipy.signal.savgol_filter
Examples
--------
Compute MFCC deltas, delta-deltas
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
>>> mfcc_delta = librosa.feature.delta(mfcc)
>>> mfcc_delta
array([[ 1.666e+01, 1.666e+01, ..., 1.869e-15, 1.869e-15],
[ 1.784e+01, 1.784e+01, ..., 6.085e-31, 6.085e-31],
...,
[ 7.262e-01, 7.262e-01, ..., 9.259e-31, 9.259e-31],
[ 6.578e-01, 6.578e-01, ..., 7.597e-31, 7.597e-31]])
>>> mfcc_delta2 = librosa.feature.delta(mfcc, order=2)
>>> mfcc_delta2
array([[ -1.703e+01, -1.703e+01, ..., 3.834e-14, 3.834e-14],
[ -1.108e+01, -1.108e+01, ..., -1.068e-30, -1.068e-30],
...,
[ 4.075e-01, 4.075e-01, ..., -1.565e-30, -1.565e-30],
[ 1.676e-01, 1.676e-01, ..., -2.104e-30, -2.104e-30]])
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(mfcc)
>>> plt.title('MFCC')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(mfcc_delta)
>>> plt.title(r'MFCC-$\Delta$')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(mfcc_delta2, x_axis='time')
>>> plt.title(r'MFCC-$\Delta^2$')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
data = np.atleast_1d(data)
if mode == 'interp' and width > data.shape[axis]:
raise ParameterError("when mode='interp', width={} "
"cannot exceed data.shape[axis]={}".format(width, data.shape[axis]))
if width < 3 or np.mod(width, 2) != 1:
raise ParameterError('width must be an odd integer >= 3')
if order <= 0 or not isinstance(order, int):
raise ParameterError('order must be a positive integer')
kwargs.pop('deriv', None)
kwargs.setdefault('polyorder', order)
return scipy.signal.savgol_filter(data, width,
deriv=order,
axis=axis,
mode=mode,
**kwargs)
|
def delta(data, width=9, order=1, axis=-1, mode='interp', **kwargs):
r'''Compute delta features: local estimate of the derivative
of the input data along the selected axis.
Delta features are computed using Savitzky-Golay filtering.
Parameters
----------
data : np.ndarray
the input data matrix (eg, spectrogram)
width : int, positive, odd [scalar]
Number of frames over which to compute the delta features.
Cannot exceed the length of `data` along the specified axis.
If `mode='interp'`, then `width` must not exceed `data.shape[axis]`.
order : int > 0 [scalar]
the order of the difference operator.
1 for first derivative, 2 for second, etc.
axis : int [scalar]
the axis along which to compute deltas.
Default is -1 (columns).
mode : str, {'interp', 'nearest', 'mirror', 'constant', 'wrap'}
Padding mode for estimating differences at the boundaries.
kwargs : additional keyword arguments
See `scipy.signal.savgol_filter`
Returns
-------
delta_data : np.ndarray [shape=(d, t)]
delta matrix of `data` at specified order
Notes
-----
This function caches at level 40.
See Also
--------
scipy.signal.savgol_filter
Examples
--------
Compute MFCC deltas, delta-deltas
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> mfcc = librosa.feature.mfcc(y=y, sr=sr)
>>> mfcc_delta = librosa.feature.delta(mfcc)
>>> mfcc_delta
array([[ 1.666e+01, 1.666e+01, ..., 1.869e-15, 1.869e-15],
[ 1.784e+01, 1.784e+01, ..., 6.085e-31, 6.085e-31],
...,
[ 7.262e-01, 7.262e-01, ..., 9.259e-31, 9.259e-31],
[ 6.578e-01, 6.578e-01, ..., 7.597e-31, 7.597e-31]])
>>> mfcc_delta2 = librosa.feature.delta(mfcc, order=2)
>>> mfcc_delta2
array([[ -1.703e+01, -1.703e+01, ..., 3.834e-14, 3.834e-14],
[ -1.108e+01, -1.108e+01, ..., -1.068e-30, -1.068e-30],
...,
[ 4.075e-01, 4.075e-01, ..., -1.565e-30, -1.565e-30],
[ 1.676e-01, 1.676e-01, ..., -2.104e-30, -2.104e-30]])
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(mfcc)
>>> plt.title('MFCC')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(mfcc_delta)
>>> plt.title(r'MFCC-$\Delta$')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(mfcc_delta2, x_axis='time')
>>> plt.title(r'MFCC-$\Delta^2$')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
data = np.atleast_1d(data)
if mode == 'interp' and width > data.shape[axis]:
raise ParameterError("when mode='interp', width={} "
"cannot exceed data.shape[axis]={}".format(width, data.shape[axis]))
if width < 3 or np.mod(width, 2) != 1:
raise ParameterError('width must be an odd integer >= 3')
if order <= 0 or not isinstance(order, int):
raise ParameterError('order must be a positive integer')
kwargs.pop('deriv', None)
kwargs.setdefault('polyorder', order)
return scipy.signal.savgol_filter(data, width,
deriv=order,
axis=axis,
mode=mode,
**kwargs)
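One quick sanity check on synthetic data can make the Savitzky-Golay behaviour concrete: for a straight ramp, the first-order delta recovers the slope. A minimal sketch, with no assumptions beyond the public `librosa.feature.delta` shown above.

import numpy as np
import librosa

# A ramp with slope 2: a first-order, width-9 delta should recover ~2 per frame.
x = 2.0 * np.arange(100)
d = librosa.feature.delta(x, width=9, order=1)
print(np.allclose(d[10:-10], 2.0))   # True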
|
[
"r",
"Compute",
"delta",
"features",
":",
"local",
"estimate",
"of",
"the",
"derivative",
"of",
"the",
"input",
"data",
"along",
"the",
"selected",
"axis",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/utils.py#L15-L115
|
[
"def",
"delta",
"(",
"data",
",",
"width",
"=",
"9",
",",
"order",
"=",
"1",
",",
"axis",
"=",
"-",
"1",
",",
"mode",
"=",
"'interp'",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"np",
".",
"atleast_1d",
"(",
"data",
")",
"if",
"mode",
"==",
"'interp'",
"and",
"width",
">",
"data",
".",
"shape",
"[",
"axis",
"]",
":",
"raise",
"ParameterError",
"(",
"\"when mode='interp', width={} \"",
"\"cannot exceed data.shape[axis]={}\"",
".",
"format",
"(",
"width",
",",
"data",
".",
"shape",
"[",
"axis",
"]",
")",
")",
"if",
"width",
"<",
"3",
"or",
"np",
".",
"mod",
"(",
"width",
",",
"2",
")",
"!=",
"1",
":",
"raise",
"ParameterError",
"(",
"'width must be an odd integer >= 3'",
")",
"if",
"order",
"<=",
"0",
"or",
"not",
"isinstance",
"(",
"order",
",",
"int",
")",
":",
"raise",
"ParameterError",
"(",
"'order must be a positive integer'",
")",
"kwargs",
".",
"pop",
"(",
"'deriv'",
",",
"None",
")",
"kwargs",
".",
"setdefault",
"(",
"'polyorder'",
",",
"order",
")",
"return",
"scipy",
".",
"signal",
".",
"savgol_filter",
"(",
"data",
",",
"width",
",",
"deriv",
"=",
"order",
",",
"axis",
"=",
"axis",
",",
"mode",
"=",
"mode",
",",
"*",
"*",
"kwargs",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
stack_memory
|
Short-term history embedding: vertically concatenate a data
vector or matrix with delayed copies of itself.
Each column `data[:, i]` is mapped to::
data[:, i] -> [data[:, i],
data[:, i - delay],
...
data[:, i - (n_steps-1)*delay]]
For columns `i < (n_steps - 1) * delay` , the data will be padded.
By default, the data is padded with zeros, but this behavior can be
overridden by supplying additional keyword arguments which are passed
to `np.pad()`.
Parameters
----------
data : np.ndarray [shape=(t,) or (d, t)]
Input data matrix. If `data` is a vector (`data.ndim == 1`),
it will be interpreted as a row matrix and reshaped to `(1, t)`.
n_steps : int > 0 [scalar]
embedding dimension, the number of steps back in time to stack
delay : int != 0 [scalar]
the number of columns to step.
Positive values embed from the past (previous columns).
Negative values embed from the future (subsequent columns).
kwargs : additional keyword arguments
Additional arguments to pass to `np.pad`.
Returns
-------
data_history : np.ndarray [shape=(m * d, t)]
data augmented with lagged copies of itself,
where `m == n_steps - 1`.
Notes
-----
This function caches at level 40.
Examples
--------
Keep two steps (current and previous)
>>> data = np.arange(-3, 3)
>>> librosa.feature.stack_memory(data)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1]])
Or three steps
>>> librosa.feature.stack_memory(data, n_steps=3)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1],
[ 0, 0, -3, -2, -1, 0]])
Use reflection padding instead of zero-padding
>>> librosa.feature.stack_memory(data, n_steps=3, mode='reflect')
array([[-3, -2, -1, 0, 1, 2],
[-2, -3, -2, -1, 0, 1],
[-1, -2, -3, -2, -1, 0]])
Or pad with edge-values, and delay by 2
>>> librosa.feature.stack_memory(data, n_steps=3, delay=2, mode='edge')
array([[-3, -2, -1, 0, 1, 2],
[-3, -3, -3, -2, -1, 0],
[-3, -3, -3, -3, -3, -2]])
Stack time-lagged beat-synchronous chroma with edge padding
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1])
>>> chroma_sync = librosa.util.sync(chroma, beats)
>>> chroma_lag = librosa.feature.stack_memory(chroma_sync, n_steps=3,
... mode='edge')
Plot the result
>>> import matplotlib.pyplot as plt
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> librosa.display.specshow(chroma_lag, y_axis='chroma', x_axis='time',
... x_coords=beat_times)
>>> plt.yticks([0, 12, 24], ['Lag=0', 'Lag=1', 'Lag=2'])
>>> plt.title('Time-lagged chroma')
>>> plt.colorbar()
>>> plt.tight_layout()
|
librosa/feature/utils.py
|
def stack_memory(data, n_steps=2, delay=1, **kwargs):
"""Short-term history embedding: vertically concatenate a data
vector or matrix with delayed copies of itself.
Each column `data[:, i]` is mapped to::
data[:, i] -> [data[:, i],
data[:, i - delay],
...
data[:, i - (n_steps-1)*delay]]
For columns `i < (n_steps - 1) * delay` , the data will be padded.
By default, the data is padded with zeros, but this behavior can be
overridden by supplying additional keyword arguments which are passed
to `np.pad()`.
Parameters
----------
data : np.ndarray [shape=(t,) or (d, t)]
Input data matrix. If `data` is a vector (`data.ndim == 1`),
it will be interpreted as a row matrix and reshaped to `(1, t)`.
n_steps : int > 0 [scalar]
embedding dimension, the number of steps back in time to stack
delay : int != 0 [scalar]
the number of columns to step.
Positive values embed from the past (previous columns).
Negative values embed from the future (subsequent columns).
kwargs : additional keyword arguments
Additional arguments to pass to `np.pad`.
Returns
-------
data_history : np.ndarray [shape=(m * d, t)]
data augmented with lagged copies of itself,
where `m == n_steps - 1`.
Notes
-----
This function caches at level 40.
Examples
--------
Keep two steps (current and previous)
>>> data = np.arange(-3, 3)
>>> librosa.feature.stack_memory(data)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1]])
Or three steps
>>> librosa.feature.stack_memory(data, n_steps=3)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1],
[ 0, 0, -3, -2, -1, 0]])
Use reflection padding instead of zero-padding
>>> librosa.feature.stack_memory(data, n_steps=3, mode='reflect')
array([[-3, -2, -1, 0, 1, 2],
[-2, -3, -2, -1, 0, 1],
[-1, -2, -3, -2, -1, 0]])
Or pad with edge-values, and delay by 2
>>> librosa.feature.stack_memory(data, n_steps=3, delay=2, mode='edge')
array([[-3, -2, -1, 0, 1, 2],
[-3, -3, -3, -2, -1, 0],
[-3, -3, -3, -3, -3, -2]])
Stack time-lagged beat-synchronous chroma with edge padding
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1])
>>> chroma_sync = librosa.util.sync(chroma, beats)
>>> chroma_lag = librosa.feature.stack_memory(chroma_sync, n_steps=3,
... mode='edge')
Plot the result
>>> import matplotlib.pyplot as plt
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> librosa.display.specshow(chroma_lag, y_axis='chroma', x_axis='time',
... x_coords=beat_times)
>>> plt.yticks([0, 12, 24], ['Lag=0', 'Lag=1', 'Lag=2'])
>>> plt.title('Time-lagged chroma')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if n_steps < 1:
raise ParameterError('n_steps must be a positive integer')
if delay == 0:
raise ParameterError('delay must be a non-zero integer')
data = np.atleast_2d(data)
t = data.shape[1]
kwargs.setdefault('mode', 'constant')
if kwargs['mode'] == 'constant':
kwargs.setdefault('constant_values', [0])
# Pad the end with zeros, which will roll to the front below
if delay > 0:
padding = (int((n_steps - 1) * delay), 0)
else:
padding = (0, int((n_steps - 1) * -delay))
data = np.pad(data, [(0, 0), padding], **kwargs)
history = data
for i in range(1, n_steps):
history = np.vstack([np.roll(data, -i * delay, axis=1), history])
# Trim to original width
if delay > 0:
history = history[:, :t]
else:
history = history[:, -t:]
# Make contiguous
return np.ascontiguousarray(history.T).T
|
def stack_memory(data, n_steps=2, delay=1, **kwargs):
"""Short-term history embedding: vertically concatenate a data
vector or matrix with delayed copies of itself.
Each column `data[:, i]` is mapped to::
data[:, i] -> [data[:, i],
data[:, i - delay],
...
data[:, i - (n_steps-1)*delay]]
For columns `i < (n_steps - 1) * delay` , the data will be padded.
By default, the data is padded with zeros, but this behavior can be
overridden by supplying additional keyword arguments which are passed
to `np.pad()`.
Parameters
----------
data : np.ndarray [shape=(t,) or (d, t)]
Input data matrix. If `data` is a vector (`data.ndim == 1`),
it will be interpreted as a row matrix and reshaped to `(1, t)`.
n_steps : int > 0 [scalar]
embedding dimension, the number of steps back in time to stack
delay : int != 0 [scalar]
the number of columns to step.
Positive values embed from the past (previous columns).
Negative values embed from the future (subsequent columns).
kwargs : additional keyword arguments
Additional arguments to pass to `np.pad`.
Returns
-------
data_history : np.ndarray [shape=(m * d, t)]
data augmented with lagged copies of itself,
where `m == n_steps - 1`.
Notes
-----
This function caches at level 40.
Examples
--------
Keep two steps (current and previous)
>>> data = np.arange(-3, 3)
>>> librosa.feature.stack_memory(data)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1]])
Or three steps
>>> librosa.feature.stack_memory(data, n_steps=3)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1],
[ 0, 0, -3, -2, -1, 0]])
Use reflection padding instead of zero-padding
>>> librosa.feature.stack_memory(data, n_steps=3, mode='reflect')
array([[-3, -2, -1, 0, 1, 2],
[-2, -3, -2, -1, 0, 1],
[-1, -2, -3, -2, -1, 0]])
Or pad with edge-values, and delay by 2
>>> librosa.feature.stack_memory(data, n_steps=3, delay=2, mode='edge')
array([[-3, -2, -1, 0, 1, 2],
[-3, -3, -3, -2, -1, 0],
[-3, -3, -3, -3, -3, -2]])
Stack time-lagged beat-synchronous chroma with edge padding
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1])
>>> chroma_sync = librosa.util.sync(chroma, beats)
>>> chroma_lag = librosa.feature.stack_memory(chroma_sync, n_steps=3,
... mode='edge')
Plot the result
>>> import matplotlib.pyplot as plt
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> librosa.display.specshow(chroma_lag, y_axis='chroma', x_axis='time',
... x_coords=beat_times)
>>> plt.yticks([0, 12, 24], ['Lag=0', 'Lag=1', 'Lag=2'])
>>> plt.title('Time-lagged chroma')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if n_steps < 1:
raise ParameterError('n_steps must be a positive integer')
if delay == 0:
raise ParameterError('delay must be a non-zero integer')
data = np.atleast_2d(data)
t = data.shape[1]
kwargs.setdefault('mode', 'constant')
if kwargs['mode'] == 'constant':
kwargs.setdefault('constant_values', [0])
# Pad the end with zeros, which will roll to the front below
if delay > 0:
padding = (int((n_steps - 1) * delay), 0)
else:
padding = (0, int((n_steps - 1) * -delay))
data = np.pad(data, [(0, 0), padding], **kwargs)
history = data
for i in range(1, n_steps):
history = np.vstack([np.roll(data, -i * delay, axis=1), history])
# Trim to original width
if delay > 0:
history = history[:, :t]
else:
history = history[:, -t:]
# Make contiguous
return np.ascontiguousarray(history.T).T
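The docstring examples above use a vector; a small additional sketch shows the shape behaviour for a matrix input, where each extra step appends another block of `d` delayed rows.

import numpy as np
import librosa

X = np.arange(12).reshape(3, 4)                # 3 features, 4 frames
Xs = librosa.feature.stack_memory(X, n_steps=3, delay=1)
print(Xs.shape)                                # (9, 4): n_steps * d rows, t columns

# The top d rows are the original frames; the next d rows are delayed by one.
print(np.array_equal(Xs[:3], X))               # True
print(np.array_equal(Xs[3:6, 1:], X[:, :-1]))  # True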
|
[
"Short",
"-",
"term",
"history",
"embedding",
":",
"vertically",
"concatenate",
"a",
"data",
"vector",
"or",
"matrix",
"with",
"delayed",
"copies",
"of",
"itself",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/utils.py#L119-L252
|
[
"def",
"stack_memory",
"(",
"data",
",",
"n_steps",
"=",
"2",
",",
"delay",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"n_steps",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"'n_steps must be a positive integer'",
")",
"if",
"delay",
"==",
"0",
":",
"raise",
"ParameterError",
"(",
"'delay must be a non-zero integer'",
")",
"data",
"=",
"np",
".",
"atleast_2d",
"(",
"data",
")",
"t",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"kwargs",
".",
"setdefault",
"(",
"'mode'",
",",
"'constant'",
")",
"if",
"kwargs",
"[",
"'mode'",
"]",
"==",
"'constant'",
":",
"kwargs",
".",
"setdefault",
"(",
"'constant_values'",
",",
"[",
"0",
"]",
")",
"# Pad the end with zeros, which will roll to the front below",
"if",
"delay",
">",
"0",
":",
"padding",
"=",
"(",
"int",
"(",
"(",
"n_steps",
"-",
"1",
")",
"*",
"delay",
")",
",",
"0",
")",
"else",
":",
"padding",
"=",
"(",
"0",
",",
"int",
"(",
"(",
"n_steps",
"-",
"1",
")",
"*",
"-",
"delay",
")",
")",
"data",
"=",
"np",
".",
"pad",
"(",
"data",
",",
"[",
"(",
"0",
",",
"0",
")",
",",
"padding",
"]",
",",
"*",
"*",
"kwargs",
")",
"history",
"=",
"data",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n_steps",
")",
":",
"history",
"=",
"np",
".",
"vstack",
"(",
"[",
"np",
".",
"roll",
"(",
"data",
",",
"-",
"i",
"*",
"delay",
",",
"axis",
"=",
"1",
")",
",",
"history",
"]",
")",
"# Trim to original width",
"if",
"delay",
">",
"0",
":",
"history",
"=",
"history",
"[",
":",
",",
":",
"t",
"]",
"else",
":",
"history",
"=",
"history",
"[",
":",
",",
"-",
"t",
":",
"]",
"# Make contiguous",
"return",
"np",
".",
"ascontiguousarray",
"(",
"history",
".",
"T",
")",
".",
"T"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
dtw
|
Dynamic time warping (DTW).
This function performs a DTW and path backtracking on two sequences.
We follow the nomenclature and algorithmic approach as described in [1]_.
.. [1] Meinard Mueller
Fundamentals of Music Processing — Audio, Analysis, Algorithms, Applications
Springer Verlag, ISBN: 978-3-319-21944-8, 2015.
Parameters
----------
X : np.ndarray [shape=(K, N)]
audio feature matrix (e.g., chroma features)
Y : np.ndarray [shape=(K, M)]
audio feature matrix (e.g., chroma features)
C : np.ndarray [shape=(N, M)]
Precomputed distance matrix. If supplied, X and Y must not be supplied and
``metric`` will be ignored.
metric : str
Identifier for the cost-function as documented
in `scipy.spatial.cdist()`
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
subseq : binary
Enable subsequence DTW, e.g., for retrieval tasks.
backtrack : binary
Enable backtracking in accumulated cost matrix.
global_constraints : binary
Applies global constraints to the cost matrix ``C`` (Sakoe-Chiba band).
band_rad : float
The Sakoe-Chiba band radius (1/2 of the width) will be
``int(radius*min(C.shape))``.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
wp : np.ndarray [shape=(N,2)]
Warping path with index pairs.
Each row of the array contains an index pair (n, m).
Only returned when ``backtrack`` is True.
Raises
------
ParameterError
If you are doing diagonal matching and Y is shorter than X or if an incompatible
combination of X, Y, and C is supplied.
If your input dimensions are incompatible.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=10, duration=15)
>>> X = librosa.feature.chroma_cens(y=y, sr=sr)
>>> noise = np.random.rand(X.shape[0], 200)
>>> Y = np.concatenate((noise, noise, X, noise), axis=1)
>>> D, wp = librosa.sequence.dtw(X, Y, subseq=True)
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(D, x_axis='frames', y_axis='frames')
>>> plt.title('Database excerpt')
>>> plt.plot(wp[:, 1], wp[:, 0], label='Optimal path', color='y')
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> plt.plot(D[-1, :] / wp.shape[0])
>>> plt.xlim([0, Y.shape[1]])
>>> plt.ylim([0, 2])
>>> plt.title('Matching cost function')
>>> plt.tight_layout()
|
librosa/sequence.py
|
def dtw(X=None, Y=None, C=None, metric='euclidean', step_sizes_sigma=None,
weights_add=None, weights_mul=None, subseq=False, backtrack=True,
global_constraints=False, band_rad=0.25):
'''Dynamic time warping (DTW).
This function performs a DTW and path backtracking on two sequences.
We follow the nomenclature and algorithmic approach as described in [1]_.
.. [1] Meinard Mueller
Fundamentals of Music Processing — Audio, Analysis, Algorithms, Applications
Springer Verlag, ISBN: 978-3-319-21944-8, 2015.
Parameters
----------
X : np.ndarray [shape=(K, N)]
audio feature matrix (e.g., chroma features)
Y : np.ndarray [shape=(K, M)]
audio feature matrix (e.g., chroma features)
C : np.ndarray [shape=(N, M)]
Precomputed distance matrix. If supplied, X and Y must not be supplied and
``metric`` will be ignored.
metric : str
Identifier for the cost-function as documented
in `scipy.spatial.cdist()`
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
subseq : binary
Enable subsequence DTW, e.g., for retrieval tasks.
backtrack : binary
Enable backtracking in accumulated cost matrix.
global_constraints : binary
Applies global constraints to the cost matrix ``C`` (Sakoe-Chiba band).
band_rad : float
The Sakoe-Chiba band radius (1/2 of the width) will be
``int(radius*min(C.shape))``.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
wp : np.ndarray [shape=(N,2)]
Warping path with index pairs.
Each row of the array contains an index pair (n, m).
Only returned when ``backtrack`` is True.
Raises
------
ParameterError
If you are doing diagonal matching and Y is shorter than X or if an incompatible
combination of X, Y, and C is supplied.
If your input dimensions are incompatible.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=10, duration=15)
>>> X = librosa.feature.chroma_cens(y=y, sr=sr)
>>> noise = np.random.rand(X.shape[0], 200)
>>> Y = np.concatenate((noise, noise, X, noise), axis=1)
>>> D, wp = librosa.sequence.dtw(X, Y, subseq=True)
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(D, x_axis='frames', y_axis='frames')
>>> plt.title('Database excerpt')
>>> plt.plot(wp[:, 1], wp[:, 0], label='Optimal path', color='y')
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> plt.plot(D[-1, :] / wp.shape[0])
>>> plt.xlim([0, Y.shape[1]])
>>> plt.ylim([0, 2])
>>> plt.title('Matching cost function')
>>> plt.tight_layout()
'''
# Default Parameters
if step_sizes_sigma is None:
step_sizes_sigma = np.array([[1, 1], [0, 1], [1, 0]])
if weights_add is None:
weights_add = np.zeros(len(step_sizes_sigma))
if weights_mul is None:
weights_mul = np.ones(len(step_sizes_sigma))
if len(step_sizes_sigma) != len(weights_add):
raise ParameterError('len(weights_add) must be equal to len(step_sizes_sigma)')
if len(step_sizes_sigma) != len(weights_mul):
raise ParameterError('len(weights_mul) must be equal to len(step_sizes_sigma)')
if C is None and (X is None or Y is None):
raise ParameterError('If C is not supplied, both X and Y must be supplied')
if C is not None and (X is not None or Y is not None):
raise ParameterError('If C is supplied, both X and Y must not be supplied')
# calculate pair-wise distances, unless already supplied.
if C is None:
# take care of dimensions
X = np.atleast_2d(X)
Y = np.atleast_2d(Y)
try:
C = cdist(X.T, Y.T, metric=metric)
except ValueError:
msg = ('scipy.spatial.distance.cdist returned an error.\n'
'Please provide your input in the form X.shape=(K, N) and Y.shape=(K, M).\n'
'1-dimensional sequences should be reshaped to X.shape=(1, N) and Y.shape=(1, M).')
six.reraise(ParameterError, ParameterError(msg))
# for subsequence matching:
# if N > M, Y can be a subsequence of X
if subseq and (X.shape[1] > Y.shape[1]):
C = C.T
C = np.atleast_2d(C)
# if diagonal matching, Y has to be longer than X
# (X simply cannot be contained in Y)
if np.array_equal(step_sizes_sigma, np.array([[1, 1]])) and (C.shape[0] > C.shape[1]):
raise ParameterError('For diagonal matching: Y.shape[1] >= X.shape[1] '
'(C.shape[1] >= C.shape[0])')
max_0 = step_sizes_sigma[:, 0].max()
max_1 = step_sizes_sigma[:, 1].max()
if global_constraints:
# Apply global constraints to the cost matrix
fill_off_diagonal(C, band_rad, value=np.inf)
# initialize whole matrix with infinity values
D = np.ones(C.shape + np.array([max_0, max_1])) * np.inf
# set starting point to C[0, 0]
D[max_0, max_1] = C[0, 0]
if subseq:
D[max_0, max_1:] = C[0, :]
# initialize step matrix with -1
# will be filled in calc_accu_cost() with indices from step_sizes_sigma
D_steps = -1 * np.ones(D.shape, dtype=np.int)
# calculate accumulated cost matrix
D, D_steps = __dtw_calc_accu_cost(C, D, D_steps,
step_sizes_sigma,
weights_mul, weights_add,
max_0, max_1)
# delete infinity rows and columns
D = D[max_0:, max_1:]
D_steps = D_steps[max_0:, max_1:]
if backtrack:
if subseq:
# search for global minimum in last row of D-matrix
wp_end_idx = np.argmin(D[-1, :]) + 1
wp = __dtw_backtracking(D_steps[:, :wp_end_idx], step_sizes_sigma)
else:
# perform warping path backtracking
wp = __dtw_backtracking(D_steps, step_sizes_sigma)
wp = np.asarray(wp, dtype=int)
# since we transposed in the beginning, we have to adjust the index pairs back
if subseq and (X.shape[1] > Y.shape[1]):
wp = np.fliplr(wp)
return D, wp
else:
return D
|
def dtw(X=None, Y=None, C=None, metric='euclidean', step_sizes_sigma=None,
weights_add=None, weights_mul=None, subseq=False, backtrack=True,
global_constraints=False, band_rad=0.25):
'''Dynamic time warping (DTW).
This function performs a DTW and path backtracking on two sequences.
We follow the nomenclature and algorithmic approach as described in [1]_.
.. [1] Meinard Mueller
Fundamentals of Music Processing — Audio, Analysis, Algorithms, Applications
Springer Verlag, ISBN: 978-3-319-21944-8, 2015.
Parameters
----------
X : np.ndarray [shape=(K, N)]
audio feature matrix (e.g., chroma features)
Y : np.ndarray [shape=(K, M)]
audio feature matrix (e.g., chroma features)
C : np.ndarray [shape=(N, M)]
Precomputed distance matrix. If supplied, X and Y must not be supplied and
``metric`` will be ignored.
metric : str
Identifier for the cost-function as documented
in `scipy.spatial.cdist()`
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
subseq : binary
Enable subsequence DTW, e.g., for retrieval tasks.
backtrack : binary
Enable backtracking in accumulated cost matrix.
global_constraints : binary
Applies global constraints to the cost matrix ``C`` (Sakoe-Chiba band).
band_rad : float
The Sakoe-Chiba band radius (1/2 of the width) will be
``int(radius*min(C.shape))``.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
wp : np.ndarray [shape=(N,2)]
Warping path with index pairs.
Each row of the array contains an index pair (n, m).
Only returned when ``backtrack`` is True.
Raises
------
ParameterError
If you are doing diagonal matching and Y is shorter than X or if an incompatible
combination of X, Y, and C is supplied.
If your input dimensions are incompatible.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(), offset=10, duration=15)
>>> X = librosa.feature.chroma_cens(y=y, sr=sr)
>>> noise = np.random.rand(X.shape[0], 200)
>>> Y = np.concatenate((noise, noise, X, noise), axis=1)
>>> D, wp = librosa.sequence.dtw(X, Y, subseq=True)
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(D, x_axis='frames', y_axis='frames')
>>> plt.title('Database excerpt')
>>> plt.plot(wp[:, 1], wp[:, 0], label='Optimal path', color='y')
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> plt.plot(D[-1, :] / wp.shape[0])
>>> plt.xlim([0, Y.shape[1]])
>>> plt.ylim([0, 2])
>>> plt.title('Matching cost function')
>>> plt.tight_layout()
'''
# Default Parameters
if step_sizes_sigma is None:
step_sizes_sigma = np.array([[1, 1], [0, 1], [1, 0]])
if weights_add is None:
weights_add = np.zeros(len(step_sizes_sigma))
if weights_mul is None:
weights_mul = np.ones(len(step_sizes_sigma))
if len(step_sizes_sigma) != len(weights_add):
raise ParameterError('len(weights_add) must be equal to len(step_sizes_sigma)')
if len(step_sizes_sigma) != len(weights_mul):
raise ParameterError('len(weights_mul) must be equal to len(step_sizes_sigma)')
if C is None and (X is None or Y is None):
raise ParameterError('If C is not supplied, both X and Y must be supplied')
if C is not None and (X is not None or Y is not None):
raise ParameterError('If C is supplied, both X and Y must not be supplied')
# calculate pair-wise distances, unless already supplied.
if C is None:
# take care of dimensions
X = np.atleast_2d(X)
Y = np.atleast_2d(Y)
try:
C = cdist(X.T, Y.T, metric=metric)
except ValueError:
msg = ('scipy.spatial.distance.cdist returned an error.\n'
'Please provide your input in the form X.shape=(K, N) and Y.shape=(K, M).\n'
'1-dimensional sequences should be reshaped to X.shape=(1, N) and Y.shape=(1, M).')
six.reraise(ParameterError, ParameterError(msg))
# for subsequence matching:
# if N > M, Y can be a subsequence of X
if subseq and (X.shape[1] > Y.shape[1]):
C = C.T
C = np.atleast_2d(C)
# if diagonal matching, Y has to be longer than X
# (X simply cannot be contained in Y)
if np.array_equal(step_sizes_sigma, np.array([[1, 1]])) and (C.shape[0] > C.shape[1]):
raise ParameterError('For diagonal matching: Y.shape[1] >= X.shape[1] '
'(C.shape[1] >= C.shape[0])')
max_0 = step_sizes_sigma[:, 0].max()
max_1 = step_sizes_sigma[:, 1].max()
if global_constraints:
# Apply global constraints to the cost matrix
fill_off_diagonal(C, band_rad, value=np.inf)
# initialize whole matrix with infinity values
D = np.ones(C.shape + np.array([max_0, max_1])) * np.inf
# set starting point to C[0, 0]
D[max_0, max_1] = C[0, 0]
if subseq:
D[max_0, max_1:] = C[0, :]
# initialize step matrix with -1
# will be filled in calc_accu_cost() with indices from step_sizes_sigma
D_steps = -1 * np.ones(D.shape, dtype=np.int)
# calculate accumulated cost matrix
D, D_steps = __dtw_calc_accu_cost(C, D, D_steps,
step_sizes_sigma,
weights_mul, weights_add,
max_0, max_1)
# delete infinity rows and columns
D = D[max_0:, max_1:]
D_steps = D_steps[max_0:, max_1:]
if backtrack:
if subseq:
# search for global minimum in last row of D-matrix
wp_end_idx = np.argmin(D[-1, :]) + 1
wp = __dtw_backtracking(D_steps[:, :wp_end_idx], step_sizes_sigma)
else:
# perform warping path backtracking
wp = __dtw_backtracking(D_steps, step_sizes_sigma)
wp = np.asarray(wp, dtype=int)
# since we transposed in the beginning, we have to adjust the index pairs back
if subseq and (X.shape[1] > Y.shape[1]):
wp = np.fliplr(wp)
return D, wp
else:
return D
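Beyond the subsequence example in the docstring, here is a tiny full-alignment sketch on two short 1-D sequences, reshaped to `(1, N)` and `(1, M)` as the error message above requests. The backtracked path runs from the end of the sequences back to the start, so it is reversed below for display.

import numpy as np
import librosa

x = np.array([[0., 1., 2., 3.]])        # shape (1, N)
y = np.array([[0., 1., 1., 2., 3.]])    # shape (1, M)

D, wp = librosa.sequence.dtw(X=x, Y=y, metric='euclidean')
print(D[-1, -1])      # total alignment cost (0.0: y is x with one repeated value)
print(wp[::-1])       # warping path from (0, 0) to (N-1, M-1)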
|
[
"Dynamic",
"time",
"warping",
"(",
"DTW",
")",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L52-L234
|
[
"def",
"dtw",
"(",
"X",
"=",
"None",
",",
"Y",
"=",
"None",
",",
"C",
"=",
"None",
",",
"metric",
"=",
"'euclidean'",
",",
"step_sizes_sigma",
"=",
"None",
",",
"weights_add",
"=",
"None",
",",
"weights_mul",
"=",
"None",
",",
"subseq",
"=",
"False",
",",
"backtrack",
"=",
"True",
",",
"global_constraints",
"=",
"False",
",",
"band_rad",
"=",
"0.25",
")",
":",
"# Default Parameters",
"if",
"step_sizes_sigma",
"is",
"None",
":",
"step_sizes_sigma",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"1",
"]",
",",
"[",
"0",
",",
"1",
"]",
",",
"[",
"1",
",",
"0",
"]",
"]",
")",
"if",
"weights_add",
"is",
"None",
":",
"weights_add",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"step_sizes_sigma",
")",
")",
"if",
"weights_mul",
"is",
"None",
":",
"weights_mul",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"step_sizes_sigma",
")",
")",
"if",
"len",
"(",
"step_sizes_sigma",
")",
"!=",
"len",
"(",
"weights_add",
")",
":",
"raise",
"ParameterError",
"(",
"'len(weights_add) must be equal to len(step_sizes_sigma)'",
")",
"if",
"len",
"(",
"step_sizes_sigma",
")",
"!=",
"len",
"(",
"weights_mul",
")",
":",
"raise",
"ParameterError",
"(",
"'len(weights_mul) must be equal to len(step_sizes_sigma)'",
")",
"if",
"C",
"is",
"None",
"and",
"(",
"X",
"is",
"None",
"or",
"Y",
"is",
"None",
")",
":",
"raise",
"ParameterError",
"(",
"'If C is not supplied, both X and Y must be supplied'",
")",
"if",
"C",
"is",
"not",
"None",
"and",
"(",
"X",
"is",
"not",
"None",
"or",
"Y",
"is",
"not",
"None",
")",
":",
"raise",
"ParameterError",
"(",
"'If C is supplied, both X and Y must not be supplied'",
")",
"# calculate pair-wise distances, unless already supplied.",
"if",
"C",
"is",
"None",
":",
"# take care of dimensions",
"X",
"=",
"np",
".",
"atleast_2d",
"(",
"X",
")",
"Y",
"=",
"np",
".",
"atleast_2d",
"(",
"Y",
")",
"try",
":",
"C",
"=",
"cdist",
"(",
"X",
".",
"T",
",",
"Y",
".",
"T",
",",
"metric",
"=",
"metric",
")",
"except",
"ValueError",
":",
"msg",
"=",
"(",
"'scipy.spatial.distance.cdist returned an error.\\n'",
"'Please provide your input in the form X.shape=(K, N) and Y.shape=(K, M).\\n'",
"'1-dimensional sequences should be reshaped to X.shape=(1, N) and Y.shape=(1, M).'",
")",
"six",
".",
"reraise",
"(",
"ParameterError",
",",
"ParameterError",
"(",
"msg",
")",
")",
"# for subsequence matching:",
"# if N > M, Y can be a subsequence of X",
"if",
"subseq",
"and",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
">",
"Y",
".",
"shape",
"[",
"1",
"]",
")",
":",
"C",
"=",
"C",
".",
"T",
"C",
"=",
"np",
".",
"atleast_2d",
"(",
"C",
")",
"# if diagonal matching, Y has to be longer than X",
"# (X simply cannot be contained in Y)",
"if",
"np",
".",
"array_equal",
"(",
"step_sizes_sigma",
",",
"np",
".",
"array",
"(",
"[",
"[",
"1",
",",
"1",
"]",
"]",
")",
")",
"and",
"(",
"C",
".",
"shape",
"[",
"0",
"]",
">",
"C",
".",
"shape",
"[",
"1",
"]",
")",
":",
"raise",
"ParameterError",
"(",
"'For diagonal matching: Y.shape[1] >= X.shape[1] '",
"'(C.shape[1] >= C.shape[0])'",
")",
"max_0",
"=",
"step_sizes_sigma",
"[",
":",
",",
"0",
"]",
".",
"max",
"(",
")",
"max_1",
"=",
"step_sizes_sigma",
"[",
":",
",",
"1",
"]",
".",
"max",
"(",
")",
"if",
"global_constraints",
":",
"# Apply global constraints to the cost matrix",
"fill_off_diagonal",
"(",
"C",
",",
"band_rad",
",",
"value",
"=",
"np",
".",
"inf",
")",
"# initialize whole matrix with infinity values",
"D",
"=",
"np",
".",
"ones",
"(",
"C",
".",
"shape",
"+",
"np",
".",
"array",
"(",
"[",
"max_0",
",",
"max_1",
"]",
")",
")",
"*",
"np",
".",
"inf",
"# set starting point to C[0, 0]",
"D",
"[",
"max_0",
",",
"max_1",
"]",
"=",
"C",
"[",
"0",
",",
"0",
"]",
"if",
"subseq",
":",
"D",
"[",
"max_0",
",",
"max_1",
":",
"]",
"=",
"C",
"[",
"0",
",",
":",
"]",
"# initialize step matrix with -1",
"# will be filled in calc_accu_cost() with indices from step_sizes_sigma",
"D_steps",
"=",
"-",
"1",
"*",
"np",
".",
"ones",
"(",
"D",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"# calculate accumulated cost matrix",
"D",
",",
"D_steps",
"=",
"__dtw_calc_accu_cost",
"(",
"C",
",",
"D",
",",
"D_steps",
",",
"step_sizes_sigma",
",",
"weights_mul",
",",
"weights_add",
",",
"max_0",
",",
"max_1",
")",
"# delete infinity rows and columns",
"D",
"=",
"D",
"[",
"max_0",
":",
",",
"max_1",
":",
"]",
"D_steps",
"=",
"D_steps",
"[",
"max_0",
":",
",",
"max_1",
":",
"]",
"if",
"backtrack",
":",
"if",
"subseq",
":",
"# search for global minimum in last row of D-matrix",
"wp_end_idx",
"=",
"np",
".",
"argmin",
"(",
"D",
"[",
"-",
"1",
",",
":",
"]",
")",
"+",
"1",
"wp",
"=",
"__dtw_backtracking",
"(",
"D_steps",
"[",
":",
",",
":",
"wp_end_idx",
"]",
",",
"step_sizes_sigma",
")",
"else",
":",
"# perform warping path backtracking",
"wp",
"=",
"__dtw_backtracking",
"(",
"D_steps",
",",
"step_sizes_sigma",
")",
"wp",
"=",
"np",
".",
"asarray",
"(",
"wp",
",",
"dtype",
"=",
"int",
")",
"# since we transposed in the beginning, we have to adjust the index pairs back",
"if",
"subseq",
"and",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
">",
"Y",
".",
"shape",
"[",
"1",
"]",
")",
":",
"wp",
"=",
"np",
".",
"fliplr",
"(",
"wp",
")",
"return",
"D",
",",
"wp",
"else",
":",
"return",
"D"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__dtw_calc_accu_cost
|
Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw
|
librosa/sequence.py
|
def __dtw_calc_accu_cost(C, D, D_steps, step_sizes_sigma,
weights_mul, weights_add, max_0, max_1): # pragma: no cover
'''Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw
'''
for cur_n in range(max_0, D.shape[0]):
for cur_m in range(max_1, D.shape[1]):
# accumulate costs
for cur_step_idx, cur_w_add, cur_w_mul in zip(range(step_sizes_sigma.shape[0]),
weights_add, weights_mul):
cur_D = D[cur_n - step_sizes_sigma[cur_step_idx, 0],
cur_m - step_sizes_sigma[cur_step_idx, 1]]
cur_C = cur_w_mul * C[cur_n - max_0, cur_m - max_1]
cur_C += cur_w_add
cur_cost = cur_D + cur_C
# check if cur_cost is smaller than the one stored in D
if cur_cost < D[cur_n, cur_m]:
D[cur_n, cur_m] = cur_cost
# save step-index
D_steps[cur_n, cur_m] = cur_step_idx
return D, D_steps
|
def __dtw_calc_accu_cost(C, D, D_steps, step_sizes_sigma,
weights_mul, weights_add, max_0, max_1): # pragma: no cover
'''Calculate the accumulated cost matrix D.
Use dynamic programming to calculate the accumulated costs.
Parameters
----------
C : np.ndarray [shape=(N, M)]
pre-computed cost matrix
D : np.ndarray [shape=(N, M)]
accumulated cost matrix
D_steps : np.ndarray [shape=(N, M)]
steps which were used for calculating D
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
weights_add : np.ndarray [shape=[n, ]]
Additive weights to penalize certain step sizes.
weights_mul : np.ndarray [shape=[n, ]]
Multiplicative weights to penalize certain step sizes.
max_0 : int
maximum number of steps in step_sizes_sigma in dim 0.
max_1 : int
maximum number of steps in step_sizes_sigma in dim 1.
Returns
-------
D : np.ndarray [shape=(N,M)]
accumulated cost matrix.
D[N,M] is the total alignment cost.
When doing subsequence DTW, D[N,:] indicates a matching function.
D_steps : np.ndarray [shape=(N,M)]
steps which were used for calculating D.
See Also
--------
dtw
'''
for cur_n in range(max_0, D.shape[0]):
for cur_m in range(max_1, D.shape[1]):
# accumulate costs
for cur_step_idx, cur_w_add, cur_w_mul in zip(range(step_sizes_sigma.shape[0]),
weights_add, weights_mul):
cur_D = D[cur_n - step_sizes_sigma[cur_step_idx, 0],
cur_m - step_sizes_sigma[cur_step_idx, 1]]
cur_C = cur_w_mul * C[cur_n - max_0, cur_m - max_1]
cur_C += cur_w_add
cur_cost = cur_D + cur_C
# check if cur_cost is smaller than the one stored in D
if cur_cost < D[cur_n, cur_m]:
D[cur_n, cur_m] = cur_cost
# save step-index
D_steps[cur_n, cur_m] = cur_step_idx
return D, D_steps
|
[
"Calculate",
"the",
"accumulated",
"cost",
"matrix",
"D",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L238-L302
|
[
"def",
"__dtw_calc_accu_cost",
"(",
"C",
",",
"D",
",",
"D_steps",
",",
"step_sizes_sigma",
",",
"weights_mul",
",",
"weights_add",
",",
"max_0",
",",
"max_1",
")",
":",
"# pragma: no cover",
"for",
"cur_n",
"in",
"range",
"(",
"max_0",
",",
"D",
".",
"shape",
"[",
"0",
"]",
")",
":",
"for",
"cur_m",
"in",
"range",
"(",
"max_1",
",",
"D",
".",
"shape",
"[",
"1",
"]",
")",
":",
"# accumulate costs",
"for",
"cur_step_idx",
",",
"cur_w_add",
",",
"cur_w_mul",
"in",
"zip",
"(",
"range",
"(",
"step_sizes_sigma",
".",
"shape",
"[",
"0",
"]",
")",
",",
"weights_add",
",",
"weights_mul",
")",
":",
"cur_D",
"=",
"D",
"[",
"cur_n",
"-",
"step_sizes_sigma",
"[",
"cur_step_idx",
",",
"0",
"]",
",",
"cur_m",
"-",
"step_sizes_sigma",
"[",
"cur_step_idx",
",",
"1",
"]",
"]",
"cur_C",
"=",
"cur_w_mul",
"*",
"C",
"[",
"cur_n",
"-",
"max_0",
",",
"cur_m",
"-",
"max_1",
"]",
"cur_C",
"+=",
"cur_w_add",
"cur_cost",
"=",
"cur_D",
"+",
"cur_C",
"# check if cur_cost is smaller than the one stored in D",
"if",
"cur_cost",
"<",
"D",
"[",
"cur_n",
",",
"cur_m",
"]",
":",
"D",
"[",
"cur_n",
",",
"cur_m",
"]",
"=",
"cur_cost",
"# save step-index",
"D_steps",
"[",
"cur_n",
",",
"cur_m",
"]",
"=",
"cur_step_idx",
"return",
"D",
",",
"D_steps"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
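Since `__dtw_calc_accu_cost` is a private helper, the row above is easiest to read as a plain dynamic program. The sketch below re-derives the same accumulation on a tiny hand-made cost matrix with the default step set {(1,1), (0,1), (1,0)} and unit weights, without the infinity padding used in the library.

import numpy as np

C = np.array([[1., 3., 4.],
              [2., 1., 3.],
              [5., 2., 1.]])
steps = np.array([[1, 1], [0, 1], [1, 0]])

D = np.full(C.shape, np.inf)
D[0, 0] = C[0, 0]
for n in range(C.shape[0]):
    for m in range(C.shape[1]):
        if n == 0 and m == 0:
            continue
        for dn, dm in steps:
            if n - dn >= 0 and m - dm >= 0:
                # same recurrence: cheapest valid predecessor plus local cost
                D[n, m] = min(D[n, m], D[n - dn, m - dm] + C[n, m])

print(D[-1, -1])   # total alignment cost; 3.0 for this matrix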
test
|
__dtw_backtracking
|
Backtrack optimal warping path.
Uses the saved step sizes from the cost accumulation
step to backtrack the index pairs for an optimal
warping path.
Parameters
----------
D_steps : np.ndarray [shape=(N, M)]
Saved indices of the used steps used in the calculation of D.
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
Returns
-------
wp : list [shape=(N,)]
Warping path with index pairs.
Each list entry contains an index pair
(n,m) as a tuple
See Also
--------
dtw
|
librosa/sequence.py
|
def __dtw_backtracking(D_steps, step_sizes_sigma): # pragma: no cover
'''Backtrack optimal warping path.
Uses the saved step sizes from the cost accumulation
step to backtrack the index pairs for an optimal
warping path.
Parameters
----------
D_steps : np.ndarray [shape=(N, M)]
Saved indices of the used steps used in the calculation of D.
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
Returns
-------
wp : list [shape=(N,)]
Warping path with index pairs.
Each list entry contains an index pair
(n,m) as a tuple
See Also
--------
dtw
'''
wp = []
# Set starting point D(N,M) and append it to the path
cur_idx = (D_steps.shape[0] - 1, D_steps.shape[1] - 1)
wp.append((cur_idx[0], cur_idx[1]))
# Loop backwards.
# Stop criteria:
# Setting it to (0, 0) does not work for the subsequence dtw,
# so we only ask to reach the first row of the matrix.
while cur_idx[0] > 0:
cur_step_idx = D_steps[(cur_idx[0], cur_idx[1])]
# save tuple with minimal acc. cost in path
cur_idx = (cur_idx[0] - step_sizes_sigma[cur_step_idx][0],
cur_idx[1] - step_sizes_sigma[cur_step_idx][1])
# append to warping path
wp.append((cur_idx[0], cur_idx[1]))
return wp
|
def __dtw_backtracking(D_steps, step_sizes_sigma): # pragma: no cover
'''Backtrack optimal warping path.
Uses the saved step sizes from the cost accumulation
step to backtrack the index pairs for an optimal
warping path.
Parameters
----------
D_steps : np.ndarray [shape=(N, M)]
Saved indices of the used steps used in the calculation of D.
step_sizes_sigma : np.ndarray [shape=[n, 2]]
Specifies allowed step sizes as used by the dtw.
Returns
-------
wp : list [shape=(N,)]
Warping path with index pairs.
Each list entry contains an index pair
(n,m) as a tuple
See Also
--------
dtw
'''
wp = []
# Set starting point D(N,M) and append it to the path
cur_idx = (D_steps.shape[0] - 1, D_steps.shape[1] - 1)
wp.append((cur_idx[0], cur_idx[1]))
# Loop backwards.
# Stop criteria:
# Setting it to (0, 0) does not work for the subsequence dtw,
# so we only ask to reach the first row of the matrix.
while cur_idx[0] > 0:
cur_step_idx = D_steps[(cur_idx[0], cur_idx[1])]
# save tuple with minimal acc. cost in path
cur_idx = (cur_idx[0] - step_sizes_sigma[cur_step_idx][0],
cur_idx[1] - step_sizes_sigma[cur_step_idx][1])
# append to warping path
wp.append((cur_idx[0], cur_idx[1]))
return wp
|
[
"Backtrack",
"optimal",
"warping",
"path",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L306-L352
|
[
"def",
"__dtw_backtracking",
"(",
"D_steps",
",",
"step_sizes_sigma",
")",
":",
"# pragma: no cover",
"wp",
"=",
"[",
"]",
"# Set starting point D(N,M) and append it to the path",
"cur_idx",
"=",
"(",
"D_steps",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
",",
"D_steps",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
")",
"wp",
".",
"append",
"(",
"(",
"cur_idx",
"[",
"0",
"]",
",",
"cur_idx",
"[",
"1",
"]",
")",
")",
"# Loop backwards.",
"# Stop criteria:",
"# Setting it to (0, 0) does not work for the subsequence dtw,",
"# so we only ask to reach the first row of the matrix.",
"while",
"cur_idx",
"[",
"0",
"]",
">",
"0",
":",
"cur_step_idx",
"=",
"D_steps",
"[",
"(",
"cur_idx",
"[",
"0",
"]",
",",
"cur_idx",
"[",
"1",
"]",
")",
"]",
"# save tuple with minimal acc. cost in path",
"cur_idx",
"=",
"(",
"cur_idx",
"[",
"0",
"]",
"-",
"step_sizes_sigma",
"[",
"cur_step_idx",
"]",
"[",
"0",
"]",
",",
"cur_idx",
"[",
"1",
"]",
"-",
"step_sizes_sigma",
"[",
"cur_step_idx",
"]",
"[",
"1",
"]",
")",
"# append to warping path",
"wp",
".",
"append",
"(",
"(",
"cur_idx",
"[",
"0",
"]",
",",
"cur_idx",
"[",
"1",
"]",
")",
")",
"return",
"wp"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
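The backtracking helper only needs the saved step indices, so it can be illustrated without running the full DTW. The sketch below follows the same loop on a hand-constructed (hypothetical) `D_steps`, with values chosen so the recovered path is the main diagonal; the stop condition mirrors the library's: walk until the first row is reached.

import numpy as np

steps = np.array([[1, 1], [0, 1], [1, 0]])
# D_steps[n, m] holds the index (into `steps`) of the move that produced D[n, m]
D_steps = np.array([[-1, 1, 1],
                    [ 2, 0, 0],
                    [ 2, 2, 0]])

idx = (D_steps.shape[0] - 1, D_steps.shape[1] - 1)
wp = [idx]
while idx[0] > 0:                       # stop at the first row (subsequence-friendly)
    s = steps[D_steps[idx]]
    idx = (int(idx[0] - s[0]), int(idx[1] - s[1]))
    wp.append(idx)

print(wp)                               # [(2, 2), (1, 1), (0, 0)]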
test
|
_viterbi
|
Core Viterbi algorithm.
This is intended for internal use only.
Parameters
----------
log_prob : np.ndarray [shape=(T, m)]
`log_prob[t, s]` is the conditional log-likelihood
log P[X = X(t) | State(t) = s]
log_trans : np.ndarray [shape=(m, m)]
The log transition matrix
`log_trans[i, j]` = log P[State(t+1) = j | State(t) = i]
log_p_init : np.ndarray [shape=(m,)]
log of the initial state distribution
state : np.ndarray [shape=(T,), dtype=int]
Pre-allocated state index array
value : np.ndarray [shape=(T, m)] float
Pre-allocated value array
ptr : np.ndarray [shape=(T, m), dtype=int]
Pre-allocated pointer array
Returns
-------
None
All computations are performed in-place on `state, value, ptr`.
|
librosa/sequence.py
|
def _viterbi(log_prob, log_trans, log_p_init, state, value, ptr): # pragma: no cover
'''Core Viterbi algorithm.
This is intended for internal use only.
Parameters
----------
log_prob : np.ndarray [shape=(T, m)]
`log_prob[t, s]` is the conditional log-likelihood
log P[X = X(t) | State(t) = s]
log_trans : np.ndarray [shape=(m, m)]
The log transition matrix
`log_trans[i, j]` = log P[State(t+1) = j | State(t) = i]
log_p_init : np.ndarray [shape=(m,)]
log of the initial state distribution
state : np.ndarray [shape=(T,), dtype=int]
Pre-allocated state index array
value : np.ndarray [shape=(T, m)] float
Pre-allocated value array
ptr : np.ndarray [shape=(T, m), dtype=int]
Pre-allocated pointer array
Returns
-------
None
All computations are performed in-place on `state, value, ptr`.
'''
n_steps, n_states = log_prob.shape
# factor in initial state distribution
value[0] = log_prob[0] + log_p_init
for t in range(1, n_steps):
# Want V[t, j] <- p[t, j] * max_k V[t-1, k] * A[k, j]
# assume at time t-1 we were in state k
# transition k -> j
# Broadcast over rows:
# Tout[k, j] = V[t-1, k] * A[k, j]
# then take the max over columns
# We'll do this in log-space for stability
trans_out = value[t - 1] + log_trans.T
# Unroll the max/argmax loop to enable numba support
for j in range(n_states):
ptr[t, j] = np.argmax(trans_out[j])
# value[t, j] = log_prob[t, j] + np.max(trans_out[j])
value[t, j] = log_prob[t, j] + trans_out[j, ptr[t][j]]
# Now roll backward
# Get the last state
state[-1] = np.argmax(value[-1])
for t in range(n_steps - 2, -1, -1):
state[t] = ptr[t+1, state[t+1]]
|
def _viterbi(log_prob, log_trans, log_p_init, state, value, ptr): # pragma: no cover
'''Core Viterbi algorithm.
This is intended for internal use only.
Parameters
----------
log_prob : np.ndarray [shape=(T, m)]
`log_prob[t, s]` is the conditional log-likelihood
log P[X = X(t) | State(t) = s]
log_trans : np.ndarray [shape=(m, m)]
The log transition matrix
`log_trans[i, j]` = log P[State(t+1) = j | State(t) = i]
log_p_init : np.ndarray [shape=(m,)]
log of the initial state distribution
state : np.ndarray [shape=(T,), dtype=int]
Pre-allocated state index array
value : np.ndarray [shape=(T, m)] float
Pre-allocated value array
ptr : np.ndarray [shape=(T, m), dtype=int]
Pre-allocated pointer array
Returns
-------
None
All computations are performed in-place on `state, value, ptr`.
'''
n_steps, n_states = log_prob.shape
# factor in initial state distribution
value[0] = log_prob[0] + log_p_init
for t in range(1, n_steps):
# Want V[t, j] <- p[t, j] * max_k V[t-1, k] * A[k, j]
# assume at time t-1 we were in state k
# transition k -> j
# Broadcast over rows:
# Tout[k, j] = V[t-1, k] * A[k, j]
# then take the max over columns
# We'll do this in log-space for stability
trans_out = value[t - 1] + log_trans.T
# Unroll the max/argmax loop to enable numba support
for j in range(n_states):
ptr[t, j] = np.argmax(trans_out[j])
# value[t, j] = log_prob[t, j] + np.max(trans_out[j])
value[t, j] = log_prob[t, j] + trans_out[j, ptr[t][j]]
# Now roll backward
# Get the last state
state[-1] = np.argmax(value[-1])
for t in range(n_steps - 2, -1, -1):
state[t] = ptr[t+1, state[t+1]]
|
[
"Core",
"Viterbi",
"algorithm",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L356-L417
|
[
"def",
"_viterbi",
"(",
"log_prob",
",",
"log_trans",
",",
"log_p_init",
",",
"state",
",",
"value",
",",
"ptr",
")",
":",
"# pragma: no cover",
"n_steps",
",",
"n_states",
"=",
"log_prob",
".",
"shape",
"# factor in initial state distribution",
"value",
"[",
"0",
"]",
"=",
"log_prob",
"[",
"0",
"]",
"+",
"log_p_init",
"for",
"t",
"in",
"range",
"(",
"1",
",",
"n_steps",
")",
":",
"# Want V[t, j] <- p[t, j] * max_k V[t-1, k] * A[k, j]",
"# assume at time t-1 we were in state k",
"# transition k -> j",
"# Broadcast over rows:",
"# Tout[k, j] = V[t-1, k] * A[k, j]",
"# then take the max over columns",
"# We'll do this in log-space for stability",
"trans_out",
"=",
"value",
"[",
"t",
"-",
"1",
"]",
"+",
"log_trans",
".",
"T",
"# Unroll the max/argmax loop to enable numba support",
"for",
"j",
"in",
"range",
"(",
"n_states",
")",
":",
"ptr",
"[",
"t",
",",
"j",
"]",
"=",
"np",
".",
"argmax",
"(",
"trans_out",
"[",
"j",
"]",
")",
"# value[t, j] = log_prob[t, j] + np.max(trans_out[j])",
"value",
"[",
"t",
",",
"j",
"]",
"=",
"log_prob",
"[",
"t",
",",
"j",
"]",
"+",
"trans_out",
"[",
"j",
",",
"ptr",
"[",
"t",
"]",
"[",
"j",
"]",
"]",
"# Now roll backward",
"# Get the last state",
"state",
"[",
"-",
"1",
"]",
"=",
"np",
".",
"argmax",
"(",
"value",
"[",
"-",
"1",
"]",
")",
"for",
"t",
"in",
"range",
"(",
"n_steps",
"-",
"2",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"state",
"[",
"t",
"]",
"=",
"ptr",
"[",
"t",
"+",
"1",
",",
"state",
"[",
"t",
"+",
"1",
"]",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
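`_viterbi` is written with explicit loops and pre-allocated outputs (the inline comment notes the unrolled argmax loop is there for numba support), but the underlying recursion is short. Below is a self-contained log-space sketch of that recursion on synthetic inputs (two states, three observations), building the same `value`, `ptr`, and `state` arrays that the library fills in place.

import numpy as np

log_prob = np.log(np.array([[0.7, 0.3],     # shape=(T, m): P[obs_t | state s]
                            [0.4, 0.6],
                            [0.2, 0.8]]))
log_trans = np.log(np.array([[0.9, 0.1],    # P[state_{t+1}=j | state_t=i]
                             [0.2, 0.8]]))
log_p_init = np.log(np.array([0.5, 0.5]))

T, m = log_prob.shape
value = np.empty((T, m))
ptr = np.zeros((T, m), dtype=int)
value[0] = log_prob[0] + log_p_init
for t in range(1, T):
    # trans_out[j, k] = value[t-1, k] + log_trans[k, j]
    trans_out = value[t - 1] + log_trans.T
    ptr[t] = np.argmax(trans_out, axis=1)
    value[t] = log_prob[t] + trans_out[np.arange(m), ptr[t]]

state = np.empty(T, dtype=int)
state[-1] = np.argmax(value[-1])
for t in range(T - 2, -1, -1):
    state[t] = ptr[t + 1, state[t + 1]]

print(state)                            # [1 1 1] for these inputs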
test
|
viterbi_discriminative
|
Viterbi decoding from discriminative state predictions.
Given a sequence of conditional state predictions `prob[s, t]`,
indicating the conditional likelihood of state `s` given the
observation at time `t`, and a transition matrix `transition[i, j]`
which encodes the conditional probability of moving from state `i`
to state `j`, the Viterbi algorithm computes the most likely sequence
of states from the observations.
This implementation uses the standard Viterbi decoding algorithm
for observation likelihood sequences, under the assumption that
`P[Obs(t) | State(t) = s]` is proportional to
`P[State(t) = s | Obs(t)] / P[State(t) = s]`, where the denominator
is the marginal probability of state `s` occurring as given by `p_state`.
Parameters
----------
prob : np.ndarray [shape=(n_states, n_steps), non-negative]
`prob[s, t]` is the probability of state `s` conditional on
the observation at time `t`.
Must be non-negative and sum to 1 along each column.
transition : np.ndarray [shape=(n_states, n_states), non-negative]
`transition[i, j]` is the probability of a transition from i->j.
Each row must sum to 1.
p_state : np.ndarray [shape=(n_states,)]
Optional: marginal probability distribution over states,
must be non-negative and sum to 1.
If not provided, a uniform distribution is assumed.
p_init : np.ndarray [shape=(n_states,)]
Optional: initial state distribution.
If not provided, it is assumed to be uniform.
return_logp : bool
If `True`, return the log-likelihood of the state sequence.
Returns
-------
Either `states` or `(states, logp)`:
states : np.ndarray [shape=(n_steps,)]
The most likely state sequence.
logp : scalar [float]
If `return_logp=True`, the log probability of `states` given
the observations.
See Also
--------
viterbi : Viterbi decoding from observation likelihoods
viterbi_binary: Viterbi decoding for multi-label, conditional state likelihoods
Examples
--------
This example constructs a simple, template-based discriminative chord estimator,
using CENS chroma as input features.
.. note:: this chord model is not accurate enough to use in practice. It is only
intended to demonstrate how to use discriminative Viterbi decoding.
>>> # Create templates for major, minor, and no-chord qualities
>>> maj_template = np.array([1,0,0, 0,1,0, 0,1,0, 0,0,0])
>>> min_template = np.array([1,0,0, 1,0,0, 0,1,0, 0,0,0])
>>> N_template = np.array([1,1,1, 1,1,1, 1,1,1, 1,1,1.]) / 4.
>>> # Generate the weighting matrix that maps chroma to labels
>>> weights = np.zeros((25, 12), dtype=float)
>>> labels = ['C:maj', 'C#:maj', 'D:maj', 'D#:maj', 'E:maj', 'F:maj',
... 'F#:maj', 'G:maj', 'G#:maj', 'A:maj', 'A#:maj', 'B:maj',
... 'C:min', 'C#:min', 'D:min', 'D#:min', 'E:min', 'F:min',
... 'F#:min', 'G:min', 'G#:min', 'A:min', 'A#:min', 'B:min',
... 'N']
>>> for c in range(12):
... weights[c, :] = np.roll(maj_template, c) # c:maj
... weights[c + 12, :] = np.roll(min_template, c) # c:min
>>> weights[-1] = N_template # the last row is the no-chord class
>>> # Make a self-loop transition matrix over 25 states
>>> trans = librosa.sequence.transition_loop(25, 0.9)
>>> # Load in audio and make features
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_cens(y=y, sr=sr, bins_per_octave=36)
>>> # Map chroma (observations) to class (state) likelihoods
>>> probs = np.exp(weights.dot(chroma)) # P[class | chroma] proportional to exp(template' chroma)
>>> probs /= probs.sum(axis=0, keepdims=True) # probabilities must sum to 1 in each column
>>> # Compute independent frame-wise estimates
>>> chords_ind = np.argmax(probs, axis=0)
>>> # And viterbi estimates
>>> chords_vit = librosa.sequence.viterbi_discriminative(probs, trans)
>>> # Plot the features and prediction map
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(chroma, x_axis='time', y_axis='chroma')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(weights, x_axis='chroma')
>>> plt.yticks(np.arange(25) + 0.5, labels)
>>> plt.ylabel('Chord')
>>> plt.colorbar()
>>> plt.tight_layout()
>>> # And plot the results
>>> plt.figure(figsize=(10, 4))
>>> librosa.display.specshow(probs, x_axis='time', cmap='gray')
>>> plt.colorbar()
>>> times = librosa.frames_to_time(np.arange(len(chords_vit)))
>>> plt.scatter(times, chords_ind + 0.75, color='lime', alpha=0.5, marker='+', s=15, label='Independent')
>>> plt.scatter(times, chords_vit + 0.25, color='deeppink', alpha=0.5, marker='o', s=15, label='Viterbi')
>>> plt.yticks(0.5 + np.unique(chords_vit), [labels[i] for i in np.unique(chords_vit)], va='center')
>>> plt.legend(loc='best')
>>> plt.tight_layout()
|
librosa/sequence.py
|
def viterbi_discriminative(prob, transition, p_state=None, p_init=None, return_logp=False):
'''Viterbi decoding from discriminative state predictions.
Given a sequence of conditional state predictions `prob[s, t]`,
indicating the conditional likelihood of state `s` given the
observation at time `t`, and a transition matrix `transition[i, j]`
which encodes the conditional probability of moving from state `i`
to state `j`, the Viterbi algorithm computes the most likely sequence
of states from the observations.
This implementation uses the standard Viterbi decoding algorithm
for observation likelihood sequences, under the assumption that
`P[Obs(t) | State(t) = s]` is proportional to
`P[State(t) = s | Obs(t)] / P[State(t) = s]`, where the denominator
is the marginal probability of state `s` occurring as given by `p_state`.
Parameters
----------
prob : np.ndarray [shape=(n_states, n_steps), non-negative]
`prob[s, t]` is the probability of state `s` conditional on
the observation at time `t`.
Must be non-negative and sum to 1 along each column.
transition : np.ndarray [shape=(n_states, n_states), non-negative]
`transition[i, j]` is the probability of a transition from i->j.
Each row must sum to 1.
p_state : np.ndarray [shape=(n_states,)]
Optional: marginal probability distribution over states,
must be non-negative and sum to 1.
If not provided, a uniform distribution is assumed.
p_init : np.ndarray [shape=(n_states,)]
Optional: initial state distribution.
If not provided, it is assumed to be uniform.
return_logp : bool
If `True`, return the log-likelihood of the state sequence.
Returns
-------
Either `states` or `(states, logp)`:
states : np.ndarray [shape=(n_steps,)]
The most likely state sequence.
logp : scalar [float]
If `return_logp=True`, the log probability of `states` given
the observations.
See Also
--------
viterbi : Viterbi decoding from observation likelihoods
viterbi_binary: Viterbi decoding for multi-label, conditional state likelihoods
Examples
--------
This example constructs a simple, template-based discriminative chord estimator,
using CENS chroma as input features.
.. note:: this chord model is not accurate enough to use in practice. It is only
intended to demonstrate how to use discriminative Viterbi decoding.
>>> # Create templates for major, minor, and no-chord qualities
>>> maj_template = np.array([1,0,0, 0,1,0, 0,1,0, 0,0,0])
>>> min_template = np.array([1,0,0, 1,0,0, 0,1,0, 0,0,0])
>>> N_template = np.array([1,1,1, 1,1,1, 1,1,1, 1,1,1.]) / 4.
>>> # Generate the weighting matrix that maps chroma to labels
>>> weights = np.zeros((25, 12), dtype=float)
>>> labels = ['C:maj', 'C#:maj', 'D:maj', 'D#:maj', 'E:maj', 'F:maj',
... 'F#:maj', 'G:maj', 'G#:maj', 'A:maj', 'A#:maj', 'B:maj',
... 'C:min', 'C#:min', 'D:min', 'D#:min', 'E:min', 'F:min',
... 'F#:min', 'G:min', 'G#:min', 'A:min', 'A#:min', 'B:min',
... 'N']
>>> for c in range(12):
... weights[c, :] = np.roll(maj_template, c) # c:maj
... weights[c + 12, :] = np.roll(min_template, c) # c:min
>>> weights[-1] = N_template # the last row is the no-chord class
>>> # Make a self-loop transition matrix over 25 states
>>> trans = librosa.sequence.transition_loop(25, 0.9)
>>> # Load in audio and make features
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_cens(y=y, sr=sr, bins_per_octave=36)
>>> # Map chroma (observations) to class (state) likelihoods
>>> probs = np.exp(weights.dot(chroma)) # P[class | chroma] proportional to exp(template' chroma)
>>> probs /= probs.sum(axis=0, keepdims=True) # probabilities must sum to 1 in each column
>>> # Compute independent frame-wise estimates
>>> chords_ind = np.argmax(probs, axis=0)
>>> # And viterbi estimates
>>> chords_vit = librosa.sequence.viterbi_discriminative(probs, trans)
>>> # Plot the features and prediction map
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(chroma, x_axis='time', y_axis='chroma')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(weights, x_axis='chroma')
>>> plt.yticks(np.arange(25) + 0.5, labels)
>>> plt.ylabel('Chord')
>>> plt.colorbar()
>>> plt.tight_layout()
>>> # And plot the results
>>> plt.figure(figsize=(10, 4))
>>> librosa.display.specshow(probs, x_axis='time', cmap='gray')
>>> plt.colorbar()
>>> times = librosa.frames_to_time(np.arange(len(chords_vit)))
>>> plt.scatter(times, chords_ind + 0.75, color='lime', alpha=0.5, marker='+', s=15, label='Independent')
>>> plt.scatter(times, chords_vit + 0.25, color='deeppink', alpha=0.5, marker='o', s=15, label='Viterbi')
>>> plt.yticks(0.5 + np.unique(chords_vit), [labels[i] for i in np.unique(chords_vit)], va='center')
>>> plt.legend(loc='best')
>>> plt.tight_layout()
'''
n_states, n_steps = prob.shape
if transition.shape != (n_states, n_states):
raise ParameterError('transition.shape={}, must be '
'(n_states, n_states)={}'.format(transition.shape,
(n_states, n_states)))
if np.any(transition < 0) or not np.allclose(transition.sum(axis=1), 1):
raise ParameterError('Invalid transition matrix: must be non-negative '
'and sum to 1 on each row.')
if np.any(prob < 0) or not np.allclose(prob.sum(axis=0), 1):
raise ParameterError('Invalid probability values: each column must '
'sum to 1 and be non-negative')
states = np.zeros(n_steps, dtype=int)
values = np.zeros((n_steps, n_states), dtype=float)
ptr = np.zeros((n_steps, n_states), dtype=int)
# Compute log-likelihoods while avoiding log-underflow
epsilon = np.finfo(prob.dtype).tiny
# Compute marginal log probabilities while avoiding underflow
if p_state is None:
p_state = np.empty(n_states)
p_state.fill(1./n_states)
elif p_state.shape != (n_states,):
raise ParameterError('Marginal distribution p_state must have shape (n_states,). '
'Got p_state.shape={}'.format(p_state.shape))
elif np.any(p_state < 0) or not np.allclose(p_state.sum(axis=-1), 1):
raise ParameterError('Invalid marginal state distribution: '
'p_state={}'.format(p_state))
log_trans = np.log(transition + epsilon)
log_marginal = np.log(p_state + epsilon)
# By Bayes' rule, P[X | Y] * P[Y] = P[Y | X] * P[X]
# P[X] is constant for the sake of maximum likelihood inference
# and P[Y] is given by the marginal distribution p_state.
#
# So we have P[X | y] \propto P[Y | x] / P[Y]
# if X = observation and Y = states, this can be done in log space as
# log P[X | y] \propto \log P[Y | x] - \log P[Y]
log_prob = np.log(prob.T + epsilon) - log_marginal
if p_init is None:
p_init = np.empty(n_states)
p_init.fill(1./n_states)
elif np.any(p_init < 0) or not np.allclose(p_init.sum(), 1):
raise ParameterError('Invalid initial state distribution: '
'p_init={}'.format(p_init))
log_p_init = np.log(p_init + epsilon)
_viterbi(log_prob, log_trans, log_p_init, states, values, ptr)
if return_logp:
return states, values[-1, states[-1]]
return states
|
def viterbi_discriminative(prob, transition, p_state=None, p_init=None, return_logp=False):
'''Viterbi decoding from discriminative state predictions.
Given a sequence of conditional state predictions `prob[s, t]`,
indicating the conditional likelihood of state `s` given the
observation at time `t`, and a transition matrix `transition[i, j]`
which encodes the conditional probability of moving from state `i`
to state `j`, the Viterbi algorithm computes the most likely sequence
of states from the observations.
This implementation uses the standard Viterbi decoding algorithm
for observation likelihood sequences, under the assumption that
`P[Obs(t) | State(t) = s]` is proportional to
`P[State(t) = s | Obs(t)] / P[State(t) = s]`, where the denominator
is the marginal probability of state `s` occurring as given by `p_state`.
Parameters
----------
prob : np.ndarray [shape=(n_states, n_steps), non-negative]
`prob[s, t]` is the probability of state `s` conditional on
the observation at time `t`.
Must be non-negative and sum to 1 along each column.
transition : np.ndarray [shape=(n_states, n_states), non-negative]
`transition[i, j]` is the probability of a transition from i->j.
Each row must sum to 1.
p_state : np.ndarray [shape=(n_states,)]
Optional: marginal probability distribution over states,
must be non-negative and sum to 1.
If not provided, a uniform distribution is assumed.
p_init : np.ndarray [shape=(n_states,)]
Optional: initial state distribution.
If not provided, it is assumed to be uniform.
return_logp : bool
If `True`, return the log-likelihood of the state sequence.
Returns
-------
Either `states` or `(states, logp)`:
states : np.ndarray [shape=(n_steps,)]
The most likely state sequence.
logp : scalar [float]
If `return_logp=True`, the log probability of `states` given
the observations.
See Also
--------
viterbi : Viterbi decoding from observation likelihoods
viterbi_binary: Viterbi decoding for multi-label, conditional state likelihoods
Examples
--------
This example constructs a simple, template-based discriminative chord estimator,
using CENS chroma as input features.
.. note:: this chord model is not accurate enough to use in practice. It is only
intended to demonstrate how to use discriminative Viterbi decoding.
>>> # Create templates for major, minor, and no-chord qualities
>>> maj_template = np.array([1,0,0, 0,1,0, 0,1,0, 0,0,0])
>>> min_template = np.array([1,0,0, 1,0,0, 0,1,0, 0,0,0])
>>> N_template = np.array([1,1,1, 1,1,1, 1,1,1, 1,1,1.]) / 4.
>>> # Generate the weighting matrix that maps chroma to labels
>>> weights = np.zeros((25, 12), dtype=float)
>>> labels = ['C:maj', 'C#:maj', 'D:maj', 'D#:maj', 'E:maj', 'F:maj',
... 'F#:maj', 'G:maj', 'G#:maj', 'A:maj', 'A#:maj', 'B:maj',
... 'C:min', 'C#:min', 'D:min', 'D#:min', 'E:min', 'F:min',
... 'F#:min', 'G:min', 'G#:min', 'A:min', 'A#:min', 'B:min',
... 'N']
>>> for c in range(12):
... weights[c, :] = np.roll(maj_template, c) # c:maj
... weights[c + 12, :] = np.roll(min_template, c) # c:min
>>> weights[-1] = N_template # the last row is the no-chord class
>>> # Make a self-loop transition matrix over 25 states
>>> trans = librosa.sequence.transition_loop(25, 0.9)
>>> # Load in audio and make features
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_cens(y=y, sr=sr, bins_per_octave=36)
>>> # Map chroma (observations) to class (state) likelihoods
>>> probs = np.exp(weights.dot(chroma)) # P[class | chroma] proportional to exp(template' chroma)
>>> probs /= probs.sum(axis=0, keepdims=True) # probabilities must sum to 1 in each column
>>> # Compute independent frame-wise estimates
>>> chords_ind = np.argmax(probs, axis=0)
>>> # And viterbi estimates
>>> chords_vit = librosa.sequence.viterbi_discriminative(probs, trans)
>>> # Plot the features and prediction map
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(chroma, x_axis='time', y_axis='chroma')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(weights, x_axis='chroma')
>>> plt.yticks(np.arange(25) + 0.5, labels)
>>> plt.ylabel('Chord')
>>> plt.colorbar()
>>> plt.tight_layout()
>>> # And plot the results
>>> plt.figure(figsize=(10, 4))
>>> librosa.display.specshow(probs, x_axis='time', cmap='gray')
>>> plt.colorbar()
>>> times = librosa.frames_to_time(np.arange(len(chords_vit)))
>>> plt.scatter(times, chords_ind + 0.75, color='lime', alpha=0.5, marker='+', s=15, label='Independent')
>>> plt.scatter(times, chords_vit + 0.25, color='deeppink', alpha=0.5, marker='o', s=15, label='Viterbi')
>>> plt.yticks(0.5 + np.unique(chords_vit), [labels[i] for i in np.unique(chords_vit)], va='center')
>>> plt.legend(loc='best')
>>> plt.tight_layout()
'''
n_states, n_steps = prob.shape
if transition.shape != (n_states, n_states):
raise ParameterError('transition.shape={}, must be '
'(n_states, n_states)={}'.format(transition.shape,
(n_states, n_states)))
if np.any(transition < 0) or not np.allclose(transition.sum(axis=1), 1):
raise ParameterError('Invalid transition matrix: must be non-negative '
'and sum to 1 on each row.')
if np.any(prob < 0) or not np.allclose(prob.sum(axis=0), 1):
raise ParameterError('Invalid probability values: each column must '
'sum to 1 and be non-negative')
states = np.zeros(n_steps, dtype=int)
values = np.zeros((n_steps, n_states), dtype=float)
ptr = np.zeros((n_steps, n_states), dtype=int)
# Compute log-likelihoods while avoiding log-underflow
epsilon = np.finfo(prob.dtype).tiny
# Compute marginal log probabilities while avoiding underflow
if p_state is None:
p_state = np.empty(n_states)
p_state.fill(1./n_states)
elif p_state.shape != (n_states,):
raise ParameterError('Marginal distribution p_state must have shape (n_states,). '
'Got p_state.shape={}'.format(p_state.shape))
elif np.any(p_state < 0) or not np.allclose(p_state.sum(axis=-1), 1):
raise ParameterError('Invalid marginal state distribution: '
'p_state={}'.format(p_state))
log_trans = np.log(transition + epsilon)
log_marginal = np.log(p_state + epsilon)
# By Bayes' rule, P[X | Y] * P[Y] = P[Y | X] * P[X]
# P[X] is constant for the sake of maximum likelihood inference
# and P[Y] is given by the marginal distribution p_state.
#
# So we have P[X | y] \propto P[Y | x] / P[Y]
# if X = observation and Y = states, this can be done in log space as
# log P[X | y] \propto \log P[Y | x] - \log P[Y]
log_prob = np.log(prob.T + epsilon) - log_marginal
if p_init is None:
p_init = np.empty(n_states)
p_init.fill(1./n_states)
elif np.any(p_init < 0) or not np.allclose(p_init.sum(), 1):
raise ParameterError('Invalid initial state distribution: '
'p_init={}'.format(p_init))
log_p_init = np.log(p_init + epsilon)
_viterbi(log_prob, log_trans, log_p_init, states, values, ptr)
if return_logp:
return states, values[-1, states[-1]]
return states
|
[
"Viterbi",
"decoding",
"from",
"discriminative",
"state",
"predictions",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L540-L717
|
[
"def",
"viterbi_discriminative",
"(",
"prob",
",",
"transition",
",",
"p_state",
"=",
"None",
",",
"p_init",
"=",
"None",
",",
"return_logp",
"=",
"False",
")",
":",
"n_states",
",",
"n_steps",
"=",
"prob",
".",
"shape",
"if",
"transition",
".",
"shape",
"!=",
"(",
"n_states",
",",
"n_states",
")",
":",
"raise",
"ParameterError",
"(",
"'transition.shape={}, must be '",
"'(n_states, n_states)={}'",
".",
"format",
"(",
"transition",
".",
"shape",
",",
"(",
"n_states",
",",
"n_states",
")",
")",
")",
"if",
"np",
".",
"any",
"(",
"transition",
"<",
"0",
")",
"or",
"not",
"np",
".",
"allclose",
"(",
"transition",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
",",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid transition matrix: must be non-negative '",
"'and sum to 1 on each row.'",
")",
"if",
"np",
".",
"any",
"(",
"prob",
"<",
"0",
")",
"or",
"not",
"np",
".",
"allclose",
"(",
"prob",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
",",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid probability values: each column must '",
"'sum to 1 and be non-negative'",
")",
"states",
"=",
"np",
".",
"zeros",
"(",
"n_steps",
",",
"dtype",
"=",
"int",
")",
"values",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_steps",
",",
"n_states",
")",
",",
"dtype",
"=",
"float",
")",
"ptr",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_steps",
",",
"n_states",
")",
",",
"dtype",
"=",
"int",
")",
"# Compute log-likelihoods while avoiding log-underflow",
"epsilon",
"=",
"np",
".",
"finfo",
"(",
"prob",
".",
"dtype",
")",
".",
"tiny",
"# Compute marginal log probabilities while avoiding underflow",
"if",
"p_state",
"is",
"None",
":",
"p_state",
"=",
"np",
".",
"empty",
"(",
"n_states",
")",
"p_state",
".",
"fill",
"(",
"1.",
"/",
"n_states",
")",
"elif",
"p_state",
".",
"shape",
"!=",
"(",
"n_states",
",",
")",
":",
"raise",
"ParameterError",
"(",
"'Marginal distribution p_state must have shape (n_states,). '",
"'Got p_state.shape={}'",
".",
"format",
"(",
"p_state",
".",
"shape",
")",
")",
"elif",
"np",
".",
"any",
"(",
"p_state",
"<",
"0",
")",
"or",
"not",
"np",
".",
"allclose",
"(",
"p_state",
".",
"sum",
"(",
"axis",
"=",
"-",
"1",
")",
",",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid marginal state distribution: '",
"'p_state={}'",
".",
"format",
"(",
"p_state",
")",
")",
"log_trans",
"=",
"np",
".",
"log",
"(",
"transition",
"+",
"epsilon",
")",
"log_marginal",
"=",
"np",
".",
"log",
"(",
"p_state",
"+",
"epsilon",
")",
"# By Bayes' rule, P[X | Y] * P[Y] = P[Y | X] * P[X]",
"# P[X] is constant for the sake of maximum likelihood inference",
"# and P[Y] is given by the marginal distribution p_state.",
"#",
"# So we have P[X | y] \\propto P[Y | x] / P[Y]",
"# if X = observation and Y = states, this can be done in log space as",
"# log P[X | y] \\propto \\log P[Y | x] - \\log P[Y]",
"log_prob",
"=",
"np",
".",
"log",
"(",
"prob",
".",
"T",
"+",
"epsilon",
")",
"-",
"log_marginal",
"if",
"p_init",
"is",
"None",
":",
"p_init",
"=",
"np",
".",
"empty",
"(",
"n_states",
")",
"p_init",
".",
"fill",
"(",
"1.",
"/",
"n_states",
")",
"elif",
"np",
".",
"any",
"(",
"p_init",
"<",
"0",
")",
"or",
"not",
"np",
".",
"allclose",
"(",
"p_init",
".",
"sum",
"(",
")",
",",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid initial state distribution: '",
"'p_init={}'",
".",
"format",
"(",
"p_init",
")",
")",
"log_p_init",
"=",
"np",
".",
"log",
"(",
"p_init",
"+",
"epsilon",
")",
"_viterbi",
"(",
"log_prob",
",",
"log_trans",
",",
"log_p_init",
",",
"states",
",",
"values",
",",
"ptr",
")",
"if",
"return_logp",
":",
"return",
"states",
",",
"values",
"[",
"-",
"1",
",",
"states",
"[",
"-",
"1",
"]",
"]",
"return",
"states"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
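The chord example in the docstring above is fairly involved; the behaviour of `viterbi_discriminative` also shows up clearly on a toy problem. The sketch below de-noises a single transient prediction with a sticky self-loop transition matrix; all probabilities are made up, and each column of `prob` sums to 1 as the function requires.

import numpy as np
import librosa

# P[state | obs_t] for two states over five frames; frame 1 is a transient "blip"
prob = np.array([[0.9, 0.2, 0.8, 0.9, 0.9],
                 [0.1, 0.8, 0.2, 0.1, 0.1]])
trans = librosa.sequence.transition_loop(2, 0.9)   # 90% self-transition

print(np.argmax(prob, axis=0))                                 # frame-wise: [0 1 0 0 0]
print(librosa.sequence.viterbi_discriminative(prob, trans))    # smoothed:   [0 0 0 0 0]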
test
|
viterbi_binary
|
Viterbi decoding from binary (multi-label), discriminative state predictions.
Given a sequence of conditional state predictions `prob[s, t]`,
indicating the conditional likelihood of state `s` being active
conditional on observation at time `t`, and a 2*2 transition matrix
`transition` which encodes the conditional probability of moving from
state `s` to state `~s` (not-`s`), the Viterbi algorithm computes the
most likely sequence of states from the observations.
This function differs from `viterbi_discriminative` in that it does not assume the
states to be mutually exclusive. `viterbi_binary` is implemented by
transforming the multi-label decoding problem to a collection
of binary Viterbi problems (one for each *state* or label).
The output is a binary matrix `states[s, t]` indicating whether each
state `s` is active at time `t`.
Parameters
----------
prob : np.ndarray [shape=(n_steps,) or (n_states, n_steps)], non-negative
`prob[s, t]` is the probability of state `s` being active
conditional on the observation at time `t`.
Must be non-negative and less than 1.
If `prob` is 1-dimensional, it is expanded to shape `(1, n_steps)`.
transition : np.ndarray [shape=(2, 2) or (n_states, 2, 2)], non-negative
If 2-dimensional, the same transition matrix is applied to each sub-problem.
`transition[0, i]` is the probability of the state going from inactive to `i`,
`transition[1, i]` is the probability of the state going from active to `i`.
Each row must sum to 1.
If 3-dimensional, `transition[s]` is interpreted as the 2x2 transition matrix
for state label `s`.
p_state : np.ndarray [shape=(n_states,)]
Optional: marginal probability for each state (between [0,1]).
If not provided, a uniform distribution (0.5 for each state)
is assumed.
p_init : np.ndarray [shape=(n_states,)]
Optional: initial state distribution.
If not provided, it is assumed to be uniform.
return_logp : bool
If `True`, return the log-likelihood of the state sequence.
Returns
-------
Either `states` or `(states, logp)`:
states : np.ndarray [shape=(n_states, n_steps)]
The most likely state sequence.
logp : np.ndarray [shape=(n_states,)]
If `return_logp=True`, the log probability of each state activation
sequence `states`
See Also
--------
viterbi : Viterbi decoding from observation likelihoods
viterbi_discriminative : Viterbi decoding for discriminative (mutually exclusive) state predictions
Examples
--------
In this example, we have a sequence of binary state likelihoods that we want to de-noise
under the assumption that state changes are relatively uncommon. Positive predictions
should only be retained if they persist for multiple steps, and any transient predictions
should be considered as errors. This use case arises frequently in problems such as
instrument recognition, where state activations tend to be stable over time, but subject
to abrupt changes (e.g., when an instrument joins the mix).
We assume that the 0 state has a self-transition probability of 90%, and the 1 state
has a self-transition probability of 70%. We assume the marginal and initial
probability of either state is 50%.
>>> trans = np.array([[0.9, 0.1], [0.3, 0.7]])
>>> prob = np.array([0.1, 0.7, 0.4, 0.3, 0.8, 0.9, 0.8, 0.2, 0.6, 0.3])
>>> librosa.sequence.viterbi_binary(prob, trans, p_state=0.5, p_init=0.5)
array([[0, 0, 0, 0, 1, 1, 1, 0, 0, 0]])
|
librosa/sequence.py
|
def viterbi_binary(prob, transition, p_state=None, p_init=None, return_logp=False):
'''Viterbi decoding from binary (multi-label), discriminative state predictions.
Given a sequence of conditional state predictions `prob[s, t]`,
indicating the conditional likelihood of state `s` being active
conditional on observation at time `t`, and a 2*2 transition matrix
`transition` which encodes the conditional probability of moving from
state `s` to state `~s` (not-`s`), the Viterbi algorithm computes the
most likely sequence of states from the observations.
This function differs from `viterbi_discriminative` in that it does not assume the
states to be mutually exclusive. `viterbi_binary` is implemented by
transforming the multi-label decoding problem to a collection
of binary Viterbi problems (one for each *state* or label).
The output is a binary matrix `states[s, t]` indicating whether each
state `s` is active at time `t`.
Parameters
----------
prob : np.ndarray [shape=(n_steps,) or (n_states, n_steps)], non-negative
`prob[s, t]` is the probability of state `s` being active
conditional on the observation at time `t`.
Must be non-negative and less than 1.
If `prob` is 1-dimensional, it is expanded to shape `(1, n_steps)`.
transition : np.ndarray [shape=(2, 2) or (n_states, 2, 2)], non-negative
If 2-dimensional, the same transition matrix is applied to each sub-problem.
`transition[0, i]` is the probability of the state going from inactive to `i`,
`transition[1, i]` is the probability of the state going from active to `i`.
Each row must sum to 1.
If 3-dimensional, `transition[s]` is interpreted as the 2x2 transition matrix
for state label `s`.
p_state : np.ndarray [shape=(n_states,)]
Optional: marginal probability for each state (between [0,1]).
If not provided, a uniform distribution (0.5 for each state)
is assumed.
p_init : np.ndarray [shape=(n_states,)]
Optional: initial state distribution.
If not provided, it is assumed to be uniform.
return_logp : bool
If `True`, return the log-likelihood of the state sequence.
Returns
-------
Either `states` or `(states, logp)`:
states : np.ndarray [shape=(n_states, n_steps)]
The most likely state sequence.
logp : np.ndarray [shape=(n_states,)]
If `return_logp=True`, the log probability of each state activation
sequence `states`
See Also
--------
viterbi : Viterbi decoding from observation likelihoods
viterbi_discriminative : Viterbi decoding for discriminative (mutually exclusive) state predictions
Examples
--------
In this example, we have a sequence of binary state likelihoods that we want to de-noise
under the assumption that state changes are relatively uncommon. Positive predictions
should only be retained if they persist for multiple steps, and any transient predictions
should be considered as errors. This use case arises frequently in problems such as
instrument recognition, where state activations tend to be stable over time, but subject
to abrupt changes (e.g., when an instrument joins the mix).
We assume that the 0 state has a self-transition probability of 90%, and the 1 state
has a self-transition probability of 70%. We assume the marginal and initial
probability of either state is 50%.
>>> trans = np.array([[0.9, 0.1], [0.3, 0.7]])
>>> prob = np.array([0.1, 0.7, 0.4, 0.3, 0.8, 0.9, 0.8, 0.2, 0.6, 0.3])
>>> librosa.sequence.viterbi_binary(prob, trans, p_state=0.5, p_init=0.5)
array([[0, 0, 0, 0, 1, 1, 1, 0, 0, 0]])
'''
prob = np.atleast_2d(prob)
n_states, n_steps = prob.shape
if transition.shape == (2, 2):
transition = np.tile(transition, (n_states, 1, 1))
elif transition.shape != (n_states, 2, 2):
raise ParameterError('transition.shape={}, must be (2,2) or '
'(n_states, 2, 2)={}'.format(transition.shape, (n_states)))
if np.any(transition < 0) or not np.allclose(transition.sum(axis=-1), 1):
raise ParameterError('Invalid transition matrix: must be non-negative '
'and sum to 1 on each row.')
if np.any(prob < 0) or np.any(prob > 1):
raise ParameterError('Invalid probability values: prob must be between [0, 1]')
if p_state is None:
p_state = np.empty(n_states)
p_state.fill(0.5)
else:
p_state = np.atleast_1d(p_state)
if p_state.shape != (n_states,) or np.any(p_state < 0) or np.any(p_state > 1):
raise ParameterError('Invalid marginal state distributions: p_state={}'.format(p_state))
if p_init is None:
p_init = np.empty(n_states)
p_init.fill(0.5)
else:
p_init = np.atleast_1d(p_init)
if p_init.shape != (n_states,) or np.any(p_init < 0) or np.any(p_init > 1):
raise ParameterError('Invalid initial state distributions: p_init={}'.format(p_init))
states = np.empty((n_states, n_steps), dtype=int)
logp = np.empty(n_states)
prob_binary = np.empty((2, n_steps))
p_state_binary = np.empty(2)
p_init_binary = np.empty(2)
for state in range(n_states):
prob_binary[0] = 1 - prob[state]
prob_binary[1] = prob[state]
p_state_binary[0] = 1 - p_state[state]
p_state_binary[1] = p_state[state]
p_init_binary[0] = 1 - p_init[state]
p_init_binary[1] = p_init[state]
states[state, :], logp[state] = viterbi_discriminative(prob_binary,
transition[state],
p_state=p_state_binary,
p_init=p_init_binary,
return_logp=True)
if return_logp:
return states, logp
return states
|
def viterbi_binary(prob, transition, p_state=None, p_init=None, return_logp=False):
'''Viterbi decoding from binary (multi-label), discriminative state predictions.
Given a sequence of conditional state predictions `prob[s, t]`,
indicating the conditional likelihood of state `s` being active
conditional on observation at time `t`, and a 2*2 transition matrix
`transition` which encodes the conditional probability of moving from
state `s` to state `~s` (not-`s`), the Viterbi algorithm computes the
most likely sequence of states from the observations.
This function differs from `viterbi_discriminative` in that it does not assume the
states to be mutually exclusive. `viterbi_binary` is implemented by
transforming the multi-label decoding problem to a collection
of binary Viterbi problems (one for each *state* or label).
The output is a binary matrix `states[s, t]` indicating whether each
state `s` is active at time `t`.
Parameters
----------
prob : np.ndarray [shape=(n_steps,) or (n_states, n_steps)], non-negative
`prob[s, t]` is the probability of state `s` being active
conditional on the observation at time `t`.
Must be non-negative and less than 1.
If `prob` is 1-dimensional, it is expanded to shape `(1, n_steps)`.
transition : np.ndarray [shape=(2, 2) or (n_states, 2, 2)], non-negative
If 2-dimensional, the same transition matrix is applied to each sub-problem.
`transition[0, i]` is the probability of the state going from inactive to `i`,
`transition[1, i]` is the probability of the state going from active to `i`.
Each row must sum to 1.
If 3-dimensional, `transition[s]` is interpreted as the 2x2 transition matrix
for state label `s`.
p_state : np.ndarray [shape=(n_states,)]
Optional: marginal probability for each state (between [0,1]).
If not provided, a uniform distribution (0.5 for each state)
is assumed.
p_init : np.ndarray [shape=(n_states,)]
Optional: initial state distribution.
If not provided, it is assumed to be uniform.
return_logp : bool
If `True`, return the log-likelihood of the state sequence.
Returns
-------
Either `states` or `(states, logp)`:
states : np.ndarray [shape=(n_states, n_steps)]
The most likely state sequence.
logp : np.ndarray [shape=(n_states,)]
If `return_logp=True`, the log probability of each state activation
sequence `states`
See Also
--------
viterbi : Viterbi decoding from observation likelihoods
viterbi_discriminative : Viterbi decoding for discriminative (mutually exclusive) state predictions
Examples
--------
In this example, we have a sequence of binary state likelihoods that we want to de-noise
under the assumption that state changes are relatively uncommon. Positive predictions
should only be retained if they persist for multiple steps, and any transient predictions
should be considered as errors. This use case arises frequently in problems such as
instrument recognition, where state activations tend to be stable over time, but subject
to abrupt changes (e.g., when an instrument joins the mix).
We assume that the 0 state has a self-transition probability of 90%, and the 1 state
has a self-transition probability of 70%. We assume the marginal and initial
probability of either state is 50%.
>>> trans = np.array([[0.9, 0.1], [0.3, 0.7]])
>>> prob = np.array([0.1, 0.7, 0.4, 0.3, 0.8, 0.9, 0.8, 0.2, 0.6, 0.3])
>>> librosa.sequence.viterbi_binary(prob, trans, p_state=0.5, p_init=0.5)
array([[0, 0, 0, 0, 1, 1, 1, 0, 0, 0]])
'''
prob = np.atleast_2d(prob)
n_states, n_steps = prob.shape
if transition.shape == (2, 2):
transition = np.tile(transition, (n_states, 1, 1))
elif transition.shape != (n_states, 2, 2):
raise ParameterError('transition.shape={}, must be (2,2) or '
'(n_states, 2, 2)={}'.format(transition.shape, (n_states)))
if np.any(transition < 0) or not np.allclose(transition.sum(axis=-1), 1):
raise ParameterError('Invalid transition matrix: must be non-negative '
'and sum to 1 on each row.')
if np.any(prob < 0) or np.any(prob > 1):
raise ParameterError('Invalid probability values: prob must be between [0, 1]')
if p_state is None:
p_state = np.empty(n_states)
p_state.fill(0.5)
else:
p_state = np.atleast_1d(p_state)
if p_state.shape != (n_states,) or np.any(p_state < 0) or np.any(p_state > 1):
raise ParameterError('Invalid marginal state distributions: p_state={}'.format(p_state))
if p_init is None:
p_init = np.empty(n_states)
p_init.fill(0.5)
else:
p_init = np.atleast_1d(p_init)
if p_init.shape != (n_states,) or np.any(p_init < 0) or np.any(p_init > 1):
raise ParameterError('Invalid initial state distributions: p_init={}'.format(p_init))
states = np.empty((n_states, n_steps), dtype=int)
logp = np.empty(n_states)
prob_binary = np.empty((2, n_steps))
p_state_binary = np.empty(2)
p_init_binary = np.empty(2)
for state in range(n_states):
prob_binary[0] = 1 - prob[state]
prob_binary[1] = prob[state]
p_state_binary[0] = 1 - p_state[state]
p_state_binary[1] = p_state[state]
p_init_binary[0] = 1 - p_init[state]
p_init_binary[1] = p_init[state]
states[state, :], logp[state] = viterbi_discriminative(prob_binary,
transition[state],
p_state=p_state_binary,
p_init=p_init_binary,
return_logp=True)
if return_logp:
return states, logp
return states
|
[
"Viterbi",
"decoding",
"from",
"binary",
"(",
"multi",
"-",
"label",
")",
"discriminative",
"state",
"predictions",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L720-L864
|
[
"def",
"viterbi_binary",
"(",
"prob",
",",
"transition",
",",
"p_state",
"=",
"None",
",",
"p_init",
"=",
"None",
",",
"return_logp",
"=",
"False",
")",
":",
"prob",
"=",
"np",
".",
"atleast_2d",
"(",
"prob",
")",
"n_states",
",",
"n_steps",
"=",
"prob",
".",
"shape",
"if",
"transition",
".",
"shape",
"==",
"(",
"2",
",",
"2",
")",
":",
"transition",
"=",
"np",
".",
"tile",
"(",
"transition",
",",
"(",
"n_states",
",",
"1",
",",
"1",
")",
")",
"elif",
"transition",
".",
"shape",
"!=",
"(",
"n_states",
",",
"2",
",",
"2",
")",
":",
"raise",
"ParameterError",
"(",
"'transition.shape={}, must be (2,2) or '",
"'(n_states, 2, 2)={}'",
".",
"format",
"(",
"transition",
".",
"shape",
",",
"(",
"n_states",
")",
")",
")",
"if",
"np",
".",
"any",
"(",
"transition",
"<",
"0",
")",
"or",
"not",
"np",
".",
"allclose",
"(",
"transition",
".",
"sum",
"(",
"axis",
"=",
"-",
"1",
")",
",",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid transition matrix: must be non-negative '",
"'and sum to 1 on each row.'",
")",
"if",
"np",
".",
"any",
"(",
"prob",
"<",
"0",
")",
"or",
"np",
".",
"any",
"(",
"prob",
">",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid probability values: prob must be between [0, 1]'",
")",
"if",
"p_state",
"is",
"None",
":",
"p_state",
"=",
"np",
".",
"empty",
"(",
"n_states",
")",
"p_state",
".",
"fill",
"(",
"0.5",
")",
"else",
":",
"p_state",
"=",
"np",
".",
"atleast_1d",
"(",
"p_state",
")",
"if",
"p_state",
".",
"shape",
"!=",
"(",
"n_states",
",",
")",
"or",
"np",
".",
"any",
"(",
"p_state",
"<",
"0",
")",
"or",
"np",
".",
"any",
"(",
"p_state",
">",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid marginal state distributions: p_state={}'",
".",
"format",
"(",
"p_state",
")",
")",
"if",
"p_init",
"is",
"None",
":",
"p_init",
"=",
"np",
".",
"empty",
"(",
"n_states",
")",
"p_init",
".",
"fill",
"(",
"0.5",
")",
"else",
":",
"p_init",
"=",
"np",
".",
"atleast_1d",
"(",
"p_init",
")",
"if",
"p_init",
".",
"shape",
"!=",
"(",
"n_states",
",",
")",
"or",
"np",
".",
"any",
"(",
"p_init",
"<",
"0",
")",
"or",
"np",
".",
"any",
"(",
"p_init",
">",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'Invalid initial state distributions: p_init={}'",
".",
"format",
"(",
"p_init",
")",
")",
"states",
"=",
"np",
".",
"empty",
"(",
"(",
"n_states",
",",
"n_steps",
")",
",",
"dtype",
"=",
"int",
")",
"logp",
"=",
"np",
".",
"empty",
"(",
"n_states",
")",
"prob_binary",
"=",
"np",
".",
"empty",
"(",
"(",
"2",
",",
"n_steps",
")",
")",
"p_state_binary",
"=",
"np",
".",
"empty",
"(",
"2",
")",
"p_init_binary",
"=",
"np",
".",
"empty",
"(",
"2",
")",
"for",
"state",
"in",
"range",
"(",
"n_states",
")",
":",
"prob_binary",
"[",
"0",
"]",
"=",
"1",
"-",
"prob",
"[",
"state",
"]",
"prob_binary",
"[",
"1",
"]",
"=",
"prob",
"[",
"state",
"]",
"p_state_binary",
"[",
"0",
"]",
"=",
"1",
"-",
"p_state",
"[",
"state",
"]",
"p_state_binary",
"[",
"1",
"]",
"=",
"p_state",
"[",
"state",
"]",
"p_init_binary",
"[",
"0",
"]",
"=",
"1",
"-",
"p_init",
"[",
"state",
"]",
"p_init_binary",
"[",
"1",
"]",
"=",
"p_init",
"[",
"state",
"]",
"states",
"[",
"state",
",",
":",
"]",
",",
"logp",
"[",
"state",
"]",
"=",
"viterbi_discriminative",
"(",
"prob_binary",
",",
"transition",
"[",
"state",
"]",
",",
"p_state",
"=",
"p_state_binary",
",",
"p_init",
"=",
"p_init_binary",
",",
"return_logp",
"=",
"True",
")",
"if",
"return_logp",
":",
"return",
"states",
",",
"logp",
"return",
"states"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
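The docstring example above covers the single-label case; since `viterbi_binary` decodes each label independently, the multi-label case is just more rows in `prob`. A small sketch with synthetic activations, sharing one 2x2 transition matrix across both labels and relying on the per-label defaults of 0.5 for `p_state` and `p_init`:

import numpy as np
import librosa

trans = np.array([[0.9, 0.1],     # inactive -> {inactive, active}
                  [0.3, 0.7]])    # active   -> {inactive, active}
prob = np.array([[0.2, 0.7, 0.8, 0.9, 0.3],   # activation probability of label 0 per frame
                 [0.6, 0.1, 0.2, 0.8, 0.9]])  # activation probability of label 1 per frame

states = librosa.sequence.viterbi_binary(prob, trans)
print(states.shape)               # (2, 5): one binary activation track per label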
test
|
transition_uniform
|
Construct a uniform transition matrix over `n_states`.
Parameters
----------
n_states : int > 0
The number of states
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
`transition[i, j] = 1./n_states`
Examples
--------
>>> librosa.sequence.transition_uniform(3)
array([[0.333, 0.333, 0.333],
[0.333, 0.333, 0.333],
[0.333, 0.333, 0.333]])
|
librosa/sequence.py
|
def transition_uniform(n_states):
'''Construct a uniform transition matrix over `n_states`.
Parameters
----------
n_states : int > 0
The number of states
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
`transition[i, j] = 1./n_states`
Examples
--------
>>> librosa.sequence.transition_uniform(3)
array([[0.333, 0.333, 0.333],
[0.333, 0.333, 0.333],
[0.333, 0.333, 0.333]])
'''
if not isinstance(n_states, int) or n_states <= 0:
raise ParameterError('n_states={} must be a positive integer')
transition = np.empty((n_states, n_states), dtype=np.float)
transition.fill(1./n_states)
return transition
|
def transition_uniform(n_states):
'''Construct a uniform transition matrix over `n_states`.
Parameters
----------
n_states : int > 0
The number of states
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
`transition[i, j] = 1./n_states`
Examples
--------
>>> librosa.sequence.transition_uniform(3)
array([[0.333, 0.333, 0.333],
[0.333, 0.333, 0.333],
[0.333, 0.333, 0.333]])
'''
if not isinstance(n_states, int) or n_states <= 0:
raise ParameterError('n_states={} must be a positive integer')
transition = np.empty((n_states, n_states), dtype=np.float)
transition.fill(1./n_states)
return transition
|
[
"Construct",
"a",
"uniform",
"transition",
"matrix",
"over",
"n_states",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L867-L894
|
[
"def",
"transition_uniform",
"(",
"n_states",
")",
":",
"if",
"not",
"isinstance",
"(",
"n_states",
",",
"int",
")",
"or",
"n_states",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'n_states={} must be a positive integer'",
")",
"transition",
"=",
"np",
".",
"empty",
"(",
"(",
"n_states",
",",
"n_states",
")",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"transition",
".",
"fill",
"(",
"1.",
"/",
"n_states",
")",
"return",
"transition"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
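
As a quick sanity check on `transition_uniform` (a sketch, not part of the recorded entry): every entry equals `1/n_states`, so every row sums to 1.

>>> import numpy as np
>>> import librosa
>>> T = librosa.sequence.transition_uniform(4)
>>> np.allclose(T, 1.0 / 4)
True
>>> T.sum(axis=1)
array([1., 1., 1., 1.])
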
|
test
|
transition_loop
|
Construct a self-loop transition matrix over `n_states`.
The transition matrix will have the following properties:
- `transition[i, i] = p` for all i
- `transition[i, j] = (1 - p) / (n_states - 1)` for all `j != i`
This type of transition matrix is appropriate when states tend to be
locally stable, and there is no additional structure between different
states. This is primarily useful for de-noising frame-wise predictions.
Parameters
----------
n_states : int > 1
The number of states
prob : float in [0, 1] or iterable, length=n_states
If a scalar, this is the probability of a self-transition.
If a vector of length `n_states`, `p[i]` is the probability of state `i`'s self-transition.
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
>>> librosa.sequence.transition_loop(3, 0.5)
array([[0.5 , 0.25, 0.25],
[0.25, 0.5 , 0.25],
[0.25, 0.25, 0.5 ]])
>>> librosa.sequence.transition_loop(3, [0.8, 0.5, 0.25])
array([[0.8 , 0.1 , 0.1 ],
[0.25 , 0.5 , 0.25 ],
[0.375, 0.375, 0.25 ]])
|
librosa/sequence.py
|
def transition_loop(n_states, prob):
'''Construct a self-loop transition matrix over `n_states`.
The transition matrix will have the following properties:
- `transition[i, i] = p` for all i
- `transition[i, j] = (1 - p) / (n_states - 1)` for all `j != i`
This type of transition matrix is appropriate when states tend to be
locally stable, and there is no additional structure between different
states. This is primarily useful for de-noising frame-wise predictions.
Parameters
----------
n_states : int > 1
The number of states
prob : float in [0, 1] or iterable, length=n_states
If a scalar, this is the probability of a self-transition.
If a vector of length `n_states`, `p[i]` is the probability of state `i`'s self-transition.
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
>>> librosa.sequence.transition_loop(3, 0.5)
array([[0.5 , 0.25, 0.25],
[0.25, 0.5 , 0.25],
[0.25, 0.25, 0.5 ]])
>>> librosa.sequence.transition_loop(3, [0.8, 0.5, 0.25])
array([[0.8 , 0.1 , 0.1 ],
[0.25 , 0.5 , 0.25 ],
[0.375, 0.375, 0.25 ]])
'''
if not isinstance(n_states, int) or n_states <= 1:
raise ParameterError('n_states={} must be a positive integer > 1')
transition = np.empty((n_states, n_states), dtype=np.float)
# if it's a float, make it a vector
prob = np.asarray(prob, dtype=np.float)
if prob.ndim == 0:
prob = np.tile(prob, n_states)
if prob.shape != (n_states,):
raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))
if np.any(prob < 0) or np.any(prob > 1):
raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob))
for i, prob_i in enumerate(prob):
transition[i] = (1. - prob_i) / (n_states - 1)
transition[i, i] = prob_i
return transition
|
def transition_loop(n_states, prob):
'''Construct a self-loop transition matrix over `n_states`.
The transition matrix will have the following properties:
- `transition[i, i] = p` for all i
- `transition[i, j] = (1 - p) / (n_states - 1)` for all `j != i`
This type of transition matrix is appropriate when states tend to be
locally stable, and there is no additional structure between different
states. This is primarily useful for de-noising frame-wise predictions.
Parameters
----------
n_states : int > 1
The number of states
prob : float in [0, 1] or iterable, length=n_states
If a scalar, this is the probability of a self-transition.
If a vector of length `n_states`, `p[i]` is the probability of state `i`'s self-transition.
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
>>> librosa.sequence.transition_loop(3, 0.5)
array([[0.5 , 0.25, 0.25],
[0.25, 0.5 , 0.25],
[0.25, 0.25, 0.5 ]])
>>> librosa.sequence.transition_loop(3, [0.8, 0.5, 0.25])
array([[0.8 , 0.1 , 0.1 ],
[0.25 , 0.5 , 0.25 ],
[0.375, 0.375, 0.25 ]])
'''
if not isinstance(n_states, int) or n_states <= 1:
raise ParameterError('n_states={} must be a positive integer > 1')
transition = np.empty((n_states, n_states), dtype=np.float)
# if it's a float, make it a vector
prob = np.asarray(prob, dtype=np.float)
if prob.ndim == 0:
prob = np.tile(prob, n_states)
if prob.shape != (n_states,):
raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))
if np.any(prob < 0) or np.any(prob > 1):
raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob))
for i, prob_i in enumerate(prob):
transition[i] = (1. - prob_i) / (n_states - 1)
transition[i, i] = prob_i
return transition
|
[
"Construct",
"a",
"self",
"-",
"loop",
"transition",
"matrix",
"over",
"n_states",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L897-L958
|
[
"def",
"transition_loop",
"(",
"n_states",
",",
"prob",
")",
":",
"if",
"not",
"isinstance",
"(",
"n_states",
",",
"int",
")",
"or",
"n_states",
"<=",
"1",
":",
"raise",
"ParameterError",
"(",
"'n_states={} must be a positive integer > 1'",
")",
"transition",
"=",
"np",
".",
"empty",
"(",
"(",
"n_states",
",",
"n_states",
")",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"# if it's a float, make it a vector",
"prob",
"=",
"np",
".",
"asarray",
"(",
"prob",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"if",
"prob",
".",
"ndim",
"==",
"0",
":",
"prob",
"=",
"np",
".",
"tile",
"(",
"prob",
",",
"n_states",
")",
"if",
"prob",
".",
"shape",
"!=",
"(",
"n_states",
",",
")",
":",
"raise",
"ParameterError",
"(",
"'prob={} must have length equal to n_states={}'",
".",
"format",
"(",
"prob",
",",
"n_states",
")",
")",
"if",
"np",
".",
"any",
"(",
"prob",
"<",
"0",
")",
"or",
"np",
".",
"any",
"(",
"prob",
">",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'prob={} must have values in the range [0, 1]'",
".",
"format",
"(",
"prob",
")",
")",
"for",
"i",
",",
"prob_i",
"in",
"enumerate",
"(",
"prob",
")",
":",
"transition",
"[",
"i",
"]",
"=",
"(",
"1.",
"-",
"prob_i",
")",
"/",
"(",
"n_states",
"-",
"1",
")",
"transition",
"[",
"i",
",",
"i",
"]",
"=",
"prob_i",
"return",
"transition"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
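
A short self-check for `transition_loop` (a sketch, not part of the recorded entry): the diagonal carries the self-loop probabilities, the remaining mass is spread evenly over the other states, and every row still sums to 1.

>>> import numpy as np
>>> import librosa
>>> p = [0.8, 0.5, 0.25]
>>> T = librosa.sequence.transition_loop(3, p)
>>> np.allclose(np.diag(T), p)
True
>>> np.allclose(T.sum(axis=1), 1.0)
True
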
|
test
|
transition_cycle
|
Construct a cyclic transition matrix over `n_states`.
The transition matrix will have the following properties:
- `transition[i, i] = p`
- `transition[i, i + 1] = (1 - p)`
This type of transition matrix is appropriate for state spaces
with cyclical structure, such as metrical position within a bar.
For example, a song in 4/4 time has state transitions of the form
1->{1, 2}, 2->{2, 3}, 3->{3, 4}, 4->{4, 1}.
Parameters
----------
n_states : int > 1
The number of states
prob : float in [0, 1] or iterable, length=n_states
If a scalar, this is the probability of a self-transition.
If a vector of length `n_states`, `p[i]` is the probability of state
`i`'s self-transition.
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
>>> librosa.sequence.transition_cycle(4, 0.9)
array([[0.9, 0.1, 0. , 0. ],
[0. , 0.9, 0.1, 0. ],
[0. , 0. , 0.9, 0.1],
[0.1, 0. , 0. , 0.9]])
|
librosa/sequence.py
|
def transition_cycle(n_states, prob):
'''Construct a cyclic transition matrix over `n_states`.
The transition matrix will have the following properties:
- `transition[i, i] = p`
- `transition[i, i + 1] = (1 - p)`
This type of transition matrix is appropriate for state spaces
with cyclical structure, such as metrical position within a bar.
For example, a song in 4/4 time has state transitions of the form
1->{1, 2}, 2->{2, 3}, 3->{3, 4}, 4->{4, 1}.
Parameters
----------
n_states : int > 1
The number of states
prob : float in [0, 1] or iterable, length=n_states
If a scalar, this is the probability of a self-transition.
If a vector of length `n_states`, `p[i]` is the probability of state
`i`'s self-transition.
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
>>> librosa.sequence.transition_cycle(4, 0.9)
array([[0.9, 0.1, 0. , 0. ],
[0. , 0.9, 0.1, 0. ],
[0. , 0. , 0.9, 0.1],
[0.1, 0. , 0. , 0.9]])
'''
if not isinstance(n_states, int) or n_states <= 1:
raise ParameterError('n_states={} must be a positive integer > 1')
transition = np.zeros((n_states, n_states), dtype=np.float)
# if it's a float, make it a vector
prob = np.asarray(prob, dtype=np.float)
if prob.ndim == 0:
prob = np.tile(prob, n_states)
if prob.shape != (n_states,):
raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))
if np.any(prob < 0) or np.any(prob > 1):
raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob))
for i, prob_i in enumerate(prob):
transition[i, np.mod(i + 1, n_states)] = 1. - prob_i
transition[i, i] = prob_i
return transition
|
def transition_cycle(n_states, prob):
'''Construct a cyclic transition matrix over `n_states`.
The transition matrix will have the following properties:
- `transition[i, i] = p`
- `transition[i, i + 1] = (1 - p)`
This type of transition matrix is appropriate for state spaces
with cyclical structure, such as metrical position within a bar.
For example, a song in 4/4 time has state transitions of the form
1->{1, 2}, 2->{2, 3}, 3->{3, 4}, 4->{4, 1}.
Parameters
----------
n_states : int > 1
The number of states
prob : float in [0, 1] or iterable, length=n_states
If a scalar, this is the probability of a self-transition.
If a vector of length `n_states`, `p[i]` is the probability of state
`i`'s self-transition.
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
>>> librosa.sequence.transition_cycle(4, 0.9)
array([[0.9, 0.1, 0. , 0. ],
[0. , 0.9, 0.1, 0. ],
[0. , 0. , 0.9, 0.1],
[0.1, 0. , 0. , 0.9]])
'''
if not isinstance(n_states, int) or n_states <= 1:
raise ParameterError('n_states={} must be a positive integer > 1')
transition = np.zeros((n_states, n_states), dtype=np.float)
# if it's a float, make it a vector
prob = np.asarray(prob, dtype=np.float)
if prob.ndim == 0:
prob = np.tile(prob, n_states)
if prob.shape != (n_states,):
raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))
if np.any(prob < 0) or np.any(prob > 1):
raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob))
for i, prob_i in enumerate(prob):
transition[i, np.mod(i + 1, n_states)] = 1. - prob_i
transition[i, i] = prob_i
return transition
|
[
"Construct",
"a",
"cyclic",
"transition",
"matrix",
"over",
"n_states",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L961-L1021
|
[
"def",
"transition_cycle",
"(",
"n_states",
",",
"prob",
")",
":",
"if",
"not",
"isinstance",
"(",
"n_states",
",",
"int",
")",
"or",
"n_states",
"<=",
"1",
":",
"raise",
"ParameterError",
"(",
"'n_states={} must be a positive integer > 1'",
")",
"transition",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_states",
",",
"n_states",
")",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"# if it's a float, make it a vector",
"prob",
"=",
"np",
".",
"asarray",
"(",
"prob",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"if",
"prob",
".",
"ndim",
"==",
"0",
":",
"prob",
"=",
"np",
".",
"tile",
"(",
"prob",
",",
"n_states",
")",
"if",
"prob",
".",
"shape",
"!=",
"(",
"n_states",
",",
")",
":",
"raise",
"ParameterError",
"(",
"'prob={} must have length equal to n_states={}'",
".",
"format",
"(",
"prob",
",",
"n_states",
")",
")",
"if",
"np",
".",
"any",
"(",
"prob",
"<",
"0",
")",
"or",
"np",
".",
"any",
"(",
"prob",
">",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'prob={} must have values in the range [0, 1]'",
".",
"format",
"(",
"prob",
")",
")",
"for",
"i",
",",
"prob_i",
"in",
"enumerate",
"(",
"prob",
")",
":",
"transition",
"[",
"i",
",",
"np",
".",
"mod",
"(",
"i",
"+",
"1",
",",
"n_states",
")",
"]",
"=",
"1.",
"-",
"prob_i",
"transition",
"[",
"i",
",",
"i",
"]",
"=",
"prob_i",
"return",
"transition"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
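
Each row of `transition_cycle` has only two non-zero entries: the self-loop `p` and the forward step `1 - p`, wrapping from the last state back to the first. A sketch (not part of the recorded entry) that rebuilds the 4-state example directly in NumPy:

>>> import numpy as np
>>> import librosa
>>> T = librosa.sequence.transition_cycle(4, 0.9)
>>> expected = 0.9 * np.eye(4) + 0.1 * np.roll(np.eye(4), 1, axis=1)
>>> np.allclose(T, expected)
True
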
|
test
|
transition_local
|
Construct a localized transition matrix.
The transition matrix will have the following properties:
- `transition[i, j] = 0` if `|i - j| > width`
- `transition[i, i]` is maximal
- `transition[i, i - width//2 : i + width//2]` has shape `window`
This type of transition matrix is appropriate for state spaces
that discretely approximate continuous variables, such as in fundamental
frequency estimation.
Parameters
----------
n_states : int > 1
The number of states
width : int >= 1 or iterable
The maximum number of states to treat as "local".
If iterable, it should have length equal to `n_states`,
and specify the width independently for each state.
window : str, callable, or window specification
The window function to determine the shape of the "local" distribution.
Any window specification supported by `filters.get_window` will work here.
.. note:: Certain windows (e.g., 'hann') are identically 0 at the boundaries,
and so effectively have `width-2` non-zero values. You may have to expand
`width` to get the desired behavior.
wrap : bool
If `True`, then state locality `|i - j|` is computed modulo `n_states`.
If `False` (default), then locality is absolute.
See Also
--------
filters.get_window
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
Triangular distributions with and without wrapping
>>> librosa.sequence.transition_local(5, 3, window='triangle', wrap=False)
array([[0.667, 0.333, 0. , 0. , 0. ],
[0.25 , 0.5 , 0.25 , 0. , 0. ],
[0. , 0.25 , 0.5 , 0.25 , 0. ],
[0. , 0. , 0.25 , 0.5 , 0.25 ],
[0. , 0. , 0. , 0.333, 0.667]])
>>> librosa.sequence.transition_local(5, 3, window='triangle', wrap=True)
array([[0.5 , 0.25, 0. , 0. , 0.25],
[0.25, 0.5 , 0.25, 0. , 0. ],
[0. , 0.25, 0.5 , 0.25, 0. ],
[0. , 0. , 0.25, 0.5 , 0.25],
[0.25, 0. , 0. , 0.25, 0.5 ]])
Uniform local distributions with variable widths and no wrapping
>>> librosa.sequence.transition_local(5, [1, 2, 3, 3, 1], window='ones', wrap=False)
array([[1. , 0. , 0. , 0. , 0. ],
[0.5 , 0.5 , 0. , 0. , 0. ],
[0. , 0.333, 0.333, 0.333, 0. ],
[0. , 0. , 0.333, 0.333, 0.333],
[0. , 0. , 0. , 0. , 1. ]])
|
librosa/sequence.py
|
def transition_local(n_states, width, window='triangle', wrap=False):
'''Construct a localized transition matrix.
The transition matrix will have the following properties:
- `transition[i, j] = 0` if `|i - j| > width`
- `transition[i, i]` is maximal
- `transition[i, i - width//2 : i + width//2]` has shape `window`
This type of transition matrix is appropriate for state spaces
that discretely approximate continuous variables, such as in fundamental
frequency estimation.
Parameters
----------
n_states : int > 1
The number of states
width : int >= 1 or iterable
The maximum number of states to treat as "local".
If iterable, it should have length equal to `n_states`,
and specify the width independently for each state.
window : str, callable, or window specification
The window function to determine the shape of the "local" distribution.
Any window specification supported by `filters.get_window` will work here.
.. note:: Certain windows (e.g., 'hann') are identically 0 at the boundaries,
and so effectively have `width-2` non-zero values. You may have to expand
`width` to get the desired behavior.
wrap : bool
If `True`, then state locality `|i - j|` is computed modulo `n_states`.
If `False` (default), then locality is absolute.
See Also
--------
filters.get_window
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
Triangular distributions with and without wrapping
>>> librosa.sequence.transition_local(5, 3, window='triangle', wrap=False)
array([[0.667, 0.333, 0. , 0. , 0. ],
[0.25 , 0.5 , 0.25 , 0. , 0. ],
[0. , 0.25 , 0.5 , 0.25 , 0. ],
[0. , 0. , 0.25 , 0.5 , 0.25 ],
[0. , 0. , 0. , 0.333, 0.667]])
>>> librosa.sequence.transition_local(5, 3, window='triangle', wrap=True)
array([[0.5 , 0.25, 0. , 0. , 0.25],
[0.25, 0.5 , 0.25, 0. , 0. ],
[0. , 0.25, 0.5 , 0.25, 0. ],
[0. , 0. , 0.25, 0.5 , 0.25],
[0.25, 0. , 0. , 0.25, 0.5 ]])
Uniform local distributions with variable widths and no wrapping
>>> librosa.sequence.transition_local(5, [1, 2, 3, 3, 1], window='ones', wrap=False)
array([[1. , 0. , 0. , 0. , 0. ],
[0.5 , 0.5 , 0. , 0. , 0. ],
[0. , 0.333, 0.333, 0.333, 0. ],
[0. , 0. , 0.333, 0.333, 0.333],
[0. , 0. , 0. , 0. , 1. ]])
'''
if not isinstance(n_states, int) or n_states <= 1:
raise ParameterError('n_states={} must be a positive integer > 1')
width = np.asarray(width, dtype=int)
if width.ndim == 0:
width = np.tile(width, n_states)
if width.shape != (n_states,):
raise ParameterError('width={} must have length equal to n_states={}'.format(width, n_states))
if np.any(width < 1):
raise ParameterError('width={} must be at least 1')
transition = np.zeros((n_states, n_states), dtype=np.float)
# Fill in the widths. This is inefficient, but simple
for i, width_i in enumerate(width):
trans_row = pad_center(get_window(window, width_i, fftbins=False), n_states)
trans_row = np.roll(trans_row, n_states//2 + i + 1)
if not wrap:
# Knock out the off-diagonal-band elements
trans_row[min(n_states, i + width_i//2 + 1):] = 0
trans_row[:max(0, i - width_i//2)] = 0
transition[i] = trans_row
# Row-normalize
transition /= transition.sum(axis=1, keepdims=True)
return transition
|
def transition_local(n_states, width, window='triangle', wrap=False):
'''Construct a localized transition matrix.
The transition matrix will have the following properties:
- `transition[i, j] = 0` if `|i - j| > width`
- `transition[i, i]` is maximal
- `transition[i, i - width//2 : i + width//2]` has shape `window`
This type of transition matrix is appropriate for state spaces
that discretely approximate continuous variables, such as in fundamental
frequency estimation.
Parameters
----------
n_states : int > 1
The number of states
width : int >= 1 or iterable
The maximum number of states to treat as "local".
If iterable, it should have length equal to `n_states`,
and specify the width independently for each state.
window : str, callable, or window specification
The window function to determine the shape of the "local" distribution.
Any window specification supported by `filters.get_window` will work here.
.. note:: Certain windows (e.g., 'hann') are identically 0 at the boundaries,
and so effectively have `width-2` non-zero values. You may have to expand
`width` to get the desired behavior.
wrap : bool
If `True`, then state locality `|i - j|` is computed modulo `n_states`.
If `False` (default), then locality is absolute.
See Also
--------
filters.get_window
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
Triangular distributions with and without wrapping
>>> librosa.sequence.transition_local(5, 3, window='triangle', wrap=False)
array([[0.667, 0.333, 0. , 0. , 0. ],
[0.25 , 0.5 , 0.25 , 0. , 0. ],
[0. , 0.25 , 0.5 , 0.25 , 0. ],
[0. , 0. , 0.25 , 0.5 , 0.25 ],
[0. , 0. , 0. , 0.333, 0.667]])
>>> librosa.sequence.transition_local(5, 3, window='triangle', wrap=True)
array([[0.5 , 0.25, 0. , 0. , 0.25],
[0.25, 0.5 , 0.25, 0. , 0. ],
[0. , 0.25, 0.5 , 0.25, 0. ],
[0. , 0. , 0.25, 0.5 , 0.25],
[0.25, 0. , 0. , 0.25, 0.5 ]])
Uniform local distributions with variable widths and no wrapping
>>> librosa.sequence.transition_local(5, [1, 2, 3, 3, 1], window='ones', wrap=False)
array([[1. , 0. , 0. , 0. , 0. ],
[0.5 , 0.5 , 0. , 0. , 0. ],
[0. , 0.333, 0.333, 0.333, 0. ],
[0. , 0. , 0.333, 0.333, 0.333],
[0. , 0. , 0. , 0. , 1. ]])
'''
if not isinstance(n_states, int) or n_states <= 1:
raise ParameterError('n_states={} must be a positive integer > 1')
width = np.asarray(width, dtype=int)
if width.ndim == 0:
width = np.tile(width, n_states)
if width.shape != (n_states,):
raise ParameterError('width={} must have length equal to n_states={}'.format(width, n_states))
if np.any(width < 1):
raise ParameterError('width={} must be at least 1')
transition = np.zeros((n_states, n_states), dtype=np.float)
# Fill in the widths. This is inefficient, but simple
for i, width_i in enumerate(width):
trans_row = pad_center(get_window(window, width_i, fftbins=False), n_states)
trans_row = np.roll(trans_row, n_states//2 + i + 1)
if not wrap:
# Knock out the off-diagonal-band elements
trans_row[min(n_states, i + width_i//2 + 1):] = 0
trans_row[:max(0, i - width_i//2)] = 0
transition[i] = trans_row
# Row-normalize
transition /= transition.sum(axis=1, keepdims=True)
return transition
|
[
"Construct",
"a",
"localized",
"transition",
"matrix",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/sequence.py#L1024-L1129
|
[
"def",
"transition_local",
"(",
"n_states",
",",
"width",
",",
"window",
"=",
"'triangle'",
",",
"wrap",
"=",
"False",
")",
":",
"if",
"not",
"isinstance",
"(",
"n_states",
",",
"int",
")",
"or",
"n_states",
"<=",
"1",
":",
"raise",
"ParameterError",
"(",
"'n_states={} must be a positive integer > 1'",
")",
"width",
"=",
"np",
".",
"asarray",
"(",
"width",
",",
"dtype",
"=",
"int",
")",
"if",
"width",
".",
"ndim",
"==",
"0",
":",
"width",
"=",
"np",
".",
"tile",
"(",
"width",
",",
"n_states",
")",
"if",
"width",
".",
"shape",
"!=",
"(",
"n_states",
",",
")",
":",
"raise",
"ParameterError",
"(",
"'width={} must have length equal to n_states={}'",
".",
"format",
"(",
"width",
",",
"n_states",
")",
")",
"if",
"np",
".",
"any",
"(",
"width",
"<",
"1",
")",
":",
"raise",
"ParameterError",
"(",
"'width={} must be at least 1'",
")",
"transition",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_states",
",",
"n_states",
")",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"# Fill in the widths. This is inefficient, but simple",
"for",
"i",
",",
"width_i",
"in",
"enumerate",
"(",
"width",
")",
":",
"trans_row",
"=",
"pad_center",
"(",
"get_window",
"(",
"window",
",",
"width_i",
",",
"fftbins",
"=",
"False",
")",
",",
"n_states",
")",
"trans_row",
"=",
"np",
".",
"roll",
"(",
"trans_row",
",",
"n_states",
"//",
"2",
"+",
"i",
"+",
"1",
")",
"if",
"not",
"wrap",
":",
"# Knock out the off-diagonal-band elements",
"trans_row",
"[",
"min",
"(",
"n_states",
",",
"i",
"+",
"width_i",
"//",
"2",
"+",
"1",
")",
":",
"]",
"=",
"0",
"trans_row",
"[",
":",
"max",
"(",
"0",
",",
"i",
"-",
"width_i",
"//",
"2",
")",
"]",
"=",
"0",
"transition",
"[",
"i",
"]",
"=",
"trans_row",
"# Row-normalize",
"transition",
"/=",
"transition",
".",
"sum",
"(",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"return",
"transition"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
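
For `transition_local`, each row is a window-shaped band centered on the diagonal and then row-normalized. A sketch (not part of the recorded entry) checking both properties for a tridiagonal case:

>>> import numpy as np
>>> import librosa
>>> T = librosa.sequence.transition_local(7, 3, window='triangle', wrap=False)
>>> np.allclose(T.sum(axis=1), 1.0)
True
>>> i, j = np.indices(T.shape)
>>> bool(np.all(T[np.abs(i - j) > 1] == 0))   # width=3 keeps only |i - j| <= 1
True
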
|
test
|
onset_detect
|
Basic onset detector. Locate note onset events by picking peaks in an
onset strength envelope.
The `peak_pick` parameters were chosen by large-scale hyper-parameter
optimization over the dataset provided by [1]_.
.. [1] https://github.com/CPJKU/onset_db
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(m,)]
(optional) pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length (in samples)
units : {'frames', 'samples', 'time'}
The units to encode detected onset events in.
By default, 'frames' are used.
backtrack : bool
If `True`, detected onset events are backtracked to the nearest
preceding minimum of `energy`.
This is primarily useful when using onsets as slice points for segmentation.
energy : np.ndarray [shape=(m,)] (optional)
An energy function to use for backtracking detected onset events.
If none is provided, then `onset_envelope` is used.
kwargs : additional keyword arguments
Additional parameters for peak picking.
See `librosa.util.peak_pick` for details.
Returns
-------
onsets : np.ndarray [shape=(n_onsets,)]
estimated positions of detected onsets, in whichever units
are specified. By default, frame indices.
.. note::
If no onset strength could be detected, onset_detect returns
an empty array.
Raises
------
ParameterError
if neither `y` nor `onsets` are provided
or if `units` is not one of 'frames', 'samples', or 'time'
See Also
--------
onset_strength : compute onset strength per-frame
onset_backtrack : backtracking onset events
librosa.util.peak_pick : pick peaks from a time series
Examples
--------
Get onset times from a signal
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=2.0)
>>> onset_frames = librosa.onset.onset_detect(y=y, sr=sr)
>>> librosa.frames_to_time(onset_frames, sr=sr)
array([ 0.07 , 0.395, 0.511, 0.627, 0.766, 0.975,
1.207, 1.324, 1.44 , 1.788, 1.881])
Or use a pre-computed onset envelope
>>> o_env = librosa.onset.onset_strength(y, sr=sr)
>>> times = librosa.frames_to_time(np.arange(len(o_env)), sr=sr)
>>> onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=sr)
>>> import matplotlib.pyplot as plt
>>> D = np.abs(librosa.stft(y))
>>> plt.figure()
>>> ax1 = plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... x_axis='time', y_axis='log')
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2, sharex=ax1)
>>> plt.plot(times, o_env, label='Onset strength')
>>> plt.vlines(times[onset_frames], 0, o_env.max(), color='r', alpha=0.9,
... linestyle='--', label='Onsets')
>>> plt.axis('tight')
>>> plt.legend(frameon=True, framealpha=0.75)
|
librosa/onset.py
|
def onset_detect(y=None, sr=22050, onset_envelope=None, hop_length=512,
backtrack=False, energy=None,
units='frames', **kwargs):
"""Basic onset detector. Locate note onset events by picking peaks in an
onset strength envelope.
The `peak_pick` parameters were chosen by large-scale hyper-parameter
optimization over the dataset provided by [1]_.
.. [1] https://github.com/CPJKU/onset_db
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(m,)]
(optional) pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length (in samples)
units : {'frames', 'samples', 'time'}
The units to encode detected onset events in.
By default, 'frames' are used.
backtrack : bool
If `True`, detected onset events are backtracked to the nearest
preceding minimum of `energy`.
This is primarily useful when using onsets as slice points for segmentation.
energy : np.ndarray [shape=(m,)] (optional)
An energy function to use for backtracking detected onset events.
If none is provided, then `onset_envelope` is used.
kwargs : additional keyword arguments
Additional parameters for peak picking.
See `librosa.util.peak_pick` for details.
Returns
-------
onsets : np.ndarray [shape=(n_onsets,)]
estimated positions of detected onsets, in whichever units
are specified. By default, frame indices.
.. note::
If no onset strength could be detected, onset_detect returns
an empty array.
Raises
------
ParameterError
if neither `y` nor `onsets` are provided
or if `units` is not one of 'frames', 'samples', or 'time'
See Also
--------
onset_strength : compute onset strength per-frame
onset_backtrack : backtracking onset events
librosa.util.peak_pick : pick peaks from a time series
Examples
--------
Get onset times from a signal
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=2.0)
>>> onset_frames = librosa.onset.onset_detect(y=y, sr=sr)
>>> librosa.frames_to_time(onset_frames, sr=sr)
array([ 0.07 , 0.395, 0.511, 0.627, 0.766, 0.975,
1.207, 1.324, 1.44 , 1.788, 1.881])
Or use a pre-computed onset envelope
>>> o_env = librosa.onset.onset_strength(y, sr=sr)
>>> times = librosa.frames_to_time(np.arange(len(o_env)), sr=sr)
>>> onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=sr)
>>> import matplotlib.pyplot as plt
>>> D = np.abs(librosa.stft(y))
>>> plt.figure()
>>> ax1 = plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... x_axis='time', y_axis='log')
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2, sharex=ax1)
>>> plt.plot(times, o_env, label='Onset strength')
>>> plt.vlines(times[onset_frames], 0, o_env.max(), color='r', alpha=0.9,
... linestyle='--', label='Onsets')
>>> plt.axis('tight')
>>> plt.legend(frameon=True, framealpha=0.75)
"""
# First, get the frame->beat strength profile if we don't already have one
if onset_envelope is None:
if y is None:
raise ParameterError('y or onset_envelope must be provided')
onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)
# Shift onset envelope up to be non-negative
# (a common normalization step to make the threshold more consistent)
onset_envelope -= onset_envelope.min()
# Do we have any onsets to grab?
if not onset_envelope.any():
return np.array([], dtype=np.int)
# Normalize onset strength function to [0, 1] range
onset_envelope /= onset_envelope.max()
# These parameter settings found by large-scale search
kwargs.setdefault('pre_max', 0.03*sr//hop_length) # 30ms
kwargs.setdefault('post_max', 0.00*sr//hop_length + 1) # 0ms
kwargs.setdefault('pre_avg', 0.10*sr//hop_length) # 100ms
kwargs.setdefault('post_avg', 0.10*sr//hop_length + 1) # 100ms
kwargs.setdefault('wait', 0.03*sr//hop_length) # 30ms
kwargs.setdefault('delta', 0.07)
# Peak pick the onset envelope
onsets = util.peak_pick(onset_envelope, **kwargs)
# Optionally backtrack the events
if backtrack:
if energy is None:
energy = onset_envelope
onsets = onset_backtrack(onsets, energy)
if units == 'frames':
pass
elif units == 'samples':
onsets = core.frames_to_samples(onsets, hop_length=hop_length)
elif units == 'time':
onsets = core.frames_to_time(onsets, hop_length=hop_length, sr=sr)
else:
raise ParameterError('Invalid unit type: {}'.format(units))
return onsets
|
def onset_detect(y=None, sr=22050, onset_envelope=None, hop_length=512,
backtrack=False, energy=None,
units='frames', **kwargs):
"""Basic onset detector. Locate note onset events by picking peaks in an
onset strength envelope.
The `peak_pick` parameters were chosen by large-scale hyper-parameter
optimization over the dataset provided by [1]_.
.. [1] https://github.com/CPJKU/onset_db
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
onset_envelope : np.ndarray [shape=(m,)]
(optional) pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length (in samples)
units : {'frames', 'samples', 'time'}
The units to encode detected onset events in.
By default, 'frames' are used.
backtrack : bool
If `True`, detected onset events are backtracked to the nearest
preceding minimum of `energy`.
This is primarily useful when using onsets as slice points for segmentation.
energy : np.ndarray [shape=(m,)] (optional)
An energy function to use for backtracking detected onset events.
If none is provided, then `onset_envelope` is used.
kwargs : additional keyword arguments
Additional parameters for peak picking.
See `librosa.util.peak_pick` for details.
Returns
-------
onsets : np.ndarray [shape=(n_onsets,)]
estimated positions of detected onsets, in whichever units
are specified. By default, frame indices.
.. note::
If no onset strength could be detected, onset_detect returns
an empty array.
Raises
------
ParameterError
if neither `y` nor `onsets` are provided
or if `units` is not one of 'frames', 'samples', or 'time'
See Also
--------
onset_strength : compute onset strength per-frame
onset_backtrack : backtracking onset events
librosa.util.peak_pick : pick peaks from a time series
Examples
--------
Get onset times from a signal
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=2.0)
>>> onset_frames = librosa.onset.onset_detect(y=y, sr=sr)
>>> librosa.frames_to_time(onset_frames, sr=sr)
array([ 0.07 , 0.395, 0.511, 0.627, 0.766, 0.975,
1.207, 1.324, 1.44 , 1.788, 1.881])
Or use a pre-computed onset envelope
>>> o_env = librosa.onset.onset_strength(y, sr=sr)
>>> times = librosa.frames_to_time(np.arange(len(o_env)), sr=sr)
>>> onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=sr)
>>> import matplotlib.pyplot as plt
>>> D = np.abs(librosa.stft(y))
>>> plt.figure()
>>> ax1 = plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... x_axis='time', y_axis='log')
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2, sharex=ax1)
>>> plt.plot(times, o_env, label='Onset strength')
>>> plt.vlines(times[onset_frames], 0, o_env.max(), color='r', alpha=0.9,
... linestyle='--', label='Onsets')
>>> plt.axis('tight')
>>> plt.legend(frameon=True, framealpha=0.75)
"""
# First, get the frame->beat strength profile if we don't already have one
if onset_envelope is None:
if y is None:
raise ParameterError('y or onset_envelope must be provided')
onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)
# Shift onset envelope up to be non-negative
# (a common normalization step to make the threshold more consistent)
onset_envelope -= onset_envelope.min()
# Do we have any onsets to grab?
if not onset_envelope.any():
return np.array([], dtype=np.int)
# Normalize onset strength function to [0, 1] range
onset_envelope /= onset_envelope.max()
# These parameter settings found by large-scale search
kwargs.setdefault('pre_max', 0.03*sr//hop_length) # 30ms
kwargs.setdefault('post_max', 0.00*sr//hop_length + 1) # 0ms
kwargs.setdefault('pre_avg', 0.10*sr//hop_length) # 100ms
kwargs.setdefault('post_avg', 0.10*sr//hop_length + 1) # 100ms
kwargs.setdefault('wait', 0.03*sr//hop_length) # 30ms
kwargs.setdefault('delta', 0.07)
# Peak pick the onset envelope
onsets = util.peak_pick(onset_envelope, **kwargs)
# Optionally backtrack the events
if backtrack:
if energy is None:
energy = onset_envelope
onsets = onset_backtrack(onsets, energy)
if units == 'frames':
pass
elif units == 'samples':
onsets = core.frames_to_samples(onsets, hop_length=hop_length)
elif units == 'time':
onsets = core.frames_to_time(onsets, hop_length=hop_length, sr=sr)
else:
raise ParameterError('Invalid unit type: {}'.format(units))
return onsets
|
[
"Basic",
"onset",
"detector",
".",
"Locate",
"note",
"onset",
"events",
"by",
"picking",
"peaks",
"in",
"an",
"onset",
"strength",
"envelope",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/onset.py#L31-L182
|
[
"def",
"onset_detect",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"onset_envelope",
"=",
"None",
",",
"hop_length",
"=",
"512",
",",
"backtrack",
"=",
"False",
",",
"energy",
"=",
"None",
",",
"units",
"=",
"'frames'",
",",
"*",
"*",
"kwargs",
")",
":",
"# First, get the frame->beat strength profile if we don't already have one",
"if",
"onset_envelope",
"is",
"None",
":",
"if",
"y",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'y or onset_envelope must be provided'",
")",
"onset_envelope",
"=",
"onset_strength",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
")",
"# Shift onset envelope up to be non-negative",
"# (a common normalization step to make the threshold more consistent)",
"onset_envelope",
"-=",
"onset_envelope",
".",
"min",
"(",
")",
"# Do we have any onsets to grab?",
"if",
"not",
"onset_envelope",
".",
"any",
"(",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"# Normalize onset strength function to [0, 1] range",
"onset_envelope",
"/=",
"onset_envelope",
".",
"max",
"(",
")",
"# These parameter settings found by large-scale search",
"kwargs",
".",
"setdefault",
"(",
"'pre_max'",
",",
"0.03",
"*",
"sr",
"//",
"hop_length",
")",
"# 30ms",
"kwargs",
".",
"setdefault",
"(",
"'post_max'",
",",
"0.00",
"*",
"sr",
"//",
"hop_length",
"+",
"1",
")",
"# 0ms",
"kwargs",
".",
"setdefault",
"(",
"'pre_avg'",
",",
"0.10",
"*",
"sr",
"//",
"hop_length",
")",
"# 100ms",
"kwargs",
".",
"setdefault",
"(",
"'post_avg'",
",",
"0.10",
"*",
"sr",
"//",
"hop_length",
"+",
"1",
")",
"# 100ms",
"kwargs",
".",
"setdefault",
"(",
"'wait'",
",",
"0.03",
"*",
"sr",
"//",
"hop_length",
")",
"# 30ms",
"kwargs",
".",
"setdefault",
"(",
"'delta'",
",",
"0.07",
")",
"# Peak pick the onset envelope",
"onsets",
"=",
"util",
".",
"peak_pick",
"(",
"onset_envelope",
",",
"*",
"*",
"kwargs",
")",
"# Optionally backtrack the events",
"if",
"backtrack",
":",
"if",
"energy",
"is",
"None",
":",
"energy",
"=",
"onset_envelope",
"onsets",
"=",
"onset_backtrack",
"(",
"onsets",
",",
"energy",
")",
"if",
"units",
"==",
"'frames'",
":",
"pass",
"elif",
"units",
"==",
"'samples'",
":",
"onsets",
"=",
"core",
".",
"frames_to_samples",
"(",
"onsets",
",",
"hop_length",
"=",
"hop_length",
")",
"elif",
"units",
"==",
"'time'",
":",
"onsets",
"=",
"core",
".",
"frames_to_time",
"(",
"onsets",
",",
"hop_length",
"=",
"hop_length",
",",
"sr",
"=",
"sr",
")",
"else",
":",
"raise",
"ParameterError",
"(",
"'Invalid unit type: {}'",
".",
"format",
"(",
"units",
")",
")",
"return",
"onsets"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
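
A minimal end-to-end sketch for `onset_detect` (not part of the recorded entry). It uses a synthetic click track built with `librosa.clicks` so it does not depend on the bundled example audio; the exact detections depend on the peak-picking defaults, so no output is shown.

>>> import numpy as np
>>> import librosa
>>> sr = 22050
>>> y = librosa.clicks(times=[0.5, 1.0, 1.5], sr=sr, length=2 * sr)
>>> onset_times = librosa.onset.onset_detect(y=y, sr=sr, units='time')
>>> onset_samples = librosa.onset.onset_detect(y=y, sr=sr, units='samples', backtrack=True)
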
|
test
|
onset_strength
|
Compute a spectral flux onset strength envelope.
Onset strength at time `t` is determined by:
`mean_f max(0, S[f, t] - ref[f, t - lag])`
where `ref` is `S` after local max filtering along the frequency
axis [1]_.
By default, if a time series `y` is provided, S will be the
log-power Mel spectrogram.
.. [1] Böck, Sebastian, and Gerhard Widmer.
"Maximum filter vibrato suppression for onset detection."
16th International Conference on Digital Audio Effects,
Maynooth, Ireland. 2013.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as `S`.
If not provided, it will be computed from `S`.
If provided, it will override any local max filtering governed by `max_size`.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by `n_fft / (2 * hop_length)` frames
feature : function
Function for computing time-series features, e.g., scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
aggregate : function
Aggregation function to use when combining onsets
at different frequency bins.
Default: `np.mean`
kwargs : additional keyword arguments
Additional parameters to `feature()`, if `S` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(m,)]
vector containing the onset strength envelope
Raises
------
ParameterError
if neither `(y, sr)` nor `S` are provided
or if `lag` or `max_size` are not positive integers
See Also
--------
onset_detect
onset_strength_multi
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=10.0)
>>> D = np.abs(librosa.stft(y))
>>> times = librosa.frames_to_time(np.arange(D.shape[1]))
>>> plt.figure()
>>> ax1 = plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Power spectrogram')
Construct a standard onset function
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr)
>>> plt.subplot(2, 1, 2, sharex=ax1)
>>> plt.plot(times, 2 + onset_env / onset_env.max(), alpha=0.8,
... label='Mean (mel)')
Median aggregation, and custom mel options
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... aggregate=np.median,
... fmax=8000, n_mels=256)
>>> plt.plot(times, 1 + onset_env / onset_env.max(), alpha=0.8,
... label='Median (custom mel)')
Constant-Q spectrogram instead of Mel
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... feature=librosa.cqt)
>>> plt.plot(times, onset_env / onset_env.max(), alpha=0.8,
... label='Mean (CQT)')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.ylabel('Normalized strength')
>>> plt.yticks([])
>>> plt.axis('tight')
>>> plt.tight_layout()
|
librosa/onset.py
|
def onset_strength(y=None, sr=22050, S=None, lag=1, max_size=1,
ref=None,
detrend=False, center=True,
feature=None, aggregate=None,
centering=None,
**kwargs):
"""Compute a spectral flux onset strength envelope.
Onset strength at time `t` is determined by:
`mean_f max(0, S[f, t] - ref[f, t - lag])`
where `ref` is `S` after local max filtering along the frequency
axis [1]_.
By default, if a time series `y` is provided, S will be the
log-power Mel spectrogram.
.. [1] Böck, Sebastian, and Gerhard Widmer.
"Maximum filter vibrato suppression for onset detection."
16th International Conference on Digital Audio Effects,
Maynooth, Ireland. 2013.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as `S`.
If not provided, it will be computed from `S`.
If provided, it will override any local max filtering governed by `max_size`.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by `n_fft / (2 * hop_length)` frames
feature : function
Function for computing time-series features, e.g., scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
aggregate : function
Aggregation function to use when combining onsets
at different frequency bins.
Default: `np.mean`
kwargs : additional keyword arguments
Additional parameters to `feature()`, if `S` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(m,)]
vector containing the onset strength envelope
Raises
------
ParameterError
if neither `(y, sr)` nor `S` are provided
or if `lag` or `max_size` are not positive integers
See Also
--------
onset_detect
onset_strength_multi
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=10.0)
>>> D = np.abs(librosa.stft(y))
>>> times = librosa.frames_to_time(np.arange(D.shape[1]))
>>> plt.figure()
>>> ax1 = plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Power spectrogram')
Construct a standard onset function
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr)
>>> plt.subplot(2, 1, 2, sharex=ax1)
>>> plt.plot(times, 2 + onset_env / onset_env.max(), alpha=0.8,
... label='Mean (mel)')
Median aggregation, and custom mel options
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... aggregate=np.median,
... fmax=8000, n_mels=256)
>>> plt.plot(times, 1 + onset_env / onset_env.max(), alpha=0.8,
... label='Median (custom mel)')
Constant-Q spectrogram instead of Mel
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... feature=librosa.cqt)
>>> plt.plot(times, onset_env / onset_env.max(), alpha=0.8,
... label='Mean (CQT)')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.ylabel('Normalized strength')
>>> plt.yticks([])
>>> plt.axis('tight')
>>> plt.tight_layout()
"""
if aggregate is False:
raise ParameterError('aggregate={} cannot be False when computing full-spectrum onset strength.')
odf_all = onset_strength_multi(y=y,
sr=sr,
S=S,
lag=lag,
max_size=max_size,
ref=ref,
detrend=detrend,
center=center,
feature=feature,
aggregate=aggregate,
channels=None,
**kwargs)
return odf_all[0]
|
def onset_strength(y=None, sr=22050, S=None, lag=1, max_size=1,
ref=None,
detrend=False, center=True,
feature=None, aggregate=None,
centering=None,
**kwargs):
"""Compute a spectral flux onset strength envelope.
Onset strength at time `t` is determined by:
`mean_f max(0, S[f, t] - ref[f, t - lag])`
where `ref` is `S` after local max filtering along the frequency
axis [1]_.
By default, if a time series `y` is provided, S will be the
log-power Mel spectrogram.
.. [1] Böck, Sebastian, and Gerhard Widmer.
"Maximum filter vibrato suppression for onset detection."
16th International Conference on Digital Audio Effects,
Maynooth, Ireland. 2013.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as `S`.
If not provided, it will be computed from `S`.
If provided, it will override any local max filtering governed by `max_size`.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by `n_fft / (2 * hop_length)` frames
feature : function
Function for computing time-series features, e.g., scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
aggregate : function
Aggregation function to use when combining onsets
at different frequency bins.
Default: `np.mean`
kwargs : additional keyword arguments
Additional parameters to `feature()`, if `S` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(m,)]
vector containing the onset strength envelope
Raises
------
ParameterError
if neither `(y, sr)` nor `S` are provided
or if `lag` or `max_size` are not positive integers
See Also
--------
onset_detect
onset_strength_multi
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=10.0)
>>> D = np.abs(librosa.stft(y))
>>> times = librosa.frames_to_time(np.arange(D.shape[1]))
>>> plt.figure()
>>> ax1 = plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Power spectrogram')
Construct a standard onset function
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr)
>>> plt.subplot(2, 1, 2, sharex=ax1)
>>> plt.plot(times, 2 + onset_env / onset_env.max(), alpha=0.8,
... label='Mean (mel)')
Median aggregation, and custom mel options
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... aggregate=np.median,
... fmax=8000, n_mels=256)
>>> plt.plot(times, 1 + onset_env / onset_env.max(), alpha=0.8,
... label='Median (custom mel)')
Constant-Q spectrogram instead of Mel
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... feature=librosa.cqt)
>>> plt.plot(times, onset_env / onset_env.max(), alpha=0.8,
... label='Mean (CQT)')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.ylabel('Normalized strength')
>>> plt.yticks([])
>>> plt.axis('tight')
>>> plt.tight_layout()
"""
if aggregate is False:
raise ParameterError('aggregate={} cannot be False when computing full-spectrum onset strength.')
odf_all = onset_strength_multi(y=y,
sr=sr,
S=S,
lag=lag,
max_size=max_size,
ref=ref,
detrend=detrend,
center=center,
feature=feature,
aggregate=aggregate,
channels=None,
**kwargs)
return odf_all[0]
|
[
"Compute",
"a",
"spectral",
"flux",
"onset",
"strength",
"envelope",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/onset.py#L185-L333
|
[
"def",
"onset_strength",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"lag",
"=",
"1",
",",
"max_size",
"=",
"1",
",",
"ref",
"=",
"None",
",",
"detrend",
"=",
"False",
",",
"center",
"=",
"True",
",",
"feature",
"=",
"None",
",",
"aggregate",
"=",
"None",
",",
"centering",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"aggregate",
"is",
"False",
":",
"raise",
"ParameterError",
"(",
"'aggregate={} cannot be False when computing full-spectrum onset strength.'",
")",
"odf_all",
"=",
"onset_strength_multi",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"S",
"=",
"S",
",",
"lag",
"=",
"lag",
",",
"max_size",
"=",
"max_size",
",",
"ref",
"=",
"ref",
",",
"detrend",
"=",
"detrend",
",",
"center",
"=",
"center",
",",
"feature",
"=",
"feature",
",",
"aggregate",
"=",
"aggregate",
",",
"channels",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"return",
"odf_all",
"[",
"0",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
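
`onset_strength` simply delegates to `onset_strength_multi` with a single channel and returns the first row. A sketch (not part of the recorded entry) showing the pre-computed-spectrogram path via the `S` argument, again using a synthetic click track:

>>> import numpy as np
>>> import librosa
>>> sr = 22050
>>> y = librosa.clicks(times=[0.25, 0.75], sr=sr, length=sr)
>>> S = librosa.power_to_db(librosa.feature.melspectrogram(y=y, sr=sr))
>>> env = librosa.onset.onset_strength(S=S, sr=sr)
>>> env.ndim
1
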
|
test
|
onset_backtrack
|
Backtrack detected onset events to the nearest preceding local
minimum of an energy function.
This function can be used to roll back the timing of detected onsets
from a detected peak amplitude to the preceding minimum.
This is most useful when using onsets to determine slice points for
segmentation, as described by [1]_.
.. [1] Jehan, Tristan.
"Creating music by listening"
Doctoral dissertation
Massachusetts Institute of Technology, 2005.
Parameters
----------
events : np.ndarray, dtype=int
List of onset event frame indices, as computed by `onset_detect`
energy : np.ndarray, shape=(m,)
An energy function
Returns
-------
events_backtracked : np.ndarray, shape=events.shape
The input events matched to nearest preceding minima of `energy`.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=2.0)
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr)
>>> # Detect events without backtracking
>>> onset_raw = librosa.onset.onset_detect(onset_envelope=oenv,
... backtrack=False)
>>> # Backtrack the events using the onset envelope
>>> onset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)
>>> # Backtrack the events using the RMS values
>>> rms = librosa.feature.rms(S=np.abs(librosa.stft(y=y)))
>>> onset_bt_rms = librosa.onset.onset_backtrack(onset_raw, rms[0])
>>> # Plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> plt.plot(oenv, label='Onset strength')
>>> plt.vlines(onset_raw, 0, oenv.max(), label='Raw onsets')
>>> plt.vlines(onset_bt, 0, oenv.max(), label='Backtracked', color='r')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.subplot(2,1,2)
>>> plt.plot(rms[0], label='RMS')
>>> plt.vlines(onset_bt_rms, 0, rms.max(), label='Backtracked (RMS)', color='r')
>>> plt.legend(frameon=True, framealpha=0.75)
|
librosa/onset.py
|
def onset_backtrack(events, energy):
'''Backtrack detected onset events to the nearest preceding local
minimum of an energy function.
This function can be used to roll back the timing of detected onsets
from a detected peak amplitude to the preceding minimum.
This is most useful when using onsets to determine slice points for
segmentation, as described by [1]_.
.. [1] Jehan, Tristan.
"Creating music by listening"
Doctoral dissertation
Massachusetts Institute of Technology, 2005.
Parameters
----------
events : np.ndarray, dtype=int
List of onset event frame indices, as computed by `onset_detect`
energy : np.ndarray, shape=(m,)
An energy function
Returns
-------
events_backtracked : np.ndarray, shape=events.shape
The input events matched to nearest preceding minima of `energy`.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=2.0)
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr)
>>> # Detect events without backtracking
>>> onset_raw = librosa.onset.onset_detect(onset_envelope=oenv,
... backtrack=False)
>>> # Backtrack the events using the onset envelope
>>> onset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)
>>> # Backtrack the events using the RMS values
>>> rms = librosa.feature.rms(S=np.abs(librosa.stft(y=y)))
>>> onset_bt_rms = librosa.onset.onset_backtrack(onset_raw, rms[0])
>>> # Plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> plt.plot(oenv, label='Onset strength')
>>> plt.vlines(onset_raw, 0, oenv.max(), label='Raw onsets')
>>> plt.vlines(onset_bt, 0, oenv.max(), label='Backtracked', color='r')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.subplot(2,1,2)
>>> plt.plot(rms[0], label='RMS')
>>> plt.vlines(onset_bt_rms, 0, rms.max(), label='Backtracked (RMS)', color='r')
>>> plt.legend(frameon=True, framealpha=0.75)
'''
# Find points where energy is non-increasing
# all points: energy[i] <= energy[i-1]
# tail points: energy[i] < energy[i+1]
minima = np.flatnonzero((energy[1:-1] <= energy[:-2]) &
(energy[1:-1] < energy[2:]))
# Pad on a 0, just in case we have onsets with no preceding minimum
# Shift by one to account for slicing in minima detection
minima = util.fix_frames(1 + minima, x_min=0)
# Only match going left from the detected events
return minima[util.match_events(events, minima, right=False)]
|
def onset_backtrack(events, energy):
'''Backtrack detected onset events to the nearest preceding local
minimum of an energy function.
This function can be used to roll back the timing of detected onsets
from a detected peak amplitude to the preceding minimum.
This is most useful when using onsets to determine slice points for
segmentation, as described by [1]_.
.. [1] Jehan, Tristan.
"Creating music by listening"
Doctoral dissertation
Massachusetts Institute of Technology, 2005.
Parameters
----------
events : np.ndarray, dtype=int
List of onset event frame indices, as computed by `onset_detect`
energy : np.ndarray, shape=(m,)
An energy function
Returns
-------
events_backtracked : np.ndarray, shape=events.shape
The input events matched to nearest preceding minima of `energy`.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=2.0)
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr)
>>> # Detect events without backtracking
>>> onset_raw = librosa.onset.onset_detect(onset_envelope=oenv,
... backtrack=False)
>>> # Backtrack the events using the onset envelope
>>> onset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)
>>> # Backtrack the events using the RMS values
>>> rms = librosa.feature.rms(S=np.abs(librosa.stft(y=y)))
>>> onset_bt_rms = librosa.onset.onset_backtrack(onset_raw, rms[0])
>>> # Plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> plt.plot(oenv, label='Onset strength')
>>> plt.vlines(onset_raw, 0, oenv.max(), label='Raw onsets')
>>> plt.vlines(onset_bt, 0, oenv.max(), label='Backtracked', color='r')
>>> plt.legend(frameon=True, framealpha=0.75)
>>> plt.subplot(2,1,2)
>>> plt.plot(rms[0], label='RMS')
>>> plt.vlines(onset_bt_rms, 0, rms.max(), label='Backtracked (RMS)', color='r')
>>> plt.legend(frameon=True, framealpha=0.75)
'''
# Find points where energy is non-increasing
# all points: energy[i] <= energy[i-1]
# tail points: energy[i] < energy[i+1]
minima = np.flatnonzero((energy[1:-1] <= energy[:-2]) &
(energy[1:-1] < energy[2:]))
# Pad on a 0, just in case we have onsets with no preceding minimum
# Shift by one to account for slicing in minima detection
minima = util.fix_frames(1 + minima, x_min=0)
# Only match going left from the detected events
return minima[util.match_events(events, minima, right=False)]
|
[
"Backtrack",
"detected",
"onset",
"events",
"to",
"the",
"nearest",
"preceding",
"local",
"minimum",
"of",
"an",
"energy",
"function",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/onset.py#L336-L403
|
[
"def",
"onset_backtrack",
"(",
"events",
",",
"energy",
")",
":",
"# Find points where energy is non-increasing",
"# all points: energy[i] <= energy[i-1]",
"# tail points: energy[i] < energy[i+1]",
"minima",
"=",
"np",
".",
"flatnonzero",
"(",
"(",
"energy",
"[",
"1",
":",
"-",
"1",
"]",
"<=",
"energy",
"[",
":",
"-",
"2",
"]",
")",
"&",
"(",
"energy",
"[",
"1",
":",
"-",
"1",
"]",
"<",
"energy",
"[",
"2",
":",
"]",
")",
")",
"# Pad on a 0, just in case we have onsets with no preceding minimum",
"# Shift by one to account for slicing in minima detection",
"minima",
"=",
"util",
".",
"fix_frames",
"(",
"1",
"+",
"minima",
",",
"x_min",
"=",
"0",
")",
"# Only match going left from the detected events",
"return",
"minima",
"[",
"util",
".",
"match_events",
"(",
"events",
",",
"minima",
",",
"right",
"=",
"False",
")",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
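
To make the backtracking rule concrete, here is a small worked sketch (not part of the recorded entry) with a hypothetical energy curve and hypothetical detected peak frames: each event is rolled back to the nearest preceding local minimum (or frame 0).

>>> import numpy as np
>>> import librosa
>>> energy = np.array([1.0, 0.5, 0.2, 0.8, 1.0, 0.3, 0.1, 0.9])   # hypothetical per-frame energy
>>> events = np.array([4, 7])                                      # hypothetical detected peaks
>>> librosa.onset.onset_backtrack(events, energy)
array([2, 6])
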
|
test
|
onset_strength_multi
|
Compute a spectral flux onset strength envelope across multiple channels.
Onset strength for channel `i` at time `t` is determined by:
`mean_{f in channels[i]} max(0, S[f, t+1] - S[f, t])`
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as `S`.
If not provided, it will be computed from `S`.
If provided, it will override any local max filtering governed by `max_size`.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by `n_fft / (2 * hop_length)` frames
feature : function
Function for computing time-series features, e.g., scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
aggregate : function or False
Aggregation function to use when combining onsets
at different frequency bins.
If `False`, then no aggregation is performed.
Default: `np.mean`
channels : list or None
Array of channel boundaries or slice objects.
If `None`, then a single channel is generated to span all bands.
kwargs : additional keyword arguments
Additional parameters to `feature()`, if `S` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(n_channels, m)]
array containing the onset strength envelope for each specified channel
Raises
------
ParameterError
if neither `(y, sr)` nor `S` are provided
See Also
--------
onset_strength
Notes
-----
This function caches at level 30.
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=10.0)
>>> D = np.abs(librosa.stft(y))
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log')
>>> plt.title('Power spectrogram')
Construct a standard onset function over four sub-bands
>>> onset_subbands = librosa.onset.onset_strength_multi(y=y, sr=sr,
... channels=[0, 32, 64, 96, 128])
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(onset_subbands, x_axis='time')
>>> plt.ylabel('Sub-bands')
>>> plt.title('Sub-band onset strength')
|
librosa/onset.py
|
def onset_strength_multi(y=None, sr=22050, S=None, lag=1, max_size=1,
ref=None, detrend=False, center=True, feature=None,
aggregate=None, channels=None, **kwargs):
"""Compute a spectral flux onset strength envelope across multiple channels.
Onset strength for channel `i` at time `t` is determined by:
`mean_{f in channels[i]} max(0, S[f, t+1] - S[f, t])`
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as `S`.
If not provided, it will be computed from `S`.
If provided, it will override any local max filtering governed by `max_size`.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by `n_fft / (2 * hop_length)` frames
feature : function
Function for computing time-series features, e.g., scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
aggregate : function or False
Aggregation function to use when combining onsets
at different frequency bins.
If `False`, then no aggregation is performed.
Default: `np.mean`
channels : list or None
Array of channel boundaries or slice objects.
If `None`, then a single channel is generated to span all bands.
kwargs : additional keyword arguments
Additional parameters to `feature()`, if `S` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(n_channels, m)]
array containing the onset strength envelope for each specified channel
Raises
------
ParameterError
if neither `(y, sr)` nor `S` are provided
See Also
--------
onset_strength
Notes
-----
This function caches at level 30.
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=10.0)
>>> D = np.abs(librosa.stft(y))
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log')
>>> plt.title('Power spectrogram')
Construct a standard onset function over four sub-bands
>>> onset_subbands = librosa.onset.onset_strength_multi(y=y, sr=sr,
... channels=[0, 32, 64, 96, 128])
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(onset_subbands, x_axis='time')
>>> plt.ylabel('Sub-bands')
>>> plt.title('Sub-band onset strength')
"""
if feature is None:
feature = melspectrogram
kwargs.setdefault('fmax', 11025.0)
if aggregate is None:
aggregate = np.mean
if lag < 1 or not isinstance(lag, int):
raise ParameterError('lag must be a positive integer')
if max_size < 1 or not isinstance(max_size, int):
raise ParameterError('max_size must be a positive integer')
# First, compute mel spectrogram
if S is None:
S = np.abs(feature(y=y, sr=sr, **kwargs))
# Convert to dBs
S = core.power_to_db(S)
# Retrieve the n_fft and hop_length,
# or default values for onsets if not provided
n_fft = kwargs.get('n_fft', 2048)
hop_length = kwargs.get('hop_length', 512)
# Ensure that S is at least 2-d
S = np.atleast_2d(S)
# Compute the reference spectrogram.
# Efficiency hack: skip filtering step and pass by reference
# if max_size will produce a no-op.
if ref is None:
if max_size == 1:
ref = S
else:
ref = scipy.ndimage.maximum_filter1d(S, max_size, axis=0)
elif ref.shape != S.shape:
raise ParameterError('Reference spectrum shape {} must match input spectrum {}'.format(ref.shape, S.shape))
# Compute difference to the reference, spaced by lag
onset_env = S[:, lag:] - ref[:, :-lag]
# Discard negatives (decreasing amplitude)
onset_env = np.maximum(0.0, onset_env)
# Aggregate within channels
pad = True
if channels is None:
channels = [slice(None)]
else:
pad = False
if aggregate:
onset_env = util.sync(onset_env, channels,
aggregate=aggregate,
pad=pad, axis=0)
# compensate for lag
pad_width = lag
if center:
# Counter-act framing effects. Shift the onsets by n_fft / hop_length
pad_width += n_fft // (2 * hop_length)
onset_env = np.pad(onset_env, ([0, 0], [int(pad_width), 0]),
mode='constant')
# remove the DC component
if detrend:
onset_env = scipy.signal.lfilter([1.0, -1.0], [1.0, -0.99],
onset_env, axis=-1)
# Trim to match the input duration
if center:
onset_env = onset_env[:, :S.shape[1]]
return onset_env
|
def onset_strength_multi(y=None, sr=22050, S=None, lag=1, max_size=1,
ref=None, detrend=False, center=True, feature=None,
aggregate=None, channels=None, **kwargs):
"""Compute a spectral flux onset strength envelope across multiple channels.
Onset strength for channel `i` at time `t` is determined by:
`mean_{f in channels[i]} max(0, S[f, t+1] - S[f, t])`
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of `y`
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as `S`.
If not provided, it will be computed from `S`.
If provided, it will override any local max filtering governed by `max_size`.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by `n_fft / (2 * hop_length)` frames
feature : function
Function for computing time-series features, e.g., scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with `fmax=11025.0`
aggregate : function or False
Aggregation function to use when combining onsets
at different frequency bins.
If `False`, then no aggregation is performed.
Default: `np.mean`
channels : list or None
Array of channel boundaries or slice objects.
If `None`, then a single channel is generated to span all bands.
kwargs : additional keyword arguments
Additional parameters to `feature()`, if `S` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(n_channels, m)]
array containing the onset strength envelope for each specified channel
Raises
------
ParameterError
if neither `(y, sr)` nor `S` are provided
See Also
--------
onset_strength
Notes
-----
This function caches at level 30.
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=10.0)
>>> D = np.abs(librosa.stft(y))
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log')
>>> plt.title('Power spectrogram')
Construct a standard onset function over four sub-bands
>>> onset_subbands = librosa.onset.onset_strength_multi(y=y, sr=sr,
... channels=[0, 32, 64, 96, 128])
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(onset_subbands, x_axis='time')
>>> plt.ylabel('Sub-bands')
>>> plt.title('Sub-band onset strength')
"""
if feature is None:
feature = melspectrogram
kwargs.setdefault('fmax', 11025.0)
if aggregate is None:
aggregate = np.mean
if lag < 1 or not isinstance(lag, int):
raise ParameterError('lag must be a positive integer')
if max_size < 1 or not isinstance(max_size, int):
raise ParameterError('max_size must be a positive integer')
# First, compute mel spectrogram
if S is None:
S = np.abs(feature(y=y, sr=sr, **kwargs))
# Convert to dBs
S = core.power_to_db(S)
# Retrieve the n_fft and hop_length,
# or default values for onsets if not provided
n_fft = kwargs.get('n_fft', 2048)
hop_length = kwargs.get('hop_length', 512)
# Ensure that S is at least 2-d
S = np.atleast_2d(S)
# Compute the reference spectrogram.
# Efficiency hack: skip filtering step and pass by reference
# if max_size will produce a no-op.
if ref is None:
if max_size == 1:
ref = S
else:
ref = scipy.ndimage.maximum_filter1d(S, max_size, axis=0)
elif ref.shape != S.shape:
raise ParameterError('Reference spectrum shape {} must match input spectrum {}'.format(ref.shape, S.shape))
# Compute difference to the reference, spaced by lag
onset_env = S[:, lag:] - ref[:, :-lag]
# Discard negatives (decreasing amplitude)
onset_env = np.maximum(0.0, onset_env)
# Aggregate within channels
pad = True
if channels is None:
channels = [slice(None)]
else:
pad = False
if aggregate:
onset_env = util.sync(onset_env, channels,
aggregate=aggregate,
pad=pad, axis=0)
# compensate for lag
pad_width = lag
if center:
# Counter-act framing effects. Shift the onsets by n_fft / hop_length
pad_width += n_fft // (2 * hop_length)
onset_env = np.pad(onset_env, ([0, 0], [int(pad_width), 0]),
mode='constant')
# remove the DC component
if detrend:
onset_env = scipy.signal.lfilter([1.0, -1.0], [1.0, -0.99],
onset_env, axis=-1)
# Trim to match the input duration
if center:
onset_env = onset_env[:, :S.shape[1]]
return onset_env
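A minimal sketch of the multi-channel API above, using a synthetic gated tone as a stand-in for real audio; the channel boundaries mirror the four sub-bands from the docstring example.

import numpy as np
import librosa

sr = 22050
t = np.arange(3 * sr) / sr
# A 440 Hz tone gated on and off every half second, giving clear onsets.
y = np.sin(2 * np.pi * 440.0 * t) * (np.floor(2 * t) % 2)

onset_subbands = librosa.onset.onset_strength_multi(y=y, sr=sr,
                                                    channels=[0, 32, 64, 96, 128])
print(onset_subbands.shape)  # (4, n_frames): one onset envelope per mel sub-band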
|
[
"Compute",
"a",
"spectral",
"flux",
"onset",
"strength",
"envelope",
"across",
"multiple",
"channels",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/onset.py#L407-L586
|
[
"def",
"onset_strength_multi",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"lag",
"=",
"1",
",",
"max_size",
"=",
"1",
",",
"ref",
"=",
"None",
",",
"detrend",
"=",
"False",
",",
"center",
"=",
"True",
",",
"feature",
"=",
"None",
",",
"aggregate",
"=",
"None",
",",
"channels",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"feature",
"is",
"None",
":",
"feature",
"=",
"melspectrogram",
"kwargs",
".",
"setdefault",
"(",
"'fmax'",
",",
"11025.0",
")",
"if",
"aggregate",
"is",
"None",
":",
"aggregate",
"=",
"np",
".",
"mean",
"if",
"lag",
"<",
"1",
"or",
"not",
"isinstance",
"(",
"lag",
",",
"int",
")",
":",
"raise",
"ParameterError",
"(",
"'lag must be a positive integer'",
")",
"if",
"max_size",
"<",
"1",
"or",
"not",
"isinstance",
"(",
"max_size",
",",
"int",
")",
":",
"raise",
"ParameterError",
"(",
"'max_size must be a positive integer'",
")",
"# First, compute mel spectrogram",
"if",
"S",
"is",
"None",
":",
"S",
"=",
"np",
".",
"abs",
"(",
"feature",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"*",
"*",
"kwargs",
")",
")",
"# Convert to dBs",
"S",
"=",
"core",
".",
"power_to_db",
"(",
"S",
")",
"# Retrieve the n_fft and hop_length,",
"# or default values for onsets if not provided",
"n_fft",
"=",
"kwargs",
".",
"get",
"(",
"'n_fft'",
",",
"2048",
")",
"hop_length",
"=",
"kwargs",
".",
"get",
"(",
"'hop_length'",
",",
"512",
")",
"# Ensure that S is at least 2-d",
"S",
"=",
"np",
".",
"atleast_2d",
"(",
"S",
")",
"# Compute the reference spectrogram.",
"# Efficiency hack: skip filtering step and pass by reference",
"# if max_size will produce a no-op.",
"if",
"ref",
"is",
"None",
":",
"if",
"max_size",
"==",
"1",
":",
"ref",
"=",
"S",
"else",
":",
"ref",
"=",
"scipy",
".",
"ndimage",
".",
"maximum_filter1d",
"(",
"S",
",",
"max_size",
",",
"axis",
"=",
"0",
")",
"elif",
"ref",
".",
"shape",
"!=",
"S",
".",
"shape",
":",
"raise",
"ParameterError",
"(",
"'Reference spectrum shape {} must match input spectrum {}'",
".",
"format",
"(",
"ref",
".",
"shape",
",",
"S",
".",
"shape",
")",
")",
"# Compute difference to the reference, spaced by lag",
"onset_env",
"=",
"S",
"[",
":",
",",
"lag",
":",
"]",
"-",
"ref",
"[",
":",
",",
":",
"-",
"lag",
"]",
"# Discard negatives (decreasing amplitude)",
"onset_env",
"=",
"np",
".",
"maximum",
"(",
"0.0",
",",
"onset_env",
")",
"# Aggregate within channels",
"pad",
"=",
"True",
"if",
"channels",
"is",
"None",
":",
"channels",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"else",
":",
"pad",
"=",
"False",
"if",
"aggregate",
":",
"onset_env",
"=",
"util",
".",
"sync",
"(",
"onset_env",
",",
"channels",
",",
"aggregate",
"=",
"aggregate",
",",
"pad",
"=",
"pad",
",",
"axis",
"=",
"0",
")",
"# compensate for lag",
"pad_width",
"=",
"lag",
"if",
"center",
":",
"# Counter-act framing effects. Shift the onsets by n_fft / hop_length",
"pad_width",
"+=",
"n_fft",
"//",
"(",
"2",
"*",
"hop_length",
")",
"onset_env",
"=",
"np",
".",
"pad",
"(",
"onset_env",
",",
"(",
"[",
"0",
",",
"0",
"]",
",",
"[",
"int",
"(",
"pad_width",
")",
",",
"0",
"]",
")",
",",
"mode",
"=",
"'constant'",
")",
"# remove the DC component",
"if",
"detrend",
":",
"onset_env",
"=",
"scipy",
".",
"signal",
".",
"lfilter",
"(",
"[",
"1.0",
",",
"-",
"1.0",
"]",
",",
"[",
"1.0",
",",
"-",
"0.99",
"]",
",",
"onset_env",
",",
"axis",
"=",
"-",
"1",
")",
"# Trim to match the input duration",
"if",
"center",
":",
"onset_env",
"=",
"onset_env",
"[",
":",
",",
":",
"S",
".",
"shape",
"[",
"1",
"]",
"]",
"return",
"onset_env"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
annotation
|
r'''Save annotations in a 3-column format::
intervals[0, 0],intervals[0, 1],annotations[0]\n
intervals[1, 0],intervals[1, 1],annotations[1]\n
intervals[2, 0],intervals[2, 1],annotations[2]\n
...
This can be used for segment or chord annotations.
Parameters
----------
path : str
path to save the output CSV file
intervals : np.ndarray [shape=(n, 2)]
array of interval start and end-times.
`intervals[i, 0]` marks the start time of interval `i`
`intervals[i, 1]` marks the end time of interval `i`
annotations : None or list-like [shape=(n,)]
optional list of annotation strings. `annotations[i]` applies
to the time range `intervals[i, 0]` to `intervals[i, 1]`
delimiter : str
character to separate fields
fmt : str
format-string for rendering time data
Raises
------
ParameterError
if `annotations` is not `None` and length does
not match `intervals`
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> data = librosa.feature.mfcc(y=y, sr=sr, hop_length=512)
Detect segment boundaries
>>> boundaries = librosa.segment.agglomerative(data, k=10)
Convert to time
>>> boundary_times = librosa.frames_to_time(boundaries, sr=sr,
... hop_length=512)
Convert event boundaries to intervals
>>> intervals = np.hstack([boundary_times[:-1, np.newaxis],
... boundary_times[1:, np.newaxis]])
Make some fake annotations
>>> labels = ['Seg #{:03d}'.format(i) for i in range(len(intervals))]
Save the output
>>> librosa.output.annotation('segments.csv', intervals,
... annotations=labels)
|
librosa/output.py
|
def annotation(path, intervals, annotations=None, delimiter=',', fmt='%0.3f'):
r'''Save annotations in a 3-column format::
intervals[0, 0],intervals[0, 1],annotations[0]\n
intervals[1, 0],intervals[1, 1],annotations[1]\n
intervals[2, 0],intervals[2, 1],annotations[2]\n
...
This can be used for segment or chord annotations.
Parameters
----------
path : str
path to save the output CSV file
intervals : np.ndarray [shape=(n, 2)]
array of interval start and end-times.
`intervals[i, 0]` marks the start time of interval `i`
`intervals[i, 1]` marks the end time of interval `i`
annotations : None or list-like [shape=(n,)]
optional list of annotation strings. `annotations[i]` applies
to the time range `intervals[i, 0]` to `intervals[i, 1]`
delimiter : str
character to separate fields
fmt : str
format-string for rendering time data
Raises
------
ParameterError
if `annotations` is not `None` and length does
not match `intervals`
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> data = librosa.feature.mfcc(y=y, sr=sr, hop_length=512)
Detect segment boundaries
>>> boundaries = librosa.segment.agglomerative(data, k=10)
Convert to time
>>> boundary_times = librosa.frames_to_time(boundaries, sr=sr,
... hop_length=512)
Convert event boundaries to intervals
>>> intervals = np.hstack([boundary_times[:-1, np.newaxis],
... boundary_times[1:, np.newaxis]])
Make some fake annotations
>>> labels = ['Seg #{:03d}'.format(i) for i in range(len(intervals))]
Save the output
>>> librosa.output.annotation('segments.csv', intervals,
... annotations=labels)
'''
util.valid_intervals(intervals)
if annotations is not None and len(annotations) != len(intervals):
raise ParameterError('len(annotations) != len(intervals)')
with open(path, 'w') as output_file:
writer = csv.writer(output_file, delimiter=delimiter)
if annotations is None:
for t_int in intervals:
writer.writerow([fmt % t_int[0], fmt % t_int[1]])
else:
for t_int, lab in zip(intervals, annotations):
writer.writerow([fmt % t_int[0], fmt % t_int[1], lab])
|
def annotation(path, intervals, annotations=None, delimiter=',', fmt='%0.3f'):
r'''Save annotations in a 3-column format::
intervals[0, 0],intervals[0, 1],annotations[0]\n
intervals[1, 0],intervals[1, 1],annotations[1]\n
intervals[2, 0],intervals[2, 1],annotations[2]\n
...
This can be used for segment or chord annotations.
Parameters
----------
path : str
path to save the output CSV file
intervals : np.ndarray [shape=(n, 2)]
array of interval start and end-times.
`intervals[i, 0]` marks the start time of interval `i`
`intervals[i, 1]` marks the end time of interval `i`
annotations : None or list-like [shape=(n,)]
optional list of annotation strings. `annotations[i]` applies
to the time range `intervals[i, 0]` to `intervals[i, 1]`
delimiter : str
character to separate fields
fmt : str
format-string for rendering time data
Raises
------
ParameterError
if `annotations` is not `None` and length does
not match `intervals`
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> data = librosa.feature.mfcc(y=y, sr=sr, hop_length=512)
Detect segment boundaries
>>> boundaries = librosa.segment.agglomerative(data, k=10)
Convert to time
>>> boundary_times = librosa.frames_to_time(boundaries, sr=sr,
... hop_length=512)
Convert event boundaries to intervals
>>> intervals = np.hstack([boundary_times[:-1, np.newaxis],
... boundary_times[1:, np.newaxis]])
Make some fake annotations
>>> labels = ['Seg #{:03d}'.format(i) for i in range(len(intervals))]
Save the output
>>> librosa.output.annotation('segments.csv', intervals,
... annotations=labels)
'''
util.valid_intervals(intervals)
if annotations is not None and len(annotations) != len(intervals):
raise ParameterError('len(annotations) != len(intervals)')
with open(path, 'w') as output_file:
writer = csv.writer(output_file, delimiter=delimiter)
if annotations is None:
for t_int in intervals:
writer.writerow([fmt % t_int[0], fmt % t_int[1]])
else:
for t_int, lab in zip(intervals, annotations):
writer.writerow([fmt % t_int[0], fmt % t_int[1], lab])
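A minimal sketch of the writer above with made-up interval boundaries and labels; 'segments.csv' is an arbitrary output path.

import numpy as np
import librosa

intervals = np.array([[0.0, 1.5],
                      [1.5, 3.2],
                      [3.2, 5.0]])
labels = ['intro', 'verse', 'chorus']

librosa.output.annotation('segments.csv', intervals, annotations=labels)
# Each row of segments.csv now reads like: 0.000,1.500,intro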
|
[
"r",
"Save",
"annotations",
"in",
"a",
"3",
"-",
"column",
"format",
"::"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/output.py#L36-L117
|
[
"def",
"annotation",
"(",
"path",
",",
"intervals",
",",
"annotations",
"=",
"None",
",",
"delimiter",
"=",
"','",
",",
"fmt",
"=",
"'%0.3f'",
")",
":",
"util",
".",
"valid_intervals",
"(",
"intervals",
")",
"if",
"annotations",
"is",
"not",
"None",
"and",
"len",
"(",
"annotations",
")",
"!=",
"len",
"(",
"intervals",
")",
":",
"raise",
"ParameterError",
"(",
"'len(annotations) != len(intervals)'",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"output_file",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"output_file",
",",
"delimiter",
"=",
"delimiter",
")",
"if",
"annotations",
"is",
"None",
":",
"for",
"t_int",
"in",
"intervals",
":",
"writer",
".",
"writerow",
"(",
"[",
"fmt",
"%",
"t_int",
"[",
"0",
"]",
",",
"fmt",
"%",
"t_int",
"[",
"1",
"]",
"]",
")",
"else",
":",
"for",
"t_int",
",",
"lab",
"in",
"zip",
"(",
"intervals",
",",
"annotations",
")",
":",
"writer",
".",
"writerow",
"(",
"[",
"fmt",
"%",
"t_int",
"[",
"0",
"]",
",",
"fmt",
"%",
"t_int",
"[",
"1",
"]",
",",
"lab",
"]",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
times_csv
|
r"""Save time steps as in CSV format. This can be used to store the output
of a beat-tracker or segmentation algorithm.
If only `times` are provided, the file will contain each value
of `times` on a row::
times[0]\n
times[1]\n
times[2]\n
...
If `annotations` are also provided, the file will contain
delimiter-separated values::
times[0],annotations[0]\n
times[1],annotations[1]\n
times[2],annotations[2]\n
...
Parameters
----------
path : string
path to save the output CSV file
times : list-like of floats
list of time values (in seconds), e.g., beat event times
annotations : None or list-like
optional annotations for each time step
delimiter : str
character to separate fields
fmt : str
format-string for rendering time
Raises
------
ParameterError
if `annotations` is not `None` and length does not
match `times`
Examples
--------
Write beat-tracker time to CSV
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y, sr=sr, units='time')
>>> librosa.output.times_csv('beat_times.csv', beats)
|
librosa/output.py
|
def times_csv(path, times, annotations=None, delimiter=',', fmt='%0.3f'):
r"""Save time steps as in CSV format. This can be used to store the output
of a beat-tracker or segmentation algorithm.
If only `times` are provided, the file will contain each value
of `times` on a row::
times[0]\n
times[1]\n
times[2]\n
...
If `annotations` are also provided, the file will contain
delimiter-separated values::
times[0],annotations[0]\n
times[1],annotations[1]\n
times[2],annotations[2]\n
...
Parameters
----------
path : string
path to save the output CSV file
times : list-like of floats
list of time values (in seconds), e.g., beat event times
annotations : None or list-like
optional annotations for each time step
delimiter : str
character to separate fields
fmt : str
format-string for rendering time
Raises
------
ParameterError
if `annotations` is not `None` and length does not
match `times`
Examples
--------
Write beat-tracker time to CSV
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y, sr=sr, units='time')
>>> librosa.output.times_csv('beat_times.csv', beats)
"""
if annotations is not None and len(annotations) != len(times):
raise ParameterError('len(annotations) != len(times)')
with open(path, 'w') as output_file:
writer = csv.writer(output_file, delimiter=delimiter)
if annotations is None:
for t in times:
writer.writerow([fmt % t])
else:
for t, lab in zip(times, annotations):
writer.writerow([(fmt % t), lab])
|
def times_csv(path, times, annotations=None, delimiter=',', fmt='%0.3f'):
r"""Save time steps as in CSV format. This can be used to store the output
of a beat-tracker or segmentation algorithm.
If only `times` are provided, the file will contain each value
of `times` on a row::
times[0]\n
times[1]\n
times[2]\n
...
If `annotations` are also provided, the file will contain
delimiter-separated values::
times[0],annotations[0]\n
times[1],annotations[1]\n
times[2],annotations[2]\n
...
Parameters
----------
path : string
path to save the output CSV file
times : list-like of floats
list of time values (in seconds), e.g., beat event times
annotations : None or list-like
optional annotations for each time step
delimiter : str
character to separate fields
fmt : str
format-string for rendering time
Raises
------
ParameterError
if `annotations` is not `None` and length does not
match `times`
Examples
--------
Write beat-tracker time to CSV
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> tempo, beats = librosa.beat.beat_track(y, sr=sr, units='time')
>>> librosa.output.times_csv('beat_times.csv', beats)
"""
if annotations is not None and len(annotations) != len(times):
raise ParameterError('len(annotations) != len(times)')
with open(path, 'w') as output_file:
writer = csv.writer(output_file, delimiter=delimiter)
if annotations is None:
for t in times:
writer.writerow([fmt % t])
else:
for t, lab in zip(times, annotations):
writer.writerow([(fmt % t), lab])
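A minimal sketch of times_csv on hand-written beat times; in a real workflow these would come from a beat tracker run with units='time', as in the docstring example.

import numpy as np
import librosa

beat_times = np.array([0.464, 0.929, 1.393, 1.858])
librosa.output.times_csv('beat_times.csv', beat_times,
                         annotations=['1', '2', '3', '4'])
# beat_times.csv: one "time,label" row per beat, times rendered with fmt='%0.3f'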
|
[
"r",
"Save",
"time",
"steps",
"as",
"in",
"CSV",
"format",
".",
"This",
"can",
"be",
"used",
"to",
"store",
"the",
"output",
"of",
"a",
"beat",
"-",
"tracker",
"or",
"segmentation",
"algorithm",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/output.py#L120-L184
|
[
"def",
"times_csv",
"(",
"path",
",",
"times",
",",
"annotations",
"=",
"None",
",",
"delimiter",
"=",
"','",
",",
"fmt",
"=",
"'%0.3f'",
")",
":",
"if",
"annotations",
"is",
"not",
"None",
"and",
"len",
"(",
"annotations",
")",
"!=",
"len",
"(",
"times",
")",
":",
"raise",
"ParameterError",
"(",
"'len(annotations) != len(times)'",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"output_file",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"output_file",
",",
"delimiter",
"=",
"delimiter",
")",
"if",
"annotations",
"is",
"None",
":",
"for",
"t",
"in",
"times",
":",
"writer",
".",
"writerow",
"(",
"[",
"fmt",
"%",
"t",
"]",
")",
"else",
":",
"for",
"t",
",",
"lab",
"in",
"zip",
"(",
"times",
",",
"annotations",
")",
":",
"writer",
".",
"writerow",
"(",
"[",
"(",
"fmt",
"%",
"t",
")",
",",
"lab",
"]",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
write_wav
|
Output a time series as a .wav file
Note: only mono or stereo, floating-point data is supported.
For more advanced and flexible output options, refer to
`soundfile`.
Parameters
----------
path : str
path to save the output wav file
y : np.ndarray [shape=(n,) or (2,n), dtype=np.float]
audio time series (mono or stereo).
Note that only floating-point values are supported.
sr : int > 0 [scalar]
sampling rate of `y`
norm : boolean [scalar]
enable amplitude normalization.
For floating point `y`, scale the data to the range [-1, +1].
Examples
--------
Trim a signal to 5 seconds and save it back
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=5.0)
>>> librosa.output.write_wav('file_trim_5s.wav', y, sr)
See Also
--------
soundfile.write
|
librosa/output.py
|
def write_wav(path, y, sr, norm=False):
"""Output a time series as a .wav file
Note: only mono or stereo, floating-point data is supported.
For more advanced and flexible output options, refer to
`soundfile`.
Parameters
----------
path : str
path to save the output wav file
y : np.ndarray [shape=(n,) or (2,n), dtype=np.float]
audio time series (mono or stereo).
Note that only floating-point values are supported.
sr : int > 0 [scalar]
sampling rate of `y`
norm : boolean [scalar]
enable amplitude normalization.
For floating point `y`, scale the data to the range [-1, +1].
Examples
--------
Trim a signal to 5 seconds and save it back
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=5.0)
>>> librosa.output.write_wav('file_trim_5s.wav', y, sr)
See Also
--------
soundfile.write
"""
# Validate the buffer. Stereo is okay here.
util.valid_audio(y, mono=False)
# normalize
if norm and np.issubdtype(y.dtype, np.floating):
wav = util.normalize(y, norm=np.inf, axis=None)
else:
wav = y
# Check for stereo
if wav.ndim > 1 and wav.shape[0] == 2:
wav = wav.T
# Save
scipy.io.wavfile.write(path, sr, wav)
|
def write_wav(path, y, sr, norm=False):
"""Output a time series as a .wav file
Note: only mono or stereo, floating-point data is supported.
For more advanced and flexible output options, refer to
`soundfile`.
Parameters
----------
path : str
path to save the output wav file
y : np.ndarray [shape=(n,) or (2,n), dtype=np.float]
audio time series (mono or stereo).
Note that only floating-point values are supported.
sr : int > 0 [scalar]
sampling rate of `y`
norm : boolean [scalar]
enable amplitude normalization.
For floating point `y`, scale the data to the range [-1, +1].
Examples
--------
Trim a signal to 5 seconds and save it back
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... duration=5.0)
>>> librosa.output.write_wav('file_trim_5s.wav', y, sr)
See Also
--------
soundfile.write
"""
# Validate the buffer. Stereo is okay here.
util.valid_audio(y, mono=False)
# normalize
if norm and np.issubdtype(y.dtype, np.floating):
wav = util.normalize(y, norm=np.inf, axis=None)
else:
wav = y
# Check for stereo
if wav.ndim > 1 and wav.shape[0] == 2:
wav = wav.T
# Save
scipy.io.wavfile.write(path, sr, wav)
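A minimal sketch: write a one-second 440 Hz sine tone to disk. The tone, amplitude, and file name are arbitrary; norm=True rescales the floating-point data to [-1, +1] before saving.

import numpy as np
import librosa

sr = 22050
t = np.arange(sr) / sr
y = 0.25 * np.sin(2 * np.pi * 440.0 * t)  # float64 mono signal

librosa.output.write_wav('tone_440.wav', y, sr, norm=True)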
|
[
"Output",
"a",
"time",
"series",
"as",
"a",
".",
"wav",
"file"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/output.py#L187-L238
|
[
"def",
"write_wav",
"(",
"path",
",",
"y",
",",
"sr",
",",
"norm",
"=",
"False",
")",
":",
"# Validate the buffer. Stereo is okay here.",
"util",
".",
"valid_audio",
"(",
"y",
",",
"mono",
"=",
"False",
")",
"# normalize",
"if",
"norm",
"and",
"np",
".",
"issubdtype",
"(",
"y",
".",
"dtype",
",",
"np",
".",
"floating",
")",
":",
"wav",
"=",
"util",
".",
"normalize",
"(",
"y",
",",
"norm",
"=",
"np",
".",
"inf",
",",
"axis",
"=",
"None",
")",
"else",
":",
"wav",
"=",
"y",
"# Check for stereo",
"if",
"wav",
".",
"ndim",
">",
"1",
"and",
"wav",
".",
"shape",
"[",
"0",
"]",
"==",
"2",
":",
"wav",
"=",
"wav",
".",
"T",
"# Save",
"scipy",
".",
"io",
".",
"wavfile",
".",
"write",
"(",
"path",
",",
"sr",
",",
"wav",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
cmap
|
Get a default colormap from the given data.
If the data is boolean, use a black and white colormap.
If the data has both positive and negative values,
use a diverging colormap.
Otherwise, use a sequential colormap.
Parameters
----------
data : np.ndarray
Input data
robust : bool
If True, discard the top and bottom 2% of data when calculating
range.
cmap_seq : str
The sequential colormap name
cmap_bool : str
The boolean colormap name
cmap_div : str
The diverging colormap name
Returns
-------
cmap : matplotlib.colors.Colormap
The colormap to use for `data`
See Also
--------
matplotlib.pyplot.colormaps
|
librosa/display.py
|
def cmap(data, robust=True, cmap_seq='magma', cmap_bool='gray_r', cmap_div='coolwarm'):
'''Get a default colormap from the given data.
If the data is boolean, use a black and white colormap.
If the data has both positive and negative values,
use a diverging colormap.
Otherwise, use a sequential colormap.
Parameters
----------
data : np.ndarray
Input data
robust : bool
If True, discard the top and bottom 2% of data when calculating
range.
cmap_seq : str
The sequential colormap name
cmap_bool : str
The boolean colormap name
cmap_div : str
The diverging colormap name
Returns
-------
cmap : matplotlib.colors.Colormap
The colormap to use for `data`
See Also
--------
matplotlib.pyplot.colormaps
'''
data = np.atleast_1d(data)
if data.dtype == 'bool':
return get_cmap(cmap_bool)
data = data[np.isfinite(data)]
if robust:
min_p, max_p = 2, 98
else:
min_p, max_p = 0, 100
max_val = np.percentile(data, max_p)
min_val = np.percentile(data, min_p)
if min_val >= 0 or max_val <= 0:
return get_cmap(cmap_seq)
return get_cmap(cmap_div)
|
def cmap(data, robust=True, cmap_seq='magma', cmap_bool='gray_r', cmap_div='coolwarm'):
'''Get a default colormap from the given data.
If the data is boolean, use a black and white colormap.
If the data has both positive and negative values,
use a diverging colormap.
Otherwise, use a sequential colormap.
Parameters
----------
data : np.ndarray
Input data
robust : bool
If True, discard the top and bottom 2% of data when calculating
range.
cmap_seq : str
The sequential colormap name
cmap_bool : str
The boolean colormap name
cmap_div : str
The diverging colormap name
Returns
-------
cmap : matplotlib.colors.Colormap
The colormap to use for `data`
See Also
--------
matplotlib.pyplot.colormaps
'''
data = np.atleast_1d(data)
if data.dtype == 'bool':
return get_cmap(cmap_bool)
data = data[np.isfinite(data)]
if robust:
min_p, max_p = 2, 98
else:
min_p, max_p = 0, 100
max_val = np.percentile(data, max_p)
min_val = np.percentile(data, min_p)
if min_val >= 0 or max_val <= 0:
return get_cmap(cmap_seq)
return get_cmap(cmap_div)
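A quick sketch of the three branches above on toy arrays: boolean input selects the black-and-white map, non-negative input the sequential map, and mixed-sign input the diverging map.

import numpy as np
import librosa.display

print(librosa.display.cmap(np.array([True, False])).name)      # gray_r
print(librosa.display.cmap(np.linspace(0.0, 1.0, 100)).name)   # magma
print(librosa.display.cmap(np.linspace(-1.0, 1.0, 100)).name)  # coolwarm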
|
[
"Get",
"a",
"default",
"colormap",
"from",
"the",
"given",
"data",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L293-L349
|
[
"def",
"cmap",
"(",
"data",
",",
"robust",
"=",
"True",
",",
"cmap_seq",
"=",
"'magma'",
",",
"cmap_bool",
"=",
"'gray_r'",
",",
"cmap_div",
"=",
"'coolwarm'",
")",
":",
"data",
"=",
"np",
".",
"atleast_1d",
"(",
"data",
")",
"if",
"data",
".",
"dtype",
"==",
"'bool'",
":",
"return",
"get_cmap",
"(",
"cmap_bool",
")",
"data",
"=",
"data",
"[",
"np",
".",
"isfinite",
"(",
"data",
")",
"]",
"if",
"robust",
":",
"min_p",
",",
"max_p",
"=",
"2",
",",
"98",
"else",
":",
"min_p",
",",
"max_p",
"=",
"0",
",",
"100",
"max_val",
"=",
"np",
".",
"percentile",
"(",
"data",
",",
"max_p",
")",
"min_val",
"=",
"np",
".",
"percentile",
"(",
"data",
",",
"min_p",
")",
"if",
"min_val",
">=",
"0",
"or",
"max_val",
"<=",
"0",
":",
"return",
"get_cmap",
"(",
"cmap_seq",
")",
"return",
"get_cmap",
"(",
"cmap_div",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__envelope
|
Compute the max-envelope of x at a stride/frame length of h
|
librosa/display.py
|
def __envelope(x, hop):
'''Compute the max-envelope of x at a stride/frame length of h'''
return util.frame(x, hop_length=hop, frame_length=hop).max(axis=0)
|
def __envelope(x, hop):
'''Compute the max-envelope of x at a stride/frame length of h'''
return util.frame(x, hop_length=hop, frame_length=hop).max(axis=0)
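For readers without the util module at hand, a plain-NumPy sketch of the same computation: the maximum over non-overlapping frames of length hop.

import numpy as np

def max_envelope(x, hop):
    # Keep only complete frames, then take the per-frame maximum.
    n_frames = len(x) // hop
    return x[:n_frames * hop].reshape(n_frames, hop).max(axis=1)

x = np.abs(np.sin(np.linspace(0, 20 * np.pi, 22050)))
print(max_envelope(x, 512).shape)  # one value per 512-sample frame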
|
[
"Compute",
"the",
"max",
"-",
"envelope",
"of",
"x",
"at",
"a",
"stride",
"/",
"frame",
"length",
"of",
"h"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L352-L354
|
[
"def",
"__envelope",
"(",
"x",
",",
"hop",
")",
":",
"return",
"util",
".",
"frame",
"(",
"x",
",",
"hop_length",
"=",
"hop",
",",
"frame_length",
"=",
"hop",
")",
".",
"max",
"(",
"axis",
"=",
"0",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
waveplot
|
Plot the amplitude envelope of a waveform.
If `y` is monophonic, a filled curve is drawn between `[-abs(y), abs(y)]`.
If `y` is stereo, the curve is drawn between `[-abs(y[1]), abs(y[0])]`,
so that the left and right channels are drawn above and below the axis,
respectively.
Long signals (`duration >= max_points`) are down-sampled to at
most `max_sr` before plotting.
Parameters
----------
y : np.ndarray [shape=(n,) or (2,n)]
audio time series (mono or stereo)
sr : number > 0 [scalar]
sampling rate of `y`
max_points : positive number or None
Maximum number of time-points to plot: if the length of `y` (in samples)
exceeds `max_points`, then `y` is downsampled.
If `None`, no downsampling is performed.
x_axis : str {'time', 'off', 'none'} or None
If 'time', the x-axis is given time tick-marks.
ax : matplotlib.axes.Axes or None
Axes to plot on instead of the default `plt.gca()`.
offset : float
Horizontal offset (in seconds) to start the waveform plot
max_sr : number > 0 [scalar]
Maximum sampling rate for the visualization
kwargs
Additional keyword arguments to `matplotlib.pyplot.fill_between`
Returns
-------
pc : matplotlib.collections.PolyCollection
The PolyCollection created by `fill_between`.
See also
--------
librosa.core.resample
matplotlib.pyplot.fill_between
Examples
--------
Plot a monophonic waveform
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.waveplot(y, sr=sr)
>>> plt.title('Monophonic')
Or a stereo waveform
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... mono=False, duration=10)
>>> plt.subplot(3, 1, 2)
>>> librosa.display.waveplot(y, sr=sr)
>>> plt.title('Stereo')
Or harmonic and percussive components with transparency
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
>>> y_harm, y_perc = librosa.effects.hpss(y)
>>> plt.subplot(3, 1, 3)
>>> librosa.display.waveplot(y_harm, sr=sr, alpha=0.25)
>>> librosa.display.waveplot(y_perc, sr=sr, color='r', alpha=0.5)
>>> plt.title('Harmonic + Percussive')
>>> plt.tight_layout()
|
librosa/display.py
|
def waveplot(y, sr=22050, max_points=5e4, x_axis='time', offset=0.0,
max_sr=1000, ax=None, **kwargs):
'''Plot the amplitude envelope of a waveform.
If `y` is monophonic, a filled curve is drawn between `[-abs(y), abs(y)]`.
If `y` is stereo, the curve is drawn between `[-abs(y[1]), abs(y[0])]`,
so that the left and right channels are drawn above and below the axis,
respectively.
Long signals (`duration >= max_points`) are down-sampled to at
most `max_sr` before plotting.
Parameters
----------
y : np.ndarray [shape=(n,) or (2,n)]
audio time series (mono or stereo)
sr : number > 0 [scalar]
sampling rate of `y`
max_points : positive number or None
Maximum number of time-points to plot: if the length of `y` (in samples)
exceeds `max_points`, then `y` is downsampled.
If `None`, no downsampling is performed.
x_axis : str {'time', 'off', 'none'} or None
If 'time', the x-axis is given time tick-marks.
ax : matplotlib.axes.Axes or None
Axes to plot on instead of the default `plt.gca()`.
offset : float
Horizontal offset (in seconds) to start the waveform plot
max_sr : number > 0 [scalar]
Maximum sampling rate for the visualization
kwargs
Additional keyword arguments to `matplotlib.pyplot.fill_between`
Returns
-------
pc : matplotlib.collections.PolyCollection
The PolyCollection created by `fill_between`.
See also
--------
librosa.core.resample
matplotlib.pyplot.fill_between
Examples
--------
Plot a monophonic waveform
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.waveplot(y, sr=sr)
>>> plt.title('Monophonic')
Or a stereo waveform
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... mono=False, duration=10)
>>> plt.subplot(3, 1, 2)
>>> librosa.display.waveplot(y, sr=sr)
>>> plt.title('Stereo')
Or harmonic and percussive components with transparency
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
>>> y_harm, y_perc = librosa.effects.hpss(y)
>>> plt.subplot(3, 1, 3)
>>> librosa.display.waveplot(y_harm, sr=sr, alpha=0.25)
>>> librosa.display.waveplot(y_perc, sr=sr, color='r', alpha=0.5)
>>> plt.title('Harmonic + Percussive')
>>> plt.tight_layout()
'''
util.valid_audio(y, mono=False)
if not (isinstance(max_sr, int) and max_sr > 0):
raise ParameterError('max_sr must be a positive integer')
target_sr = sr
hop_length = 1
if max_points is not None:
if max_points <= 0:
raise ParameterError('max_points must be strictly positive')
if max_points < y.shape[-1]:
target_sr = min(max_sr, (sr * y.shape[-1]) // max_points)
hop_length = sr // target_sr
if y.ndim == 1:
y = __envelope(y, hop_length)
else:
y = np.vstack([__envelope(_, hop_length) for _ in y])
if y.ndim > 1:
y_top = y[0]
y_bottom = -y[1]
else:
y_top = y
y_bottom = -y
axes = __check_axes(ax)
kwargs.setdefault('color', next(axes._get_lines.prop_cycler)['color'])
locs = offset + core.frames_to_time(np.arange(len(y_top)),
sr=sr,
hop_length=hop_length)
out = axes.fill_between(locs, y_bottom, y_top, **kwargs)
axes.set_xlim([locs.min(), locs.max()])
if x_axis == 'time':
axes.xaxis.set_major_formatter(TimeFormatter(lag=False))
axes.xaxis.set_label_text('Time')
elif x_axis is None or x_axis in ['off', 'none']:
axes.set_xticks([])
else:
raise ParameterError('Unknown x_axis value: {}'.format(x_axis))
return out
|
def waveplot(y, sr=22050, max_points=5e4, x_axis='time', offset=0.0,
max_sr=1000, ax=None, **kwargs):
'''Plot the amplitude envelope of a waveform.
If `y` is monophonic, a filled curve is drawn between `[-abs(y), abs(y)]`.
If `y` is stereo, the curve is drawn between `[-abs(y[1]), abs(y[0])]`,
so that the left and right channels are drawn above and below the axis,
respectively.
Long signals (`duration >= max_points`) are down-sampled to at
most `max_sr` before plotting.
Parameters
----------
y : np.ndarray [shape=(n,) or (2,n)]
audio time series (mono or stereo)
sr : number > 0 [scalar]
sampling rate of `y`
max_points : positive number or None
Maximum number of time-points to plot: if the length of `y` (in samples)
exceeds `max_points`, then `y` is downsampled.
If `None`, no downsampling is performed.
x_axis : str {'time', 'off', 'none'} or None
If 'time', the x-axis is given time tick-marks.
ax : matplotlib.axes.Axes or None
Axes to plot on instead of the default `plt.gca()`.
offset : float
Horizontal offset (in seconds) to start the waveform plot
max_sr : number > 0 [scalar]
Maximum sampling rate for the visualization
kwargs
Additional keyword arguments to `matplotlib.pyplot.fill_between`
Returns
-------
pc : matplotlib.collections.PolyCollection
The PolyCollection created by `fill_between`.
See also
--------
librosa.core.resample
matplotlib.pyplot.fill_between
Examples
--------
Plot a monophonic waveform
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.waveplot(y, sr=sr)
>>> plt.title('Monophonic')
Or a stereo waveform
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... mono=False, duration=10)
>>> plt.subplot(3, 1, 2)
>>> librosa.display.waveplot(y, sr=sr)
>>> plt.title('Stereo')
Or harmonic and percussive components with transparency
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=10)
>>> y_harm, y_perc = librosa.effects.hpss(y)
>>> plt.subplot(3, 1, 3)
>>> librosa.display.waveplot(y_harm, sr=sr, alpha=0.25)
>>> librosa.display.waveplot(y_perc, sr=sr, color='r', alpha=0.5)
>>> plt.title('Harmonic + Percussive')
>>> plt.tight_layout()
'''
util.valid_audio(y, mono=False)
if not (isinstance(max_sr, int) and max_sr > 0):
raise ParameterError('max_sr must be a positive integer')
target_sr = sr
hop_length = 1
if max_points is not None:
if max_points <= 0:
raise ParameterError('max_points must be strictly positive')
if max_points < y.shape[-1]:
target_sr = min(max_sr, (sr * y.shape[-1]) // max_points)
hop_length = sr // target_sr
if y.ndim == 1:
y = __envelope(y, hop_length)
else:
y = np.vstack([__envelope(_, hop_length) for _ in y])
if y.ndim > 1:
y_top = y[0]
y_bottom = -y[1]
else:
y_top = y
y_bottom = -y
axes = __check_axes(ax)
kwargs.setdefault('color', next(axes._get_lines.prop_cycler)['color'])
locs = offset + core.frames_to_time(np.arange(len(y_top)),
sr=sr,
hop_length=hop_length)
out = axes.fill_between(locs, y_bottom, y_top, **kwargs)
axes.set_xlim([locs.min(), locs.max()])
if x_axis == 'time':
axes.xaxis.set_major_formatter(TimeFormatter(lag=False))
axes.xaxis.set_label_text('Time')
elif x_axis is None or x_axis in ['off', 'none']:
axes.set_xticks([])
else:
raise ParameterError('Unknown x_axis value: {}'.format(x_axis))
return out
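A minimal plotting sketch on synthetic mono and stereo signals instead of the bundled example file; the Agg backend and output file name are assumptions so the figure renders without a display.

import numpy as np
import matplotlib
matplotlib.use('Agg')  # off-screen rendering
import matplotlib.pyplot as plt
import librosa.display

sr = 22050
t = np.arange(2 * sr) / sr
mono = 0.5 * np.sin(2 * np.pi * 220.0 * t) * np.exp(-t)   # decaying tone
stereo = np.vstack([mono, mono[::-1]])                    # stand-in stereo pair

plt.figure()
plt.subplot(2, 1, 1)
librosa.display.waveplot(mono, sr=sr)
plt.title('Mono envelope')
plt.subplot(2, 1, 2)
librosa.display.waveplot(stereo, sr=sr)
plt.title('Stereo envelope')
plt.tight_layout()
plt.savefig('waveplot_demo.png')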
|
[
"Plot",
"the",
"amplitude",
"envelope",
"of",
"a",
"waveform",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L357-L488
|
[
"def",
"waveplot",
"(",
"y",
",",
"sr",
"=",
"22050",
",",
"max_points",
"=",
"5e4",
",",
"x_axis",
"=",
"'time'",
",",
"offset",
"=",
"0.0",
",",
"max_sr",
"=",
"1000",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"util",
".",
"valid_audio",
"(",
"y",
",",
"mono",
"=",
"False",
")",
"if",
"not",
"(",
"isinstance",
"(",
"max_sr",
",",
"int",
")",
"and",
"max_sr",
">",
"0",
")",
":",
"raise",
"ParameterError",
"(",
"'max_sr must be a non-negative integer'",
")",
"target_sr",
"=",
"sr",
"hop_length",
"=",
"1",
"if",
"max_points",
"is",
"not",
"None",
":",
"if",
"max_points",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'max_points must be strictly positive'",
")",
"if",
"max_points",
"<",
"y",
".",
"shape",
"[",
"-",
"1",
"]",
":",
"target_sr",
"=",
"min",
"(",
"max_sr",
",",
"(",
"sr",
"*",
"y",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"//",
"max_points",
")",
"hop_length",
"=",
"sr",
"//",
"target_sr",
"if",
"y",
".",
"ndim",
"==",
"1",
":",
"y",
"=",
"__envelope",
"(",
"y",
",",
"hop_length",
")",
"else",
":",
"y",
"=",
"np",
".",
"vstack",
"(",
"[",
"__envelope",
"(",
"_",
",",
"hop_length",
")",
"for",
"_",
"in",
"y",
"]",
")",
"if",
"y",
".",
"ndim",
">",
"1",
":",
"y_top",
"=",
"y",
"[",
"0",
"]",
"y_bottom",
"=",
"-",
"y",
"[",
"1",
"]",
"else",
":",
"y_top",
"=",
"y",
"y_bottom",
"=",
"-",
"y",
"axes",
"=",
"__check_axes",
"(",
"ax",
")",
"kwargs",
".",
"setdefault",
"(",
"'color'",
",",
"next",
"(",
"axes",
".",
"_get_lines",
".",
"prop_cycler",
")",
"[",
"'color'",
"]",
")",
"locs",
"=",
"offset",
"+",
"core",
".",
"frames_to_time",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"y_top",
")",
")",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
")",
"out",
"=",
"axes",
".",
"fill_between",
"(",
"locs",
",",
"y_bottom",
",",
"y_top",
",",
"*",
"*",
"kwargs",
")",
"axes",
".",
"set_xlim",
"(",
"[",
"locs",
".",
"min",
"(",
")",
",",
"locs",
".",
"max",
"(",
")",
"]",
")",
"if",
"x_axis",
"==",
"'time'",
":",
"axes",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"lag",
"=",
"False",
")",
")",
"axes",
".",
"xaxis",
".",
"set_label_text",
"(",
"'Time'",
")",
"elif",
"x_axis",
"is",
"None",
"or",
"x_axis",
"in",
"[",
"'off'",
",",
"'none'",
"]",
":",
"axes",
".",
"set_xticks",
"(",
"[",
"]",
")",
"else",
":",
"raise",
"ParameterError",
"(",
"'Unknown x_axis value: {}'",
".",
"format",
"(",
"x_axis",
")",
")",
"return",
"out"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
specshow
|
Display a spectrogram/chromagram/cqt/etc.
Parameters
----------
data : np.ndarray [shape=(d, n)]
Matrix to display (e.g., spectrogram)
sr : number > 0 [scalar]
Sample rate used to determine time scale in x-axis.
hop_length : int > 0 [scalar]
Hop length, also used to determine time scale in x-axis
x_axis : None or str
y_axis : None or str
Range for the x- and y-axes.
Valid types are:
- None, 'none', or 'off' : no axis decoration is displayed.
Frequency types:
- 'linear', 'fft', 'hz' : frequency range is determined by
the FFT window and sampling rate.
- 'log' : the spectrum is displayed on a log scale.
- 'mel' : frequencies are determined by the mel scale.
- 'cqt_hz' : frequencies are determined by the CQT scale.
- 'cqt_note' : pitches are determined by the CQT scale.
All frequency types are plotted in units of Hz.
Categorical types:
- 'chroma' : pitches are determined by the chroma filters.
Pitch classes are arranged at integer locations (0-11).
- 'tonnetz' : axes are labeled by Tonnetz dimensions (0-5)
- 'frames' : markers are shown as frame counts.
Time types:
- 'time' : markers are shown as milliseconds, seconds,
minutes, or hours.
Values are plotted in units of seconds.
- 's' : markers are shown as seconds.
- 'ms' : markers are shown as milliseconds.
- 'lag' : like time, but past the halfway point counts
as negative values.
- 'lag_s' : same as lag, but in seconds.
- 'lag_ms' : same as lag, but in milliseconds.
Other:
- 'tempo' : markers are shown as beats-per-minute (BPM)
using a logarithmic scale.
x_coords : np.ndarray [shape=data.shape[1]+1]
y_coords : np.ndarray [shape=data.shape[0]+1]
Optional positioning coordinates of the input data.
These can be used to explicitly set the location of each
element `data[i, j]`, e.g., for displaying beat-synchronous
features in natural time coordinates.
If not provided, they are inferred from `x_axis` and `y_axis`.
fmin : float > 0 [scalar] or None
Frequency of the lowest spectrogram bin. Used for Mel and CQT
scales.
If `y_axis` is `cqt_hz` or `cqt_note` and `fmin` is not given,
it is set by default to `note_to_hz('C1')`.
fmax : float > 0 [scalar] or None
Used for setting the Mel frequency scales
bins_per_octave : int > 0 [scalar]
Number of bins per octave. Used for CQT frequency scale.
ax : matplotlib.axes.Axes or None
Axes to plot on instead of the default `plt.gca()`.
kwargs : additional keyword arguments
Arguments passed through to `matplotlib.pyplot.pcolormesh`.
By default, the following options are set:
- `rasterized=True`
- `shading='flat'`
- `edgecolors='None'`
Returns
-------
axes
The axis handle for the figure.
See Also
--------
cmap : Automatic colormap detection
matplotlib.pyplot.pcolormesh
Examples
--------
Visualize an STFT power spectrum
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> plt.figure(figsize=(12, 8))
>>> D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
>>> plt.subplot(4, 2, 1)
>>> librosa.display.specshow(D, y_axis='linear')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Linear-frequency power spectrogram')
Or on a logarithmic scale
>>> plt.subplot(4, 2, 2)
>>> librosa.display.specshow(D, y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log-frequency power spectrogram')
Or use a CQT scale
>>> CQT = librosa.amplitude_to_db(np.abs(librosa.cqt(y, sr=sr)), ref=np.max)
>>> plt.subplot(4, 2, 3)
>>> librosa.display.specshow(CQT, y_axis='cqt_note')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrogram (note)')
>>> plt.subplot(4, 2, 4)
>>> librosa.display.specshow(CQT, y_axis='cqt_hz')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrogram (Hz)')
Draw a chromagram with pitch classes
>>> C = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> plt.subplot(4, 2, 5)
>>> librosa.display.specshow(C, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Chromagram')
Force a grayscale colormap (white -> black)
>>> plt.subplot(4, 2, 6)
>>> librosa.display.specshow(D, cmap='gray_r', y_axis='linear')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Linear power spectrogram (grayscale)')
Draw time markers automatically
>>> plt.subplot(4, 2, 7)
>>> librosa.display.specshow(D, x_axis='time', y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log power spectrogram')
Draw a tempogram with BPM markers
>>> plt.subplot(4, 2, 8)
>>> Tgram = librosa.feature.tempogram(y=y, sr=sr)
>>> librosa.display.specshow(Tgram, x_axis='time', y_axis='tempo')
>>> plt.colorbar()
>>> plt.title('Tempogram')
>>> plt.tight_layout()
Draw beat-synchronous chroma in natural time
>>> plt.figure()
>>> tempo, beat_f = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> beat_f = librosa.util.fix_frames(beat_f, x_max=C.shape[1])
>>> Csync = librosa.util.sync(C, beat_f, aggregate=np.median)
>>> beat_t = librosa.frames_to_time(beat_f, sr=sr)
>>> ax1 = plt.subplot(2,1,1)
>>> librosa.display.specshow(C, y_axis='chroma', x_axis='time')
>>> plt.title('Chroma (linear time)')
>>> ax2 = plt.subplot(2,1,2, sharex=ax1)
>>> librosa.display.specshow(Csync, y_axis='chroma', x_axis='time',
... x_coords=beat_t)
>>> plt.title('Chroma (beat time)')
>>> plt.tight_layout()
|
librosa/display.py
|
def specshow(data, x_coords=None, y_coords=None,
x_axis=None, y_axis=None,
sr=22050, hop_length=512,
fmin=None, fmax=None,
bins_per_octave=12,
ax=None,
**kwargs):
'''Display a spectrogram/chromagram/cqt/etc.
Parameters
----------
data : np.ndarray [shape=(d, n)]
Matrix to display (e.g., spectrogram)
sr : number > 0 [scalar]
Sample rate used to determine time scale in x-axis.
hop_length : int > 0 [scalar]
Hop length, also used to determine time scale in x-axis
x_axis : None or str
y_axis : None or str
Range for the x- and y-axes.
Valid types are:
- None, 'none', or 'off' : no axis decoration is displayed.
Frequency types:
- 'linear', 'fft', 'hz' : frequency range is determined by
the FFT window and sampling rate.
- 'log' : the spectrum is displayed on a log scale.
- 'mel' : frequencies are determined by the mel scale.
- 'cqt_hz' : frequencies are determined by the CQT scale.
- 'cqt_note' : pitches are determined by the CQT scale.
All frequency types are plotted in units of Hz.
Categorical types:
- 'chroma' : pitches are determined by the chroma filters.
Pitch classes are arranged at integer locations (0-11).
- 'tonnetz' : axes are labeled by Tonnetz dimensions (0-5)
- 'frames' : markers are shown as frame counts.
Time types:
- 'time' : markers are shown as milliseconds, seconds,
minutes, or hours.
Values are plotted in units of seconds.
- 's' : markers are shown as seconds.
- 'ms' : markers are shown as milliseconds.
- 'lag' : like time, but past the halfway point counts
as negative values.
- 'lag_s' : same as lag, but in seconds.
- 'lag_ms' : same as lag, but in milliseconds.
Other:
- 'tempo' : markers are shown as beats-per-minute (BPM)
using a logarithmic scale.
x_coords : np.ndarray [shape=data.shape[1]+1]
y_coords : np.ndarray [shape=data.shape[0]+1]
Optional positioning coordinates of the input data.
These can be used to explicitly set the location of each
element `data[i, j]`, e.g., for displaying beat-synchronous
features in natural time coordinates.
If not provided, they are inferred from `x_axis` and `y_axis`.
fmin : float > 0 [scalar] or None
Frequency of the lowest spectrogram bin. Used for Mel and CQT
scales.
If `y_axis` is `cqt_hz` or `cqt_note` and `fmin` is not given,
it is set by default to `note_to_hz('C1')`.
fmax : float > 0 [scalar] or None
Used for setting the Mel frequency scales
bins_per_octave : int > 0 [scalar]
Number of bins per octave. Used for CQT frequency scale.
ax : matplotlib.axes.Axes or None
Axes to plot on instead of the default `plt.gca()`.
kwargs : additional keyword arguments
Arguments passed through to `matplotlib.pyplot.pcolormesh`.
By default, the following options are set:
- `rasterized=True`
- `shading='flat'`
- `edgecolors='None'`
Returns
-------
axes
The axis handle for the figure.
See Also
--------
cmap : Automatic colormap detection
matplotlib.pyplot.pcolormesh
Examples
--------
Visualize an STFT power spectrum
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> plt.figure(figsize=(12, 8))
>>> D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
>>> plt.subplot(4, 2, 1)
>>> librosa.display.specshow(D, y_axis='linear')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Linear-frequency power spectrogram')
Or on a logarithmic scale
>>> plt.subplot(4, 2, 2)
>>> librosa.display.specshow(D, y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log-frequency power spectrogram')
Or use a CQT scale
>>> CQT = librosa.amplitude_to_db(np.abs(librosa.cqt(y, sr=sr)), ref=np.max)
>>> plt.subplot(4, 2, 3)
>>> librosa.display.specshow(CQT, y_axis='cqt_note')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrogram (note)')
>>> plt.subplot(4, 2, 4)
>>> librosa.display.specshow(CQT, y_axis='cqt_hz')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrogram (Hz)')
Draw a chromagram with pitch classes
>>> C = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> plt.subplot(4, 2, 5)
>>> librosa.display.specshow(C, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Chromagram')
Force a grayscale colormap (white -> black)
>>> plt.subplot(4, 2, 6)
>>> librosa.display.specshow(D, cmap='gray_r', y_axis='linear')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Linear power spectrogram (grayscale)')
Draw time markers automatically
>>> plt.subplot(4, 2, 7)
>>> librosa.display.specshow(D, x_axis='time', y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log power spectrogram')
Draw a tempogram with BPM markers
>>> plt.subplot(4, 2, 8)
>>> Tgram = librosa.feature.tempogram(y=y, sr=sr)
>>> librosa.display.specshow(Tgram, x_axis='time', y_axis='tempo')
>>> plt.colorbar()
>>> plt.title('Tempogram')
>>> plt.tight_layout()
Draw beat-synchronous chroma in natural time
>>> plt.figure()
>>> tempo, beat_f = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> beat_f = librosa.util.fix_frames(beat_f, x_max=C.shape[1])
>>> Csync = librosa.util.sync(C, beat_f, aggregate=np.median)
>>> beat_t = librosa.frames_to_time(beat_f, sr=sr)
>>> ax1 = plt.subplot(2,1,1)
>>> librosa.display.specshow(C, y_axis='chroma', x_axis='time')
>>> plt.title('Chroma (linear time)')
>>> ax2 = plt.subplot(2,1,2, sharex=ax1)
>>> librosa.display.specshow(Csync, y_axis='chroma', x_axis='time',
... x_coords=beat_t)
>>> plt.title('Chroma (beat time)')
>>> plt.tight_layout()
'''
if np.issubdtype(data.dtype, np.complexfloating):
warnings.warn('Trying to display complex-valued input. '
'Showing magnitude instead.')
data = np.abs(data)
kwargs.setdefault('cmap', cmap(data))
kwargs.setdefault('rasterized', True)
kwargs.setdefault('edgecolors', 'None')
kwargs.setdefault('shading', 'flat')
all_params = dict(kwargs=kwargs,
sr=sr,
fmin=fmin,
fmax=fmax,
bins_per_octave=bins_per_octave,
hop_length=hop_length)
# Get the x and y coordinates
y_coords = __mesh_coords(y_axis, y_coords, data.shape[0], **all_params)
x_coords = __mesh_coords(x_axis, x_coords, data.shape[1], **all_params)
axes = __check_axes(ax)
out = axes.pcolormesh(x_coords, y_coords, data, **kwargs)
__set_current_image(ax, out)
axes.set_xlim(x_coords.min(), x_coords.max())
axes.set_ylim(y_coords.min(), y_coords.max())
# Set up axis scaling
__scale_axes(axes, x_axis, 'x')
__scale_axes(axes, y_axis, 'y')
# Construct tickers and locators
__decorate_axis(axes.xaxis, x_axis)
__decorate_axis(axes.yaxis, y_axis)
return axes
|
def specshow(data, x_coords=None, y_coords=None,
x_axis=None, y_axis=None,
sr=22050, hop_length=512,
fmin=None, fmax=None,
bins_per_octave=12,
ax=None,
**kwargs):
'''Display a spectrogram/chromagram/cqt/etc.
Parameters
----------
data : np.ndarray [shape=(d, n)]
Matrix to display (e.g., spectrogram)
sr : number > 0 [scalar]
Sample rate used to determine time scale in x-axis.
hop_length : int > 0 [scalar]
Hop length, also used to determine time scale in x-axis
x_axis : None or str
y_axis : None or str
Range for the x- and y-axes.
Valid types are:
- None, 'none', or 'off' : no axis decoration is displayed.
Frequency types:
- 'linear', 'fft', 'hz' : frequency range is determined by
the FFT window and sampling rate.
- 'log' : the spectrum is displayed on a log scale.
- 'mel' : frequencies are determined by the mel scale.
- 'cqt_hz' : frequencies are determined by the CQT scale.
- 'cqt_note' : pitches are determined by the CQT scale.
All frequency types are plotted in units of Hz.
Categorical types:
- 'chroma' : pitches are determined by the chroma filters.
Pitch classes are arranged at integer locations (0-11).
- 'tonnetz' : axes are labeled by Tonnetz dimensions (0-5)
- 'frames' : markers are shown as frame counts.
Time types:
- 'time' : markers are shown as milliseconds, seconds,
minutes, or hours.
Values are plotted in units of seconds.
- 's' : markers are shown as seconds.
- 'ms' : markers are shown as milliseconds.
- 'lag' : like time, but past the halfway point counts
as negative values.
- 'lag_s' : same as lag, but in seconds.
- 'lag_ms' : same as lag, but in milliseconds.
Other:
- 'tempo' : markers are shown as beats-per-minute (BPM)
using a logarithmic scale.
x_coords : np.ndarray [shape=data.shape[1]+1]
y_coords : np.ndarray [shape=data.shape[0]+1]
Optional positioning coordinates of the input data.
        These can be used to explicitly set the location of each
element `data[i, j]`, e.g., for displaying beat-synchronous
features in natural time coordinates.
If not provided, they are inferred from `x_axis` and `y_axis`.
fmin : float > 0 [scalar] or None
Frequency of the lowest spectrogram bin. Used for Mel and CQT
scales.
If `y_axis` is `cqt_hz` or `cqt_note` and `fmin` is not given,
it is set by default to `note_to_hz('C1')`.
fmax : float > 0 [scalar] or None
Used for setting the Mel frequency scales
bins_per_octave : int > 0 [scalar]
Number of bins per octave. Used for CQT frequency scale.
ax : matplotlib.axes.Axes or None
Axes to plot on instead of the default `plt.gca()`.
kwargs : additional keyword arguments
Arguments passed through to `matplotlib.pyplot.pcolormesh`.
By default, the following options are set:
- `rasterized=True`
- `shading='flat'`
- `edgecolors='None'`
Returns
-------
axes
The axis handle for the figure.
See Also
--------
cmap : Automatic colormap detection
matplotlib.pyplot.pcolormesh
Examples
--------
Visualize an STFT power spectrum
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> plt.figure(figsize=(12, 8))
>>> D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)
>>> plt.subplot(4, 2, 1)
>>> librosa.display.specshow(D, y_axis='linear')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Linear-frequency power spectrogram')
Or on a logarithmic scale
>>> plt.subplot(4, 2, 2)
>>> librosa.display.specshow(D, y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log-frequency power spectrogram')
Or use a CQT scale
>>> CQT = librosa.amplitude_to_db(np.abs(librosa.cqt(y, sr=sr)), ref=np.max)
>>> plt.subplot(4, 2, 3)
>>> librosa.display.specshow(CQT, y_axis='cqt_note')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrogram (note)')
>>> plt.subplot(4, 2, 4)
>>> librosa.display.specshow(CQT, y_axis='cqt_hz')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Constant-Q power spectrogram (Hz)')
Draw a chromagram with pitch classes
>>> C = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> plt.subplot(4, 2, 5)
>>> librosa.display.specshow(C, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Chromagram')
Force a grayscale colormap (white -> black)
>>> plt.subplot(4, 2, 6)
>>> librosa.display.specshow(D, cmap='gray_r', y_axis='linear')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Linear power spectrogram (grayscale)')
Draw time markers automatically
>>> plt.subplot(4, 2, 7)
>>> librosa.display.specshow(D, x_axis='time', y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log power spectrogram')
Draw a tempogram with BPM markers
>>> plt.subplot(4, 2, 8)
>>> Tgram = librosa.feature.tempogram(y=y, sr=sr)
>>> librosa.display.specshow(Tgram, x_axis='time', y_axis='tempo')
>>> plt.colorbar()
>>> plt.title('Tempogram')
>>> plt.tight_layout()
Draw beat-synchronous chroma in natural time
>>> plt.figure()
>>> tempo, beat_f = librosa.beat.beat_track(y=y, sr=sr, trim=False)
>>> beat_f = librosa.util.fix_frames(beat_f, x_max=C.shape[1])
>>> Csync = librosa.util.sync(C, beat_f, aggregate=np.median)
>>> beat_t = librosa.frames_to_time(beat_f, sr=sr)
>>> ax1 = plt.subplot(2,1,1)
>>> librosa.display.specshow(C, y_axis='chroma', x_axis='time')
>>> plt.title('Chroma (linear time)')
>>> ax2 = plt.subplot(2,1,2, sharex=ax1)
>>> librosa.display.specshow(Csync, y_axis='chroma', x_axis='time',
... x_coords=beat_t)
>>> plt.title('Chroma (beat time)')
>>> plt.tight_layout()
'''
if np.issubdtype(data.dtype, np.complexfloating):
warnings.warn('Trying to display complex-valued input. '
'Showing magnitude instead.')
data = np.abs(data)
kwargs.setdefault('cmap', cmap(data))
kwargs.setdefault('rasterized', True)
kwargs.setdefault('edgecolors', 'None')
kwargs.setdefault('shading', 'flat')
all_params = dict(kwargs=kwargs,
sr=sr,
fmin=fmin,
fmax=fmax,
bins_per_octave=bins_per_octave,
hop_length=hop_length)
# Get the x and y coordinates
y_coords = __mesh_coords(y_axis, y_coords, data.shape[0], **all_params)
x_coords = __mesh_coords(x_axis, x_coords, data.shape[1], **all_params)
axes = __check_axes(ax)
out = axes.pcolormesh(x_coords, y_coords, data, **kwargs)
__set_current_image(ax, out)
axes.set_xlim(x_coords.min(), x_coords.max())
axes.set_ylim(y_coords.min(), y_coords.max())
# Set up axis scaling
__scale_axes(axes, x_axis, 'x')
__scale_axes(axes, y_axis, 'y')
# Construct tickers and locators
__decorate_axis(axes.xaxis, x_axis)
__decorate_axis(axes.yaxis, y_axis)
return axes
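
The examples above all rely on pyplot's implicit current axes. Since this version of `specshow` also accepts an `ax` argument and returns the axes it drew on, an object-oriented call looks slightly different; the following is a minimal sketch (not taken from the librosa docs) that fetches the drawn QuadMesh from `ax.collections` to feed the colorbar, because the pyplot current image is deliberately not set when `ax` is given.

import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display

y, sr = librosa.load(librosa.util.example_audio_file())
D = librosa.amplitude_to_db(np.abs(librosa.stft(y)), ref=np.max)

# Object-oriented usage: pass an explicit Axes instead of relying on plt.gca()
fig, ax = plt.subplots()
librosa.display.specshow(D, sr=sr, x_axis='time', y_axis='log', ax=ax)
ax.set_title('Log-frequency power spectrogram (explicit Axes)')
# With ax= given, plt.colorbar() has no current image to latch onto,
# so pass the QuadMesh created by pcolormesh explicitly.
fig.colorbar(ax.collections[0], ax=ax, format='%+2.0f dB')
plt.show()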
|
[
"Display",
"a",
"spectrogram",
"/",
"chromagram",
"/",
"cqt",
"/",
"etc",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L491-L731
|
[
"def",
"specshow",
"(",
"data",
",",
"x_coords",
"=",
"None",
",",
"y_coords",
"=",
"None",
",",
"x_axis",
"=",
"None",
",",
"y_axis",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"fmin",
"=",
"None",
",",
"fmax",
"=",
"None",
",",
"bins_per_octave",
"=",
"12",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"np",
".",
"issubdtype",
"(",
"data",
".",
"dtype",
",",
"np",
".",
"complexfloating",
")",
":",
"warnings",
".",
"warn",
"(",
"'Trying to display complex-valued input. '",
"'Showing magnitude instead.'",
")",
"data",
"=",
"np",
".",
"abs",
"(",
"data",
")",
"kwargs",
".",
"setdefault",
"(",
"'cmap'",
",",
"cmap",
"(",
"data",
")",
")",
"kwargs",
".",
"setdefault",
"(",
"'rasterized'",
",",
"True",
")",
"kwargs",
".",
"setdefault",
"(",
"'edgecolors'",
",",
"'None'",
")",
"kwargs",
".",
"setdefault",
"(",
"'shading'",
",",
"'flat'",
")",
"all_params",
"=",
"dict",
"(",
"kwargs",
"=",
"kwargs",
",",
"sr",
"=",
"sr",
",",
"fmin",
"=",
"fmin",
",",
"fmax",
"=",
"fmax",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"hop_length",
"=",
"hop_length",
")",
"# Get the x and y coordinates",
"y_coords",
"=",
"__mesh_coords",
"(",
"y_axis",
",",
"y_coords",
",",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"*",
"*",
"all_params",
")",
"x_coords",
"=",
"__mesh_coords",
"(",
"x_axis",
",",
"x_coords",
",",
"data",
".",
"shape",
"[",
"1",
"]",
",",
"*",
"*",
"all_params",
")",
"axes",
"=",
"__check_axes",
"(",
"ax",
")",
"out",
"=",
"axes",
".",
"pcolormesh",
"(",
"x_coords",
",",
"y_coords",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"__set_current_image",
"(",
"ax",
",",
"out",
")",
"axes",
".",
"set_xlim",
"(",
"x_coords",
".",
"min",
"(",
")",
",",
"x_coords",
".",
"max",
"(",
")",
")",
"axes",
".",
"set_ylim",
"(",
"y_coords",
".",
"min",
"(",
")",
",",
"y_coords",
".",
"max",
"(",
")",
")",
"# Set up axis scaling",
"__scale_axes",
"(",
"axes",
",",
"x_axis",
",",
"'x'",
")",
"__scale_axes",
"(",
"axes",
",",
"y_axis",
",",
"'y'",
")",
"# Construct tickers and locators",
"__decorate_axis",
"(",
"axes",
".",
"xaxis",
",",
"x_axis",
")",
"__decorate_axis",
"(",
"axes",
".",
"yaxis",
",",
"y_axis",
")",
"return",
"axes"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__set_current_image
|
Helper to set the current image in pyplot mode.
If the provided `ax` is not `None`, then we assume that the user is using the object API.
In this case, the pyplot current image is not set.
|
librosa/display.py
|
def __set_current_image(ax, img):
'''Helper to set the current image in pyplot mode.
If the provided `ax` is not `None`, then we assume that the user is using the object API.
In this case, the pyplot current image is not set.
'''
if ax is None:
import matplotlib.pyplot as plt
plt.sci(img)
|
def __set_current_image(ax, img):
'''Helper to set the current image in pyplot mode.
If the provided `ax` is not `None`, then we assume that the user is using the object API.
In this case, the pyplot current image is not set.
'''
if ax is None:
import matplotlib.pyplot as plt
plt.sci(img)
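
In other words, the helper only matters for the pyplot workflow: when no `ax` is supplied, the drawn mesh is registered as the current image so that a bare `plt.colorbar()` can resolve it. A small illustration of that mechanism with plain matplotlib and hypothetical random data:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
mesh = ax.pcolormesh(np.random.rand(8, 8))   # hypothetical data

# Mimic the helper: only register the mesh as pyplot's "current image"
# when the caller did not supply an Axes of their own.
user_supplied_ax = None
if user_supplied_ax is None:
    plt.sci(mesh)

plt.colorbar()   # resolves the mappable through the current image
plt.show()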
|
[
"Helper",
"to",
"set",
"the",
"current",
"image",
"in",
"pyplot",
"mode",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L734-L743
|
[
"def",
"__set_current_image",
"(",
"ax",
",",
"img",
")",
":",
"if",
"ax",
"is",
"None",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"plt",
".",
"sci",
"(",
"img",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__mesh_coords
|
Compute axis coordinates
|
librosa/display.py
|
def __mesh_coords(ax_type, coords, n, **kwargs):
'''Compute axis coordinates'''
if coords is not None:
if len(coords) < n:
raise ParameterError('Coordinate shape mismatch: '
'{}<{}'.format(len(coords), n))
return coords
coord_map = {'linear': __coord_fft_hz,
'hz': __coord_fft_hz,
'log': __coord_fft_hz,
'mel': __coord_mel_hz,
'cqt': __coord_cqt_hz,
'cqt_hz': __coord_cqt_hz,
'cqt_note': __coord_cqt_hz,
'chroma': __coord_chroma,
'time': __coord_time,
's': __coord_time,
'ms': __coord_time,
'lag': __coord_time,
'lag_s': __coord_time,
'lag_ms': __coord_time,
'tonnetz': __coord_n,
'off': __coord_n,
'tempo': __coord_tempo,
'frames': __coord_n,
None: __coord_n}
if ax_type not in coord_map:
raise ParameterError('Unknown axis type: {}'.format(ax_type))
return coord_map[ax_type](n, **kwargs)
|
def __mesh_coords(ax_type, coords, n, **kwargs):
'''Compute axis coordinates'''
if coords is not None:
if len(coords) < n:
raise ParameterError('Coordinate shape mismatch: '
'{}<{}'.format(len(coords), n))
return coords
coord_map = {'linear': __coord_fft_hz,
'hz': __coord_fft_hz,
'log': __coord_fft_hz,
'mel': __coord_mel_hz,
'cqt': __coord_cqt_hz,
'cqt_hz': __coord_cqt_hz,
'cqt_note': __coord_cqt_hz,
'chroma': __coord_chroma,
'time': __coord_time,
's': __coord_time,
'ms': __coord_time,
'lag': __coord_time,
'lag_s': __coord_time,
'lag_ms': __coord_time,
'tonnetz': __coord_n,
'off': __coord_n,
'tempo': __coord_tempo,
'frames': __coord_n,
None: __coord_n}
if ax_type not in coord_map:
raise ParameterError('Unknown axis type: {}'.format(ax_type))
return coord_map[ax_type](n, **kwargs)
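
Every callback in `coord_map` is expected to return `n + 1` edge positions rather than `n` bin centres, because `pcolormesh` wants cell boundaries. A short sketch of that contract, restating two of the coordinate helpers shown further below with plain numpy:

import numpy as np

def coord_chroma(n, bins_per_octave=12):
    # mirrors __coord_chroma: n+1 evenly spaced edges on the pitch-class axis
    return np.linspace(0, (12.0 * n) / bins_per_octave, num=n + 1, endpoint=True)

def coord_time(n, sr=22050, hop_length=512):
    # mirrors __coord_time: frame edges converted to seconds
    return np.arange(n + 1) * hop_length / float(sr)

C = np.zeros((12, 200))               # hypothetical chromagram
y_edges = coord_chroma(C.shape[0])    # 13 edges bracket 12 pitch classes
x_edges = coord_time(C.shape[1])      # 201 edges bracket 200 frames
assert len(y_edges) == C.shape[0] + 1
assert len(x_edges) == C.shape[1] + 1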
|
[
"Compute",
"axis",
"coordinates"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L746-L777
|
[
"def",
"__mesh_coords",
"(",
"ax_type",
",",
"coords",
",",
"n",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"coords",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"coords",
")",
"<",
"n",
":",
"raise",
"ParameterError",
"(",
"'Coordinate shape mismatch: '",
"'{}<{}'",
".",
"format",
"(",
"len",
"(",
"coords",
")",
",",
"n",
")",
")",
"return",
"coords",
"coord_map",
"=",
"{",
"'linear'",
":",
"__coord_fft_hz",
",",
"'hz'",
":",
"__coord_fft_hz",
",",
"'log'",
":",
"__coord_fft_hz",
",",
"'mel'",
":",
"__coord_mel_hz",
",",
"'cqt'",
":",
"__coord_cqt_hz",
",",
"'cqt_hz'",
":",
"__coord_cqt_hz",
",",
"'cqt_note'",
":",
"__coord_cqt_hz",
",",
"'chroma'",
":",
"__coord_chroma",
",",
"'time'",
":",
"__coord_time",
",",
"'s'",
":",
"__coord_time",
",",
"'ms'",
":",
"__coord_time",
",",
"'lag'",
":",
"__coord_time",
",",
"'lag_s'",
":",
"__coord_time",
",",
"'lag_ms'",
":",
"__coord_time",
",",
"'tonnetz'",
":",
"__coord_n",
",",
"'off'",
":",
"__coord_n",
",",
"'tempo'",
":",
"__coord_tempo",
",",
"'frames'",
":",
"__coord_n",
",",
"None",
":",
"__coord_n",
"}",
"if",
"ax_type",
"not",
"in",
"coord_map",
":",
"raise",
"ParameterError",
"(",
"'Unknown axis type: {}'",
".",
"format",
"(",
"ax_type",
")",
")",
"return",
"coord_map",
"[",
"ax_type",
"]",
"(",
"n",
",",
"*",
"*",
"kwargs",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__check_axes
|
Check if "axes" is an instance of an axis object. If not, use `gca`.
|
librosa/display.py
|
def __check_axes(axes):
'''Check if "axes" is an instance of an axis object. If not, use `gca`.'''
if axes is None:
import matplotlib.pyplot as plt
axes = plt.gca()
elif not isinstance(axes, Axes):
raise ValueError("`axes` must be an instance of matplotlib.axes.Axes. "
"Found type(axes)={}".format(type(axes)))
return axes
|
def __check_axes(axes):
'''Check if "axes" is an instance of an axis object. If not, use `gca`.'''
if axes is None:
import matplotlib.pyplot as plt
axes = plt.gca()
elif not isinstance(axes, Axes):
raise ValueError("`axes` must be an instance of matplotlib.axes.Axes. "
"Found type(axes)={}".format(type(axes)))
return axes
|
[
"Check",
"if",
"axes",
"is",
"an",
"instance",
"of",
"an",
"axis",
"object",
".",
"If",
"not",
"use",
"gca",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L780-L788
|
[
"def",
"__check_axes",
"(",
"axes",
")",
":",
"if",
"axes",
"is",
"None",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"axes",
"=",
"plt",
".",
"gca",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"axes",
",",
"Axes",
")",
":",
"raise",
"ValueError",
"(",
"\"`axes` must be an instance of matplotlib.axes.Axes. \"",
"\"Found type(axes)={}\"",
".",
"format",
"(",
"type",
"(",
"axes",
")",
")",
")",
"return",
"axes"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__scale_axes
|
Set the axis scaling
|
librosa/display.py
|
def __scale_axes(axes, ax_type, which):
'''Set the axis scaling'''
kwargs = dict()
if which == 'x':
thresh = 'linthreshx'
base = 'basex'
scale = 'linscalex'
scaler = axes.set_xscale
limit = axes.set_xlim
else:
thresh = 'linthreshy'
base = 'basey'
scale = 'linscaley'
scaler = axes.set_yscale
limit = axes.set_ylim
# Map ticker scales
if ax_type == 'mel':
mode = 'symlog'
kwargs[thresh] = 1000.0
kwargs[base] = 2
elif ax_type == 'log':
mode = 'symlog'
kwargs[base] = 2
kwargs[thresh] = core.note_to_hz('C2')
kwargs[scale] = 0.5
elif ax_type in ['cqt', 'cqt_hz', 'cqt_note']:
mode = 'log'
kwargs[base] = 2
elif ax_type == 'tempo':
mode = 'log'
kwargs[base] = 2
limit(16, 480)
else:
return
scaler(mode, **kwargs)
|
def __scale_axes(axes, ax_type, which):
'''Set the axis scaling'''
kwargs = dict()
if which == 'x':
thresh = 'linthreshx'
base = 'basex'
scale = 'linscalex'
scaler = axes.set_xscale
limit = axes.set_xlim
else:
thresh = 'linthreshy'
base = 'basey'
scale = 'linscaley'
scaler = axes.set_yscale
limit = axes.set_ylim
# Map ticker scales
if ax_type == 'mel':
mode = 'symlog'
kwargs[thresh] = 1000.0
kwargs[base] = 2
elif ax_type == 'log':
mode = 'symlog'
kwargs[base] = 2
kwargs[thresh] = core.note_to_hz('C2')
kwargs[scale] = 0.5
elif ax_type in ['cqt', 'cqt_hz', 'cqt_note']:
mode = 'log'
kwargs[base] = 2
elif ax_type == 'tempo':
mode = 'log'
kwargs[base] = 2
limit(16, 480)
else:
return
scaler(mode, **kwargs)
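
The keyword names used here (`basex`, `basey`, `linthreshx`, `linscaley`, ...) are the axis-suffixed spellings accepted by older matplotlib releases; matplotlib 3.3 and later expect `base`, `linthresh`, and `linscale` instead. A hedged adaptation of the 'mel' branch to the newer API (an adaptation, not the code above) might look like:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
y_edges = np.linspace(0, 11025, 129)     # stand-in frequency edges (Hz)
x_edges = np.arange(51)
ax.pcolormesh(x_edges, y_edges, np.random.rand(128, 50))  # hypothetical data

# 'mel'-style scaling: linear below 1000 Hz, log base 2 above it.
# Recent matplotlib drops the x/y suffix from these keyword names.
ax.set_yscale('symlog', base=2, linthresh=1000.0)
plt.show()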
|
[
"Set",
"the",
"axis",
"scaling"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L791-L831
|
[
"def",
"__scale_axes",
"(",
"axes",
",",
"ax_type",
",",
"which",
")",
":",
"kwargs",
"=",
"dict",
"(",
")",
"if",
"which",
"==",
"'x'",
":",
"thresh",
"=",
"'linthreshx'",
"base",
"=",
"'basex'",
"scale",
"=",
"'linscalex'",
"scaler",
"=",
"axes",
".",
"set_xscale",
"limit",
"=",
"axes",
".",
"set_xlim",
"else",
":",
"thresh",
"=",
"'linthreshy'",
"base",
"=",
"'basey'",
"scale",
"=",
"'linscaley'",
"scaler",
"=",
"axes",
".",
"set_yscale",
"limit",
"=",
"axes",
".",
"set_ylim",
"# Map ticker scales",
"if",
"ax_type",
"==",
"'mel'",
":",
"mode",
"=",
"'symlog'",
"kwargs",
"[",
"thresh",
"]",
"=",
"1000.0",
"kwargs",
"[",
"base",
"]",
"=",
"2",
"elif",
"ax_type",
"==",
"'log'",
":",
"mode",
"=",
"'symlog'",
"kwargs",
"[",
"base",
"]",
"=",
"2",
"kwargs",
"[",
"thresh",
"]",
"=",
"core",
".",
"note_to_hz",
"(",
"'C2'",
")",
"kwargs",
"[",
"scale",
"]",
"=",
"0.5",
"elif",
"ax_type",
"in",
"[",
"'cqt'",
",",
"'cqt_hz'",
",",
"'cqt_note'",
"]",
":",
"mode",
"=",
"'log'",
"kwargs",
"[",
"base",
"]",
"=",
"2",
"elif",
"ax_type",
"==",
"'tempo'",
":",
"mode",
"=",
"'log'",
"kwargs",
"[",
"base",
"]",
"=",
"2",
"limit",
"(",
"16",
",",
"480",
")",
"else",
":",
"return",
"scaler",
"(",
"mode",
",",
"*",
"*",
"kwargs",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__decorate_axis
|
Configure axis tickers, locators, and labels
|
librosa/display.py
|
def __decorate_axis(axis, ax_type):
'''Configure axis tickers, locators, and labels'''
if ax_type == 'tonnetz':
axis.set_major_formatter(TonnetzFormatter())
axis.set_major_locator(FixedLocator(0.5 + np.arange(6)))
axis.set_label_text('Tonnetz')
elif ax_type == 'chroma':
axis.set_major_formatter(ChromaFormatter())
axis.set_major_locator(FixedLocator(0.5 +
np.add.outer(12 * np.arange(10),
[0, 2, 4, 5, 7, 9, 11]).ravel()))
axis.set_label_text('Pitch class')
elif ax_type == 'tempo':
axis.set_major_formatter(ScalarFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_label_text('BPM')
elif ax_type == 'time':
axis.set_major_formatter(TimeFormatter(unit=None, lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time')
elif ax_type == 's':
axis.set_major_formatter(TimeFormatter(unit='s', lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time (s)')
elif ax_type == 'ms':
axis.set_major_formatter(TimeFormatter(unit='ms', lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time (ms)')
elif ax_type == 'lag':
axis.set_major_formatter(TimeFormatter(unit=None, lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag')
elif ax_type == 'lag_s':
axis.set_major_formatter(TimeFormatter(unit='s', lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag (s)')
elif ax_type == 'lag_ms':
axis.set_major_formatter(TimeFormatter(unit='ms', lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag (ms)')
elif ax_type == 'cqt_note':
axis.set_major_formatter(NoteFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_minor_formatter(NoteFormatter(major=False))
axis.set_minor_locator(LogLocator(base=2.0,
subs=2.0**(np.arange(1, 12)/12.0)))
axis.set_label_text('Note')
elif ax_type in ['cqt_hz']:
axis.set_major_formatter(LogHzFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_minor_formatter(LogHzFormatter(major=False))
axis.set_minor_locator(LogLocator(base=2.0,
subs=2.0**(np.arange(1, 12)/12.0)))
axis.set_label_text('Hz')
elif ax_type in ['mel', 'log']:
axis.set_major_formatter(ScalarFormatter())
axis.set_major_locator(SymmetricalLogLocator(axis.get_transform()))
axis.set_label_text('Hz')
elif ax_type in ['linear', 'hz']:
axis.set_major_formatter(ScalarFormatter())
axis.set_label_text('Hz')
elif ax_type in ['frames']:
axis.set_label_text('Frames')
elif ax_type in ['off', 'none', None]:
axis.set_label_text('')
axis.set_ticks([])
|
def __decorate_axis(axis, ax_type):
'''Configure axis tickers, locators, and labels'''
if ax_type == 'tonnetz':
axis.set_major_formatter(TonnetzFormatter())
axis.set_major_locator(FixedLocator(0.5 + np.arange(6)))
axis.set_label_text('Tonnetz')
elif ax_type == 'chroma':
axis.set_major_formatter(ChromaFormatter())
axis.set_major_locator(FixedLocator(0.5 +
np.add.outer(12 * np.arange(10),
[0, 2, 4, 5, 7, 9, 11]).ravel()))
axis.set_label_text('Pitch class')
elif ax_type == 'tempo':
axis.set_major_formatter(ScalarFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_label_text('BPM')
elif ax_type == 'time':
axis.set_major_formatter(TimeFormatter(unit=None, lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time')
elif ax_type == 's':
axis.set_major_formatter(TimeFormatter(unit='s', lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time (s)')
elif ax_type == 'ms':
axis.set_major_formatter(TimeFormatter(unit='ms', lag=False))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Time (ms)')
elif ax_type == 'lag':
axis.set_major_formatter(TimeFormatter(unit=None, lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag')
elif ax_type == 'lag_s':
axis.set_major_formatter(TimeFormatter(unit='s', lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag (s)')
elif ax_type == 'lag_ms':
axis.set_major_formatter(TimeFormatter(unit='ms', lag=True))
axis.set_major_locator(MaxNLocator(prune=None,
steps=[1, 1.5, 5, 6, 10]))
axis.set_label_text('Lag (ms)')
elif ax_type == 'cqt_note':
axis.set_major_formatter(NoteFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_minor_formatter(NoteFormatter(major=False))
axis.set_minor_locator(LogLocator(base=2.0,
subs=2.0**(np.arange(1, 12)/12.0)))
axis.set_label_text('Note')
elif ax_type in ['cqt_hz']:
axis.set_major_formatter(LogHzFormatter())
axis.set_major_locator(LogLocator(base=2.0))
axis.set_minor_formatter(LogHzFormatter(major=False))
axis.set_minor_locator(LogLocator(base=2.0,
subs=2.0**(np.arange(1, 12)/12.0)))
axis.set_label_text('Hz')
elif ax_type in ['mel', 'log']:
axis.set_major_formatter(ScalarFormatter())
axis.set_major_locator(SymmetricalLogLocator(axis.get_transform()))
axis.set_label_text('Hz')
elif ax_type in ['linear', 'hz']:
axis.set_major_formatter(ScalarFormatter())
axis.set_label_text('Hz')
elif ax_type in ['frames']:
axis.set_label_text('Frames')
elif ax_type in ['off', 'none', None]:
axis.set_label_text('')
axis.set_ticks([])
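
All of the branches above follow the same matplotlib pattern: choose a locator for where ticks go, a formatter for how they read, and a label. The librosa formatters (`TimeFormatter`, `NoteFormatter`, ...) are not reproduced here, but the pattern itself can be sketched with matplotlib's stock classes, using a `FuncFormatter` as a rough stand-in for the 'time' branch:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter, MaxNLocator

fig, ax = plt.subplots()
ax.plot(np.linspace(0, 300, 50), np.random.rand(50))   # hypothetical data

# Same locator settings as the 'time' branch; a plain m:ss formatter
# stands in for librosa's TimeFormatter.
ax.xaxis.set_major_locator(MaxNLocator(prune=None, steps=[1, 1.5, 5, 6, 10]))
ax.xaxis.set_major_formatter(
    FuncFormatter(lambda x, pos: '%d:%02d' % divmod(int(x), 60)))
ax.xaxis.set_label_text('Time')
plt.show()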
|
[
"Configure",
"axis",
"tickers",
"locators",
"and",
"labels"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L834-L920
|
[
"def",
"__decorate_axis",
"(",
"axis",
",",
"ax_type",
")",
":",
"if",
"ax_type",
"==",
"'tonnetz'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TonnetzFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"FixedLocator",
"(",
"0.5",
"+",
"np",
".",
"arange",
"(",
"6",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Tonnetz'",
")",
"elif",
"ax_type",
"==",
"'chroma'",
":",
"axis",
".",
"set_major_formatter",
"(",
"ChromaFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"FixedLocator",
"(",
"0.5",
"+",
"np",
".",
"add",
".",
"outer",
"(",
"12",
"*",
"np",
".",
"arange",
"(",
"10",
")",
",",
"[",
"0",
",",
"2",
",",
"4",
",",
"5",
",",
"7",
",",
"9",
",",
"11",
"]",
")",
".",
"ravel",
"(",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Pitch class'",
")",
"elif",
"ax_type",
"==",
"'tempo'",
":",
"axis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'BPM'",
")",
"elif",
"ax_type",
"==",
"'time'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"None",
",",
"lag",
"=",
"False",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Time'",
")",
"elif",
"ax_type",
"==",
"'s'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"'s'",
",",
"lag",
"=",
"False",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Time (s)'",
")",
"elif",
"ax_type",
"==",
"'ms'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"'ms'",
",",
"lag",
"=",
"False",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Time (ms)'",
")",
"elif",
"ax_type",
"==",
"'lag'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"None",
",",
"lag",
"=",
"True",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Lag'",
")",
"elif",
"ax_type",
"==",
"'lag_s'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"'s'",
",",
"lag",
"=",
"True",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Lag (s)'",
")",
"elif",
"ax_type",
"==",
"'lag_ms'",
":",
"axis",
".",
"set_major_formatter",
"(",
"TimeFormatter",
"(",
"unit",
"=",
"'ms'",
",",
"lag",
"=",
"True",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"prune",
"=",
"None",
",",
"steps",
"=",
"[",
"1",
",",
"1.5",
",",
"5",
",",
"6",
",",
"10",
"]",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Lag (ms)'",
")",
"elif",
"ax_type",
"==",
"'cqt_note'",
":",
"axis",
".",
"set_major_formatter",
"(",
"NoteFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
")",
")",
"axis",
".",
"set_minor_formatter",
"(",
"NoteFormatter",
"(",
"major",
"=",
"False",
")",
")",
"axis",
".",
"set_minor_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
",",
"subs",
"=",
"2.0",
"**",
"(",
"np",
".",
"arange",
"(",
"1",
",",
"12",
")",
"/",
"12.0",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Note'",
")",
"elif",
"ax_type",
"in",
"[",
"'cqt_hz'",
"]",
":",
"axis",
".",
"set_major_formatter",
"(",
"LogHzFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
")",
")",
"axis",
".",
"set_minor_formatter",
"(",
"LogHzFormatter",
"(",
"major",
"=",
"False",
")",
")",
"axis",
".",
"set_minor_locator",
"(",
"LogLocator",
"(",
"base",
"=",
"2.0",
",",
"subs",
"=",
"2.0",
"**",
"(",
"np",
".",
"arange",
"(",
"1",
",",
"12",
")",
"/",
"12.0",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Hz'",
")",
"elif",
"ax_type",
"in",
"[",
"'mel'",
",",
"'log'",
"]",
":",
"axis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
")",
")",
"axis",
".",
"set_major_locator",
"(",
"SymmetricalLogLocator",
"(",
"axis",
".",
"get_transform",
"(",
")",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Hz'",
")",
"elif",
"ax_type",
"in",
"[",
"'linear'",
",",
"'hz'",
"]",
":",
"axis",
".",
"set_major_formatter",
"(",
"ScalarFormatter",
"(",
")",
")",
"axis",
".",
"set_label_text",
"(",
"'Hz'",
")",
"elif",
"ax_type",
"in",
"[",
"'frames'",
"]",
":",
"axis",
".",
"set_label_text",
"(",
"'Frames'",
")",
"elif",
"ax_type",
"in",
"[",
"'off'",
",",
"'none'",
",",
"None",
"]",
":",
"axis",
".",
"set_label_text",
"(",
"''",
")",
"axis",
".",
"set_ticks",
"(",
"[",
"]",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__coord_fft_hz
|
Get the frequencies for FFT bins
|
librosa/display.py
|
def __coord_fft_hz(n, sr=22050, **_kwargs):
'''Get the frequencies for FFT bins'''
n_fft = 2 * (n - 1)
# The following code centers the FFT bins at their frequencies
# and clips to the non-negative frequency range [0, nyquist]
basis = core.fft_frequencies(sr=sr, n_fft=n_fft)
fmax = basis[-1]
basis -= 0.5 * (basis[1] - basis[0])
basis = np.append(np.maximum(0, basis), [fmax])
return basis
|
def __coord_fft_hz(n, sr=22050, **_kwargs):
'''Get the frequencies for FFT bins'''
n_fft = 2 * (n - 1)
# The following code centers the FFT bins at their frequencies
# and clips to the non-negative frequency range [0, nyquist]
basis = core.fft_frequencies(sr=sr, n_fft=n_fft)
fmax = basis[-1]
basis -= 0.5 * (basis[1] - basis[0])
basis = np.append(np.maximum(0, basis), [fmax])
return basis
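
A worked check of this construction with only numpy: for a spectrogram with `n` rows the inferred FFT size is `2*(n - 1)`, the bin centres are evenly spaced from 0 Hz to Nyquist, each edge sits half a bin below its centre (clipped at 0 Hz), and the Nyquist frequency is appended so that `n + 1` edges bracket the `n` bins. The sketch below inlines `fft_frequencies` as a plain `linspace`:

import numpy as np

def fft_bin_edges(n, sr=22050):
    # same construction as __coord_fft_hz, with fft_frequencies inlined
    n_fft = 2 * (n - 1)
    freqs = np.linspace(0, sr / 2.0, n)          # bin centre frequencies
    nyquist = freqs[-1]
    edges = freqs - 0.5 * (freqs[1] - freqs[0])  # shift down half a bin
    return np.append(np.maximum(0, edges), [nyquist])

edges = fft_bin_edges(1025, sr=22050)            # e.g. n_fft = 2048
assert len(edges) == 1026
assert edges[0] == 0.0                           # DC edge clipped at 0 Hz
assert np.isclose(edges[-1], 11025.0)            # Nyquist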
|
[
"Get",
"the",
"frequencies",
"for",
"FFT",
"bins"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L923-L932
|
[
"def",
"__coord_fft_hz",
"(",
"n",
",",
"sr",
"=",
"22050",
",",
"*",
"*",
"_kwargs",
")",
":",
"n_fft",
"=",
"2",
"*",
"(",
"n",
"-",
"1",
")",
"# The following code centers the FFT bins at their frequencies",
"# and clips to the non-negative frequency range [0, nyquist]",
"basis",
"=",
"core",
".",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"fmax",
"=",
"basis",
"[",
"-",
"1",
"]",
"basis",
"-=",
"0.5",
"*",
"(",
"basis",
"[",
"1",
"]",
"-",
"basis",
"[",
"0",
"]",
")",
"basis",
"=",
"np",
".",
"append",
"(",
"np",
".",
"maximum",
"(",
"0",
",",
"basis",
")",
",",
"[",
"fmax",
"]",
")",
"return",
"basis"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__coord_mel_hz
|
Get the frequencies for Mel bins
|
librosa/display.py
|
def __coord_mel_hz(n, fmin=0, fmax=11025.0, **_kwargs):
'''Get the frequencies for Mel bins'''
if fmin is None:
fmin = 0
if fmax is None:
fmax = 11025.0
basis = core.mel_frequencies(n, fmin=fmin, fmax=fmax)
basis[1:] -= 0.5 * np.diff(basis)
basis = np.append(np.maximum(0, basis), [fmax])
return basis
|
def __coord_mel_hz(n, fmin=0, fmax=11025.0, **_kwargs):
'''Get the frequencies for Mel bins'''
if fmin is None:
fmin = 0
if fmax is None:
fmax = 11025.0
basis = core.mel_frequencies(n, fmin=fmin, fmax=fmax)
basis[1:] -= 0.5 * np.diff(basis)
basis = np.append(np.maximum(0, basis), [fmax])
return basis
|
[
"Get",
"the",
"frequencies",
"for",
"Mel",
"bins"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L935-L946
|
[
"def",
"__coord_mel_hz",
"(",
"n",
",",
"fmin",
"=",
"0",
",",
"fmax",
"=",
"11025.0",
",",
"*",
"*",
"_kwargs",
")",
":",
"if",
"fmin",
"is",
"None",
":",
"fmin",
"=",
"0",
"if",
"fmax",
"is",
"None",
":",
"fmax",
"=",
"11025.0",
"basis",
"=",
"core",
".",
"mel_frequencies",
"(",
"n",
",",
"fmin",
"=",
"fmin",
",",
"fmax",
"=",
"fmax",
")",
"basis",
"[",
"1",
":",
"]",
"-=",
"0.5",
"*",
"np",
".",
"diff",
"(",
"basis",
")",
"basis",
"=",
"np",
".",
"append",
"(",
"np",
".",
"maximum",
"(",
"0",
",",
"basis",
")",
",",
"[",
"fmax",
"]",
")",
"return",
"basis"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__coord_cqt_hz
|
Get CQT bin frequencies
|
librosa/display.py
|
def __coord_cqt_hz(n, fmin=None, bins_per_octave=12, **_kwargs):
'''Get CQT bin frequencies'''
if fmin is None:
fmin = core.note_to_hz('C1')
# we drop by half a bin so that CQT bins are centered vertically
return core.cqt_frequencies(n+1,
fmin=fmin / 2.0**(0.5/bins_per_octave),
bins_per_octave=bins_per_octave)
|
def __coord_cqt_hz(n, fmin=None, bins_per_octave=12, **_kwargs):
'''Get CQT bin frequencies'''
if fmin is None:
fmin = core.note_to_hz('C1')
# we drop by half a bin so that CQT bins are centered vertically
return core.cqt_frequencies(n+1,
fmin=fmin / 2.0**(0.5/bins_per_octave),
bins_per_octave=bins_per_octave)
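
Since CQT bin centres form the geometric series `fmin * 2**(k / bins_per_octave)`, dividing `fmin` by `2**(0.5 / bins_per_octave)` yields `n + 1` edges that fall geometrically halfway between adjacent centres. A numeric check with `cqt_frequencies` inlined (the 32.703 Hz constant stands in for `note_to_hz('C1')`):

import numpy as np

def cqt_bin_edges(n, fmin=32.70319566257483, bins_per_octave=12):
    # cqt_frequencies inlined as the geometric series fmin * 2**(k / bpo),
    # started half a bin below fmin as in __coord_cqt_hz
    fmin_shifted = fmin / 2.0 ** (0.5 / bins_per_octave)
    return fmin_shifted * 2.0 ** (np.arange(n + 1) / float(bins_per_octave))

centres = 32.70319566257483 * 2.0 ** (np.arange(84) / 12.0)
edges = cqt_bin_edges(84)
# each centre falls strictly between its two surrounding edges
assert np.all((edges[:-1] < centres) & (centres < edges[1:]))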
|
[
"Get",
"CQT",
"bin",
"frequencies"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L949-L957
|
[
"def",
"__coord_cqt_hz",
"(",
"n",
",",
"fmin",
"=",
"None",
",",
"bins_per_octave",
"=",
"12",
",",
"*",
"*",
"_kwargs",
")",
":",
"if",
"fmin",
"is",
"None",
":",
"fmin",
"=",
"core",
".",
"note_to_hz",
"(",
"'C1'",
")",
"# we drop by half a bin so that CQT bins are centered vertically",
"return",
"core",
".",
"cqt_frequencies",
"(",
"n",
"+",
"1",
",",
"fmin",
"=",
"fmin",
"/",
"2.0",
"**",
"(",
"0.5",
"/",
"bins_per_octave",
")",
",",
"bins_per_octave",
"=",
"bins_per_octave",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__coord_chroma
|
Get chroma bin numbers
|
librosa/display.py
|
def __coord_chroma(n, bins_per_octave=12, **_kwargs):
'''Get chroma bin numbers'''
return np.linspace(0, (12.0 * n) / bins_per_octave, num=n+1, endpoint=True)
|
def __coord_chroma(n, bins_per_octave=12, **_kwargs):
'''Get chroma bin numbers'''
return np.linspace(0, (12.0 * n) / bins_per_octave, num=n+1, endpoint=True)
|
[
"Get",
"chroma",
"bin",
"numbers"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L960-L962
|
[
"def",
"__coord_chroma",
"(",
"n",
",",
"bins_per_octave",
"=",
"12",
",",
"*",
"*",
"_kwargs",
")",
":",
"return",
"np",
".",
"linspace",
"(",
"0",
",",
"(",
"12.0",
"*",
"n",
")",
"/",
"bins_per_octave",
",",
"num",
"=",
"n",
"+",
"1",
",",
"endpoint",
"=",
"True",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__coord_tempo
|
Tempo coordinates
|
librosa/display.py
|
def __coord_tempo(n, sr=22050, hop_length=512, **_kwargs):
'''Tempo coordinates'''
basis = core.tempo_frequencies(n+2, sr=sr, hop_length=hop_length)[1:]
edges = np.arange(1, n+2)
return basis * (edges + 0.5) / edges
|
def __coord_tempo(n, sr=22050, hop_length=512, **_kwargs):
'''Tempo coordinates'''
basis = core.tempo_frequencies(n+2, sr=sr, hop_length=hop_length)[1:]
edges = np.arange(1, n+2)
return basis * (edges + 0.5) / edges
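
Under the assumption that `tempo_frequencies` maps lag `k` to `60 * sr / (hop_length * k)` BPM, with the infinite-BPM lag-0 bin dropped by the `[1:]` slice, the edge construction can be sketched as follows; treat the inlined formula as an assumption rather than a statement about the librosa API:

import numpy as np

def tempo_bin_edges(n, sr=22050, hop_length=512):
    # tempo_frequencies inlined (assumed): BPM for lag k is 60*sr/(hop_length*k)
    lags = np.arange(1, n + 2)
    bpm = 60.0 * sr / (hop_length * lags)
    return bpm * (lags + 0.5) / lags

edges = tempo_bin_edges(384)
assert len(edges) == 385
assert np.all(np.diff(edges) < 0)     # BPM decreases as the lag grows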
|
[
"Tempo",
"coordinates"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L965-L969
|
[
"def",
"__coord_tempo",
"(",
"n",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"*",
"*",
"_kwargs",
")",
":",
"basis",
"=",
"core",
".",
"tempo_frequencies",
"(",
"n",
"+",
"2",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
")",
"[",
"1",
":",
"]",
"edges",
"=",
"np",
".",
"arange",
"(",
"1",
",",
"n",
"+",
"2",
")",
"return",
"basis",
"*",
"(",
"edges",
"+",
"0.5",
")",
"/",
"edges"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__coord_time
|
Get time coordinates from frames
|
librosa/display.py
|
def __coord_time(n, sr=22050, hop_length=512, **_kwargs):
'''Get time coordinates from frames'''
return core.frames_to_time(np.arange(n+1), sr=sr, hop_length=hop_length)
|
def __coord_time(n, sr=22050, hop_length=512, **_kwargs):
'''Get time coordinates from frames'''
return core.frames_to_time(np.arange(n+1), sr=sr, hop_length=hop_length)
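
With no `n_fft` offset applied, `frames_to_time(k)` reduces to `k * hop_length / sr`, so the returned edges are evenly spaced in seconds. A one-line equivalence check:

import numpy as np

n, sr, hop_length = 100, 22050, 512
edges = np.arange(n + 1) * hop_length / float(sr)   # equivalent edge positions
assert len(edges) == n + 1
assert np.isclose(edges[1] - edges[0], hop_length / float(sr))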
|
[
"Get",
"time",
"coordinates",
"from",
"frames"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/display.py#L977-L979
|
[
"def",
"__coord_time",
"(",
"n",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"*",
"*",
"_kwargs",
")",
":",
"return",
"core",
".",
"frames_to_time",
"(",
"np",
".",
"arange",
"(",
"n",
"+",
"1",
")",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"hop_length",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
estimate_tuning
|
Estimate the tuning of an audio time series or spectrogram input.
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to measurements in cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
kwargs : additional keyword arguments
Additional arguments passed to `piptrack`
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
piptrack
Pitch tracking by parabolic interpolation
Examples
--------
>>> # With time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr)
0.089999999999999969
>>> # In tenths of a cent
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
0.093999999999999972
>>> # Using spectrogram input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.estimate_tuning(S=S, sr=sr)
0.089999999999999969
>>> # Using pass-through arguments to `librosa.piptrack`
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, n_fft=8192,
... fmax=librosa.note_to_hz('G#9'))
0.070000000000000062
|
librosa/core/pitch.py
|
def estimate_tuning(y=None, sr=22050, S=None, n_fft=2048,
resolution=0.01, bins_per_octave=12, **kwargs):
'''Estimate the tuning of an audio time series or spectrogram input.
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to measurements in cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
kwargs : additional keyword arguments
Additional arguments passed to `piptrack`
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
piptrack
Pitch tracking by parabolic interpolation
Examples
--------
>>> # With time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr)
0.089999999999999969
>>> # In tenths of a cent
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
0.093999999999999972
>>> # Using spectrogram input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.estimate_tuning(S=S, sr=sr)
0.089999999999999969
>>> # Using pass-through arguments to `librosa.piptrack`
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, n_fft=8192,
... fmax=librosa.note_to_hz('G#9'))
0.070000000000000062
'''
pitch, mag = piptrack(y=y, sr=sr, S=S, n_fft=n_fft, **kwargs)
# Only count magnitude where frequency is > 0
pitch_mask = pitch > 0
if pitch_mask.any():
threshold = np.median(mag[pitch_mask])
else:
threshold = 0.0
return pitch_tuning(pitch[(mag >= threshold) & pitch_mask],
resolution=resolution,
bins_per_octave=bins_per_octave)
|
def estimate_tuning(y=None, sr=22050, S=None, n_fft=2048,
resolution=0.01, bins_per_octave=12, **kwargs):
'''Estimate the tuning of an audio time series or spectrogram input.
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to measurements in cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
kwargs : additional keyword arguments
Additional arguments passed to `piptrack`
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
piptrack
Pitch tracking by parabolic interpolation
Examples
--------
>>> # With time-series input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr)
0.089999999999999969
>>> # In tenths of a cent
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, resolution=1e-3)
0.093999999999999972
>>> # Using spectrogram input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.estimate_tuning(S=S, sr=sr)
0.089999999999999969
>>> # Using pass-through arguments to `librosa.piptrack`
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> librosa.estimate_tuning(y=y, sr=sr, n_fft=8192,
... fmax=librosa.note_to_hz('G#9'))
0.070000000000000062
'''
pitch, mag = piptrack(y=y, sr=sr, S=S, n_fft=n_fft, **kwargs)
# Only count magnitude where frequency is > 0
pitch_mask = pitch > 0
if pitch_mask.any():
threshold = np.median(mag[pitch_mask])
else:
threshold = 0.0
return pitch_tuning(pitch[(mag >= threshold) & pitch_mask],
resolution=resolution,
bins_per_octave=bins_per_octave)
|
[
"Estimate",
"the",
"tuning",
"of",
"an",
"audio",
"time",
"series",
"or",
"spectrogram",
"input",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/pitch.py#L17-L93
|
[
"def",
"estimate_tuning",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"resolution",
"=",
"0.01",
",",
"bins_per_octave",
"=",
"12",
",",
"*",
"*",
"kwargs",
")",
":",
"pitch",
",",
"mag",
"=",
"piptrack",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"*",
"*",
"kwargs",
")",
"# Only count magnitude where frequency is > 0",
"pitch_mask",
"=",
"pitch",
">",
"0",
"if",
"pitch_mask",
".",
"any",
"(",
")",
":",
"threshold",
"=",
"np",
".",
"median",
"(",
"mag",
"[",
"pitch_mask",
"]",
")",
"else",
":",
"threshold",
"=",
"0.0",
"return",
"pitch_tuning",
"(",
"pitch",
"[",
"(",
"mag",
">=",
"threshold",
")",
"&",
"pitch_mask",
"]",
",",
"resolution",
"=",
"resolution",
",",
"bins_per_octave",
"=",
"bins_per_octave",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
pitch_tuning
|
Given a collection of pitches, estimate its tuning offset
(in fractions of a bin) relative to A440=440.0Hz.
Parameters
----------
frequencies : array-like, float
A collection of frequencies detected in the signal.
See `piptrack`
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
estimate_tuning
Estimating tuning from time-series or spectrogram input
Examples
--------
>>> # Generate notes at +25 cents
>>> freqs = librosa.cqt_frequencies(24, 55, tuning=0.25)
>>> librosa.pitch_tuning(freqs)
0.25
>>> # Track frequencies from a real spectrogram
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes, stft = librosa.ifptrack(y, sr)
>>> # Select out pitches with high energy
>>> pitches = pitches[magnitudes > np.median(magnitudes)]
>>> librosa.pitch_tuning(pitches)
0.089999999999999969
|
librosa/core/pitch.py
|
def pitch_tuning(frequencies, resolution=0.01, bins_per_octave=12):
'''Given a collection of pitches, estimate its tuning offset
(in fractions of a bin) relative to A440=440.0Hz.
Parameters
----------
frequencies : array-like, float
A collection of frequencies detected in the signal.
See `piptrack`
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
estimate_tuning
Estimating tuning from time-series or spectrogram input
Examples
--------
>>> # Generate notes at +25 cents
>>> freqs = librosa.cqt_frequencies(24, 55, tuning=0.25)
>>> librosa.pitch_tuning(freqs)
0.25
>>> # Track frequencies from a real spectrogram
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes, stft = librosa.ifptrack(y, sr)
>>> # Select out pitches with high energy
>>> pitches = pitches[magnitudes > np.median(magnitudes)]
>>> librosa.pitch_tuning(pitches)
0.089999999999999969
'''
frequencies = np.atleast_1d(frequencies)
# Trim out any DC components
frequencies = frequencies[frequencies > 0]
if not np.any(frequencies):
warnings.warn('Trying to estimate tuning from empty frequency set.')
return 0.0
# Compute the residual relative to the number of bins
residual = np.mod(bins_per_octave *
time_frequency.hz_to_octs(frequencies), 1.0)
# Are we on the wrong side of the semitone?
# A residual of 0.95 is more likely to be a deviation of -0.05
# from the next tone up.
residual[residual >= 0.5] -= 1.0
bins = np.linspace(-0.5, 0.5, int(np.ceil(1. / resolution)) + 1)
counts, tuning = np.histogram(residual, bins)
# return the histogram peak
return tuning[np.argmax(counts)]
|
def pitch_tuning(frequencies, resolution=0.01, bins_per_octave=12):
'''Given a collection of pitches, estimate its tuning offset
(in fractions of a bin) relative to A440=440.0Hz.
Parameters
----------
frequencies : array-like, float
A collection of frequencies detected in the signal.
See `piptrack`
resolution : float in `(0, 1)`
Resolution of the tuning as a fraction of a bin.
0.01 corresponds to cents.
bins_per_octave : int > 0 [scalar]
How many frequency bins per octave
Returns
-------
tuning: float in `[-0.5, 0.5)`
estimated tuning deviation (fractions of a bin)
See Also
--------
estimate_tuning
Estimating tuning from time-series or spectrogram input
Examples
--------
>>> # Generate notes at +25 cents
>>> freqs = librosa.cqt_frequencies(24, 55, tuning=0.25)
>>> librosa.pitch_tuning(freqs)
0.25
>>> # Track frequencies from a real spectrogram
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes, stft = librosa.ifptrack(y, sr)
>>> # Select out pitches with high energy
>>> pitches = pitches[magnitudes > np.median(magnitudes)]
>>> librosa.pitch_tuning(pitches)
0.089999999999999969
'''
frequencies = np.atleast_1d(frequencies)
# Trim out any DC components
frequencies = frequencies[frequencies > 0]
if not np.any(frequencies):
warnings.warn('Trying to estimate tuning from empty frequency set.')
return 0.0
# Compute the residual relative to the number of bins
residual = np.mod(bins_per_octave *
time_frequency.hz_to_octs(frequencies), 1.0)
# Are we on the wrong side of the semitone?
# A residual of 0.95 is more likely to be a deviation of -0.05
# from the next tone up.
residual[residual >= 0.5] -= 1.0
bins = np.linspace(-0.5, 0.5, int(np.ceil(1. / resolution)) + 1)
counts, tuning = np.histogram(residual, bins)
# return the histogram peak
return tuning[np.argmax(counts)]
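
The residual logic can be exercised on synthetic input. Assuming the usual A440 reference, `hz_to_octs(f)` is `log2(f / (440 / 16))`, so pitches detuned by a constant +0.25 bins leave a residual of 0.25 everywhere and the histogram peak recovers it. A self-contained sketch with `hz_to_octs` inlined:

import numpy as np

def pitch_tuning_sketch(frequencies, resolution=0.01, bins_per_octave=12):
    frequencies = np.atleast_1d(frequencies)
    frequencies = frequencies[frequencies > 0]
    # hz_to_octs inlined: octaves relative to A440 (27.5 Hz maps to octave 0)
    octs = np.log2(frequencies / (440.0 / 16))
    residual = np.mod(bins_per_octave * octs, 1.0)
    residual[residual >= 0.5] -= 1.0
    bins = np.linspace(-0.5, 0.5, int(np.ceil(1.0 / resolution)) + 1)
    counts, edges = np.histogram(residual, bins)
    return edges[np.argmax(counts)]

# 24 pitches, each detuned by +0.25 of a semitone from the A440 grid
freqs = 55.0 * 2.0 ** ((np.arange(24) + 0.25) / 12.0)
print(pitch_tuning_sketch(freqs))   # ~0.25 (within one histogram bin)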
|
[
"Given",
"a",
"collection",
"of",
"pitches",
"estimate",
"its",
"tuning",
"offset",
"(",
"in",
"fractions",
"of",
"a",
"bin",
")",
"relative",
"to",
"A440",
"=",
"440",
".",
"0Hz",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/pitch.py#L96-L163
|
[
"def",
"pitch_tuning",
"(",
"frequencies",
",",
"resolution",
"=",
"0.01",
",",
"bins_per_octave",
"=",
"12",
")",
":",
"frequencies",
"=",
"np",
".",
"atleast_1d",
"(",
"frequencies",
")",
"# Trim out any DC components",
"frequencies",
"=",
"frequencies",
"[",
"frequencies",
">",
"0",
"]",
"if",
"not",
"np",
".",
"any",
"(",
"frequencies",
")",
":",
"warnings",
".",
"warn",
"(",
"'Trying to estimate tuning from empty frequency set.'",
")",
"return",
"0.0",
"# Compute the residual relative to the number of bins",
"residual",
"=",
"np",
".",
"mod",
"(",
"bins_per_octave",
"*",
"time_frequency",
".",
"hz_to_octs",
"(",
"frequencies",
")",
",",
"1.0",
")",
"# Are we on the wrong side of the semitone?",
"# A residual of 0.95 is more likely to be a deviation of -0.05",
"# from the next tone up.",
"residual",
"[",
"residual",
">=",
"0.5",
"]",
"-=",
"1.0",
"bins",
"=",
"np",
".",
"linspace",
"(",
"-",
"0.5",
",",
"0.5",
",",
"int",
"(",
"np",
".",
"ceil",
"(",
"1.",
"/",
"resolution",
")",
")",
"+",
"1",
")",
"counts",
",",
"tuning",
"=",
"np",
".",
"histogram",
"(",
"residual",
",",
"bins",
")",
"# return the histogram peak",
"return",
"tuning",
"[",
"np",
".",
"argmax",
"(",
"counts",
")",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
piptrack
|
Pitch tracking on thresholded parabolically-interpolated STFT.
This implementation uses the parabolic interpolation method described by [1]_.
.. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
hop_length : int > 0 [scalar] or None
number of samples to hop
threshold : float in `(0, 1)`
A bin in spectrum `S` is considered a pitch when it is greater than
`threshold*ref(S)`.
By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
each column).
fmin : float > 0 [scalar]
lower frequency cutoff.
fmax : float > 0 [scalar]
upper frequency cutoff.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
ref : scalar or callable [default=np.max]
If scalar, the reference value against which `S` is compared for determining
pitches.
If callable, the reference value is computed as `ref(S, axis=0)`.
.. note::
One of `S` or `y` must be provided.
If `S` is not given, it is computed from `y` using
the default parameters of `librosa.core.stft`.
Returns
-------
pitches : np.ndarray [shape=(d, t)]
magnitudes : np.ndarray [shape=(d,t)]
Where `d` is the subset of FFT bins within `fmin` and `fmax`.
`pitches[f, t]` contains instantaneous frequency at bin
`f`, time `t`
`magnitudes[f, t]` contains the corresponding magnitudes.
Both `pitches` and `magnitudes` take value 0 at bins
of non-maximal magnitude.
Notes
-----
This function caches at level 30.
Examples
--------
Computing pitches from a waveform input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
Or from a spectrogram input
>>> S = np.abs(librosa.stft(y))
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr)
Or with an alternate reference value for pitch detection, where
values above the mean spectral energy in each frame are counted as pitches
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr, threshold=1,
... ref=np.mean)
|
librosa/core/pitch.py
|
def piptrack(y=None, sr=22050, S=None, n_fft=2048, hop_length=None,
fmin=150.0, fmax=4000.0, threshold=0.1,
win_length=None, window='hann', center=True, pad_mode='reflect',
ref=None):
'''Pitch tracking on thresholded parabolically-interpolated STFT.
This implementation uses the parabolic interpolation method described by [1]_.
.. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
hop_length : int > 0 [scalar] or None
number of samples to hop
threshold : float in `(0, 1)`
A bin in spectrum `S` is considered a pitch when it is greater than
`threshold*ref(S)`.
By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
each column).
fmin : float > 0 [scalar]
lower frequency cutoff.
fmax : float > 0 [scalar]
upper frequency cutoff.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
ref : scalar or callable [default=np.max]
If scalar, the reference value against which `S` is compared for determining
pitches.
If callable, the reference value is computed as `ref(S, axis=0)`.
.. note::
One of `S` or `y` must be provided.
If `S` is not given, it is computed from `y` using
the default parameters of `librosa.core.stft`.
Returns
-------
pitches : np.ndarray [shape=(d, t)]
magnitudes : np.ndarray [shape=(d,t)]
Where `d` is the subset of FFT bins within `fmin` and `fmax`.
`pitches[f, t]` contains instantaneous frequency at bin
`f`, time `t`
`magnitudes[f, t]` contains the corresponding magnitudes.
Both `pitches` and `magnitudes` take value 0 at bins
of non-maximal magnitude.
Notes
-----
This function caches at level 30.
Examples
--------
Computing pitches from a waveform input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
Or from a spectrogram input
>>> S = np.abs(librosa.stft(y))
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr)
Or with an alternate reference value for pitch detection, where
values above the mean spectral energy in each frame are counted as pitches
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr, threshold=1,
... ref=np.mean)
'''
# Check that we received an audio time series or STFT
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window,
center=center, pad_mode=pad_mode)
# Make sure we're dealing with magnitudes
S = np.abs(S)
# Truncate to feasible region
fmin = np.maximum(fmin, 0)
fmax = np.minimum(fmax, float(sr) / 2)
fft_freqs = time_frequency.fft_frequencies(sr=sr, n_fft=n_fft)
# Do the parabolic interpolation everywhere,
# then figure out where the peaks are
# then restrict to the feasible range (fmin:fmax)
avg = 0.5 * (S[2:] - S[:-2])
shift = 2 * S[1:-1] - S[2:] - S[:-2]
# Suppress divide-by-zeros.
# Points where shift == 0 will never be selected by localmax anyway
shift = avg / (shift + (np.abs(shift) < util.tiny(shift)))
# Pad back up to the same shape as S
avg = np.pad(avg, ([1, 1], [0, 0]), mode='constant')
shift = np.pad(shift, ([1, 1], [0, 0]), mode='constant')
dskew = 0.5 * avg * shift
# Pre-allocate output
pitches = np.zeros_like(S)
mags = np.zeros_like(S)
# Clip to the viable frequency range
freq_mask = ((fmin <= fft_freqs) & (fft_freqs < fmax)).reshape((-1, 1))
# Compute the column-wise local max of S after thresholding
# Find the argmax coordinates
if ref is None:
ref = np.max
if six.callable(ref):
ref_value = threshold * ref(S, axis=0)
else:
ref_value = np.abs(ref)
idx = np.argwhere(freq_mask & util.localmax(S * (S > ref_value)))
# Store pitch and magnitude
pitches[idx[:, 0], idx[:, 1]] = ((idx[:, 0] + shift[idx[:, 0], idx[:, 1]])
* float(sr) / n_fft)
mags[idx[:, 0], idx[:, 1]] = (S[idx[:, 0], idx[:, 1]]
+ dskew[idx[:, 0], idx[:, 1]])
return pitches, mags
|
def piptrack(y=None, sr=22050, S=None, n_fft=2048, hop_length=None,
fmin=150.0, fmax=4000.0, threshold=0.1,
win_length=None, window='hann', center=True, pad_mode='reflect',
ref=None):
'''Pitch tracking on thresholded parabolically-interpolated STFT.
This implementation uses the parabolic interpolation method described by [1]_.
.. [1] https://ccrma.stanford.edu/~jos/sasp/Sinusoidal_Peak_Interpolation.html
Parameters
----------
y: np.ndarray [shape=(n,)] or None
audio signal
sr : number > 0 [scalar]
audio sampling rate of `y`
S: np.ndarray [shape=(d, t)] or None
magnitude or power spectrogram
n_fft : int > 0 [scalar] or None
number of FFT bins to use, if `y` is provided.
hop_length : int > 0 [scalar] or None
number of samples to hop
threshold : float in `(0, 1)`
A bin in spectrum `S` is considered a pitch when it is greater than
`threshold*ref(S)`.
By default, `ref(S)` is taken to be `max(S, axis=0)` (the maximum value in
each column).
fmin : float > 0 [scalar]
lower frequency cutoff.
fmax : float > 0 [scalar]
upper frequency cutoff.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
ref : scalar or callable [default=np.max]
If scalar, the reference value against which `S` is compared for determining
pitches.
If callable, the reference value is computed as `ref(S, axis=0)`.
.. note::
One of `S` or `y` must be provided.
If `S` is not given, it is computed from `y` using
the default parameters of `librosa.core.stft`.
Returns
-------
pitches : np.ndarray [shape=(d, t)]
magnitudes : np.ndarray [shape=(d,t)]
Where `d` is the number of FFT bins; only bins whose frequencies fall between `fmin` and `fmax` can take non-zero values.
`pitches[f, t]` contains instantaneous frequency at bin
`f`, time `t`
`magnitudes[f, t]` contains the corresponding magnitudes.
Both `pitches` and `magnitudes` take value 0 at bins
of non-maximal magnitude.
Notes
-----
This function caches at level 30.
Examples
--------
Computing pitches from a waveform input
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> pitches, magnitudes = librosa.piptrack(y=y, sr=sr)
Or from a spectrogram input
>>> S = np.abs(librosa.stft(y))
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr)
Or with an alternate reference value for pitch detection, where
values above the mean spectral energy in each frame are counted as pitches
>>> pitches, magnitudes = librosa.piptrack(S=S, sr=sr, threshold=1,
... ref=np.mean)
'''
# Check that we received an audio time series or STFT
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window,
center=center, pad_mode=pad_mode)
# Make sure we're dealing with magnitudes
S = np.abs(S)
# Truncate to feasible region
fmin = np.maximum(fmin, 0)
fmax = np.minimum(fmax, float(sr) / 2)
fft_freqs = time_frequency.fft_frequencies(sr=sr, n_fft=n_fft)
# Do the parabolic interpolation everywhere,
# then figure out where the peaks are
# then restrict to the feasible range (fmin:fmax)
avg = 0.5 * (S[2:] - S[:-2])
shift = 2 * S[1:-1] - S[2:] - S[:-2]
# Suppress divide-by-zeros.
# Points where shift == 0 will never be selected by localmax anyway
shift = avg / (shift + (np.abs(shift) < util.tiny(shift)))
# Pad back up to the same shape as S
avg = np.pad(avg, ([1, 1], [0, 0]), mode='constant')
shift = np.pad(shift, ([1, 1], [0, 0]), mode='constant')
dskew = 0.5 * avg * shift
# Pre-allocate output
pitches = np.zeros_like(S)
mags = np.zeros_like(S)
# Clip to the viable frequency range
freq_mask = ((fmin <= fft_freqs) & (fft_freqs < fmax)).reshape((-1, 1))
# Compute the column-wise local max of S after thresholding
# Find the argmax coordinates
if ref is None:
ref = np.max
if six.callable(ref):
ref_value = threshold * ref(S, axis=0)
else:
ref_value = np.abs(ref)
idx = np.argwhere(freq_mask & util.localmax(S * (S > ref_value)))
# Store pitch and magnitude
pitches[idx[:, 0], idx[:, 1]] = ((idx[:, 0] + shift[idx[:, 0], idx[:, 1]])
* float(sr) / n_fft)
mags[idx[:, 0], idx[:, 1]] = (S[idx[:, 0], idx[:, 1]]
+ dskew[idx[:, 0], idx[:, 1]])
return pitches, mags
|
[
"Pitch",
"tracking",
"on",
"thresholded",
"parabolically",
"-",
"interpolated",
"STFT",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/pitch.py#L167-L338
|
[
"def",
"piptrack",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"None",
",",
"fmin",
"=",
"150.0",
",",
"fmax",
"=",
"4000.0",
",",
"threshold",
"=",
"0.1",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
",",
"ref",
"=",
"None",
")",
":",
"# Check that we received an audio time series or STFT",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"# Make sure we're dealing with magnitudes",
"S",
"=",
"np",
".",
"abs",
"(",
"S",
")",
"# Truncate to feasible region",
"fmin",
"=",
"np",
".",
"maximum",
"(",
"fmin",
",",
"0",
")",
"fmax",
"=",
"np",
".",
"minimum",
"(",
"fmax",
",",
"float",
"(",
"sr",
")",
"/",
"2",
")",
"fft_freqs",
"=",
"time_frequency",
".",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"# Do the parabolic interpolation everywhere,",
"# then figure out where the peaks are",
"# then restrict to the feasible range (fmin:fmax)",
"avg",
"=",
"0.5",
"*",
"(",
"S",
"[",
"2",
":",
"]",
"-",
"S",
"[",
":",
"-",
"2",
"]",
")",
"shift",
"=",
"2",
"*",
"S",
"[",
"1",
":",
"-",
"1",
"]",
"-",
"S",
"[",
"2",
":",
"]",
"-",
"S",
"[",
":",
"-",
"2",
"]",
"# Suppress divide-by-zeros.",
"# Points where shift == 0 will never be selected by localmax anyway",
"shift",
"=",
"avg",
"/",
"(",
"shift",
"+",
"(",
"np",
".",
"abs",
"(",
"shift",
")",
"<",
"util",
".",
"tiny",
"(",
"shift",
")",
")",
")",
"# Pad back up to the same shape as S",
"avg",
"=",
"np",
".",
"pad",
"(",
"avg",
",",
"(",
"[",
"1",
",",
"1",
"]",
",",
"[",
"0",
",",
"0",
"]",
")",
",",
"mode",
"=",
"'constant'",
")",
"shift",
"=",
"np",
".",
"pad",
"(",
"shift",
",",
"(",
"[",
"1",
",",
"1",
"]",
",",
"[",
"0",
",",
"0",
"]",
")",
",",
"mode",
"=",
"'constant'",
")",
"dskew",
"=",
"0.5",
"*",
"avg",
"*",
"shift",
"# Pre-allocate output",
"pitches",
"=",
"np",
".",
"zeros_like",
"(",
"S",
")",
"mags",
"=",
"np",
".",
"zeros_like",
"(",
"S",
")",
"# Clip to the viable frequency range",
"freq_mask",
"=",
"(",
"(",
"fmin",
"<=",
"fft_freqs",
")",
"&",
"(",
"fft_freqs",
"<",
"fmax",
")",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"# Compute the column-wise local max of S after thresholding",
"# Find the argmax coordinates",
"if",
"ref",
"is",
"None",
":",
"ref",
"=",
"np",
".",
"max",
"if",
"six",
".",
"callable",
"(",
"ref",
")",
":",
"ref_value",
"=",
"threshold",
"*",
"ref",
"(",
"S",
",",
"axis",
"=",
"0",
")",
"else",
":",
"ref_value",
"=",
"np",
".",
"abs",
"(",
"ref",
")",
"idx",
"=",
"np",
".",
"argwhere",
"(",
"freq_mask",
"&",
"util",
".",
"localmax",
"(",
"S",
"*",
"(",
"S",
">",
"ref_value",
")",
")",
")",
"# Store pitch and magnitude",
"pitches",
"[",
"idx",
"[",
":",
",",
"0",
"]",
",",
"idx",
"[",
":",
",",
"1",
"]",
"]",
"=",
"(",
"(",
"idx",
"[",
":",
",",
"0",
"]",
"+",
"shift",
"[",
"idx",
"[",
":",
",",
"0",
"]",
",",
"idx",
"[",
":",
",",
"1",
"]",
"]",
")",
"*",
"float",
"(",
"sr",
")",
"/",
"n_fft",
")",
"mags",
"[",
"idx",
"[",
":",
",",
"0",
"]",
",",
"idx",
"[",
":",
",",
"1",
"]",
"]",
"=",
"(",
"S",
"[",
"idx",
"[",
":",
",",
"0",
"]",
",",
"idx",
"[",
":",
",",
"1",
"]",
"]",
"+",
"dskew",
"[",
"idx",
"[",
":",
",",
"0",
"]",
",",
"idx",
"[",
":",
",",
"1",
"]",
"]",
")",
"return",
"pitches",
",",
"mags"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
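A small, self-contained sketch of the parabolic interpolation used by piptrack above (pure NumPy, not librosa code; the sampled quadratic is made up for illustration). Three neighbouring bins around a peak give a fractional correction 0.5*(S[f+1]-S[f-1]) / (2*S[f]-S[f+1]-S[f-1]), which is the same avg/shift ratio computed in the function; piptrack then scales the refined bin by sr/n_fft to get a frequency.

import numpy as np

# Hypothetical illustration: recover a non-integer peak location from three bins.
true_peak = 10.3
bins = np.arange(20, dtype=float)
S = 1.0 - (bins - true_peak) ** 2 / 100.0      # one column of "magnitudes"

f = int(np.argmax(S))                          # integer peak bin
num = 0.5 * (S[f + 1] - S[f - 1])              # same role as `avg` in piptrack
den = 2 * S[f] - S[f + 1] - S[f - 1]           # same denominator as `shift`
offset = num / den                             # fractional bin correction

print(f, f + offset)                           # 10, ~10.3
assert abs((f + offset) - true_peak) < 1e-6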
test
|
hpss
|
Decompose an audio time series into harmonic and percussive components.
This function automates the STFT->HPSS->ISTFT pipeline, and ensures that
the output waveforms have equal length to the input waveform `y`.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_harmonic : np.ndarray [shape=(n,)]
audio time series of the harmonic elements
y_percussive : np.ndarray [shape=(n,)]
audio time series of the percussive elements
See Also
--------
harmonic : Extract only the harmonic component
percussive : Extract only the percussive component
librosa.decompose.hpss : HPSS on spectrograms
Examples
--------
>>> # Extract harmonic and percussive components
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_harmonic, y_percussive = librosa.effects.hpss(y)
>>> # Get a more isolated percussive component by widening its margin
>>> y_harmonic, y_percussive = librosa.effects.hpss(y, margin=(1.0,5.0))
|
librosa/effects.py
|
def hpss(y, **kwargs):
'''Decompose an audio time series into harmonic and percussive components.
This function automates the STFT->HPSS->ISTFT pipeline, and ensures that
the output waveforms have equal length to the input waveform `y`.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_harmonic : np.ndarray [shape=(n,)]
audio time series of the harmonic elements
y_percussive : np.ndarray [shape=(n,)]
audio time series of the percussive elements
See Also
--------
harmonic : Extract only the harmonic component
percussive : Extract only the percussive component
librosa.decompose.hpss : HPSS on spectrograms
Examples
--------
>>> # Extract harmonic and percussive components
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_harmonic, y_percussive = librosa.effects.hpss(y)
>>> # Get a more isolated percussive component by widening its margin
>>> y_harmonic, y_percussive = librosa.effects.hpss(y, margin=(1.0,5.0))
'''
# Compute the STFT matrix
stft = core.stft(y)
# Decompose into harmonic and percussives
stft_harm, stft_perc = decompose.hpss(stft, **kwargs)
# Invert the STFTs. Adjust length to match the input.
y_harm = util.fix_length(core.istft(stft_harm, dtype=y.dtype), len(y))
y_perc = util.fix_length(core.istft(stft_perc, dtype=y.dtype), len(y))
return y_harm, y_perc
|
def hpss(y, **kwargs):
'''Decompose an audio time series into harmonic and percussive components.
This function automates the STFT->HPSS->ISTFT pipeline, and ensures that
the output waveforms have equal length to the input waveform `y`.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_harmonic : np.ndarray [shape=(n,)]
audio time series of the harmonic elements
y_percussive : np.ndarray [shape=(n,)]
audio time series of the percussive elements
See Also
--------
harmonic : Extract only the harmonic component
percussive : Extract only the percussive component
librosa.decompose.hpss : HPSS on spectrograms
Examples
--------
>>> # Extract harmonic and percussive components
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_harmonic, y_percussive = librosa.effects.hpss(y)
>>> # Get a more isolated percussive component by widening its margin
>>> y_harmonic, y_percussive = librosa.effects.hpss(y, margin=(1.0,5.0))
'''
# Compute the STFT matrix
stft = core.stft(y)
# Decompose into harmonic and percussives
stft_harm, stft_perc = decompose.hpss(stft, **kwargs)
# Invert the STFTs. Adjust length to match the input.
y_harm = util.fix_length(core.istft(stft_harm, dtype=y.dtype), len(y))
y_perc = util.fix_length(core.istft(stft_perc, dtype=y.dtype), len(y))
return y_harm, y_perc
|
[
"Decompose",
"an",
"audio",
"time",
"series",
"into",
"harmonic",
"and",
"percussive",
"components",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L47-L98
|
[
"def",
"hpss",
"(",
"y",
",",
"*",
"*",
"kwargs",
")",
":",
"# Compute the STFT matrix",
"stft",
"=",
"core",
".",
"stft",
"(",
"y",
")",
"# Decompose into harmonic and percussives",
"stft_harm",
",",
"stft_perc",
"=",
"decompose",
".",
"hpss",
"(",
"stft",
",",
"*",
"*",
"kwargs",
")",
"# Invert the STFTs. Adjust length to match the input.",
"y_harm",
"=",
"util",
".",
"fix_length",
"(",
"core",
".",
"istft",
"(",
"stft_harm",
",",
"dtype",
"=",
"y",
".",
"dtype",
")",
",",
"len",
"(",
"y",
")",
")",
"y_perc",
"=",
"util",
".",
"fix_length",
"(",
"core",
".",
"istft",
"(",
"stft_perc",
",",
"dtype",
"=",
"y",
".",
"dtype",
")",
",",
"len",
"(",
"y",
")",
")",
"return",
"y_harm",
",",
"y_perc"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
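A minimal usage sketch for the hpss wrapper above, assuming librosa is installed; the tone-plus-noise input is synthetic and chosen only so the snippet is self-contained. It checks the documented guarantee that both output waveforms have the same length as the input.

import numpy as np
import librosa

sr = 22050
t = np.linspace(0, 2.0, 2 * sr, endpoint=False)
y = (0.5 * np.sin(2 * np.pi * 220 * t) + 0.1 * np.random.randn(t.size)).astype(np.float32)

y_harm, y_perc = librosa.effects.hpss(y)

# The docstring guarantees both outputs match the input length.
assert len(y_harm) == len(y_perc) == len(y)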
test
|
harmonic
|
Extract harmonic elements from an audio time-series.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_harmonic : np.ndarray [shape=(n,)]
audio time series of just the harmonic portion
See Also
--------
hpss : Separate harmonic and percussive components
percussive : Extract only the percussive component
librosa.decompose.hpss : HPSS for spectrograms
Examples
--------
>>> # Extract harmonic component
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_harmonic = librosa.effects.harmonic(y)
>>> # Use a margin > 1.0 for greater harmonic separation
>>> y_harmonic = librosa.effects.harmonic(y, margin=3.0)
|
librosa/effects.py
|
def harmonic(y, **kwargs):
'''Extract harmonic elements from an audio time-series.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_harmonic : np.ndarray [shape=(n,)]
audio time series of just the harmonic portion
See Also
--------
hpss : Separate harmonic and percussive components
percussive : Extract only the percussive component
librosa.decompose.hpss : HPSS for spectrograms
Examples
--------
>>> # Extract harmonic component
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_harmonic = librosa.effects.harmonic(y)
>>> # Use a margin > 1.0 for greater harmonic separation
>>> y_harmonic = librosa.effects.harmonic(y, margin=3.0)
'''
# Compute the STFT matrix
stft = core.stft(y)
# Remove percussives
stft_harm = decompose.hpss(stft, **kwargs)[0]
# Invert the STFTs
y_harm = util.fix_length(core.istft(stft_harm, dtype=y.dtype), len(y))
return y_harm
|
def harmonic(y, **kwargs):
'''Extract harmonic elements from an audio time-series.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_harmonic : np.ndarray [shape=(n,)]
audio time series of just the harmonic portion
See Also
--------
hpss : Separate harmonic and percussive components
percussive : Extract only the percussive component
librosa.decompose.hpss : HPSS for spectrograms
Examples
--------
>>> # Extract harmonic component
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_harmonic = librosa.effects.harmonic(y)
>>> # Use a margin > 1.0 for greater harmonic separation
>>> y_harmonic = librosa.effects.harmonic(y, margin=3.0)
'''
# Compute the STFT matrix
stft = core.stft(y)
# Remove percussives
stft_harm = decompose.hpss(stft, **kwargs)[0]
# Invert the STFTs
y_harm = util.fix_length(core.istft(stft_harm, dtype=y.dtype), len(y))
return y_harm
|
[
"Extract",
"harmonic",
"elements",
"from",
"an",
"audio",
"time",
"-",
"series",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L101-L142
|
[
"def",
"harmonic",
"(",
"y",
",",
"*",
"*",
"kwargs",
")",
":",
"# Compute the STFT matrix",
"stft",
"=",
"core",
".",
"stft",
"(",
"y",
")",
"# Remove percussives",
"stft_harm",
"=",
"decompose",
".",
"hpss",
"(",
"stft",
",",
"*",
"*",
"kwargs",
")",
"[",
"0",
"]",
"# Invert the STFTs",
"y_harm",
"=",
"util",
".",
"fix_length",
"(",
"core",
".",
"istft",
"(",
"stft_harm",
",",
"dtype",
"=",
"y",
".",
"dtype",
")",
",",
"len",
"(",
"y",
")",
")",
"return",
"y_harm"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
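Per the source above, effects.harmonic runs the same STFT -> decompose.hpss -> ISTFT chain as effects.hpss and keeps only the first output, so the two should agree numerically. A hedged sketch of that check (synthetic input, assumes librosa is installed):

import numpy as np
import librosa

sr = 22050
y = 0.5 * np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)

# `margin` is forwarded to librosa.decompose.hpss in both code paths.
y_h = librosa.effects.harmonic(y, margin=2.0)
y_h_ref = librosa.effects.hpss(y, margin=2.0)[0]

assert np.allclose(y_h, y_h_ref)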
test
|
percussive
|
Extract percussive elements from an audio time-series.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_percussive : np.ndarray [shape=(n,)]
audio time series of just the percussive portion
See Also
--------
hpss : Separate harmonic and percussive components
harmonic : Extract only the harmonic component
librosa.decompose.hpss : HPSS for spectrograms
Examples
--------
>>> # Extract percussive component
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_percussive = librosa.effects.percussive(y)
>>> # Use a margin > 1.0 for greater percussive separation
>>> y_percussive = librosa.effects.percussive(y, margin=3.0)
|
librosa/effects.py
|
def percussive(y, **kwargs):
'''Extract percussive elements from an audio time-series.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_percussive : np.ndarray [shape=(n,)]
audio time series of just the percussive portion
See Also
--------
hpss : Separate harmonic and percussive components
harmonic : Extract only the harmonic component
librosa.decompose.hpss : HPSS for spectrograms
Examples
--------
>>> # Extract percussive component
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_percussive = librosa.effects.percussive(y)
>>> # Use a margin > 1.0 for greater percussive separation
>>> y_percussive = librosa.effects.percussive(y, margin=3.0)
'''
# Compute the STFT matrix
stft = core.stft(y)
# Remove harmonics
stft_perc = decompose.hpss(stft, **kwargs)[1]
# Invert the STFT
y_perc = util.fix_length(core.istft(stft_perc, dtype=y.dtype), len(y))
return y_perc
|
def percussive(y, **kwargs):
'''Extract percussive elements from an audio time-series.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
kwargs : additional keyword arguments.
See `librosa.decompose.hpss` for details.
Returns
-------
y_percussive : np.ndarray [shape=(n,)]
audio time series of just the percussive portion
See Also
--------
hpss : Separate harmonic and percussive components
harmonic : Extract only the harmonic component
librosa.decompose.hpss : HPSS for spectrograms
Examples
--------
>>> # Extract percussive component
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_percussive = librosa.effects.percussive(y)
>>> # Use a margin > 1.0 for greater percussive separation
>>> y_percussive = librosa.effects.percussive(y, margin=3.0)
'''
# Compute the STFT matrix
stft = core.stft(y)
# Remove harmonics
stft_perc = decompose.hpss(stft, **kwargs)[1]
# Invert the STFT
y_perc = util.fix_length(core.istft(stft_perc, dtype=y.dtype), len(y))
return y_perc
|
[
"Extract",
"percussive",
"elements",
"from",
"an",
"audio",
"time",
"-",
"series",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L145-L186
|
[
"def",
"percussive",
"(",
"y",
",",
"*",
"*",
"kwargs",
")",
":",
"# Compute the STFT matrix",
"stft",
"=",
"core",
".",
"stft",
"(",
"y",
")",
"# Remove harmonics",
"stft_perc",
"=",
"decompose",
".",
"hpss",
"(",
"stft",
",",
"*",
"*",
"kwargs",
")",
"[",
"1",
"]",
"# Invert the STFT",
"y_perc",
"=",
"util",
".",
"fix_length",
"(",
"core",
".",
"istft",
"(",
"stft_perc",
",",
"dtype",
"=",
"y",
".",
"dtype",
")",
",",
"len",
"(",
"y",
")",
")",
"return",
"y_perc"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
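A usage sketch for percussive, assuming librosa is installed. The **kwargs above are passed straight to librosa.decompose.hpss, so a wider margin can be requested here too; the RMS comparison at the end is only printed, since how much energy the margin removes depends on the material.

import numpy as np
import librosa

sr = 22050
y = (0.1 * np.random.default_rng(0).standard_normal(2 * sr)).astype(np.float32)

y_perc_default = librosa.effects.percussive(y)
y_perc_margin = librosa.effects.percussive(y, margin=3.0)

assert len(y_perc_default) == len(y_perc_margin) == len(y)
# A wider margin keeps only confidently percussive content, which typically
# (not always) lowers the residual energy:
print(np.sqrt(np.mean(y_perc_default ** 2)), np.sqrt(np.mean(y_perc_margin ** 2)))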
test
|
time_stretch
|
Time-stretch an audio series by a fixed rate.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
rate : float > 0 [scalar]
Stretch factor. If `rate > 1`, then the signal is sped up.
If `rate < 1`, then the signal is slowed down.
Returns
-------
y_stretch : np.ndarray [shape=(round(n / rate),)]
audio time series stretched by the specified rate
See Also
--------
pitch_shift : pitch shifting
librosa.core.phase_vocoder : spectrogram phase vocoder
Examples
--------
Compress to be twice as fast
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_fast = librosa.effects.time_stretch(y, 2.0)
Or half the original speed
>>> y_slow = librosa.effects.time_stretch(y, 0.5)
|
librosa/effects.py
|
def time_stretch(y, rate):
'''Time-stretch an audio series by a fixed rate.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
rate : float > 0 [scalar]
Stretch factor. If `rate > 1`, then the signal is sped up.
If `rate < 1`, then the signal is slowed down.
Returns
-------
y_stretch : np.ndarray [shape=(round(n / rate),)]
audio time series stretched by the specified rate
See Also
--------
pitch_shift : pitch shifting
librosa.core.phase_vocoder : spectrogram phase vocoder
Examples
--------
Compress to be twice as fast
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_fast = librosa.effects.time_stretch(y, 2.0)
Or half the original speed
>>> y_slow = librosa.effects.time_stretch(y, 0.5)
'''
if rate <= 0:
raise ParameterError('rate must be a positive number')
# Construct the stft
stft = core.stft(y)
# Stretch by phase vocoding
stft_stretch = core.phase_vocoder(stft, rate)
# Invert the stft
y_stretch = core.istft(stft_stretch, dtype=y.dtype)
return y_stretch
|
def time_stretch(y, rate):
'''Time-stretch an audio series by a fixed rate.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
rate : float > 0 [scalar]
Stretch factor. If `rate > 1`, then the signal is sped up.
If `rate < 1`, then the signal is slowed down.
Returns
-------
y_stretch : np.ndarray [shape=(round(n / rate),)]
audio time series stretched by the specified rate
See Also
--------
pitch_shift : pitch shifting
librosa.core.phase_vocoder : spectrogram phase vocoder
Examples
--------
Compress to be twice as fast
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_fast = librosa.effects.time_stretch(y, 2.0)
Or half the original speed
>>> y_slow = librosa.effects.time_stretch(y, 0.5)
'''
if rate <= 0:
raise ParameterError('rate must be a positive number')
# Construct the stft
stft = core.stft(y)
# Stretch by phase vocoding
stft_stretch = core.phase_vocoder(stft, rate)
# Invert the stft
y_stretch = core.istft(stft_stretch, dtype=y.dtype)
return y_stretch
|
[
"Time",
"-",
"stretch",
"an",
"audio",
"series",
"by",
"a",
"fixed",
"rate",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L189-L239
|
[
"def",
"time_stretch",
"(",
"y",
",",
"rate",
")",
":",
"if",
"rate",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'rate must be a positive number'",
")",
"# Construct the stft",
"stft",
"=",
"core",
".",
"stft",
"(",
"y",
")",
"# Stretch by phase vocoding",
"stft_stretch",
"=",
"core",
".",
"phase_vocoder",
"(",
"stft",
",",
"rate",
")",
"# Invert the stft",
"y_stretch",
"=",
"core",
".",
"istft",
"(",
"stft_stretch",
",",
"dtype",
"=",
"y",
".",
"dtype",
")",
"return",
"y_stretch"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
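A sketch (synthetic input, assumes librosa is installed) of the rate convention described above: rate > 1 shortens the output and rate < 1 lengthens it, by roughly a factor of 1/rate.

import numpy as np
import librosa

sr = 22050
y = np.sin(2 * np.pi * 440 * np.arange(2 * sr) / sr).astype(np.float32)

y_fast = librosa.effects.time_stretch(y, 2.0)
y_slow = librosa.effects.time_stretch(y, 0.5)

assert len(y_fast) < len(y) < len(y_slow)
print(len(y_fast) / len(y), len(y_slow) / len(y))   # roughly 0.5 and 2.0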
test
|
pitch_shift
|
Pitch-shift the waveform by `n_steps` half-steps.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
audio sampling rate of `y`
n_steps : float [scalar]
how many (fractional) half-steps to shift `y`
bins_per_octave : int > 0 [scalar]
how many steps per octave
res_type : string
Resample type.
Possible options: 'kaiser_best', 'kaiser_fast', and 'scipy', 'polyphase',
'fft'.
By default, 'kaiser_best' is used.
See `core.resample` for more information.
Returns
-------
y_shift : np.ndarray [shape=(n,)]
The pitch-shifted audio time-series
See Also
--------
time_stretch : time stretching
librosa.core.phase_vocoder : spectrogram phase vocoder
Examples
--------
Shift up by a major third (four half-steps)
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_third = librosa.effects.pitch_shift(y, sr, n_steps=4)
Shift down by a tritone (six half-steps)
>>> y_tritone = librosa.effects.pitch_shift(y, sr, n_steps=-6)
Shift up by 3 quarter-tones
>>> y_three_qt = librosa.effects.pitch_shift(y, sr, n_steps=3,
... bins_per_octave=24)
|
librosa/effects.py
|
def pitch_shift(y, sr, n_steps, bins_per_octave=12, res_type='kaiser_best'):
'''Pitch-shift the waveform by `n_steps` half-steps.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
audio sampling rate of `y`
n_steps : float [scalar]
how many (fractional) half-steps to shift `y`
bins_per_octave : int > 0 [scalar]
how many steps per octave
res_type : string
Resample type.
Possible options: 'kaiser_best', 'kaiser_fast', and 'scipy', 'polyphase',
'fft'.
By default, 'kaiser_best' is used.
See `core.resample` for more information.
Returns
-------
y_shift : np.ndarray [shape=(n,)]
The pitch-shifted audio time-series
See Also
--------
time_stretch : time stretching
librosa.core.phase_vocoder : spectrogram phase vocoder
Examples
--------
Shift up by a major third (four half-steps)
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_third = librosa.effects.pitch_shift(y, sr, n_steps=4)
Shift down by a tritone (six half-steps)
>>> y_tritone = librosa.effects.pitch_shift(y, sr, n_steps=-6)
Shift up by 3 quarter-tones
>>> y_three_qt = librosa.effects.pitch_shift(y, sr, n_steps=3,
... bins_per_octave=24)
'''
if bins_per_octave < 1 or not np.issubdtype(type(bins_per_octave), np.integer):
raise ParameterError('bins_per_octave must be a positive integer.')
rate = 2.0 ** (-float(n_steps) / bins_per_octave)
# Stretch in time, then resample
y_shift = core.resample(time_stretch(y, rate), float(sr) / rate, sr,
res_type=res_type)
# Crop to the same dimension as the input
return util.fix_length(y_shift, len(y))
|
def pitch_shift(y, sr, n_steps, bins_per_octave=12, res_type='kaiser_best'):
'''Pitch-shift the waveform by `n_steps` half-steps.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
audio sampling rate of `y`
n_steps : float [scalar]
how many (fractional) half-steps to shift `y`
bins_per_octave : int > 0 [scalar]
how many steps per octave
res_type : string
Resample type.
Possible options: 'kaiser_best', 'kaiser_fast', and 'scipy', 'polyphase',
'fft'.
By default, 'kaiser_best' is used.
See `core.resample` for more information.
Returns
-------
y_shift : np.ndarray [shape=(n,)]
The pitch-shifted audio time-series
See Also
--------
time_stretch : time stretching
librosa.core.phase_vocoder : spectrogram phase vocoder
Examples
--------
Shift up by a major third (four half-steps)
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> y_third = librosa.effects.pitch_shift(y, sr, n_steps=4)
Shift down by a tritone (six half-steps)
>>> y_tritone = librosa.effects.pitch_shift(y, sr, n_steps=-6)
Shift up by 3 quarter-tones
>>> y_three_qt = librosa.effects.pitch_shift(y, sr, n_steps=3,
... bins_per_octave=24)
'''
if bins_per_octave < 1 or not np.issubdtype(type(bins_per_octave), np.integer):
raise ParameterError('bins_per_octave must be a positive integer.')
rate = 2.0 ** (-float(n_steps) / bins_per_octave)
# Stretch in time, then resample
y_shift = core.resample(time_stretch(y, rate), float(sr) / rate, sr,
res_type=res_type)
# Crop to the same dimension as the input
return util.fix_length(y_shift, len(y))
|
[
"Pitch",
"-",
"shift",
"the",
"waveform",
"by",
"n_steps",
"half",
"-",
"steps",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L242-L307
|
[
"def",
"pitch_shift",
"(",
"y",
",",
"sr",
",",
"n_steps",
",",
"bins_per_octave",
"=",
"12",
",",
"res_type",
"=",
"'kaiser_best'",
")",
":",
"if",
"bins_per_octave",
"<",
"1",
"or",
"not",
"np",
".",
"issubdtype",
"(",
"type",
"(",
"bins_per_octave",
")",
",",
"np",
".",
"integer",
")",
":",
"raise",
"ParameterError",
"(",
"'bins_per_octave must be a positive integer.'",
")",
"rate",
"=",
"2.0",
"**",
"(",
"-",
"float",
"(",
"n_steps",
")",
"/",
"bins_per_octave",
")",
"# Stretch in time, then resample",
"y_shift",
"=",
"core",
".",
"resample",
"(",
"time_stretch",
"(",
"y",
",",
"rate",
")",
",",
"float",
"(",
"sr",
")",
"/",
"rate",
",",
"sr",
",",
"res_type",
"=",
"res_type",
")",
"# Crop to the same dimension as the input",
"return",
"util",
".",
"fix_length",
"(",
"y_shift",
",",
"len",
"(",
"y",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
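A hedged sketch of the semitone convention: shifting a 440 Hz tone by n_steps=12 (one octave, with the default 12 bins per octave) should move its spectral peak to roughly 880 Hz. Assumes librosa and its resampling backend are installed; the tone is synthetic.

import numpy as np
import librosa

sr = 22050
y = np.sin(2 * np.pi * 440 * np.arange(2 * sr) / sr).astype(np.float32)

y_up = librosa.effects.pitch_shift(y, sr, n_steps=12)

# Locate the dominant frequency of the shifted tone with a plain FFT.
spec = np.abs(np.fft.rfft(y_up))
freqs = np.fft.rfftfreq(len(y_up), 1.0 / sr)
print(freqs[np.argmax(spec)])   # expected to land near 880 Hz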
test
|
remix
|
Remix an audio signal by re-ordering time intervals.
Parameters
----------
y : np.ndarray [shape=(t,) or (2, t)]
Audio time series
intervals : iterable of tuples (start, end)
An iterable (list-like or generator) where the `i`th item
`intervals[i]` indicates the start and end (in samples)
of a slice of `y`.
align_zeros : boolean
If `True`, interval boundaries are mapped to the closest
zero-crossing in `y`. If `y` is stereo, zero-crossings
are computed after converting to mono.
Returns
-------
y_remix : np.ndarray [shape=(d,) or (2, d)]
`y` remixed in the order specified by `intervals`
Examples
--------
Load in the example track and reverse the beats
>>> y, sr = librosa.load(librosa.util.example_audio_file())
Compute beats
>>> _, beat_frames = librosa.beat.beat_track(y=y, sr=sr,
... hop_length=512)
Convert from frames to sample indices
>>> beat_samples = librosa.frames_to_samples(beat_frames)
Generate intervals from consecutive events
>>> intervals = librosa.util.frame(beat_samples, frame_length=2,
... hop_length=1).T
Reverse the beat intervals
>>> y_out = librosa.effects.remix(y, intervals[::-1])
|
librosa/effects.py
|
def remix(y, intervals, align_zeros=True):
'''Remix an audio signal by re-ordering time intervals.
Parameters
----------
y : np.ndarray [shape=(t,) or (2, t)]
Audio time series
intervals : iterable of tuples (start, end)
An iterable (list-like or generator) where the `i`th item
`intervals[i]` indicates the start and end (in samples)
of a slice of `y`.
align_zeros : boolean
If `True`, interval boundaries are mapped to the closest
zero-crossing in `y`. If `y` is stereo, zero-crossings
are computed after converting to mono.
Returns
-------
y_remix : np.ndarray [shape=(d,) or (2, d)]
`y` remixed in the order specified by `intervals`
Examples
--------
Load in the example track and reverse the beats
>>> y, sr = librosa.load(librosa.util.example_audio_file())
Compute beats
>>> _, beat_frames = librosa.beat.beat_track(y=y, sr=sr,
... hop_length=512)
Convert from frames to sample indices
>>> beat_samples = librosa.frames_to_samples(beat_frames)
Generate intervals from consecutive events
>>> intervals = librosa.util.frame(beat_samples, frame_length=2,
... hop_length=1).T
Reverse the beat intervals
>>> y_out = librosa.effects.remix(y, intervals[::-1])
'''
# Validate the audio buffer
util.valid_audio(y, mono=False)
y_out = []
if align_zeros:
y_mono = core.to_mono(y)
zeros = np.nonzero(core.zero_crossings(y_mono))[-1]
# Force end-of-signal onto zeros
zeros = np.append(zeros, [len(y_mono)])
clip = [slice(None)] * y.ndim
for interval in intervals:
if align_zeros:
interval = zeros[util.match_events(interval, zeros)]
clip[-1] = slice(interval[0], interval[1])
y_out.append(y[tuple(clip)])
return np.concatenate(y_out, axis=-1)
|
def remix(y, intervals, align_zeros=True):
'''Remix an audio signal by re-ordering time intervals.
Parameters
----------
y : np.ndarray [shape=(t,) or (2, t)]
Audio time series
intervals : iterable of tuples (start, end)
An iterable (list-like or generator) where the `i`th item
`intervals[i]` indicates the start and end (in samples)
of a slice of `y`.
align_zeros : boolean
If `True`, interval boundaries are mapped to the closest
zero-crossing in `y`. If `y` is stereo, zero-crossings
are computed after converting to mono.
Returns
-------
y_remix : np.ndarray [shape=(d,) or (2, d)]
`y` remixed in the order specified by `intervals`
Examples
--------
Load in the example track and reverse the beats
>>> y, sr = librosa.load(librosa.util.example_audio_file())
Compute beats
>>> _, beat_frames = librosa.beat.beat_track(y=y, sr=sr,
... hop_length=512)
Convert from frames to sample indices
>>> beat_samples = librosa.frames_to_samples(beat_frames)
Generate intervals from consecutive events
>>> intervals = librosa.util.frame(beat_samples, frame_length=2,
... hop_length=1).T
Reverse the beat intervals
>>> y_out = librosa.effects.remix(y, intervals[::-1])
'''
# Validate the audio buffer
util.valid_audio(y, mono=False)
y_out = []
if align_zeros:
y_mono = core.to_mono(y)
zeros = np.nonzero(core.zero_crossings(y_mono))[-1]
# Force end-of-signal onto zeros
zeros = np.append(zeros, [len(y_mono)])
clip = [slice(None)] * y.ndim
for interval in intervals:
if align_zeros:
interval = zeros[util.match_events(interval, zeros)]
clip[-1] = slice(interval[0], interval[1])
y_out.append(y[tuple(clip)])
return np.concatenate(y_out, axis=-1)
|
[
"Remix",
"an",
"audio",
"signal",
"by",
"re",
"-",
"ordering",
"time",
"intervals",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L310-L387
|
[
"def",
"remix",
"(",
"y",
",",
"intervals",
",",
"align_zeros",
"=",
"True",
")",
":",
"# Validate the audio buffer",
"util",
".",
"valid_audio",
"(",
"y",
",",
"mono",
"=",
"False",
")",
"y_out",
"=",
"[",
"]",
"if",
"align_zeros",
":",
"y_mono",
"=",
"core",
".",
"to_mono",
"(",
"y",
")",
"zeros",
"=",
"np",
".",
"nonzero",
"(",
"core",
".",
"zero_crossings",
"(",
"y_mono",
")",
")",
"[",
"-",
"1",
"]",
"# Force end-of-signal onto zeros",
"zeros",
"=",
"np",
".",
"append",
"(",
"zeros",
",",
"[",
"len",
"(",
"y_mono",
")",
"]",
")",
"clip",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"y",
".",
"ndim",
"for",
"interval",
"in",
"intervals",
":",
"if",
"align_zeros",
":",
"interval",
"=",
"zeros",
"[",
"util",
".",
"match_events",
"(",
"interval",
",",
"zeros",
")",
"]",
"clip",
"[",
"-",
"1",
"]",
"=",
"slice",
"(",
"interval",
"[",
"0",
"]",
",",
"interval",
"[",
"1",
"]",
")",
"y_out",
".",
"append",
"(",
"y",
"[",
"tuple",
"(",
"clip",
")",
"]",
")",
"return",
"np",
".",
"concatenate",
"(",
"y_out",
",",
"axis",
"=",
"-",
"1",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
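A tiny numeric sketch of remix with align_zeros=False, so the intervals are used verbatim: the output is just the requested slices concatenated in the given order (assumes librosa is installed).

import numpy as np
import librosa

y = np.arange(10, dtype=np.float32)
intervals = np.array([[5, 10], [0, 5]])

y_out = librosa.effects.remix(y, intervals, align_zeros=False)
print(y_out)   # [5. 6. 7. 8. 9. 0. 1. 2. 3. 4.]
assert np.array_equal(y_out, np.concatenate([y[5:10], y[0:5]]))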
test
|
_signal_to_frame_nonsilent
|
Frame-wise non-silent indicator for audio input.
This is a helper function for `trim` and `split`.
Parameters
----------
y : np.ndarray, shape=(n,) or (2,n)
Audio signal, mono or stereo
frame_length : int > 0
The number of samples per frame
hop_length : int > 0
The number of samples between frames
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : callable or float
The reference power
Returns
-------
non_silent : np.ndarray, shape=(m,), dtype=bool
Indicator of non-silent frames
|
librosa/effects.py
|
def _signal_to_frame_nonsilent(y, frame_length=2048, hop_length=512, top_db=60,
ref=np.max):
'''Frame-wise non-silent indicator for audio input.
This is a helper function for `trim` and `split`.
Parameters
----------
y : np.ndarray, shape=(n,) or (2,n)
Audio signal, mono or stereo
frame_length : int > 0
The number of samples per frame
hop_length : int > 0
The number of samples between frames
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : callable or float
The reference power
Returns
-------
non_silent : np.ndarray, shape=(m,), dtype=bool
Indicator of non-silent frames
'''
# Convert to mono
y_mono = core.to_mono(y)
# Compute the MSE for the signal
mse = feature.rms(y=y_mono,
frame_length=frame_length,
hop_length=hop_length)**2
return (core.power_to_db(mse.squeeze(),
ref=ref,
top_db=None) > - top_db)
|
def _signal_to_frame_nonsilent(y, frame_length=2048, hop_length=512, top_db=60,
ref=np.max):
'''Frame-wise non-silent indicator for audio input.
This is a helper function for `trim` and `split`.
Parameters
----------
y : np.ndarray, shape=(n,) or (2,n)
Audio signal, mono or stereo
frame_length : int > 0
The number of samples per frame
hop_length : int > 0
The number of samples between frames
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : callable or float
The reference power
Returns
-------
non_silent : np.ndarray, shape=(m,), dtype=bool
Indicator of non-silent frames
'''
# Convert to mono
y_mono = core.to_mono(y)
# Compute the MSE for the signal
mse = feature.rms(y=y_mono,
frame_length=frame_length,
hop_length=hop_length)**2
return (core.power_to_db(mse.squeeze(),
ref=ref,
top_db=None) > - top_db)
|
[
"Frame",
"-",
"wise",
"non",
"-",
"silent",
"indicator",
"for",
"audio",
"input",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L390-L429
|
[
"def",
"_signal_to_frame_nonsilent",
"(",
"y",
",",
"frame_length",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"top_db",
"=",
"60",
",",
"ref",
"=",
"np",
".",
"max",
")",
":",
"# Convert to mono",
"y_mono",
"=",
"core",
".",
"to_mono",
"(",
"y",
")",
"# Compute the MSE for the signal",
"mse",
"=",
"feature",
".",
"rms",
"(",
"y",
"=",
"y_mono",
",",
"frame_length",
"=",
"frame_length",
",",
"hop_length",
"=",
"hop_length",
")",
"**",
"2",
"return",
"(",
"core",
".",
"power_to_db",
"(",
"mse",
".",
"squeeze",
"(",
")",
",",
"ref",
"=",
"ref",
",",
"top_db",
"=",
"None",
")",
">",
"-",
"top_db",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
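A simplified, stand-alone re-implementation of the idea behind _signal_to_frame_nonsilent, written in plain NumPy for illustration only (the helper name frame_nonsilent is made up and this is not librosa's code): frame the signal, measure per-frame power in dB relative to the loudest frame, and mark frames louder than -top_db as non-silent.

import numpy as np

def frame_nonsilent(y, frame_length=2048, hop_length=512, top_db=60):
    # Hypothetical helper: frame-wise power, then a dB threshold below the peak frame.
    n_frames = 1 + max(0, len(y) - frame_length) // hop_length
    power = np.empty(n_frames)
    for i in range(n_frames):
        frame = y[i * hop_length:i * hop_length + frame_length]
        power[i] = np.mean(frame ** 2)
    db = 10.0 * np.log10(np.maximum(power, 1e-10) / max(power.max(), 1e-10))
    return db > -top_db

sr = 22050
y = np.concatenate([np.zeros(sr), 0.5 * np.ones(sr), np.zeros(sr)]).astype(np.float32)
print(frame_nonsilent(y).astype(int))   # zeros, then a run of ones, then zeros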
test
|
trim
|
Trim leading and trailing silence from an audio signal.
Parameters
----------
y : np.ndarray, shape=(n,) or (2,n)
Audio signal, can be mono or stereo
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : number or callable
The reference power. By default, it uses `np.max` and compares
to the peak power in the signal.
frame_length : int > 0
The number of samples per analysis frame
hop_length : int > 0
The number of samples between analysis frames
Returns
-------
y_trimmed : np.ndarray, shape=(m,) or (2, m)
The trimmed signal
index : np.ndarray, shape=(2,)
the interval of `y` corresponding to the non-silent region:
`y_trimmed = y[index[0]:index[1]]` (for mono) or
`y_trimmed = y[:, index[0]:index[1]]` (for stereo).
Examples
--------
>>> # Load some audio
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> # Trim the beginning and ending silence
>>> yt, index = librosa.effects.trim(y)
>>> # Print the durations
>>> print(librosa.get_duration(y), librosa.get_duration(yt))
61.45886621315193 60.58086167800454
|
librosa/effects.py
|
def trim(y, top_db=60, ref=np.max, frame_length=2048, hop_length=512):
'''Trim leading and trailing silence from an audio signal.
Parameters
----------
y : np.ndarray, shape=(n,) or (2,n)
Audio signal, can be mono or stereo
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : number or callable
The reference power. By default, it uses `np.max` and compares
to the peak power in the signal.
frame_length : int > 0
The number of samples per analysis frame
hop_length : int > 0
The number of samples between analysis frames
Returns
-------
y_trimmed : np.ndarray, shape=(m,) or (2, m)
The trimmed signal
index : np.ndarray, shape=(2,)
the interval of `y` corresponding to the non-silent region:
`y_trimmed = y[index[0]:index[1]]` (for mono) or
`y_trimmed = y[:, index[0]:index[1]]` (for stereo).
Examples
--------
>>> # Load some audio
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> # Trim the beginning and ending silence
>>> yt, index = librosa.effects.trim(y)
>>> # Print the durations
>>> print(librosa.get_duration(y), librosa.get_duration(yt))
61.45886621315193 60.58086167800454
'''
non_silent = _signal_to_frame_nonsilent(y,
frame_length=frame_length,
hop_length=hop_length,
ref=ref,
top_db=top_db)
nonzero = np.flatnonzero(non_silent)
if nonzero.size > 0:
# Compute the start and end positions
# End position goes one frame past the last non-zero
start = int(core.frames_to_samples(nonzero[0], hop_length))
end = min(y.shape[-1],
int(core.frames_to_samples(nonzero[-1] + 1, hop_length)))
else:
# The signal only contains zeros
start, end = 0, 0
# Build the mono/stereo index
full_index = [slice(None)] * y.ndim
full_index[-1] = slice(start, end)
return y[tuple(full_index)], np.asarray([start, end])
|
def trim(y, top_db=60, ref=np.max, frame_length=2048, hop_length=512):
'''Trim leading and trailing silence from an audio signal.
Parameters
----------
y : np.ndarray, shape=(n,) or (2,n)
Audio signal, can be mono or stereo
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : number or callable
The reference power. By default, it uses `np.max` and compares
to the peak power in the signal.
frame_length : int > 0
The number of samples per analysis frame
hop_length : int > 0
The number of samples between analysis frames
Returns
-------
y_trimmed : np.ndarray, shape=(m,) or (2, m)
The trimmed signal
index : np.ndarray, shape=(2,)
the interval of `y` corresponding to the non-silent region:
`y_trimmed = y[index[0]:index[1]]` (for mono) or
`y_trimmed = y[:, index[0]:index[1]]` (for stereo).
Examples
--------
>>> # Load some audio
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> # Trim the beginning and ending silence
>>> yt, index = librosa.effects.trim(y)
>>> # Print the durations
>>> print(librosa.get_duration(y), librosa.get_duration(yt))
61.45886621315193 60.58086167800454
'''
non_silent = _signal_to_frame_nonsilent(y,
frame_length=frame_length,
hop_length=hop_length,
ref=ref,
top_db=top_db)
nonzero = np.flatnonzero(non_silent)
if nonzero.size > 0:
# Compute the start and end positions
# End position goes one frame past the last non-zero
start = int(core.frames_to_samples(nonzero[0], hop_length))
end = min(y.shape[-1],
int(core.frames_to_samples(nonzero[-1] + 1, hop_length)))
else:
# The signal only contains zeros
start, end = 0, 0
# Build the mono/stereo index
full_index = [slice(None)] * y.ndim
full_index[-1] = slice(start, end)
return y[tuple(full_index)], np.asarray([start, end])
|
[
"Trim",
"leading",
"and",
"trailing",
"silence",
"from",
"an",
"audio",
"signal",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L432-L498
|
[
"def",
"trim",
"(",
"y",
",",
"top_db",
"=",
"60",
",",
"ref",
"=",
"np",
".",
"max",
",",
"frame_length",
"=",
"2048",
",",
"hop_length",
"=",
"512",
")",
":",
"non_silent",
"=",
"_signal_to_frame_nonsilent",
"(",
"y",
",",
"frame_length",
"=",
"frame_length",
",",
"hop_length",
"=",
"hop_length",
",",
"ref",
"=",
"ref",
",",
"top_db",
"=",
"top_db",
")",
"nonzero",
"=",
"np",
".",
"flatnonzero",
"(",
"non_silent",
")",
"if",
"nonzero",
".",
"size",
">",
"0",
":",
"# Compute the start and end positions",
"# End position goes one frame past the last non-zero",
"start",
"=",
"int",
"(",
"core",
".",
"frames_to_samples",
"(",
"nonzero",
"[",
"0",
"]",
",",
"hop_length",
")",
")",
"end",
"=",
"min",
"(",
"y",
".",
"shape",
"[",
"-",
"1",
"]",
",",
"int",
"(",
"core",
".",
"frames_to_samples",
"(",
"nonzero",
"[",
"-",
"1",
"]",
"+",
"1",
",",
"hop_length",
")",
")",
")",
"else",
":",
"# The signal only contains zeros",
"start",
",",
"end",
"=",
"0",
",",
"0",
"# Build the mono/stereo index",
"full_index",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"y",
".",
"ndim",
"full_index",
"[",
"-",
"1",
"]",
"=",
"slice",
"(",
"start",
",",
"end",
")",
"return",
"y",
"[",
"tuple",
"(",
"full_index",
")",
"]",
",",
"np",
".",
"asarray",
"(",
"[",
"start",
",",
"end",
"]",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
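A sketch of trim on a tone padded with digital silence (synthetic input, assumes librosa is installed). It exercises the documented relationship y_trimmed = y[index[0]:index[1]].

import numpy as np
import librosa

sr = 22050
tone = 0.8 * np.sin(2 * np.pi * 440 * np.arange(sr) / sr)
y = np.concatenate([np.zeros(sr // 2), tone, np.zeros(sr // 2)]).astype(np.float32)

yt, index = librosa.effects.trim(y, top_db=30)
assert np.allclose(yt, y[index[0]:index[1]])
print(len(y), len(yt), index)   # the trimmed signal should be noticeably shorter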
test
|
split
|
Split an audio signal into non-silent intervals.
Parameters
----------
y : np.ndarray, shape=(n,) or (2, n)
An audio signal
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : number or callable
The reference power. By default, it uses `np.max` and compares
to the peak power in the signal.
frame_length : int > 0
The number of samples per analysis frame
hop_length : int > 0
The number of samples between analysis frames
Returns
-------
intervals : np.ndarray, shape=(m, 2)
`intervals[i] == (start_i, end_i)` are the start and end time
(in samples) of non-silent interval `i`.
|
librosa/effects.py
|
def split(y, top_db=60, ref=np.max, frame_length=2048, hop_length=512):
'''Split an audio signal into non-silent intervals.
Parameters
----------
y : np.ndarray, shape=(n,) or (2, n)
An audio signal
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : number or callable
The reference power. By default, it uses `np.max` and compares
to the peak power in the signal.
frame_length : int > 0
The number of samples per analysis frame
hop_length : int > 0
The number of samples between analysis frames
Returns
-------
intervals : np.ndarray, shape=(m, 2)
`intervals[i] == (start_i, end_i)` are the start and end time
(in samples) of non-silent interval `i`.
'''
non_silent = _signal_to_frame_nonsilent(y,
frame_length=frame_length,
hop_length=hop_length,
ref=ref,
top_db=top_db)
# Interval slicing, adapted from
# https://stackoverflow.com/questions/2619413/efficiently-finding-the-interval-with-non-zeros-in-scipy-numpy-in-python
# Find points where the sign flips
edges = np.flatnonzero(np.diff(non_silent.astype(int)))
# Pad back the sample lost in the diff
edges = [edges + 1]
# If the first frame had high energy, count it
if non_silent[0]:
edges.insert(0, [0])
# Likewise for the last frame
if non_silent[-1]:
edges.append([len(non_silent)])
# Convert from frames to samples
edges = core.frames_to_samples(np.concatenate(edges),
hop_length=hop_length)
# Clip to the signal duration
edges = np.minimum(edges, y.shape[-1])
# Stack the results back as an ndarray
return edges.reshape((-1, 2))
|
def split(y, top_db=60, ref=np.max, frame_length=2048, hop_length=512):
'''Split an audio signal into non-silent intervals.
Parameters
----------
y : np.ndarray, shape=(n,) or (2, n)
An audio signal
top_db : number > 0
The threshold (in decibels) below reference to consider as
silence
ref : number or callable
The reference power. By default, it uses `np.max` and compares
to the peak power in the signal.
frame_length : int > 0
The number of samples per analysis frame
hop_length : int > 0
The number of samples between analysis frames
Returns
-------
intervals : np.ndarray, shape=(m, 2)
`intervals[i] == (start_i, end_i)` are the start and end time
(in samples) of non-silent interval `i`.
'''
non_silent = _signal_to_frame_nonsilent(y,
frame_length=frame_length,
hop_length=hop_length,
ref=ref,
top_db=top_db)
# Interval slicing, adapted from
# https://stackoverflow.com/questions/2619413/efficiently-finding-the-interval-with-non-zeros-in-scipy-numpy-in-python
# Find points where the sign flips
edges = np.flatnonzero(np.diff(non_silent.astype(int)))
# Pad back the sample lost in the diff
edges = [edges + 1]
# If the first frame had high energy, count it
if non_silent[0]:
edges.insert(0, [0])
# Likewise for the last frame
if non_silent[-1]:
edges.append([len(non_silent)])
# Convert from frames to samples
edges = core.frames_to_samples(np.concatenate(edges),
hop_length=hop_length)
# Clip to the signal duration
edges = np.minimum(edges, y.shape[-1])
# Stack the results back as an ndarray
return edges.reshape((-1, 2))
|
[
"Split",
"an",
"audio",
"signal",
"into",
"non",
"-",
"silent",
"intervals",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/effects.py#L501-L561
|
[
"def",
"split",
"(",
"y",
",",
"top_db",
"=",
"60",
",",
"ref",
"=",
"np",
".",
"max",
",",
"frame_length",
"=",
"2048",
",",
"hop_length",
"=",
"512",
")",
":",
"non_silent",
"=",
"_signal_to_frame_nonsilent",
"(",
"y",
",",
"frame_length",
"=",
"frame_length",
",",
"hop_length",
"=",
"hop_length",
",",
"ref",
"=",
"ref",
",",
"top_db",
"=",
"top_db",
")",
"# Interval slicing, adapted from",
"# https://stackoverflow.com/questions/2619413/efficiently-finding-the-interval-with-non-zeros-in-scipy-numpy-in-python",
"# Find points where the sign flips",
"edges",
"=",
"np",
".",
"flatnonzero",
"(",
"np",
".",
"diff",
"(",
"non_silent",
".",
"astype",
"(",
"int",
")",
")",
")",
"# Pad back the sample lost in the diff",
"edges",
"=",
"[",
"edges",
"+",
"1",
"]",
"# If the first frame had high energy, count it",
"if",
"non_silent",
"[",
"0",
"]",
":",
"edges",
".",
"insert",
"(",
"0",
",",
"[",
"0",
"]",
")",
"# Likewise for the last frame",
"if",
"non_silent",
"[",
"-",
"1",
"]",
":",
"edges",
".",
"append",
"(",
"[",
"len",
"(",
"non_silent",
")",
"]",
")",
"# Convert from frames to samples",
"edges",
"=",
"core",
".",
"frames_to_samples",
"(",
"np",
".",
"concatenate",
"(",
"edges",
")",
",",
"hop_length",
"=",
"hop_length",
")",
"# Clip to the signal duration",
"edges",
"=",
"np",
".",
"minimum",
"(",
"edges",
",",
"y",
".",
"shape",
"[",
"-",
"1",
"]",
")",
"# Stack the results back as an ndarray",
"return",
"edges",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"2",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
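A sketch of split on two noise bursts separated by a full second of digital silence (synthetic input, assumes librosa is installed); with that much separation the function should return exactly two (start, end) intervals.

import numpy as np
import librosa

sr = 22050
rng = np.random.default_rng(0)
burst = 0.5 * rng.standard_normal(sr // 2)
y = np.concatenate([burst, np.zeros(sr), burst]).astype(np.float32)

intervals = librosa.effects.split(y, top_db=30)
print(intervals)               # two rows of sample indices
assert intervals.shape == (2, 2)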
test
|
stft
|
Short-time Fourier transform (STFT)
Returns a complex-valued matrix D such that
`np.abs(D[f, t])` is the magnitude of frequency bin `f`
at frame `t`
`np.angle(D[f, t])` is the phase of frequency bin `f`
at frame `t`
Parameters
----------
y : np.ndarray [shape=(n,)], real-valued
the input signal (audio time series)
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
number of audio samples between adjacent STFT columns.
If unspecified, defaults to `win_length // 4`.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
dtype : numeric type
Complex numeric type for `D`. Default is 64-bit complex.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
D : np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
STFT matrix
See Also
--------
istft : Inverse STFT
ifgram : Instantaneous frequency spectrogram
np.pad : array padding
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = np.abs(librosa.stft(y))
>>> D
array([[2.58028018e-03, 4.32422794e-02, 6.61255598e-01, ...,
6.82710262e-04, 2.51654536e-04, 7.23036574e-05],
[2.49403086e-03, 5.15930466e-02, 6.00107312e-01, ...,
3.48026224e-04, 2.35853557e-04, 7.54836728e-05],
[7.82410789e-04, 1.05394892e-01, 4.37517226e-01, ...,
6.29352580e-04, 3.38571583e-04, 8.38094638e-05],
...,
[9.48568513e-08, 4.74725084e-07, 1.50052492e-05, ...,
1.85637656e-08, 2.89708542e-08, 5.74304337e-09],
[1.25165826e-07, 8.58259284e-07, 1.11157215e-05, ...,
3.49099771e-08, 3.11740926e-08, 5.29926236e-09],
[1.70630571e-07, 8.92518756e-07, 1.23656537e-05, ...,
5.33256745e-08, 3.33264900e-08, 5.13272980e-09]], dtype=float32)
Use left-aligned frames, instead of centered frames
>>> D_left = np.abs(librosa.stft(y, center=False))
Use a shorter hop length
>>> D_short = np.abs(librosa.stft(y, hop_length=64))
Display a spectrogram
>>> import matplotlib.pyplot as plt
>>> librosa.display.specshow(librosa.amplitude_to_db(D,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Power spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
|
librosa/core/spectrum.py
|
def stft(y, n_fft=2048, hop_length=None, win_length=None, window='hann',
center=True, dtype=np.complex64, pad_mode='reflect'):
"""Short-time Fourier transform (STFT)
Returns a complex-valued matrix D such that
`np.abs(D[f, t])` is the magnitude of frequency bin `f`
at frame `t`
`np.angle(D[f, t])` is the phase of frequency bin `f`
at frame `t`
Parameters
----------
y : np.ndarray [shape=(n,)], real-valued
the input signal (audio time series)
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
number of audio samples between adjacent STFT columns.
If unspecified, defaults to `win_length // 4`.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
dtype : numeric type
Complex numeric type for `D`. Default is 64-bit complex.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
D : np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
STFT matrix
See Also
--------
istft : Inverse STFT
ifgram : Instantaneous frequency spectrogram
np.pad : array padding
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = np.abs(librosa.stft(y))
>>> D
array([[2.58028018e-03, 4.32422794e-02, 6.61255598e-01, ...,
6.82710262e-04, 2.51654536e-04, 7.23036574e-05],
[2.49403086e-03, 5.15930466e-02, 6.00107312e-01, ...,
3.48026224e-04, 2.35853557e-04, 7.54836728e-05],
[7.82410789e-04, 1.05394892e-01, 4.37517226e-01, ...,
6.29352580e-04, 3.38571583e-04, 8.38094638e-05],
...,
[9.48568513e-08, 4.74725084e-07, 1.50052492e-05, ...,
1.85637656e-08, 2.89708542e-08, 5.74304337e-09],
[1.25165826e-07, 8.58259284e-07, 1.11157215e-05, ...,
3.49099771e-08, 3.11740926e-08, 5.29926236e-09],
[1.70630571e-07, 8.92518756e-07, 1.23656537e-05, ...,
5.33256745e-08, 3.33264900e-08, 5.13272980e-09]], dtype=float32)
Use left-aligned frames, instead of centered frames
>>> D_left = np.abs(librosa.stft(y, center=False))
Use a shorter hop length
>>> D_short = np.abs(librosa.stft(y, hop_length=64))
Display a spectrogram
>>> import matplotlib.pyplot as plt
>>> librosa.display.specshow(librosa.amplitude_to_db(D,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Power spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
"""
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
fft_window = get_window(window, win_length, fftbins=True)
# Pad the window out to n_fft size
fft_window = util.pad_center(fft_window, n_fft)
# Reshape so that the window can be broadcast
fft_window = fft_window.reshape((-1, 1))
# Check audio is valid
util.valid_audio(y)
# Pad the time series so that frames are centered
if center:
y = np.pad(y, int(n_fft // 2), mode=pad_mode)
# Window the time series.
y_frames = util.frame(y, frame_length=n_fft, hop_length=hop_length)
# Pre-allocate the STFT matrix
stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[1]),
dtype=dtype,
order='F')
fft = get_fftlib()
# how many columns can we fit within MAX_MEM_BLOCK?
n_columns = int(util.MAX_MEM_BLOCK / (stft_matrix.shape[0] *
stft_matrix.itemsize))
for bl_s in range(0, stft_matrix.shape[1], n_columns):
bl_t = min(bl_s + n_columns, stft_matrix.shape[1])
stft_matrix[:, bl_s:bl_t] = fft.rfft(fft_window *
y_frames[:, bl_s:bl_t],
axis=0)
return stft_matrix
|
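The loop at the end of the implementation above computes the FFT in column blocks so that each call stays within a fixed memory budget. A standalone sketch of that pattern; the budget value and function name are illustrative assumptions, not librosa's MAX_MEM_BLOCK machinery:

import numpy as np

def blockwise_rfft(frames, max_mem_block=2**8 * 2**10):
    # frames: real-valued array of shape (n_fft, n_frames), already windowed.
    n_bins = 1 + frames.shape[0] // 2
    out = np.empty((n_bins, frames.shape[1]), dtype=np.complex64, order='F')
    # Number of output columns that fit inside the (assumed) memory budget.
    n_columns = max(1, int(max_mem_block / (out.shape[0] * out.itemsize)))
    for start in range(0, out.shape[1], n_columns):
        stop = min(start + n_columns, out.shape[1])
        out[:, start:stop] = np.fft.rfft(frames[:, start:stop], axis=0)
    return out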
def stft(y, n_fft=2048, hop_length=None, win_length=None, window='hann',
center=True, dtype=np.complex64, pad_mode='reflect'):
"""Short-time Fourier transform (STFT)
Returns a complex-valued matrix D such that
`np.abs(D[f, t])` is the magnitude of frequency bin `f`
at frame `t`
`np.angle(D[f, t])` is the phase of frequency bin `f`
at frame `t`
Parameters
----------
y : np.ndarray [shape=(n,)], real-valued
the input signal (audio time series)
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
number of audio samples between adjacent STFT columns.
If unspecified, defaults to `win_length // 4`.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
dtype : numeric type
Complex numeric type for `D`. Default is 64-bit complex.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
D : np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
STFT matrix
See Also
--------
istft : Inverse STFT
ifgram : Instantaneous frequency spectrogram
np.pad : array padding
Notes
-----
This function caches at level 20.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = np.abs(librosa.stft(y))
>>> D
array([[2.58028018e-03, 4.32422794e-02, 6.61255598e-01, ...,
6.82710262e-04, 2.51654536e-04, 7.23036574e-05],
[2.49403086e-03, 5.15930466e-02, 6.00107312e-01, ...,
3.48026224e-04, 2.35853557e-04, 7.54836728e-05],
[7.82410789e-04, 1.05394892e-01, 4.37517226e-01, ...,
6.29352580e-04, 3.38571583e-04, 8.38094638e-05],
...,
[9.48568513e-08, 4.74725084e-07, 1.50052492e-05, ...,
1.85637656e-08, 2.89708542e-08, 5.74304337e-09],
[1.25165826e-07, 8.58259284e-07, 1.11157215e-05, ...,
3.49099771e-08, 3.11740926e-08, 5.29926236e-09],
[1.70630571e-07, 8.92518756e-07, 1.23656537e-05, ...,
5.33256745e-08, 3.33264900e-08, 5.13272980e-09]], dtype=float32)
Use left-aligned frames, instead of centered frames
>>> D_left = np.abs(librosa.stft(y, center=False))
Use a shorter hop length
>>> D_short = np.abs(librosa.stft(y, hop_length=64))
Display a spectrogram
>>> import matplotlib.pyplot as plt
>>> librosa.display.specshow(librosa.amplitude_to_db(D,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Power spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
"""
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
fft_window = get_window(window, win_length, fftbins=True)
# Pad the window out to n_fft size
fft_window = util.pad_center(fft_window, n_fft)
# Reshape so that the window can be broadcast
fft_window = fft_window.reshape((-1, 1))
# Check audio is valid
util.valid_audio(y)
# Pad the time series so that frames are centered
if center:
y = np.pad(y, int(n_fft // 2), mode=pad_mode)
# Window the time series.
y_frames = util.frame(y, frame_length=n_fft, hop_length=hop_length)
# Pre-allocate the STFT matrix
stft_matrix = np.empty((int(1 + n_fft // 2), y_frames.shape[1]),
dtype=dtype,
order='F')
fft = get_fftlib()
# how many columns can we fit within MAX_MEM_BLOCK?
n_columns = int(util.MAX_MEM_BLOCK / (stft_matrix.shape[0] *
stft_matrix.itemsize))
for bl_s in range(0, stft_matrix.shape[1], n_columns):
bl_t = min(bl_s + n_columns, stft_matrix.shape[1])
stft_matrix[:, bl_s:bl_t] = fft.rfft(fft_window *
y_frames[:, bl_s:bl_t],
axis=0)
return stft_matrix
|
[
"Short",
"-",
"time",
"Fourier",
"transform",
"(",
"STFT",
")"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L33-L189
|
[
"def",
"stft",
"(",
"y",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"None",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"dtype",
"=",
"np",
".",
"complex64",
",",
"pad_mode",
"=",
"'reflect'",
")",
":",
"# By default, use the entire frame",
"if",
"win_length",
"is",
"None",
":",
"win_length",
"=",
"n_fft",
"# Set the default hop, if it's not already specified",
"if",
"hop_length",
"is",
"None",
":",
"hop_length",
"=",
"int",
"(",
"win_length",
"//",
"4",
")",
"fft_window",
"=",
"get_window",
"(",
"window",
",",
"win_length",
",",
"fftbins",
"=",
"True",
")",
"# Pad the window out to n_fft size",
"fft_window",
"=",
"util",
".",
"pad_center",
"(",
"fft_window",
",",
"n_fft",
")",
"# Reshape so that the window can be broadcast",
"fft_window",
"=",
"fft_window",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"# Check audio is valid",
"util",
".",
"valid_audio",
"(",
"y",
")",
"# Pad the time series so that frames are centered",
"if",
"center",
":",
"y",
"=",
"np",
".",
"pad",
"(",
"y",
",",
"int",
"(",
"n_fft",
"//",
"2",
")",
",",
"mode",
"=",
"pad_mode",
")",
"# Window the time series.",
"y_frames",
"=",
"util",
".",
"frame",
"(",
"y",
",",
"frame_length",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
")",
"# Pre-allocate the STFT matrix",
"stft_matrix",
"=",
"np",
".",
"empty",
"(",
"(",
"int",
"(",
"1",
"+",
"n_fft",
"//",
"2",
")",
",",
"y_frames",
".",
"shape",
"[",
"1",
"]",
")",
",",
"dtype",
"=",
"dtype",
",",
"order",
"=",
"'F'",
")",
"fft",
"=",
"get_fftlib",
"(",
")",
"# how many columns can we fit within MAX_MEM_BLOCK?",
"n_columns",
"=",
"int",
"(",
"util",
".",
"MAX_MEM_BLOCK",
"/",
"(",
"stft_matrix",
".",
"shape",
"[",
"0",
"]",
"*",
"stft_matrix",
".",
"itemsize",
")",
")",
"for",
"bl_s",
"in",
"range",
"(",
"0",
",",
"stft_matrix",
".",
"shape",
"[",
"1",
"]",
",",
"n_columns",
")",
":",
"bl_t",
"=",
"min",
"(",
"bl_s",
"+",
"n_columns",
",",
"stft_matrix",
".",
"shape",
"[",
"1",
"]",
")",
"stft_matrix",
"[",
":",
",",
"bl_s",
":",
"bl_t",
"]",
"=",
"fft",
".",
"rfft",
"(",
"fft_window",
"*",
"y_frames",
"[",
":",
",",
"bl_s",
":",
"bl_t",
"]",
",",
"axis",
"=",
"0",
")",
"return",
"stft_matrix"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
istft
|
Inverse short-time Fourier transform (ISTFT).
Converts a complex-valued spectrogram `stft_matrix` to time-series `y`
by minimizing the mean squared error between `stft_matrix` and STFT of
`y` as described in [1]_ up to Section 2 (reconstruction from MSTFT).
In general, the window function, hop length, and other parameters should be the same
as those used in `stft`, which mostly leads to perfect reconstruction of a signal from
an unmodified `stft_matrix`.
.. [1] D. W. Griffin and J. S. Lim,
"Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.
Parameters
----------
stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)]
STFT matrix from `stft`
hop_length : int > 0 [scalar]
Number of audio samples between adjacent STFT columns.
If unspecified, defaults to `win_length / 4`.
win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1)
When reconstructing the time series, each frame is windowed
and each sample is normalized by the sum of squared window
according to the `window` function (see below).
If unspecified, defaults to `n_fft`.
window : string, tuple, number, function, np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a user-specified window vector of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, `D` is assumed to have centered frames.
- If `False`, `D` is assumed to have left-aligned frames.
dtype : numeric type
Real numeric type for `y`. Default is 32-bit float.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
Returns
-------
y : np.ndarray [shape=(n,)]
time domain signal reconstructed from `stft_matrix`
See Also
--------
stft : Short-time Fourier Transform
Notes
-----
This function caches at level 30.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y)
>>> y_hat = librosa.istft(D)
>>> y_hat
array([ -4.812e-06, -4.267e-06, ..., 6.271e-06, 2.827e-07], dtype=float32)
Exactly preserving length of the input signal requires explicit padding.
Otherwise, a partial frame at the end of `y` will not be represented.
>>> n = len(y)
>>> n_fft = 2048
>>> y_pad = librosa.util.fix_length(y, n + n_fft // 2)
>>> D = librosa.stft(y_pad, n_fft=n_fft)
>>> y_out = librosa.istft(D, length=n)
>>> np.max(np.abs(y - y_out))
1.4901161e-07
|
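The normalization described above divides each output sample by the summed square of the synthesis window. A simplified stand-in for librosa's window_sumsquare helper; the function name and default values here are illustrative:

import numpy as np
import scipy.signal

def win_sumsquare(n_frames, win_length=2048, hop_length=512):
    # Accumulate the squared Hann window at every hop-aligned position.
    win_sq = scipy.signal.get_window('hann', win_length, fftbins=True) ** 2
    env = np.zeros(win_length + hop_length * (n_frames - 1))
    for t in range(n_frames):
        env[t * hop_length:t * hop_length + win_length] += win_sq
    return env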
librosa/core/spectrum.py
|
def istft(stft_matrix, hop_length=None, win_length=None, window='hann',
center=True, dtype=np.float32, length=None):
"""
Inverse short-time Fourier transform (ISTFT).
Converts a complex-valued spectrogram `stft_matrix` to time-series `y`
by minimizing the mean squared error between `stft_matrix` and STFT of
`y` as described in [1]_ up to Section 2 (reconstruction from MSTFT).
In general, the window function, hop length, and other parameters should be the same
as those used in `stft`, which mostly leads to perfect reconstruction of a signal from
an unmodified `stft_matrix`.
.. [1] D. W. Griffin and J. S. Lim,
"Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.
Parameters
----------
stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)]
STFT matrix from `stft`
hop_length : int > 0 [scalar]
Number of audio samples between adjacent STFT columns.
If unspecified, defaults to `win_length / 4`.
win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1)
When reconstructing the time series, each frame is windowed
and each sample is normalized by the sum of squared window
according to the `window` function (see below).
If unspecified, defaults to `n_fft`.
window : string, tuple, number, function, np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a user-specified window vector of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, `D` is assumed to have centered frames.
- If `False`, `D` is assumed to have left-aligned frames.
dtype : numeric type
Real numeric type for `y`. Default is 32-bit float.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
Returns
-------
y : np.ndarray [shape=(n,)]
time domain signal reconstructed from `stft_matrix`
See Also
--------
stft : Short-time Fourier Transform
Notes
-----
This function caches at level 30.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y)
>>> y_hat = librosa.istft(D)
>>> y_hat
array([ -4.812e-06, -4.267e-06, ..., 6.271e-06, 2.827e-07], dtype=float32)
Exactly preserving length of the input signal requires explicit padding.
Otherwise, a partial frame at the end of `y` will not be represented.
>>> n = len(y)
>>> n_fft = 2048
>>> y_pad = librosa.util.fix_length(y, n + n_fft // 2)
>>> D = librosa.stft(y_pad, n_fft=n_fft)
>>> y_out = librosa.istft(D, length=n)
>>> np.max(np.abs(y - y_out))
1.4901161e-07
"""
n_fft = 2 * (stft_matrix.shape[0] - 1)
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
ifft_window = get_window(window, win_length, fftbins=True)
# Pad out to match n_fft, and add a broadcasting axis
ifft_window = util.pad_center(ifft_window, n_fft)[:, np.newaxis]
n_frames = stft_matrix.shape[1]
expected_signal_len = n_fft + hop_length * (n_frames - 1)
y = np.zeros(expected_signal_len, dtype=dtype)
n_columns = int(util.MAX_MEM_BLOCK // (stft_matrix.shape[0] *
stft_matrix.itemsize))
fft = get_fftlib()
frame = 0
for bl_s in range(0, n_frames, n_columns):
bl_t = min(bl_s + n_columns, n_frames)
# invert the block and apply the window function
ytmp = ifft_window * fft.irfft(stft_matrix[:, bl_s:bl_t], axis=0)
# Overlap-add the istft block starting at the i'th frame
__overlap_add(y[frame * hop_length:], ytmp, hop_length)
frame += (bl_t - bl_s)
# Normalize by sum of squared window
ifft_window_sum = window_sumsquare(window,
n_frames,
win_length=win_length,
n_fft=n_fft,
hop_length=hop_length,
dtype=dtype)
approx_nonzero_indices = ifft_window_sum > util.tiny(ifft_window_sum)
y[approx_nonzero_indices] /= ifft_window_sum[approx_nonzero_indices]
if length is None:
# If we don't need to control length, just do the usual center trimming
# to eliminate padded data
if center:
y = y[int(n_fft // 2):-int(n_fft // 2)]
else:
if center:
# If we're centering, crop off the first n_fft//2 samples
# and then trim/pad to the target length.
# We don't trim the end here, so that if the signal is zero-padded
# to a longer duration, the decay is smooth by windowing
start = int(n_fft // 2)
else:
# If we're not centering, start at 0 and trim/pad as necessary
start = 0
y = util.fix_length(y[start:], length)
return y
|
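The private __overlap_add helper called above is not included in this record. A minimal sketch of the overlap-add step it performs (assumed behaviour, not the actual librosa implementation):

import numpy as np

def overlap_add(y, ytmp, hop_length):
    # Add each inverted, windowed frame into the output buffer at its
    # hop-aligned position; y must already be long enough to hold them all.
    n_fft = ytmp.shape[0]
    for t in range(ytmp.shape[1]):
        y[t * hop_length:t * hop_length + n_fft] += ytmp[:, t]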
def istft(stft_matrix, hop_length=None, win_length=None, window='hann',
center=True, dtype=np.float32, length=None):
"""
Inverse short-time Fourier transform (ISTFT).
Converts a complex-valued spectrogram `stft_matrix` to time-series `y`
by minimizing the mean squared error between `stft_matrix` and STFT of
`y` as described in [1]_ up to Section 2 (reconstruction from MSTFT).
In general, the window function, hop length, and other parameters should be the same
as those used in `stft`, which mostly leads to perfect reconstruction of a signal from
an unmodified `stft_matrix`.
.. [1] D. W. Griffin and J. S. Lim,
"Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.
Parameters
----------
stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)]
STFT matrix from `stft`
hop_length : int > 0 [scalar]
Number of audio samples between adjacent STFT columns.
If unspecified, defaults to `win_length / 4`.
win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1)
When reconstructing the time series, each frame is windowed
and each sample is normalized by the sum of squared window
according to the `window` function (see below).
If unspecified, defaults to `n_fft`.
window : string, tuple, number, function, np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a user-specified window vector of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, `D` is assumed to have centered frames.
- If `False`, `D` is assumed to have left-aligned frames.
dtype : numeric type
Real numeric type for `y`. Default is 32-bit float.
length : int > 0, optional
If provided, the output `y` is zero-padded or clipped to exactly
`length` samples.
Returns
-------
y : np.ndarray [shape=(n,)]
time domain signal reconstructed from `stft_matrix`
See Also
--------
stft : Short-time Fourier Transform
Notes
-----
This function caches at level 30.
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y)
>>> y_hat = librosa.istft(D)
>>> y_hat
array([ -4.812e-06, -4.267e-06, ..., 6.271e-06, 2.827e-07], dtype=float32)
Exactly preserving length of the input signal requires explicit padding.
Otherwise, a partial frame at the end of `y` will not be represented.
>>> n = len(y)
>>> n_fft = 2048
>>> y_pad = librosa.util.fix_length(y, n + n_fft // 2)
>>> D = librosa.stft(y_pad, n_fft=n_fft)
>>> y_out = librosa.istft(D, length=n)
>>> np.max(np.abs(y - y_out))
1.4901161e-07
"""
n_fft = 2 * (stft_matrix.shape[0] - 1)
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
ifft_window = get_window(window, win_length, fftbins=True)
# Pad out to match n_fft, and add a broadcasting axis
ifft_window = util.pad_center(ifft_window, n_fft)[:, np.newaxis]
n_frames = stft_matrix.shape[1]
expected_signal_len = n_fft + hop_length * (n_frames - 1)
y = np.zeros(expected_signal_len, dtype=dtype)
n_columns = int(util.MAX_MEM_BLOCK // (stft_matrix.shape[0] *
stft_matrix.itemsize))
fft = get_fftlib()
frame = 0
for bl_s in range(0, n_frames, n_columns):
bl_t = min(bl_s + n_columns, n_frames)
# invert the block and apply the window function
ytmp = ifft_window * fft.irfft(stft_matrix[:, bl_s:bl_t], axis=0)
# Overlap-add the istft block starting at the i'th frame
__overlap_add(y[frame * hop_length:], ytmp, hop_length)
frame += (bl_t - bl_s)
# Normalize by sum of squared window
ifft_window_sum = window_sumsquare(window,
n_frames,
win_length=win_length,
n_fft=n_fft,
hop_length=hop_length,
dtype=dtype)
approx_nonzero_indices = ifft_window_sum > util.tiny(ifft_window_sum)
y[approx_nonzero_indices] /= ifft_window_sum[approx_nonzero_indices]
if length is None:
# If we don't need to control length, just do the usual center trimming
# to eliminate padded data
if center:
y = y[int(n_fft // 2):-int(n_fft // 2)]
else:
if center:
# If we're centering, crop off the first n_fft//2 samples
# and then trim/pad to the target length.
# We don't trim the end here, so that if the signal is zero-padded
# to a longer duration, the decay is smooth by windowing
start = int(n_fft // 2)
else:
# If we're not centering, start at 0 and trim/pad as necessary
start = 0
y = util.fix_length(y[start:], length)
return y
|
[
"Inverse",
"short",
"-",
"time",
"Fourier",
"transform",
"(",
"ISTFT",
")",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L193-L343
|
[
"def",
"istft",
"(",
"stft_matrix",
",",
"hop_length",
"=",
"None",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"length",
"=",
"None",
")",
":",
"n_fft",
"=",
"2",
"*",
"(",
"stft_matrix",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
"# By default, use the entire frame",
"if",
"win_length",
"is",
"None",
":",
"win_length",
"=",
"n_fft",
"# Set the default hop, if it's not already specified",
"if",
"hop_length",
"is",
"None",
":",
"hop_length",
"=",
"int",
"(",
"win_length",
"//",
"4",
")",
"ifft_window",
"=",
"get_window",
"(",
"window",
",",
"win_length",
",",
"fftbins",
"=",
"True",
")",
"# Pad out to match n_fft, and add a broadcasting axis",
"ifft_window",
"=",
"util",
".",
"pad_center",
"(",
"ifft_window",
",",
"n_fft",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"n_frames",
"=",
"stft_matrix",
".",
"shape",
"[",
"1",
"]",
"expected_signal_len",
"=",
"n_fft",
"+",
"hop_length",
"*",
"(",
"n_frames",
"-",
"1",
")",
"y",
"=",
"np",
".",
"zeros",
"(",
"expected_signal_len",
",",
"dtype",
"=",
"dtype",
")",
"n_columns",
"=",
"int",
"(",
"util",
".",
"MAX_MEM_BLOCK",
"//",
"(",
"stft_matrix",
".",
"shape",
"[",
"0",
"]",
"*",
"stft_matrix",
".",
"itemsize",
")",
")",
"fft",
"=",
"get_fftlib",
"(",
")",
"frame",
"=",
"0",
"for",
"bl_s",
"in",
"range",
"(",
"0",
",",
"n_frames",
",",
"n_columns",
")",
":",
"bl_t",
"=",
"min",
"(",
"bl_s",
"+",
"n_columns",
",",
"n_frames",
")",
"# invert the block and apply the window function",
"ytmp",
"=",
"ifft_window",
"*",
"fft",
".",
"irfft",
"(",
"stft_matrix",
"[",
":",
",",
"bl_s",
":",
"bl_t",
"]",
",",
"axis",
"=",
"0",
")",
"# Overlap-add the istft block starting at the i'th frame",
"__overlap_add",
"(",
"y",
"[",
"frame",
"*",
"hop_length",
":",
"]",
",",
"ytmp",
",",
"hop_length",
")",
"frame",
"+=",
"(",
"bl_t",
"-",
"bl_s",
")",
"# Normalize by sum of squared window",
"ifft_window_sum",
"=",
"window_sumsquare",
"(",
"window",
",",
"n_frames",
",",
"win_length",
"=",
"win_length",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"dtype",
"=",
"dtype",
")",
"approx_nonzero_indices",
"=",
"ifft_window_sum",
">",
"util",
".",
"tiny",
"(",
"ifft_window_sum",
")",
"y",
"[",
"approx_nonzero_indices",
"]",
"/=",
"ifft_window_sum",
"[",
"approx_nonzero_indices",
"]",
"if",
"length",
"is",
"None",
":",
"# If we don't need to control length, just do the usual center trimming",
"# to eliminate padded data",
"if",
"center",
":",
"y",
"=",
"y",
"[",
"int",
"(",
"n_fft",
"//",
"2",
")",
":",
"-",
"int",
"(",
"n_fft",
"//",
"2",
")",
"]",
"else",
":",
"if",
"center",
":",
"# If we're centering, crop off the first n_fft//2 samples",
"# and then trim/pad to the target length.",
"# We don't trim the end here, so that if the signal is zero-padded",
"# to a longer duration, the decay is smooth by windowing",
"start",
"=",
"int",
"(",
"n_fft",
"//",
"2",
")",
"else",
":",
"# If we're not centering, start at 0 and trim/pad as necessary",
"start",
"=",
"0",
"y",
"=",
"util",
".",
"fix_length",
"(",
"y",
"[",
"start",
":",
"]",
",",
"length",
")",
"return",
"y"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
ifgram
|
Compute the instantaneous frequency (as a proportion of the sampling rate)
obtained as the time-derivative of the phase of the complex spectrum as
described by [1]_.
Calculates regular STFT as a side effect.
.. [1] Abe, Toshihiko, Takao Kobayashi, and Satoshi Imai.
"Harmonics tracking and pitch extraction based on instantaneous
frequency."
International Conference on Acoustics, Speech, and Signal Processing,
ICASSP-95., Vol. 1. IEEE, 1995.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length, the number of samples between subsequent frames.
If not supplied, defaults to `win_length / 4`.
win_length : int > 0, <= n_fft
Window length. Defaults to `n_fft`.
See `stft` for details.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a user-specified window vector of length `n_fft`
See `stft` for details.
.. see also:: `filters.get_window`
norm : bool
Normalize the STFT.
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` (and `if_gram`) is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
ref_power : float >= 0 or callable
Minimum power threshold for estimating instantaneous frequency.
Any bin with `np.abs(D[f, t])**2 < ref_power` will receive the
default frequency estimate.
If callable, the threshold is set to `ref_power(np.abs(D)**2)`.
clip : boolean
- If `True`, clip estimated frequencies to the range `[0, 0.5 * sr]`.
- If `False`, estimated frequencies can be negative or exceed
`0.5 * sr`.
dtype : numeric type
Complex numeric type for `D`. Default is 64-bit complex.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
if_gram : np.ndarray [shape=(1 + n_fft/2, t), dtype=real]
Instantaneous frequency spectrogram:
`if_gram[f, t]` is the frequency at bin `f`, time `t`
D : np.ndarray [shape=(1 + n_fft/2, t), dtype=complex]
Short-time Fourier transform
See Also
--------
stft : Short-time Fourier Transform
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> frequencies, D = librosa.ifgram(y, sr=sr)
>>> frequencies
array([[ 0.000e+00, 0.000e+00, ..., 0.000e+00, 0.000e+00],
[ 3.150e+01, 3.070e+01, ..., 1.077e+01, 1.077e+01],
...,
[ 1.101e+04, 1.101e+04, ..., 1.101e+04, 1.101e+04],
[ 1.102e+04, 1.102e+04, ..., 1.102e+04, 1.102e+04]])
|
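To make the "time-derivative of the phase" idea concrete, the sketch below estimates instantaneous frequency from the frame-to-frame phase advance of an ordinary STFT. This is a coarser estimator than the derivative-window method used by ifgram and is shown only for illustration; the parameter values are assumed:

import numpy as np
import librosa

def if_from_phase(y, sr, n_fft=2048, hop_length=512):
    D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length)
    phase = np.angle(D)
    # Expected phase advance per hop for each bin (radians per hop).
    bin_freqs = np.linspace(0, np.pi, D.shape[0])[:, np.newaxis]
    expected = bin_freqs * hop_length
    dphi = np.diff(phase, axis=1) - expected
    dphi -= 2 * np.pi * np.round(dphi / (2 * np.pi))  # wrap to [-pi, pi]
    # Convert the corrected phase advance back to Hz.
    return (expected + dphi) * sr / (2 * np.pi * hop_length)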
librosa/core/spectrum.py
|
def ifgram(y, sr=22050, n_fft=2048, hop_length=None, win_length=None,
window='hann', norm=False, center=True, ref_power=1e-6,
clip=True, dtype=np.complex64, pad_mode='reflect'):
'''Compute the instantaneous frequency (as a proportion of the sampling rate)
obtained as the time-derivative of the phase of the complex spectrum as
described by [1]_.
Calculates regular STFT as a side effect.
.. [1] Abe, Toshihiko, Takao Kobayashi, and Satoshi Imai.
"Harmonics tracking and pitch extraction based on instantaneous
frequency."
International Conference on Acoustics, Speech, and Signal Processing,
ICASSP-95., Vol. 1. IEEE, 1995.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length, the number of samples between subsequent frames.
If not supplied, defaults to `win_length / 4`.
win_length : int > 0, <= n_fft
Window length. Defaults to `n_fft`.
See `stft` for details.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a user-specified window vector of length `n_fft`
See `stft` for details.
.. see also:: `filters.get_window`
norm : bool
Normalize the STFT.
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` (and `if_gram`) is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
ref_power : float >= 0 or callable
Minimum power threshold for estimating instantaneous frequency.
Any bin with `np.abs(D[f, t])**2 < ref_power` will receive the
default frequency estimate.
If callable, the threshold is set to `ref_power(np.abs(D)**2)`.
clip : boolean
- If `True`, clip estimated frequencies to the range `[0, 0.5 * sr]`.
- If `False`, estimated frequencies can be negative or exceed
`0.5 * sr`.
dtype : numeric type
Complex numeric type for `D`. Default is 64-bit complex.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
if_gram : np.ndarray [shape=(1 + n_fft/2, t), dtype=real]
Instantaneous frequency spectrogram:
`if_gram[f, t]` is the frequency at bin `f`, time `t`
D : np.ndarray [shape=(1 + n_fft/2, t), dtype=complex]
Short-time Fourier transform
See Also
--------
stft : Short-time Fourier Transform
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> frequencies, D = librosa.ifgram(y, sr=sr)
>>> frequencies
array([[ 0.000e+00, 0.000e+00, ..., 0.000e+00, 0.000e+00],
[ 3.150e+01, 3.070e+01, ..., 1.077e+01, 1.077e+01],
...,
[ 1.101e+04, 1.101e+04, ..., 1.101e+04, 1.101e+04],
[ 1.102e+04, 1.102e+04, ..., 1.102e+04, 1.102e+04]])
'''
if win_length is None:
win_length = n_fft
if hop_length is None:
hop_length = int(win_length // 4)
# Construct a padded hann window
fft_window = util.pad_center(get_window(window, win_length,
fftbins=True),
n_fft)
# Window for discrete differentiation
freq_angular = np.linspace(0, 2 * np.pi, n_fft, endpoint=False)
d_window = np.sin(-freq_angular) * np.pi / n_fft
stft_matrix = stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length,
window=window, center=center,
dtype=dtype, pad_mode=pad_mode)
diff_stft = stft(y, n_fft=n_fft, hop_length=hop_length,
window=d_window, center=center,
dtype=dtype, pad_mode=pad_mode).conj()
# Compute power normalization. Suppress zeros.
mag, phase = magphase(stft_matrix)
if six.callable(ref_power):
ref_power = ref_power(mag**2)
elif ref_power < 0:
raise ParameterError('ref_power must be non-negative or callable.')
# Pylint does not correctly infer the type here, but it's correct.
# pylint: disable=maybe-no-member
freq_angular = freq_angular.reshape((-1, 1))
bin_offset = (-phase * diff_stft).imag / mag
bin_offset[mag < ref_power**0.5] = 0
if_gram = freq_angular[:n_fft//2 + 1] + bin_offset
if norm:
stft_matrix = stft_matrix * 2.0 / fft_window.sum()
if clip:
np.clip(if_gram, 0, np.pi, out=if_gram)
if_gram *= float(sr) * 0.5 / np.pi
return if_gram, stft_matrix
|
def ifgram(y, sr=22050, n_fft=2048, hop_length=None, win_length=None,
window='hann', norm=False, center=True, ref_power=1e-6,
clip=True, dtype=np.complex64, pad_mode='reflect'):
'''Compute the instantaneous frequency (as a proportion of the sampling rate)
obtained as the time-derivative of the phase of the complex spectrum as
described by [1]_.
Calculates regular STFT as a side effect.
.. [1] Abe, Toshihiko, Takao Kobayashi, and Satoshi Imai.
"Harmonics tracking and pitch extraction based on instantaneous
frequency."
International Conference on Acoustics, Speech, and Signal Processing,
ICASSP-95., Vol. 1. IEEE, 1995.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length, the number of samples between subsequent frames.
If not supplied, defaults to `win_length / 4`.
win_length : int > 0, <= n_fft
Window length. Defaults to `n_fft`.
See `stft` for details.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a user-specified window vector of length `n_fft`
See `stft` for details.
.. see also:: `filters.get_window`
norm : bool
Normalize the STFT.
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` (and `if_gram`) is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
ref_power : float >= 0 or callable
Minimum power threshold for estimating instantaneous frequency.
Any bin with `np.abs(D[f, t])**2 < ref_power` will receive the
default frequency estimate.
If callable, the threshold is set to `ref_power(np.abs(D)**2)`.
clip : boolean
- If `True`, clip estimated frequencies to the range `[0, 0.5 * sr]`.
- If `False`, estimated frequencies can be negative or exceed
`0.5 * sr`.
dtype : numeric type
Complex numeric type for `D`. Default is 64-bit complex.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
if_gram : np.ndarray [shape=(1 + n_fft/2, t), dtype=real]
Instantaneous frequency spectrogram:
`if_gram[f, t]` is the frequency at bin `f`, time `t`
D : np.ndarray [shape=(1 + n_fft/2, t), dtype=complex]
Short-time Fourier transform
See Also
--------
stft : Short-time Fourier Transform
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> frequencies, D = librosa.ifgram(y, sr=sr)
>>> frequencies
array([[ 0.000e+00, 0.000e+00, ..., 0.000e+00, 0.000e+00],
[ 3.150e+01, 3.070e+01, ..., 1.077e+01, 1.077e+01],
...,
[ 1.101e+04, 1.101e+04, ..., 1.101e+04, 1.101e+04],
[ 1.102e+04, 1.102e+04, ..., 1.102e+04, 1.102e+04]])
'''
if win_length is None:
win_length = n_fft
if hop_length is None:
hop_length = int(win_length // 4)
# Construct a padded hann window
fft_window = util.pad_center(get_window(window, win_length,
fftbins=True),
n_fft)
# Window for discrete differentiation
freq_angular = np.linspace(0, 2 * np.pi, n_fft, endpoint=False)
d_window = np.sin(-freq_angular) * np.pi / n_fft
stft_matrix = stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length,
window=window, center=center,
dtype=dtype, pad_mode=pad_mode)
diff_stft = stft(y, n_fft=n_fft, hop_length=hop_length,
window=d_window, center=center,
dtype=dtype, pad_mode=pad_mode).conj()
# Compute power normalization. Suppress zeros.
mag, phase = magphase(stft_matrix)
if six.callable(ref_power):
ref_power = ref_power(mag**2)
elif ref_power < 0:
raise ParameterError('ref_power must be non-negative or callable.')
# Pylint does not correctly infer the type here, but it's correct.
# pylint: disable=maybe-no-member
freq_angular = freq_angular.reshape((-1, 1))
bin_offset = (-phase * diff_stft).imag / mag
bin_offset[mag < ref_power**0.5] = 0
if_gram = freq_angular[:n_fft//2 + 1] + bin_offset
if norm:
stft_matrix = stft_matrix * 2.0 / fft_window.sum()
if clip:
np.clip(if_gram, 0, np.pi, out=if_gram)
if_gram *= float(sr) * 0.5 / np.pi
return if_gram, stft_matrix
|
[
"Compute",
"the",
"instantaneous",
"frequency",
"(",
"as",
"a",
"proportion",
"of",
"the",
"sampling",
"rate",
")",
"obtained",
"as",
"the",
"time",
"-",
"derivative",
"of",
"the",
"phase",
"of",
"the",
"complex",
"spectrum",
"as",
"described",
"by",
"[",
"1",
"]",
"_",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L359-L507
|
[
"def",
"ifgram",
"(",
"y",
",",
"sr",
"=",
"22050",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"None",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"norm",
"=",
"False",
",",
"center",
"=",
"True",
",",
"ref_power",
"=",
"1e-6",
",",
"clip",
"=",
"True",
",",
"dtype",
"=",
"np",
".",
"complex64",
",",
"pad_mode",
"=",
"'reflect'",
")",
":",
"if",
"win_length",
"is",
"None",
":",
"win_length",
"=",
"n_fft",
"if",
"hop_length",
"is",
"None",
":",
"hop_length",
"=",
"int",
"(",
"win_length",
"//",
"4",
")",
"# Construct a padded hann window",
"fft_window",
"=",
"util",
".",
"pad_center",
"(",
"get_window",
"(",
"window",
",",
"win_length",
",",
"fftbins",
"=",
"True",
")",
",",
"n_fft",
")",
"# Window for discrete differentiation",
"freq_angular",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
",",
"n_fft",
",",
"endpoint",
"=",
"False",
")",
"d_window",
"=",
"np",
".",
"sin",
"(",
"-",
"freq_angular",
")",
"*",
"np",
".",
"pi",
"/",
"n_fft",
"stft_matrix",
"=",
"stft",
"(",
"y",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"dtype",
"=",
"dtype",
",",
"pad_mode",
"=",
"pad_mode",
")",
"diff_stft",
"=",
"stft",
"(",
"y",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"window",
"=",
"d_window",
",",
"center",
"=",
"center",
",",
"dtype",
"=",
"dtype",
",",
"pad_mode",
"=",
"pad_mode",
")",
".",
"conj",
"(",
")",
"# Compute power normalization. Suppress zeros.",
"mag",
",",
"phase",
"=",
"magphase",
"(",
"stft_matrix",
")",
"if",
"six",
".",
"callable",
"(",
"ref_power",
")",
":",
"ref_power",
"=",
"ref_power",
"(",
"mag",
"**",
"2",
")",
"elif",
"ref_power",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"'ref_power must be non-negative or callable.'",
")",
"# Pylint does not correctly infer the type here, but it's correct.",
"# pylint: disable=maybe-no-member",
"freq_angular",
"=",
"freq_angular",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"bin_offset",
"=",
"(",
"-",
"phase",
"*",
"diff_stft",
")",
".",
"imag",
"/",
"mag",
"bin_offset",
"[",
"mag",
"<",
"ref_power",
"**",
"0.5",
"]",
"=",
"0",
"if_gram",
"=",
"freq_angular",
"[",
":",
"n_fft",
"//",
"2",
"+",
"1",
"]",
"+",
"bin_offset",
"if",
"norm",
":",
"stft_matrix",
"=",
"stft_matrix",
"*",
"2.0",
"/",
"fft_window",
".",
"sum",
"(",
")",
"if",
"clip",
":",
"np",
".",
"clip",
"(",
"if_gram",
",",
"0",
",",
"np",
".",
"pi",
",",
"out",
"=",
"if_gram",
")",
"if_gram",
"*=",
"float",
"(",
"sr",
")",
"*",
"0.5",
"/",
"np",
".",
"pi",
"return",
"if_gram",
",",
"stft_matrix"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
magphase
|
Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that `D = S * P`.
Parameters
----------
D : np.ndarray [shape=(d, t), dtype=complex]
complex-valued spectrogram
power : float > 0
Exponent for the magnitude spectrogram,
e.g., 1 for energy, 2 for power, etc.
Returns
-------
D_mag : np.ndarray [shape=(d, t), dtype=real]
magnitude of `D`, raised to `power`
D_phase : np.ndarray [shape=(d, t), dtype=complex]
`exp(1.j * phi)` where `phi` is the phase of `D`
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y)
>>> magnitude, phase = librosa.magphase(D)
>>> magnitude
array([[ 2.524e-03, 4.329e-02, ..., 3.217e-04, 3.520e-05],
[ 2.645e-03, 5.152e-02, ..., 3.283e-04, 3.432e-04],
...,
[ 1.966e-05, 9.828e-06, ..., 3.164e-07, 9.370e-06],
[ 1.966e-05, 9.830e-06, ..., 3.161e-07, 9.366e-06]], dtype=float32)
>>> phase
array([[ 1.000e+00 +0.000e+00j, 1.000e+00 +0.000e+00j, ...,
-1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j],
[ 1.000e+00 +1.615e-16j, 9.950e-01 -1.001e-01j, ...,
9.794e-01 +2.017e-01j, 1.492e-02 -9.999e-01j],
...,
[ 1.000e+00 -5.609e-15j, -5.081e-04 +1.000e+00j, ...,
-9.549e-01 -2.970e-01j, 2.938e-01 -9.559e-01j],
[ -1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j, ...,
-1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j]], dtype=complex64)
Or get the phase angle (in radians)
>>> np.angle(phase)
array([[ 0.000e+00, 0.000e+00, ..., 3.142e+00, 3.142e+00],
[ 1.615e-16, -1.003e-01, ..., 2.031e-01, -1.556e+00],
...,
[ -5.609e-15, 1.571e+00, ..., -2.840e+00, -1.273e+00],
[ 3.142e+00, 3.142e+00, ..., 3.142e+00, 3.142e+00]], dtype=float32)
|
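The identity D = S * P stated above holds for any complex array; a quick check with random data standing in for a spectrogram:

import numpy as np

rng = np.random.default_rng(0)
D = rng.standard_normal((5, 4)) + 1j * rng.standard_normal((5, 4))
mag, phase = np.abs(D), np.exp(1j * np.angle(D))
assert np.allclose(mag * phase, D)  # magnitude times unit phasor recovers D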
librosa/core/spectrum.py
|
def magphase(D, power=1):
"""Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that `D = S * P`.
Parameters
----------
D : np.ndarray [shape=(d, t), dtype=complex]
complex-valued spectrogram
power : float > 0
Exponent for the magnitude spectrogram,
e.g., 1 for energy, 2 for power, etc.
Returns
-------
D_mag : np.ndarray [shape=(d, t), dtype=real]
magnitude of `D`, raised to `power`
D_phase : np.ndarray [shape=(d, t), dtype=complex]
`exp(1.j * phi)` where `phi` is the phase of `D`
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y)
>>> magnitude, phase = librosa.magphase(D)
>>> magnitude
array([[ 2.524e-03, 4.329e-02, ..., 3.217e-04, 3.520e-05],
[ 2.645e-03, 5.152e-02, ..., 3.283e-04, 3.432e-04],
...,
[ 1.966e-05, 9.828e-06, ..., 3.164e-07, 9.370e-06],
[ 1.966e-05, 9.830e-06, ..., 3.161e-07, 9.366e-06]], dtype=float32)
>>> phase
array([[ 1.000e+00 +0.000e+00j, 1.000e+00 +0.000e+00j, ...,
-1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j],
[ 1.000e+00 +1.615e-16j, 9.950e-01 -1.001e-01j, ...,
9.794e-01 +2.017e-01j, 1.492e-02 -9.999e-01j],
...,
[ 1.000e+00 -5.609e-15j, -5.081e-04 +1.000e+00j, ...,
-9.549e-01 -2.970e-01j, 2.938e-01 -9.559e-01j],
[ -1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j, ...,
-1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j]], dtype=complex64)
Or get the phase angle (in radians)
>>> np.angle(phase)
array([[ 0.000e+00, 0.000e+00, ..., 3.142e+00, 3.142e+00],
[ 1.615e-16, -1.003e-01, ..., 2.031e-01, -1.556e+00],
...,
[ -5.609e-15, 1.571e+00, ..., -2.840e+00, -1.273e+00],
[ 3.142e+00, 3.142e+00, ..., 3.142e+00, 3.142e+00]], dtype=float32)
"""
mag = np.abs(D)
mag **= power
phase = np.exp(1.j * np.angle(D))
return mag, phase
|
def magphase(D, power=1):
"""Separate a complex-valued spectrogram D into its magnitude (S)
and phase (P) components, so that `D = S * P`.
Parameters
----------
D : np.ndarray [shape=(d, t), dtype=complex]
complex-valued spectrogram
power : float > 0
Exponent for the magnitude spectrogram,
e.g., 1 for energy, 2 for power, etc.
Returns
-------
D_mag : np.ndarray [shape=(d, t), dtype=real]
magnitude of `D`, raised to `power`
D_phase : np.ndarray [shape=(d, t), dtype=complex]
`exp(1.j * phi)` where `phi` is the phase of `D`
Examples
--------
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y)
>>> magnitude, phase = librosa.magphase(D)
>>> magnitude
array([[ 2.524e-03, 4.329e-02, ..., 3.217e-04, 3.520e-05],
[ 2.645e-03, 5.152e-02, ..., 3.283e-04, 3.432e-04],
...,
[ 1.966e-05, 9.828e-06, ..., 3.164e-07, 9.370e-06],
[ 1.966e-05, 9.830e-06, ..., 3.161e-07, 9.366e-06]], dtype=float32)
>>> phase
array([[ 1.000e+00 +0.000e+00j, 1.000e+00 +0.000e+00j, ...,
-1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j],
[ 1.000e+00 +1.615e-16j, 9.950e-01 -1.001e-01j, ...,
9.794e-01 +2.017e-01j, 1.492e-02 -9.999e-01j],
...,
[ 1.000e+00 -5.609e-15j, -5.081e-04 +1.000e+00j, ...,
-9.549e-01 -2.970e-01j, 2.938e-01 -9.559e-01j],
[ -1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j, ...,
-1.000e+00 +8.742e-08j, -1.000e+00 +8.742e-08j]], dtype=complex64)
Or get the phase angle (in radians)
>>> np.angle(phase)
array([[ 0.000e+00, 0.000e+00, ..., 3.142e+00, 3.142e+00],
[ 1.615e-16, -1.003e-01, ..., 2.031e-01, -1.556e+00],
...,
[ -5.609e-15, 1.571e+00, ..., -2.840e+00, -1.273e+00],
[ 3.142e+00, 3.142e+00, ..., 3.142e+00, 3.142e+00]], dtype=float32)
"""
mag = np.abs(D)
mag **= power
phase = np.exp(1.j * np.angle(D))
return mag, phase
|
[
"Separate",
"a",
"complex",
"-",
"valued",
"spectrogram",
"D",
"into",
"its",
"magnitude",
"(",
"S",
")",
"and",
"phase",
"(",
"P",
")",
"components",
"so",
"that",
"D",
"=",
"S",
"*",
"P",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L510-L570
|
[
"def",
"magphase",
"(",
"D",
",",
"power",
"=",
"1",
")",
":",
"mag",
"=",
"np",
".",
"abs",
"(",
"D",
")",
"mag",
"**=",
"power",
"phase",
"=",
"np",
".",
"exp",
"(",
"1.j",
"*",
"np",
".",
"angle",
"(",
"D",
")",
")",
"return",
"mag",
",",
"phase"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
phase_vocoder
|
Phase vocoder. Given an STFT matrix D, speed up by a factor of `rate`
Based on the implementation provided by [1]_.
.. [1] Ellis, D. P. W. "A phase vocoder in Matlab."
Columbia University, 2002.
http://www.ee.columbia.edu/~dpwe/resources/matlab/pvoc/
Examples
--------
>>> # Play at double speed
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y, n_fft=2048, hop_length=512)
>>> D_fast = librosa.phase_vocoder(D, 2.0, hop_length=512)
>>> y_fast = librosa.istft(D_fast, hop_length=512)
>>> # Or play at 1/3 speed
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y, n_fft=2048, hop_length=512)
>>> D_slow = librosa.phase_vocoder(D, 1./3, hop_length=512)
>>> y_slow = librosa.istft(D_slow, hop_length=512)
Parameters
----------
D : np.ndarray [shape=(d, t), dtype=complex]
STFT matrix
rate : float > 0 [scalar]
Speed-up factor: `rate > 1` is faster, `rate < 1` is slower.
hop_length : int > 0 [scalar] or None
The number of samples between successive columns of `D`.
If None, defaults to `n_fft/4 = (D.shape[0]-1)/2`
Returns
-------
D_stretched : np.ndarray [shape=(d, t / rate), dtype=complex]
time-stretched STFT
|
librosa/core/spectrum.py
|
def phase_vocoder(D, rate, hop_length=None):
"""Phase vocoder. Given an STFT matrix D, speed up by a factor of `rate`
Based on the implementation provided by [1]_.
.. [1] Ellis, D. P. W. "A phase vocoder in Matlab."
Columbia University, 2002.
http://www.ee.columbia.edu/~dpwe/resources/matlab/pvoc/
Examples
--------
>>> # Play at double speed
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y, n_fft=2048, hop_length=512)
>>> D_fast = librosa.phase_vocoder(D, 2.0, hop_length=512)
>>> y_fast = librosa.istft(D_fast, hop_length=512)
>>> # Or play at 1/3 speed
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y, n_fft=2048, hop_length=512)
>>> D_slow = librosa.phase_vocoder(D, 1./3, hop_length=512)
>>> y_slow = librosa.istft(D_slow, hop_length=512)
Parameters
----------
D : np.ndarray [shape=(d, t), dtype=complex]
STFT matrix
rate : float > 0 [scalar]
Speed-up factor: `rate > 1` is faster, `rate < 1` is slower.
hop_length : int > 0 [scalar] or None
The number of samples between successive columns of `D`.
If None, defaults to `n_fft/4 = (D.shape[0]-1)/2`
Returns
-------
D_stretched : np.ndarray [shape=(d, t / rate), dtype=complex]
time-stretched STFT
"""
n_fft = 2 * (D.shape[0] - 1)
if hop_length is None:
hop_length = int(n_fft // 4)
time_steps = np.arange(0, D.shape[1], rate, dtype=np.float)
# Create an empty output array
d_stretch = np.zeros((D.shape[0], len(time_steps)), D.dtype, order='F')
# Expected phase advance in each bin
phi_advance = np.linspace(0, np.pi * hop_length, D.shape[0])
# Phase accumulator; initialize to the first sample
phase_acc = np.angle(D[:, 0])
# Pad 0 columns to simplify boundary logic
D = np.pad(D, [(0, 0), (0, 2)], mode='constant')
for (t, step) in enumerate(time_steps):
columns = D[:, int(step):int(step + 2)]
# Weighting for linear magnitude interpolation
alpha = np.mod(step, 1.0)
mag = ((1.0 - alpha) * np.abs(columns[:, 0])
+ alpha * np.abs(columns[:, 1]))
# Store to output array
d_stretch[:, t] = mag * np.exp(1.j * phase_acc)
# Compute phase advance
dphase = (np.angle(columns[:, 1])
- np.angle(columns[:, 0])
- phi_advance)
# Wrap to -pi:pi range
dphase = dphase - 2.0 * np.pi * np.round(dphase / (2.0 * np.pi))
# Accumulate phase
phase_acc += phi_advance + dphase
return d_stretch
|
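The wrapping step inside the loop above maps an arbitrary phase difference into [-pi, pi]. A quick standalone check of that identity (the test values are arbitrary):

import numpy as np

dphase = np.array([-7.0, -np.pi, 0.0, 4.0, 9.5])
wrapped = dphase - 2.0 * np.pi * np.round(dphase / (2.0 * np.pi))
assert np.all(np.abs(wrapped) <= np.pi + 1e-12)  # every value lands in [-pi, pi]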
def phase_vocoder(D, rate, hop_length=None):
"""Phase vocoder. Given an STFT matrix D, speed up by a factor of `rate`
Based on the implementation provided by [1]_.
.. [1] Ellis, D. P. W. "A phase vocoder in Matlab."
Columbia University, 2002.
http://www.ee.columbia.edu/~dpwe/resources/matlab/pvoc/
Examples
--------
>>> # Play at double speed
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y, n_fft=2048, hop_length=512)
>>> D_fast = librosa.phase_vocoder(D, 2.0, hop_length=512)
>>> y_fast = librosa.istft(D_fast, hop_length=512)
>>> # Or play at 1/3 speed
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = librosa.stft(y, n_fft=2048, hop_length=512)
>>> D_slow = librosa.phase_vocoder(D, 1./3, hop_length=512)
>>> y_slow = librosa.istft(D_slow, hop_length=512)
Parameters
----------
D : np.ndarray [shape=(d, t), dtype=complex]
STFT matrix
rate : float > 0 [scalar]
Speed-up factor: `rate > 1` is faster, `rate < 1` is slower.
hop_length : int > 0 [scalar] or None
The number of samples between successive columns of `D`.
If None, defaults to `n_fft/4 = (D.shape[0]-1)/2`
Returns
-------
D_stretched : np.ndarray [shape=(d, t / rate), dtype=complex]
time-stretched STFT
"""
n_fft = 2 * (D.shape[0] - 1)
if hop_length is None:
hop_length = int(n_fft // 4)
time_steps = np.arange(0, D.shape[1], rate, dtype=np.float)
# Create an empty output array
d_stretch = np.zeros((D.shape[0], len(time_steps)), D.dtype, order='F')
# Expected phase advance in each bin
phi_advance = np.linspace(0, np.pi * hop_length, D.shape[0])
# Phase accumulator; initialize to the first sample
phase_acc = np.angle(D[:, 0])
# Pad 0 columns to simplify boundary logic
D = np.pad(D, [(0, 0), (0, 2)], mode='constant')
for (t, step) in enumerate(time_steps):
columns = D[:, int(step):int(step + 2)]
# Weighting for linear magnitude interpolation
alpha = np.mod(step, 1.0)
mag = ((1.0 - alpha) * np.abs(columns[:, 0])
+ alpha * np.abs(columns[:, 1]))
# Store to output array
d_stretch[:, t] = mag * np.exp(1.j * phase_acc)
# Compute phase advance
dphase = (np.angle(columns[:, 1])
- np.angle(columns[:, 0])
- phi_advance)
# Wrap to -pi:pi range
dphase = dphase - 2.0 * np.pi * np.round(dphase / (2.0 * np.pi))
# Accumulate phase
phase_acc += phi_advance + dphase
return d_stretch
|
[
"Phase",
"vocoder",
".",
"Given",
"an",
"STFT",
"matrix",
"D",
"speed",
"up",
"by",
"a",
"factor",
"of",
"rate"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L573-L657
|
[
"def",
"phase_vocoder",
"(",
"D",
",",
"rate",
",",
"hop_length",
"=",
"None",
")",
":",
"n_fft",
"=",
"2",
"*",
"(",
"D",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
"if",
"hop_length",
"is",
"None",
":",
"hop_length",
"=",
"int",
"(",
"n_fft",
"//",
"4",
")",
"time_steps",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"D",
".",
"shape",
"[",
"1",
"]",
",",
"rate",
",",
"dtype",
"=",
"np",
".",
"float",
")",
"# Create an empty output array",
"d_stretch",
"=",
"np",
".",
"zeros",
"(",
"(",
"D",
".",
"shape",
"[",
"0",
"]",
",",
"len",
"(",
"time_steps",
")",
")",
",",
"D",
".",
"dtype",
",",
"order",
"=",
"'F'",
")",
"# Expected phase advance in each bin",
"phi_advance",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"np",
".",
"pi",
"*",
"hop_length",
",",
"D",
".",
"shape",
"[",
"0",
"]",
")",
"# Phase accumulator; initialize to the first sample",
"phase_acc",
"=",
"np",
".",
"angle",
"(",
"D",
"[",
":",
",",
"0",
"]",
")",
"# Pad 0 columns to simplify boundary logic",
"D",
"=",
"np",
".",
"pad",
"(",
"D",
",",
"[",
"(",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"2",
")",
"]",
",",
"mode",
"=",
"'constant'",
")",
"for",
"(",
"t",
",",
"step",
")",
"in",
"enumerate",
"(",
"time_steps",
")",
":",
"columns",
"=",
"D",
"[",
":",
",",
"int",
"(",
"step",
")",
":",
"int",
"(",
"step",
"+",
"2",
")",
"]",
"# Weighting for linear magnitude interpolation",
"alpha",
"=",
"np",
".",
"mod",
"(",
"step",
",",
"1.0",
")",
"mag",
"=",
"(",
"(",
"1.0",
"-",
"alpha",
")",
"*",
"np",
".",
"abs",
"(",
"columns",
"[",
":",
",",
"0",
"]",
")",
"+",
"alpha",
"*",
"np",
".",
"abs",
"(",
"columns",
"[",
":",
",",
"1",
"]",
")",
")",
"# Store to output array",
"d_stretch",
"[",
":",
",",
"t",
"]",
"=",
"mag",
"*",
"np",
".",
"exp",
"(",
"1.j",
"*",
"phase_acc",
")",
"# Compute phase advance",
"dphase",
"=",
"(",
"np",
".",
"angle",
"(",
"columns",
"[",
":",
",",
"1",
"]",
")",
"-",
"np",
".",
"angle",
"(",
"columns",
"[",
":",
",",
"0",
"]",
")",
"-",
"phi_advance",
")",
"# Wrap to -pi:pi range",
"dphase",
"=",
"dphase",
"-",
"2.0",
"*",
"np",
".",
"pi",
"*",
"np",
".",
"round",
"(",
"dphase",
"/",
"(",
"2.0",
"*",
"np",
".",
"pi",
")",
")",
"# Accumulate phase",
"phase_acc",
"+=",
"phi_advance",
"+",
"dphase",
"return",
"d_stretch"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
iirt
|
r'''Time-frequency representation using IIR filters [1]_.
This function will return a time-frequency representation
using a multirate filter bank consisting of IIR filters.
First, `y` is resampled as needed according to the provided `sample_rates`.
Then, a filterbank with `n` band-pass filters is designed.
The resampled input signals are processed by the filterbank as a whole.
(`scipy.signal.filtfilt` or `scipy.signal.sosfiltfilt`, respectively, is used to make the phase linear.)
The output of the filterbank is cut into frames.
For each band, the short-time mean-square power (STMSP) is calculated by
summing `win_length` subsequent filtered time samples.
When called with the default set of parameters, it will generate the TF-representation
as described in [1]_ (pitch filterbank):
* 85 filters with MIDI pitches [24, 108] as `center_freqs`.
* each filter having a bandwidth of one semitone.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
win_length : int > 0, <= n_fft
Window length.
hop_length : int > 0 [scalar]
Hop length, the number of samples between subsequent frames.
If not supplied, defaults to `win_length / 4`.
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, this function uses reflection padding.
flayout : string
- If `ba`, the standard difference equation is used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, a series of second-order filters is used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
kwargs : additional keyword arguments
Additional arguments for `librosa.filters.semitone_filterbank()`
(e.g., could be used to provide another set of `center_freqs` and `sample_rates`).
Returns
-------
bands_power : np.ndarray [shape=(n, t), dtype=dtype]
Short-time mean-square power for the input signal.
Raises
------
ParameterError
If `flayout` is not None, `ba`, or `sos`.
See Also
--------
librosa.filters.semitone_filterbank
librosa.filters._multirate_fb
librosa.filters.mr_frequencies
librosa.core.cqt
scipy.signal.filtfilt
scipy.signal.sosfiltfilt
Examples
--------
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = np.abs(librosa.iirt(y))
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='cqt_hz', x_axis='time')
>>> plt.title('Semitone spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
|
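The short-time mean-square power step described above amounts to squaring a band's filtered output, cutting it into frames, and summing within each frame. A sketch of that per-band computation; the band signal, window, and hop values are assumed for illustration:

import numpy as np
import librosa

def stmsp(band, win_length=2048, hop_length=512):
    # Frame the filtered band signal and sum the squared samples per frame.
    frames = librosa.util.frame(np.ascontiguousarray(band),
                                frame_length=win_length,
                                hop_length=hop_length)
    return np.sum(frames**2, axis=0)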
librosa/core/spectrum.py
|
def iirt(y, sr=22050, win_length=2048, hop_length=None, center=True,
tuning=0.0, pad_mode='reflect', flayout=None, **kwargs):
r'''Time-frequency representation using IIR filters [1]_.
This function will return a time-frequency representation
using a multirate filter bank consisting of IIR filters.
First, `y` is resampled as needed according to the provided `sample_rates`.
Then, a filterbank with `n` band-pass filters is designed.
The resampled input signals are processed by the filterbank as a whole.
(`scipy.signal.filtfilt` or `scipy.signal.sosfiltfilt`, respectively, is used to make the phase linear.)
The output of the filterbank is cut into frames.
For each band, the short-time mean-square power (STMSP) is calculated by
summing `win_length` subsequent filtered time samples.
When called with the default set of parameters, it will generate the TF-representation
as described in [1]_ (pitch filterbank):
* 85 filters with MIDI pitches [24, 108] as `center_freqs`.
* each filter having a bandwidth of one semitone.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
win_length : int > 0, <= n_fft
Window length.
hop_length : int > 0 [scalar]
Hop length, the number of samples between subsequent frames.
If not supplied, defaults to `win_length / 4`.
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, this function uses reflection padding.
flayout : string
- If `ba`, the standard difference equation is used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, a series of second-order filters is used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
kwargs : additional keyword arguments
Additional arguments for `librosa.filters.semitone_filterbank()`
(e.g., could be used to provide another set of `center_freqs` and `sample_rates`).
Returns
-------
bands_power : np.ndarray [shape=(n, t), dtype=dtype]
Short-time mean-square power for the input signal.
Raises
------
ParameterError
If `flayout` is not None, `ba`, or `sos`.
See Also
--------
librosa.filters.semitone_filterbank
librosa.filters._multirate_fb
librosa.filters.mr_frequencies
librosa.core.cqt
scipy.signal.filtfilt
scipy.signal.sosfiltfilt
Examples
--------
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = np.abs(librosa.iirt(y))
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='cqt_hz', x_axis='time')
>>> plt.title('Semitone spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
'''
if flayout is None:
warnings.warn('Default filter layout for `iirt` is `ba`, but will be `sos` in 0.7.',
FutureWarning)
flayout = 'ba'
elif flayout not in ('ba', 'sos'):
raise ParameterError('Unsupported flayout={}'.format(flayout))
# check audio input
util.valid_audio(y)
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
# Pad the time series so that frames are centered
if center:
y = np.pad(y, int(hop_length), mode=pad_mode)
# get the semitone filterbank
filterbank_ct, sample_rates = semitone_filterbank(tuning=tuning, flayout=flayout, **kwargs)
# create three downsampled versions of the audio signal
y_resampled = []
y_srs = np.unique(sample_rates)
for cur_sr in y_srs:
y_resampled.append(resample(y, sr, cur_sr))
# Compute the number of frames that will fit. The end may get truncated.
n_frames = 1 + int((len(y) - win_length) / float(hop_length))
bands_power = []
for cur_sr, cur_filter in zip(sample_rates, filterbank_ct):
factor = float(sr) / float(cur_sr)
win_length_STMSP = int(np.round(win_length / factor))
hop_length_STMSP = int(np.round(hop_length / factor))
# filter the signal
cur_sr_idx = np.flatnonzero(y_srs == cur_sr)[0]
if flayout == 'ba':
cur_filter_output = scipy.signal.filtfilt(cur_filter[0], cur_filter[1],
y_resampled[cur_sr_idx])
elif flayout == 'sos':
cur_filter_output = scipy.signal.sosfiltfilt(cur_filter,
y_resampled[cur_sr_idx])
# frame the current filter output
cur_frames = util.frame(np.ascontiguousarray(cur_filter_output),
frame_length=win_length_STMSP,
hop_length=hop_length_STMSP)
bands_power.append(factor * np.sum(cur_frames**2, axis=0)[:n_frames])
return np.asarray(bands_power)
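A minimal sketch of the per-band STMSP computation that `iirt` performs, using only `numpy` and `scipy`. The single Butterworth band-pass filter, the 440 Hz test tone, and the frame parameters below are illustrative assumptions; they are not the semitone filterbank or the multirate resampling that the function above actually uses.

import numpy as np
import scipy.signal

# Assumed analysis parameters (illustrative only)
sr = 22050
win_length, hop_length = 2048, 512

# Synthetic input: a 440 Hz tone with a little noise
t = np.arange(sr) / float(sr)
y = np.sin(2 * np.pi * 440.0 * t) + 0.01 * np.random.randn(sr)

# One band-pass filter, roughly one semitone wide around A4 (440 Hz)
f_lo = 440.0 * 2.0 ** (-0.5 / 12)
f_hi = 440.0 * 2.0 ** (+0.5 / 12)
sos = scipy.signal.butter(4, [f_lo, f_hi], btype='bandpass', fs=sr, output='sos')

# Zero-phase filtering, as in the `flayout='sos'` branch of iirt
y_filt = scipy.signal.sosfiltfilt(sos, y)

# Frame the filtered signal and sum squared samples per frame (STMSP)
n_frames = 1 + (len(y_filt) - win_length) // hop_length
frames = np.stack([y_filt[i * hop_length: i * hop_length + win_length]
                   for i in range(n_frames)])
stmsp = np.sum(frames ** 2, axis=1)   # shape: (n_frames,)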
|
def iirt(y, sr=22050, win_length=2048, hop_length=None, center=True,
tuning=0.0, pad_mode='reflect', flayout=None, **kwargs):
r'''Time-frequency representation using IIR filters [1]_.
This function will return a time-frequency representation
using a multirate filter bank consisting of IIR filters.
First, `y` is resampled as needed according to the provided `sample_rates`.
    Then, a filterbank with `n` band-pass filters is designed.
    The resampled input signals are processed by the filterbank as a whole.
    (Depending on `flayout`, `scipy.signal.filtfilt` or `scipy.signal.sosfiltfilt` is used to keep the phase linear.)
The output of the filterbank is cut into frames.
For each band, the short-time mean-square power (STMSP) is calculated by
summing `win_length` subsequent filtered time samples.
When called with the default set of parameters, it will generate the TF-representation
as described in [1]_ (pitch filterbank):
* 85 filters with MIDI pitches [24, 108] as `center_freqs`.
        * each filter having a bandwidth of one semitone.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of `y`
    win_length : int > 0 [scalar]
Window length.
hop_length : int > 0 [scalar]
        Hop length, the number of samples between subsequent frames.
        If not supplied, defaults to `win_length // 4`.
center : boolean
- If `True`, the signal `y` is padded so that frame
`D[:, t]` is centered at `y[t * hop_length]`.
- If `False`, then `D[:, t]` begins at `y[t * hop_length]`
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin.
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, this function uses reflection padding.
flayout : string
- If `ba`, the standard difference equation is used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, a series of second-order filters is used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
kwargs : additional keyword arguments
Additional arguments for `librosa.filters.semitone_filterbank()`
(e.g., could be used to provide another set of `center_freqs` and `sample_rates`).
Returns
-------
bands_power : np.ndarray [shape=(n, t), dtype=dtype]
Short-time mean-square power for the input signal.
Raises
------
ParameterError
If `flayout` is not None, `ba`, or `sos`.
See Also
--------
librosa.filters.semitone_filterbank
librosa.filters._multirate_fb
librosa.filters.mr_frequencies
librosa.core.cqt
scipy.signal.filtfilt
scipy.signal.sosfiltfilt
Examples
--------
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> D = np.abs(librosa.iirt(y))
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='cqt_hz', x_axis='time')
>>> plt.title('Semitone spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
'''
if flayout is None:
warnings.warn('Default filter layout for `iirt` is `ba`, but will be `sos` in 0.7.',
FutureWarning)
flayout = 'ba'
elif flayout not in ('ba', 'sos'):
raise ParameterError('Unsupported flayout={}'.format(flayout))
# check audio input
util.valid_audio(y)
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
# Pad the time series so that frames are centered
if center:
y = np.pad(y, int(hop_length), mode=pad_mode)
# get the semitone filterbank
filterbank_ct, sample_rates = semitone_filterbank(tuning=tuning, flayout=flayout, **kwargs)
# create three downsampled versions of the audio signal
y_resampled = []
y_srs = np.unique(sample_rates)
for cur_sr in y_srs:
y_resampled.append(resample(y, sr, cur_sr))
# Compute the number of frames that will fit. The end may get truncated.
n_frames = 1 + int((len(y) - win_length) / float(hop_length))
bands_power = []
for cur_sr, cur_filter in zip(sample_rates, filterbank_ct):
factor = float(sr) / float(cur_sr)
win_length_STMSP = int(np.round(win_length / factor))
hop_length_STMSP = int(np.round(hop_length / factor))
# filter the signal
cur_sr_idx = np.flatnonzero(y_srs == cur_sr)[0]
if flayout == 'ba':
cur_filter_output = scipy.signal.filtfilt(cur_filter[0], cur_filter[1],
y_resampled[cur_sr_idx])
elif flayout == 'sos':
cur_filter_output = scipy.signal.sosfiltfilt(cur_filter,
y_resampled[cur_sr_idx])
# frame the current filter output
cur_frames = util.frame(np.ascontiguousarray(cur_filter_output),
frame_length=win_length_STMSP,
hop_length=hop_length_STMSP)
bands_power.append(factor * np.sum(cur_frames**2, axis=0)[:n_frames])
return np.asarray(bands_power)
|
[
"r",
"Time",
"-",
"frequency",
"representation",
"using",
"IIR",
"filters",
"[",
"1",
"]",
"_",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L661-L811
|
[
"def",
"iirt",
"(",
"y",
",",
"sr",
"=",
"22050",
",",
"win_length",
"=",
"2048",
",",
"hop_length",
"=",
"None",
",",
"center",
"=",
"True",
",",
"tuning",
"=",
"0.0",
",",
"pad_mode",
"=",
"'reflect'",
",",
"flayout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"flayout",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"'Default filter layout for `iirt` is `ba`, but will be `sos` in 0.7.'",
",",
"FutureWarning",
")",
"flayout",
"=",
"'ba'",
"elif",
"flayout",
"not",
"in",
"(",
"'ba'",
",",
"'sos'",
")",
":",
"raise",
"ParameterError",
"(",
"'Unsupported flayout={}'",
".",
"format",
"(",
"flayout",
")",
")",
"# check audio input",
"util",
".",
"valid_audio",
"(",
"y",
")",
"# Set the default hop, if it's not already specified",
"if",
"hop_length",
"is",
"None",
":",
"hop_length",
"=",
"int",
"(",
"win_length",
"//",
"4",
")",
"# Pad the time series so that frames are centered",
"if",
"center",
":",
"y",
"=",
"np",
".",
"pad",
"(",
"y",
",",
"int",
"(",
"hop_length",
")",
",",
"mode",
"=",
"pad_mode",
")",
"# get the semitone filterbank",
"filterbank_ct",
",",
"sample_rates",
"=",
"semitone_filterbank",
"(",
"tuning",
"=",
"tuning",
",",
"flayout",
"=",
"flayout",
",",
"*",
"*",
"kwargs",
")",
"# create three downsampled versions of the audio signal",
"y_resampled",
"=",
"[",
"]",
"y_srs",
"=",
"np",
".",
"unique",
"(",
"sample_rates",
")",
"for",
"cur_sr",
"in",
"y_srs",
":",
"y_resampled",
".",
"append",
"(",
"resample",
"(",
"y",
",",
"sr",
",",
"cur_sr",
")",
")",
"# Compute the number of frames that will fit. The end may get truncated.",
"n_frames",
"=",
"1",
"+",
"int",
"(",
"(",
"len",
"(",
"y",
")",
"-",
"win_length",
")",
"/",
"float",
"(",
"hop_length",
")",
")",
"bands_power",
"=",
"[",
"]",
"for",
"cur_sr",
",",
"cur_filter",
"in",
"zip",
"(",
"sample_rates",
",",
"filterbank_ct",
")",
":",
"factor",
"=",
"float",
"(",
"sr",
")",
"/",
"float",
"(",
"cur_sr",
")",
"win_length_STMSP",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"win_length",
"/",
"factor",
")",
")",
"hop_length_STMSP",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"hop_length",
"/",
"factor",
")",
")",
"# filter the signal",
"cur_sr_idx",
"=",
"np",
".",
"flatnonzero",
"(",
"y_srs",
"==",
"cur_sr",
")",
"[",
"0",
"]",
"if",
"flayout",
"==",
"'ba'",
":",
"cur_filter_output",
"=",
"scipy",
".",
"signal",
".",
"filtfilt",
"(",
"cur_filter",
"[",
"0",
"]",
",",
"cur_filter",
"[",
"1",
"]",
",",
"y_resampled",
"[",
"cur_sr_idx",
"]",
")",
"elif",
"flayout",
"==",
"'sos'",
":",
"cur_filter_output",
"=",
"scipy",
".",
"signal",
".",
"sosfiltfilt",
"(",
"cur_filter",
",",
"y_resampled",
"[",
"cur_sr_idx",
"]",
")",
"# frame the current filter output",
"cur_frames",
"=",
"util",
".",
"frame",
"(",
"np",
".",
"ascontiguousarray",
"(",
"cur_filter_output",
")",
",",
"frame_length",
"=",
"win_length_STMSP",
",",
"hop_length",
"=",
"hop_length_STMSP",
")",
"bands_power",
".",
"append",
"(",
"factor",
"*",
"np",
".",
"sum",
"(",
"cur_frames",
"**",
"2",
",",
"axis",
"=",
"0",
")",
"[",
":",
"n_frames",
"]",
")",
"return",
"np",
".",
"asarray",
"(",
"bands_power",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
power_to_db
|
Convert a power spectrogram (amplitude squared) to decibel (dB) units
This computes the scaling ``10 * log10(S / ref)`` in a numerically
stable way.
Parameters
----------
S : np.ndarray
input power
ref : scalar or callable
If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
`10 * log10(S / ref)`.
Zeros in the output correspond to positions where `S == ref`.
If callable, the reference value is computed as `ref(S)`.
amin : float > 0 [scalar]
minimum threshold for `abs(S)` and `ref`
top_db : float >= 0 [scalar]
threshold the output at `top_db` below the peak:
``max(10 * log10(S)) - top_db``
Returns
-------
S_db : np.ndarray
``S_db ~= 10 * log10(S) - 10 * log10(ref)``
See Also
--------
perceptual_weighting
db_to_power
amplitude_to_db
db_to_amplitude
Notes
-----
This function caches at level 30.
Examples
--------
Get a power spectrogram from a waveform ``y``
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.power_to_db(S**2)
array([[-33.293, -27.32 , ..., -33.293, -33.293],
[-33.293, -25.723, ..., -33.293, -33.293],
...,
[-33.293, -33.293, ..., -33.293, -33.293],
[-33.293, -33.293, ..., -33.293, -33.293]], dtype=float32)
Compute dB relative to peak power
>>> librosa.power_to_db(S**2, ref=np.max)
array([[-80. , -74.027, ..., -80. , -80. ],
[-80. , -72.431, ..., -80. , -80. ],
...,
[-80. , -80. , ..., -80. , -80. ],
[-80. , -80. , ..., -80. , -80. ]], dtype=float32)
Or compare to median power
>>> librosa.power_to_db(S**2, ref=np.median)
array([[-0.189, 5.784, ..., -0.189, -0.189],
[-0.189, 7.381, ..., -0.189, -0.189],
...,
[-0.189, -0.189, ..., -0.189, -0.189],
[-0.189, -0.189, ..., -0.189, -0.189]], dtype=float32)
And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(S**2, sr=sr, y_axis='log')
>>> plt.colorbar()
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.power_to_db(S**2, ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log-Power spectrogram')
>>> plt.tight_layout()
|
librosa/core/spectrum.py
|
def power_to_db(S, ref=1.0, amin=1e-10, top_db=80.0):
"""Convert a power spectrogram (amplitude squared) to decibel (dB) units
This computes the scaling ``10 * log10(S / ref)`` in a numerically
stable way.
Parameters
----------
S : np.ndarray
input power
ref : scalar or callable
If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
`10 * log10(S / ref)`.
Zeros in the output correspond to positions where `S == ref`.
If callable, the reference value is computed as `ref(S)`.
amin : float > 0 [scalar]
minimum threshold for `abs(S)` and `ref`
top_db : float >= 0 [scalar]
threshold the output at `top_db` below the peak:
``max(10 * log10(S)) - top_db``
Returns
-------
S_db : np.ndarray
``S_db ~= 10 * log10(S) - 10 * log10(ref)``
See Also
--------
perceptual_weighting
db_to_power
amplitude_to_db
db_to_amplitude
Notes
-----
This function caches at level 30.
Examples
--------
Get a power spectrogram from a waveform ``y``
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.power_to_db(S**2)
array([[-33.293, -27.32 , ..., -33.293, -33.293],
[-33.293, -25.723, ..., -33.293, -33.293],
...,
[-33.293, -33.293, ..., -33.293, -33.293],
[-33.293, -33.293, ..., -33.293, -33.293]], dtype=float32)
Compute dB relative to peak power
>>> librosa.power_to_db(S**2, ref=np.max)
array([[-80. , -74.027, ..., -80. , -80. ],
[-80. , -72.431, ..., -80. , -80. ],
...,
[-80. , -80. , ..., -80. , -80. ],
[-80. , -80. , ..., -80. , -80. ]], dtype=float32)
Or compare to median power
>>> librosa.power_to_db(S**2, ref=np.median)
array([[-0.189, 5.784, ..., -0.189, -0.189],
[-0.189, 7.381, ..., -0.189, -0.189],
...,
[-0.189, -0.189, ..., -0.189, -0.189],
[-0.189, -0.189, ..., -0.189, -0.189]], dtype=float32)
And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(S**2, sr=sr, y_axis='log')
>>> plt.colorbar()
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.power_to_db(S**2, ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log-Power spectrogram')
>>> plt.tight_layout()
"""
S = np.asarray(S)
if amin <= 0:
raise ParameterError('amin must be strictly positive')
if np.issubdtype(S.dtype, np.complexfloating):
warnings.warn('power_to_db was called on complex input so phase '
'information will be discarded. To suppress this warning, '
'call power_to_db(np.abs(D)**2) instead.')
magnitude = np.abs(S)
else:
magnitude = S
if six.callable(ref):
# User supplied a function to calculate reference power
ref_value = ref(magnitude)
else:
ref_value = np.abs(ref)
log_spec = 10.0 * np.log10(np.maximum(amin, magnitude))
log_spec -= 10.0 * np.log10(np.maximum(amin, ref_value))
if top_db is not None:
if top_db < 0:
raise ParameterError('top_db must be non-negative')
log_spec = np.maximum(log_spec, log_spec.max() - top_db)
return log_spec
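The arithmetic above can be reproduced on a toy array in a few lines of `numpy`; this is just the clipped-log computation described in the docstring (with assumed example values), not a replacement for `power_to_db` itself.

import numpy as np

# Assumed toy power values spanning several orders of magnitude
S = np.array([1e-12, 1e-4, 1e-2, 1.0, 4.0])
ref, amin, top_db = 1.0, 1e-10, 80.0

# 10 * log10(S / ref), computed stably by clipping S and ref at amin
log_spec = 10.0 * np.log10(np.maximum(amin, S))
log_spec -= 10.0 * np.log10(np.maximum(amin, ref))

# Threshold at top_db below the peak, as power_to_db does
log_spec = np.maximum(log_spec, log_spec.max() - top_db)

print(log_spec)   # approximately [-73.98, -40., -20., 0., 6.02]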
|
def power_to_db(S, ref=1.0, amin=1e-10, top_db=80.0):
"""Convert a power spectrogram (amplitude squared) to decibel (dB) units
This computes the scaling ``10 * log10(S / ref)`` in a numerically
stable way.
Parameters
----------
S : np.ndarray
input power
ref : scalar or callable
If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
`10 * log10(S / ref)`.
Zeros in the output correspond to positions where `S == ref`.
If callable, the reference value is computed as `ref(S)`.
amin : float > 0 [scalar]
minimum threshold for `abs(S)` and `ref`
top_db : float >= 0 [scalar]
threshold the output at `top_db` below the peak:
``max(10 * log10(S)) - top_db``
Returns
-------
S_db : np.ndarray
``S_db ~= 10 * log10(S) - 10 * log10(ref)``
See Also
--------
perceptual_weighting
db_to_power
amplitude_to_db
db_to_amplitude
Notes
-----
This function caches at level 30.
Examples
--------
Get a power spectrogram from a waveform ``y``
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> librosa.power_to_db(S**2)
array([[-33.293, -27.32 , ..., -33.293, -33.293],
[-33.293, -25.723, ..., -33.293, -33.293],
...,
[-33.293, -33.293, ..., -33.293, -33.293],
[-33.293, -33.293, ..., -33.293, -33.293]], dtype=float32)
Compute dB relative to peak power
>>> librosa.power_to_db(S**2, ref=np.max)
array([[-80. , -74.027, ..., -80. , -80. ],
[-80. , -72.431, ..., -80. , -80. ],
...,
[-80. , -80. , ..., -80. , -80. ],
[-80. , -80. , ..., -80. , -80. ]], dtype=float32)
Or compare to median power
>>> librosa.power_to_db(S**2, ref=np.median)
array([[-0.189, 5.784, ..., -0.189, -0.189],
[-0.189, 7.381, ..., -0.189, -0.189],
...,
[-0.189, -0.189, ..., -0.189, -0.189],
[-0.189, -0.189, ..., -0.189, -0.189]], dtype=float32)
And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(S**2, sr=sr, y_axis='log')
>>> plt.colorbar()
>>> plt.title('Power spectrogram')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.power_to_db(S**2, ref=np.max),
... sr=sr, y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Log-Power spectrogram')
>>> plt.tight_layout()
"""
S = np.asarray(S)
if amin <= 0:
raise ParameterError('amin must be strictly positive')
if np.issubdtype(S.dtype, np.complexfloating):
warnings.warn('power_to_db was called on complex input so phase '
'information will be discarded. To suppress this warning, '
'call power_to_db(np.abs(D)**2) instead.')
magnitude = np.abs(S)
else:
magnitude = S
if six.callable(ref):
# User supplied a function to calculate reference power
ref_value = ref(magnitude)
else:
ref_value = np.abs(ref)
log_spec = 10.0 * np.log10(np.maximum(amin, magnitude))
log_spec -= 10.0 * np.log10(np.maximum(amin, ref_value))
if top_db is not None:
if top_db < 0:
raise ParameterError('top_db must be non-negative')
log_spec = np.maximum(log_spec, log_spec.max() - top_db)
return log_spec
|
[
"Convert",
"a",
"power",
"spectrogram",
"(",
"amplitude",
"squared",
")",
"to",
"decibel",
"(",
"dB",
")",
"units"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L815-L934
|
[
"def",
"power_to_db",
"(",
"S",
",",
"ref",
"=",
"1.0",
",",
"amin",
"=",
"1e-10",
",",
"top_db",
"=",
"80.0",
")",
":",
"S",
"=",
"np",
".",
"asarray",
"(",
"S",
")",
"if",
"amin",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'amin must be strictly positive'",
")",
"if",
"np",
".",
"issubdtype",
"(",
"S",
".",
"dtype",
",",
"np",
".",
"complexfloating",
")",
":",
"warnings",
".",
"warn",
"(",
"'power_to_db was called on complex input so phase '",
"'information will be discarded. To suppress this warning, '",
"'call power_to_db(np.abs(D)**2) instead.'",
")",
"magnitude",
"=",
"np",
".",
"abs",
"(",
"S",
")",
"else",
":",
"magnitude",
"=",
"S",
"if",
"six",
".",
"callable",
"(",
"ref",
")",
":",
"# User supplied a function to calculate reference power",
"ref_value",
"=",
"ref",
"(",
"magnitude",
")",
"else",
":",
"ref_value",
"=",
"np",
".",
"abs",
"(",
"ref",
")",
"log_spec",
"=",
"10.0",
"*",
"np",
".",
"log10",
"(",
"np",
".",
"maximum",
"(",
"amin",
",",
"magnitude",
")",
")",
"log_spec",
"-=",
"10.0",
"*",
"np",
".",
"log10",
"(",
"np",
".",
"maximum",
"(",
"amin",
",",
"ref_value",
")",
")",
"if",
"top_db",
"is",
"not",
"None",
":",
"if",
"top_db",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"'top_db must be non-negative'",
")",
"log_spec",
"=",
"np",
".",
"maximum",
"(",
"log_spec",
",",
"log_spec",
".",
"max",
"(",
")",
"-",
"top_db",
")",
"return",
"log_spec"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
amplitude_to_db
|
Convert an amplitude spectrogram to dB-scaled spectrogram.
This is equivalent to ``power_to_db(S**2)``, but is provided for convenience.
Parameters
----------
S : np.ndarray
input amplitude
ref : scalar or callable
If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
`20 * log10(S / ref)`.
Zeros in the output correspond to positions where `S == ref`.
If callable, the reference value is computed as `ref(S)`.
amin : float > 0 [scalar]
minimum threshold for `S` and `ref`
top_db : float >= 0 [scalar]
threshold the output at `top_db` below the peak:
``max(20 * log10(S)) - top_db``
Returns
-------
S_db : np.ndarray
``S`` measured in dB
See Also
--------
power_to_db, db_to_amplitude
Notes
-----
This function caches at level 30.
|
librosa/core/spectrum.py
|
def amplitude_to_db(S, ref=1.0, amin=1e-5, top_db=80.0):
'''Convert an amplitude spectrogram to dB-scaled spectrogram.
This is equivalent to ``power_to_db(S**2)``, but is provided for convenience.
Parameters
----------
S : np.ndarray
input amplitude
ref : scalar or callable
If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
`20 * log10(S / ref)`.
Zeros in the output correspond to positions where `S == ref`.
If callable, the reference value is computed as `ref(S)`.
amin : float > 0 [scalar]
minimum threshold for `S` and `ref`
top_db : float >= 0 [scalar]
threshold the output at `top_db` below the peak:
``max(20 * log10(S)) - top_db``
Returns
-------
S_db : np.ndarray
``S`` measured in dB
See Also
--------
power_to_db, db_to_amplitude
Notes
-----
This function caches at level 30.
'''
S = np.asarray(S)
if np.issubdtype(S.dtype, np.complexfloating):
warnings.warn('amplitude_to_db was called on complex input so phase '
'information will be discarded. To suppress this warning, '
'call amplitude_to_db(np.abs(S)) instead.')
magnitude = np.abs(S)
if six.callable(ref):
# User supplied a function to calculate reference power
ref_value = ref(magnitude)
else:
ref_value = np.abs(ref)
power = np.square(magnitude, out=magnitude)
return power_to_db(power, ref=ref_value**2, amin=amin**2,
top_db=top_db)
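The equivalence stated in the docstring can be checked directly. The sketch below assumes a `librosa` build matching the code above is importable, and it copies the input because this version of `amplitude_to_db` squares a real-valued input in place.

import numpy as np
import librosa

# Assumed toy magnitude spectrogram
S = np.abs(np.random.RandomState(0).randn(5, 7)) + 1e-3

P = S ** 2
db_power = librosa.power_to_db(P, ref=1.0, amin=(1e-5) ** 2, top_db=80.0)

# Pass a copy: this version squares real input in place via np.square(..., out=...)
db_amplitude = librosa.amplitude_to_db(S.copy(), ref=1.0, amin=1e-5, top_db=80.0)

print(np.allclose(db_amplitude, db_power))   # expected: True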
|
def amplitude_to_db(S, ref=1.0, amin=1e-5, top_db=80.0):
'''Convert an amplitude spectrogram to dB-scaled spectrogram.
This is equivalent to ``power_to_db(S**2)``, but is provided for convenience.
Parameters
----------
S : np.ndarray
input amplitude
ref : scalar or callable
If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
`20 * log10(S / ref)`.
Zeros in the output correspond to positions where `S == ref`.
If callable, the reference value is computed as `ref(S)`.
amin : float > 0 [scalar]
minimum threshold for `S` and `ref`
top_db : float >= 0 [scalar]
threshold the output at `top_db` below the peak:
``max(20 * log10(S)) - top_db``
Returns
-------
S_db : np.ndarray
``S`` measured in dB
See Also
--------
power_to_db, db_to_amplitude
Notes
-----
This function caches at level 30.
'''
S = np.asarray(S)
if np.issubdtype(S.dtype, np.complexfloating):
warnings.warn('amplitude_to_db was called on complex input so phase '
'information will be discarded. To suppress this warning, '
'call amplitude_to_db(np.abs(S)) instead.')
magnitude = np.abs(S)
if six.callable(ref):
# User supplied a function to calculate reference power
ref_value = ref(magnitude)
else:
ref_value = np.abs(ref)
power = np.square(magnitude, out=magnitude)
return power_to_db(power, ref=ref_value**2, amin=amin**2,
top_db=top_db)
|
[
"Convert",
"an",
"amplitude",
"spectrogram",
"to",
"dB",
"-",
"scaled",
"spectrogram",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L966-L1023
|
[
"def",
"amplitude_to_db",
"(",
"S",
",",
"ref",
"=",
"1.0",
",",
"amin",
"=",
"1e-5",
",",
"top_db",
"=",
"80.0",
")",
":",
"S",
"=",
"np",
".",
"asarray",
"(",
"S",
")",
"if",
"np",
".",
"issubdtype",
"(",
"S",
".",
"dtype",
",",
"np",
".",
"complexfloating",
")",
":",
"warnings",
".",
"warn",
"(",
"'amplitude_to_db was called on complex input so phase '",
"'information will be discarded. To suppress this warning, '",
"'call amplitude_to_db(np.abs(S)) instead.'",
")",
"magnitude",
"=",
"np",
".",
"abs",
"(",
"S",
")",
"if",
"six",
".",
"callable",
"(",
"ref",
")",
":",
"# User supplied a function to calculate reference power",
"ref_value",
"=",
"ref",
"(",
"magnitude",
")",
"else",
":",
"ref_value",
"=",
"np",
".",
"abs",
"(",
"ref",
")",
"power",
"=",
"np",
".",
"square",
"(",
"magnitude",
",",
"out",
"=",
"magnitude",
")",
"return",
"power_to_db",
"(",
"power",
",",
"ref",
"=",
"ref_value",
"**",
"2",
",",
"amin",
"=",
"amin",
"**",
"2",
",",
"top_db",
"=",
"top_db",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
perceptual_weighting
|
Perceptual weighting of a power spectrogram:
    `S_p[f] = A_weighting(f) + 10*log10(S[f] / ref)`
Parameters
----------
S : np.ndarray [shape=(d, t)]
Power spectrogram
frequencies : np.ndarray [shape=(d,)]
Center frequency for each row of `S`
kwargs : additional keyword arguments
Additional keyword arguments to `power_to_db`.
Returns
-------
S_p : np.ndarray [shape=(d, t)]
perceptually weighted version of `S`
See Also
--------
power_to_db
Notes
-----
This function caches at level 30.
Examples
--------
Re-weight a CQT power spectrum, using peak power as reference
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('A1')))
>>> freqs = librosa.cqt_frequencies(C.shape[0],
... fmin=librosa.note_to_hz('A1'))
>>> perceptual_CQT = librosa.perceptual_weighting(C**2,
... freqs,
... ref=np.max)
>>> perceptual_CQT
array([[ -80.076, -80.049, ..., -104.735, -104.735],
[ -78.344, -78.555, ..., -103.725, -103.725],
...,
[ -76.272, -76.272, ..., -76.272, -76.272],
[ -76.485, -76.485, ..., -76.485, -76.485]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... fmin=librosa.note_to_hz('A1'),
... y_axis='cqt_hz')
>>> plt.title('Log CQT power')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(perceptual_CQT, y_axis='cqt_hz',
... fmin=librosa.note_to_hz('A1'),
... x_axis='time')
>>> plt.title('Perceptually weighted log CQT')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
|
librosa/core/spectrum.py
|
def perceptual_weighting(S, frequencies, **kwargs):
'''Perceptual weighting of a power spectrogram:
    `S_p[f] = A_weighting(f) + 10*log10(S[f] / ref)`
Parameters
----------
S : np.ndarray [shape=(d, t)]
Power spectrogram
frequencies : np.ndarray [shape=(d,)]
Center frequency for each row of `S`
kwargs : additional keyword arguments
Additional keyword arguments to `power_to_db`.
Returns
-------
S_p : np.ndarray [shape=(d, t)]
perceptually weighted version of `S`
See Also
--------
power_to_db
Notes
-----
This function caches at level 30.
Examples
--------
Re-weight a CQT power spectrum, using peak power as reference
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('A1')))
>>> freqs = librosa.cqt_frequencies(C.shape[0],
... fmin=librosa.note_to_hz('A1'))
>>> perceptual_CQT = librosa.perceptual_weighting(C**2,
... freqs,
... ref=np.max)
>>> perceptual_CQT
array([[ -80.076, -80.049, ..., -104.735, -104.735],
[ -78.344, -78.555, ..., -103.725, -103.725],
...,
[ -76.272, -76.272, ..., -76.272, -76.272],
[ -76.485, -76.485, ..., -76.485, -76.485]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... fmin=librosa.note_to_hz('A1'),
... y_axis='cqt_hz')
>>> plt.title('Log CQT power')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(perceptual_CQT, y_axis='cqt_hz',
... fmin=librosa.note_to_hz('A1'),
... x_axis='time')
>>> plt.title('Perceptually weighted log CQT')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
'''
offset = time_frequency.A_weighting(frequencies).reshape((-1, 1))
return offset + power_to_db(S, **kwargs)
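The function body above is a one-line broadcast: an A-weighting offset per frequency, added column-wise to the dB-scaled spectrogram. The sketch below spells that out on assumed toy frequencies, using `librosa.A_weighting` and `librosa.power_to_db`.

import numpy as np
import librosa

# Assumed toy setup: 5 frequency bands, 4 frames of power values
freqs = np.array([100.0, 250.0, 500.0, 1000.0, 4000.0])
S = np.random.RandomState(0).rand(5, 4) + 1e-3

# Column vector of per-frequency A-weighting offsets (in dB)
offset = librosa.A_weighting(freqs).reshape((-1, 1))

# Broadcast the offset across the time axis of the dB-scaled spectrogram
S_p = offset + librosa.power_to_db(S)

# With default keyword arguments this should match librosa.perceptual_weighting(S, freqs)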
|
def perceptual_weighting(S, frequencies, **kwargs):
'''Perceptual weighting of a power spectrogram:
    `S_p[f] = A_weighting(f) + 10*log10(S[f] / ref)`
Parameters
----------
S : np.ndarray [shape=(d, t)]
Power spectrogram
frequencies : np.ndarray [shape=(d,)]
Center frequency for each row of `S`
kwargs : additional keyword arguments
Additional keyword arguments to `power_to_db`.
Returns
-------
S_p : np.ndarray [shape=(d, t)]
perceptually weighted version of `S`
See Also
--------
power_to_db
Notes
-----
This function caches at level 30.
Examples
--------
Re-weight a CQT power spectrum, using peak power as reference
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('A1')))
>>> freqs = librosa.cqt_frequencies(C.shape[0],
... fmin=librosa.note_to_hz('A1'))
>>> perceptual_CQT = librosa.perceptual_weighting(C**2,
... freqs,
... ref=np.max)
>>> perceptual_CQT
array([[ -80.076, -80.049, ..., -104.735, -104.735],
[ -78.344, -78.555, ..., -103.725, -103.725],
...,
[ -76.272, -76.272, ..., -76.272, -76.272],
[ -76.485, -76.485, ..., -76.485, -76.485]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... fmin=librosa.note_to_hz('A1'),
... y_axis='cqt_hz')
>>> plt.title('Log CQT power')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(perceptual_CQT, y_axis='cqt_hz',
... fmin=librosa.note_to_hz('A1'),
... x_axis='time')
>>> plt.title('Perceptually weighted log CQT')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
'''
offset = time_frequency.A_weighting(frequencies).reshape((-1, 1))
return offset + power_to_db(S, **kwargs)
|
[
"Perceptual",
"weighting",
"of",
"a",
"power",
"spectrogram",
":"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L1055-L1123
|
[
"def",
"perceptual_weighting",
"(",
"S",
",",
"frequencies",
",",
"*",
"*",
"kwargs",
")",
":",
"offset",
"=",
"time_frequency",
".",
"A_weighting",
"(",
"frequencies",
")",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"return",
"offset",
"+",
"power_to_db",
"(",
"S",
",",
"*",
"*",
"kwargs",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
fmt
|
The fast Mellin transform (FMT) [1]_ of a uniformly sampled signal y.
When the Mellin parameter (beta) is 1/2, it is also known as the scale transform [2]_.
The scale transform can be useful for audio analysis because its magnitude is invariant
to scaling of the domain (e.g., time stretching or compression). This is analogous
to the magnitude of the Fourier transform being invariant to shifts in the input domain.
.. [1] De Sena, Antonio, and Davide Rocchesso.
"A fast Mellin and scale transform."
EURASIP Journal on Applied Signal Processing 2007.1 (2007): 75-75.
.. [2] Cohen, L.
"The scale representation."
IEEE Transactions on Signal Processing 41, no. 12 (1993): 3275-3292.
Parameters
----------
y : np.ndarray, real-valued
The input signal(s). Can be multidimensional.
The target axis must contain at least 3 samples.
t_min : float > 0
The minimum time spacing (in samples).
This value should generally be less than 1 to preserve as much information as
possible.
n_fmt : int > 2 or None
The number of scale transform bins to use.
        If None, then `n_fmt = over_sample * ceil(n * log((n-1)/t_min))` is taken,
where `n = y.shape[axis]`
kind : str
The type of interpolation to use when re-sampling the input.
See `scipy.interpolate.interp1d` for possible values.
Note that the default is to use high-precision (cubic) interpolation.
This can be slow in practice; if speed is preferred over accuracy,
then consider using `kind='linear'`.
beta : float
The Mellin parameter. `beta=0.5` provides the scale transform.
over_sample : float >= 1
Over-sampling factor for exponential resampling.
axis : int
The axis along which to transform `y`
Returns
-------
x_scale : np.ndarray [dtype=complex]
The scale transform of `y` along the `axis` dimension.
Raises
------
ParameterError
        if `n_fmt < 3` or `t_min <= 0`
or if `y` is not finite
or if `y.shape[axis] < 3`.
Notes
-----
This function caches at level 30.
Examples
--------
>>> # Generate a signal and time-stretch it (with energy normalization)
>>> scale = 1.25
>>> freq = 3.0
>>> x1 = np.linspace(0, 1, num=1024, endpoint=False)
>>> x2 = np.linspace(0, 1, num=scale * len(x1), endpoint=False)
>>> y1 = np.sin(2 * np.pi * freq * x1)
>>> y2 = np.sin(2 * np.pi * freq * x2) / np.sqrt(scale)
>>> # Verify that the two signals have the same energy
>>> np.sum(np.abs(y1)**2), np.sum(np.abs(y2)**2)
(255.99999999999997, 255.99999999999969)
>>> scale1 = librosa.fmt(y1, n_fmt=512)
>>> scale2 = librosa.fmt(y2, n_fmt=512)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 4))
>>> plt.subplot(1, 2, 1)
>>> plt.plot(y1, label='Original')
>>> plt.plot(y2, linestyle='--', label='Stretched')
>>> plt.xlabel('time (samples)')
>>> plt.title('Input signals')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.subplot(1, 2, 2)
>>> plt.semilogy(np.abs(scale1), label='Original')
>>> plt.semilogy(np.abs(scale2), linestyle='--', label='Stretched')
>>> plt.xlabel('scale coefficients')
>>> plt.title('Scale transform magnitude')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.tight_layout()
>>> # Plot the scale transform of an onset strength autocorrelation
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=10.0, duration=30.0)
>>> odf = librosa.onset.onset_strength(y=y, sr=sr)
>>> # Auto-correlate with up to 10 seconds lag
>>> odf_ac = librosa.autocorrelate(odf, max_size=10 * sr // 512)
>>> # Normalize
>>> odf_ac = librosa.util.normalize(odf_ac, norm=np.inf)
>>> # Compute the scale transform
>>> odf_ac_scale = librosa.fmt(librosa.util.normalize(odf_ac), n_fmt=512)
>>> # Plot the results
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> plt.plot(odf, label='Onset strength')
>>> plt.axis('tight')
>>> plt.xlabel('Time (frames)')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.subplot(3, 1, 2)
>>> plt.plot(odf_ac, label='Onset autocorrelation')
>>> plt.axis('tight')
>>> plt.xlabel('Lag (frames)')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.subplot(3, 1, 3)
>>> plt.semilogy(np.abs(odf_ac_scale), label='Scale transform magnitude')
>>> plt.axis('tight')
>>> plt.xlabel('scale coefficients')
>>> plt.legend(frameon=True)
>>> plt.tight_layout()
|
librosa/core/spectrum.py
|
def fmt(y, t_min=0.5, n_fmt=None, kind='cubic', beta=0.5, over_sample=1, axis=-1):
"""The fast Mellin transform (FMT) [1]_ of a uniformly sampled signal y.
When the Mellin parameter (beta) is 1/2, it is also known as the scale transform [2]_.
The scale transform can be useful for audio analysis because its magnitude is invariant
to scaling of the domain (e.g., time stretching or compression). This is analogous
to the magnitude of the Fourier transform being invariant to shifts in the input domain.
.. [1] De Sena, Antonio, and Davide Rocchesso.
"A fast Mellin and scale transform."
EURASIP Journal on Applied Signal Processing 2007.1 (2007): 75-75.
.. [2] Cohen, L.
"The scale representation."
IEEE Transactions on Signal Processing 41, no. 12 (1993): 3275-3292.
Parameters
----------
y : np.ndarray, real-valued
The input signal(s). Can be multidimensional.
The target axis must contain at least 3 samples.
t_min : float > 0
The minimum time spacing (in samples).
This value should generally be less than 1 to preserve as much information as
possible.
n_fmt : int > 2 or None
The number of scale transform bins to use.
        If None, then `n_fmt = over_sample * ceil(n * log((n-1)/t_min))` is taken,
where `n = y.shape[axis]`
kind : str
The type of interpolation to use when re-sampling the input.
See `scipy.interpolate.interp1d` for possible values.
Note that the default is to use high-precision (cubic) interpolation.
This can be slow in practice; if speed is preferred over accuracy,
then consider using `kind='linear'`.
beta : float
The Mellin parameter. `beta=0.5` provides the scale transform.
over_sample : float >= 1
Over-sampling factor for exponential resampling.
axis : int
The axis along which to transform `y`
Returns
-------
x_scale : np.ndarray [dtype=complex]
The scale transform of `y` along the `axis` dimension.
Raises
------
ParameterError
        if `n_fmt < 3` or `t_min <= 0`
or if `y` is not finite
or if `y.shape[axis] < 3`.
Notes
-----
This function caches at level 30.
Examples
--------
>>> # Generate a signal and time-stretch it (with energy normalization)
>>> scale = 1.25
>>> freq = 3.0
>>> x1 = np.linspace(0, 1, num=1024, endpoint=False)
>>> x2 = np.linspace(0, 1, num=scale * len(x1), endpoint=False)
>>> y1 = np.sin(2 * np.pi * freq * x1)
>>> y2 = np.sin(2 * np.pi * freq * x2) / np.sqrt(scale)
>>> # Verify that the two signals have the same energy
>>> np.sum(np.abs(y1)**2), np.sum(np.abs(y2)**2)
(255.99999999999997, 255.99999999999969)
>>> scale1 = librosa.fmt(y1, n_fmt=512)
>>> scale2 = librosa.fmt(y2, n_fmt=512)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 4))
>>> plt.subplot(1, 2, 1)
>>> plt.plot(y1, label='Original')
>>> plt.plot(y2, linestyle='--', label='Stretched')
>>> plt.xlabel('time (samples)')
>>> plt.title('Input signals')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.subplot(1, 2, 2)
>>> plt.semilogy(np.abs(scale1), label='Original')
>>> plt.semilogy(np.abs(scale2), linestyle='--', label='Stretched')
>>> plt.xlabel('scale coefficients')
>>> plt.title('Scale transform magnitude')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.tight_layout()
>>> # Plot the scale transform of an onset strength autocorrelation
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=10.0, duration=30.0)
>>> odf = librosa.onset.onset_strength(y=y, sr=sr)
>>> # Auto-correlate with up to 10 seconds lag
>>> odf_ac = librosa.autocorrelate(odf, max_size=10 * sr // 512)
>>> # Normalize
>>> odf_ac = librosa.util.normalize(odf_ac, norm=np.inf)
>>> # Compute the scale transform
>>> odf_ac_scale = librosa.fmt(librosa.util.normalize(odf_ac), n_fmt=512)
>>> # Plot the results
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> plt.plot(odf, label='Onset strength')
>>> plt.axis('tight')
>>> plt.xlabel('Time (frames)')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.subplot(3, 1, 2)
>>> plt.plot(odf_ac, label='Onset autocorrelation')
>>> plt.axis('tight')
>>> plt.xlabel('Lag (frames)')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.subplot(3, 1, 3)
>>> plt.semilogy(np.abs(odf_ac_scale), label='Scale transform magnitude')
>>> plt.axis('tight')
>>> plt.xlabel('scale coefficients')
>>> plt.legend(frameon=True)
>>> plt.tight_layout()
"""
n = y.shape[axis]
if n < 3:
raise ParameterError('y.shape[{:}]=={:} < 3'.format(axis, n))
if t_min <= 0:
raise ParameterError('t_min must be a positive number')
if n_fmt is None:
if over_sample < 1:
raise ParameterError('over_sample must be >= 1')
# The base is the maximum ratio between adjacent samples
# Since the sample spacing is increasing, this is simply the
# ratio between the positions of the last two samples: (n-1)/(n-2)
log_base = np.log(n - 1) - np.log(n - 2)
n_fmt = int(np.ceil(over_sample * (np.log(n - 1) - np.log(t_min)) / log_base))
elif n_fmt < 3:
raise ParameterError('n_fmt=={:} < 3'.format(n_fmt))
else:
log_base = (np.log(n_fmt - 1) - np.log(n_fmt - 2)) / over_sample
if not np.all(np.isfinite(y)):
raise ParameterError('y must be finite everywhere')
base = np.exp(log_base)
# original grid: signal covers [0, 1). This range is arbitrary, but convenient.
# The final sample is positioned at (n-1)/n, so we omit the endpoint
x = np.linspace(0, 1, num=n, endpoint=False)
# build the interpolator
f_interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=axis)
# build the new sampling grid
# exponentially spaced between t_min/n and 1 (exclusive)
# we'll go one past where we need, and drop the last sample
    # When over-sampling, the last input sample contributes n_over samples.
# To keep the spacing consistent, we over-sample by n_over, and then
# trim the final samples.
n_over = int(np.ceil(over_sample))
x_exp = np.logspace((np.log(t_min) - np.log(n)) / log_base,
0,
num=n_fmt + n_over,
endpoint=False,
base=base)[:-n_over]
# Clean up any rounding errors at the boundaries of the interpolation
# The interpolator gets angry if we try to extrapolate, so clipping is necessary here.
if x_exp[0] < t_min or x_exp[-1] > float(n - 1.0) / n:
x_exp = np.clip(x_exp, float(t_min) / n, x[-1])
# Make sure that all sample points are unique
# This should never happen!
if len(np.unique(x_exp)) != len(x_exp):
raise RuntimeError('Redundant sample positions in Mellin transform')
# Resample the signal
y_res = f_interp(x_exp)
# Broadcast the window correctly
shape = [1] * y_res.ndim
shape[axis] = -1
# Apply the window and fft
# Normalization is absorbed into the window here for expedience
fft = get_fftlib()
return fft.rfft(y_res * ((x_exp**beta).reshape(shape) * np.sqrt(n) / n_fmt),
axis=axis)
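A small usage sketch of the scale-invariance property described in the docstring, on synthetic signals only (no audio file needed). The 1.25 stretch factor is an arbitrary assumption and the comparison at the end is qualitative.

import numpy as np
import librosa

# A 3 Hz sinusoid and a 1.25x time-stretched, energy-normalized copy
scale = 1.25
x1 = np.linspace(0, 1, num=1024, endpoint=False)
x2 = np.linspace(0, 1, num=int(scale * 1024), endpoint=False)
y1 = np.sin(2 * np.pi * 3.0 * x1)
y2 = np.sin(2 * np.pi * 3.0 * x2) / np.sqrt(scale)

s1 = librosa.fmt(y1, n_fmt=512)
s2 = librosa.fmt(y2, n_fmt=512)

# The scale-transform magnitudes should be close, even though the inputs differ in length
print(np.mean(np.abs(np.abs(s1) - np.abs(s2))))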
|
def fmt(y, t_min=0.5, n_fmt=None, kind='cubic', beta=0.5, over_sample=1, axis=-1):
"""The fast Mellin transform (FMT) [1]_ of a uniformly sampled signal y.
When the Mellin parameter (beta) is 1/2, it is also known as the scale transform [2]_.
The scale transform can be useful for audio analysis because its magnitude is invariant
to scaling of the domain (e.g., time stretching or compression). This is analogous
to the magnitude of the Fourier transform being invariant to shifts in the input domain.
.. [1] De Sena, Antonio, and Davide Rocchesso.
"A fast Mellin and scale transform."
EURASIP Journal on Applied Signal Processing 2007.1 (2007): 75-75.
.. [2] Cohen, L.
"The scale representation."
IEEE Transactions on Signal Processing 41, no. 12 (1993): 3275-3292.
Parameters
----------
y : np.ndarray, real-valued
The input signal(s). Can be multidimensional.
The target axis must contain at least 3 samples.
t_min : float > 0
The minimum time spacing (in samples).
This value should generally be less than 1 to preserve as much information as
possible.
n_fmt : int > 2 or None
The number of scale transform bins to use.
        If None, then `n_fmt = over_sample * ceil(n * log((n-1)/t_min))` is taken,
where `n = y.shape[axis]`
kind : str
The type of interpolation to use when re-sampling the input.
See `scipy.interpolate.interp1d` for possible values.
Note that the default is to use high-precision (cubic) interpolation.
This can be slow in practice; if speed is preferred over accuracy,
then consider using `kind='linear'`.
beta : float
The Mellin parameter. `beta=0.5` provides the scale transform.
over_sample : float >= 1
Over-sampling factor for exponential resampling.
axis : int
The axis along which to transform `y`
Returns
-------
x_scale : np.ndarray [dtype=complex]
The scale transform of `y` along the `axis` dimension.
Raises
------
ParameterError
        if `n_fmt < 3` or `t_min <= 0`
or if `y` is not finite
or if `y.shape[axis] < 3`.
Notes
-----
This function caches at level 30.
Examples
--------
>>> # Generate a signal and time-stretch it (with energy normalization)
>>> scale = 1.25
>>> freq = 3.0
>>> x1 = np.linspace(0, 1, num=1024, endpoint=False)
>>> x2 = np.linspace(0, 1, num=scale * len(x1), endpoint=False)
>>> y1 = np.sin(2 * np.pi * freq * x1)
>>> y2 = np.sin(2 * np.pi * freq * x2) / np.sqrt(scale)
>>> # Verify that the two signals have the same energy
>>> np.sum(np.abs(y1)**2), np.sum(np.abs(y2)**2)
(255.99999999999997, 255.99999999999969)
>>> scale1 = librosa.fmt(y1, n_fmt=512)
>>> scale2 = librosa.fmt(y2, n_fmt=512)
>>> # And plot the results
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(8, 4))
>>> plt.subplot(1, 2, 1)
>>> plt.plot(y1, label='Original')
>>> plt.plot(y2, linestyle='--', label='Stretched')
>>> plt.xlabel('time (samples)')
>>> plt.title('Input signals')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.subplot(1, 2, 2)
>>> plt.semilogy(np.abs(scale1), label='Original')
>>> plt.semilogy(np.abs(scale2), linestyle='--', label='Stretched')
>>> plt.xlabel('scale coefficients')
>>> plt.title('Scale transform magnitude')
>>> plt.legend(frameon=True)
>>> plt.axis('tight')
>>> plt.tight_layout()
>>> # Plot the scale transform of an onset strength autocorrelation
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=10.0, duration=30.0)
>>> odf = librosa.onset.onset_strength(y=y, sr=sr)
>>> # Auto-correlate with up to 10 seconds lag
>>> odf_ac = librosa.autocorrelate(odf, max_size=10 * sr // 512)
>>> # Normalize
>>> odf_ac = librosa.util.normalize(odf_ac, norm=np.inf)
>>> # Compute the scale transform
>>> odf_ac_scale = librosa.fmt(librosa.util.normalize(odf_ac), n_fmt=512)
>>> # Plot the results
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> plt.plot(odf, label='Onset strength')
>>> plt.axis('tight')
>>> plt.xlabel('Time (frames)')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.subplot(3, 1, 2)
>>> plt.plot(odf_ac, label='Onset autocorrelation')
>>> plt.axis('tight')
>>> plt.xlabel('Lag (frames)')
>>> plt.xticks([])
>>> plt.legend(frameon=True)
>>> plt.subplot(3, 1, 3)
>>> plt.semilogy(np.abs(odf_ac_scale), label='Scale transform magnitude')
>>> plt.axis('tight')
>>> plt.xlabel('scale coefficients')
>>> plt.legend(frameon=True)
>>> plt.tight_layout()
"""
n = y.shape[axis]
if n < 3:
raise ParameterError('y.shape[{:}]=={:} < 3'.format(axis, n))
if t_min <= 0:
raise ParameterError('t_min must be a positive number')
if n_fmt is None:
if over_sample < 1:
raise ParameterError('over_sample must be >= 1')
# The base is the maximum ratio between adjacent samples
# Since the sample spacing is increasing, this is simply the
# ratio between the positions of the last two samples: (n-1)/(n-2)
log_base = np.log(n - 1) - np.log(n - 2)
n_fmt = int(np.ceil(over_sample * (np.log(n - 1) - np.log(t_min)) / log_base))
elif n_fmt < 3:
raise ParameterError('n_fmt=={:} < 3'.format(n_fmt))
else:
log_base = (np.log(n_fmt - 1) - np.log(n_fmt - 2)) / over_sample
if not np.all(np.isfinite(y)):
raise ParameterError('y must be finite everywhere')
base = np.exp(log_base)
# original grid: signal covers [0, 1). This range is arbitrary, but convenient.
# The final sample is positioned at (n-1)/n, so we omit the endpoint
x = np.linspace(0, 1, num=n, endpoint=False)
# build the interpolator
f_interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=axis)
# build the new sampling grid
# exponentially spaced between t_min/n and 1 (exclusive)
# we'll go one past where we need, and drop the last sample
    # When over-sampling, the last input sample contributes n_over samples.
# To keep the spacing consistent, we over-sample by n_over, and then
# trim the final samples.
n_over = int(np.ceil(over_sample))
x_exp = np.logspace((np.log(t_min) - np.log(n)) / log_base,
0,
num=n_fmt + n_over,
endpoint=False,
base=base)[:-n_over]
# Clean up any rounding errors at the boundaries of the interpolation
# The interpolator gets angry if we try to extrapolate, so clipping is necessary here.
if x_exp[0] < t_min or x_exp[-1] > float(n - 1.0) / n:
x_exp = np.clip(x_exp, float(t_min) / n, x[-1])
# Make sure that all sample points are unique
# This should never happen!
if len(np.unique(x_exp)) != len(x_exp):
raise RuntimeError('Redundant sample positions in Mellin transform')
# Resample the signal
y_res = f_interp(x_exp)
# Broadcast the window correctly
shape = [1] * y_res.ndim
shape[axis] = -1
# Apply the window and fft
# Normalization is absorbed into the window here for expedience
fft = get_fftlib()
return fft.rfft(y_res * ((x_exp**beta).reshape(shape) * np.sqrt(n) / n_fmt),
axis=axis)
|
[
"The",
"fast",
"Mellin",
"transform",
"(",
"FMT",
")",
"[",
"1",
"]",
"_",
"of",
"a",
"uniformly",
"sampled",
"signal",
"y",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L1127-L1328
|
[
"def",
"fmt",
"(",
"y",
",",
"t_min",
"=",
"0.5",
",",
"n_fmt",
"=",
"None",
",",
"kind",
"=",
"'cubic'",
",",
"beta",
"=",
"0.5",
",",
"over_sample",
"=",
"1",
",",
"axis",
"=",
"-",
"1",
")",
":",
"n",
"=",
"y",
".",
"shape",
"[",
"axis",
"]",
"if",
"n",
"<",
"3",
":",
"raise",
"ParameterError",
"(",
"'y.shape[{:}]=={:} < 3'",
".",
"format",
"(",
"axis",
",",
"n",
")",
")",
"if",
"t_min",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'t_min must be a positive number'",
")",
"if",
"n_fmt",
"is",
"None",
":",
"if",
"over_sample",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"'over_sample must be >= 1'",
")",
"# The base is the maximum ratio between adjacent samples",
"# Since the sample spacing is increasing, this is simply the",
"# ratio between the positions of the last two samples: (n-1)/(n-2)",
"log_base",
"=",
"np",
".",
"log",
"(",
"n",
"-",
"1",
")",
"-",
"np",
".",
"log",
"(",
"n",
"-",
"2",
")",
"n_fmt",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"over_sample",
"*",
"(",
"np",
".",
"log",
"(",
"n",
"-",
"1",
")",
"-",
"np",
".",
"log",
"(",
"t_min",
")",
")",
"/",
"log_base",
")",
")",
"elif",
"n_fmt",
"<",
"3",
":",
"raise",
"ParameterError",
"(",
"'n_fmt=={:} < 3'",
".",
"format",
"(",
"n_fmt",
")",
")",
"else",
":",
"log_base",
"=",
"(",
"np",
".",
"log",
"(",
"n_fmt",
"-",
"1",
")",
"-",
"np",
".",
"log",
"(",
"n_fmt",
"-",
"2",
")",
")",
"/",
"over_sample",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"isfinite",
"(",
"y",
")",
")",
":",
"raise",
"ParameterError",
"(",
"'y must be finite everywhere'",
")",
"base",
"=",
"np",
".",
"exp",
"(",
"log_base",
")",
"# original grid: signal covers [0, 1). This range is arbitrary, but convenient.",
"# The final sample is positioned at (n-1)/n, so we omit the endpoint",
"x",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"num",
"=",
"n",
",",
"endpoint",
"=",
"False",
")",
"# build the interpolator",
"f_interp",
"=",
"scipy",
".",
"interpolate",
".",
"interp1d",
"(",
"x",
",",
"y",
",",
"kind",
"=",
"kind",
",",
"axis",
"=",
"axis",
")",
"# build the new sampling grid",
"# exponentially spaced between t_min/n and 1 (exclusive)",
"# we'll go one past where we need, and drop the last sample",
"# When over-sampling, the last input sample contributions n_over samples.",
"# To keep the spacing consistent, we over-sample by n_over, and then",
"# trim the final samples.",
"n_over",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"over_sample",
")",
")",
"x_exp",
"=",
"np",
".",
"logspace",
"(",
"(",
"np",
".",
"log",
"(",
"t_min",
")",
"-",
"np",
".",
"log",
"(",
"n",
")",
")",
"/",
"log_base",
",",
"0",
",",
"num",
"=",
"n_fmt",
"+",
"n_over",
",",
"endpoint",
"=",
"False",
",",
"base",
"=",
"base",
")",
"[",
":",
"-",
"n_over",
"]",
"# Clean up any rounding errors at the boundaries of the interpolation",
"# The interpolator gets angry if we try to extrapolate, so clipping is necessary here.",
"if",
"x_exp",
"[",
"0",
"]",
"<",
"t_min",
"or",
"x_exp",
"[",
"-",
"1",
"]",
">",
"float",
"(",
"n",
"-",
"1.0",
")",
"/",
"n",
":",
"x_exp",
"=",
"np",
".",
"clip",
"(",
"x_exp",
",",
"float",
"(",
"t_min",
")",
"/",
"n",
",",
"x",
"[",
"-",
"1",
"]",
")",
"# Make sure that all sample points are unique",
"# This should never happen!",
"if",
"len",
"(",
"np",
".",
"unique",
"(",
"x_exp",
")",
")",
"!=",
"len",
"(",
"x_exp",
")",
":",
"raise",
"RuntimeError",
"(",
"'Redundant sample positions in Mellin transform'",
")",
"# Resample the signal",
"y_res",
"=",
"f_interp",
"(",
"x_exp",
")",
"# Broadcast the window correctly",
"shape",
"=",
"[",
"1",
"]",
"*",
"y_res",
".",
"ndim",
"shape",
"[",
"axis",
"]",
"=",
"-",
"1",
"# Apply the window and fft",
"# Normalization is absorbed into the window here for expedience",
"fft",
"=",
"get_fftlib",
"(",
")",
"return",
"fft",
".",
"rfft",
"(",
"y_res",
"*",
"(",
"(",
"x_exp",
"**",
"beta",
")",
".",
"reshape",
"(",
"shape",
")",
"*",
"np",
".",
"sqrt",
"(",
"n",
")",
"/",
"n_fmt",
")",
",",
"axis",
"=",
"axis",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
pcen
|
Per-channel energy normalization (PCEN) [1]_
This function normalizes a time-frequency representation `S` by
performing automatic gain control, followed by nonlinear compression:
        P[f, t] = (S[f, t] / (eps + M[f, t])**gain + bias)**power - bias**power
    where `M` is the result of applying a low-pass, temporal IIR filter
    to `S`:
        M[f, t] = (1 - b) * M[f, t - 1] + b * S[f, t]
    If `b` is not provided, it is calculated as:
        b = (sqrt(1 + 4 * T**2) - 1) / (2 * T**2)
where `T = time_constant * sr / hop_length`.
This normalization is designed to suppress background noise and
emphasize foreground signals, and can be used as an alternative to
decibel scaling (`amplitude_to_db`).
This implementation also supports smoothing across frequency bins
by specifying `max_size > 1`. If this option is used, the filtered
spectrogram `M` is computed as
M[f, t] = (1 - b) * M[f, t - 1] + b * R[f, t]
where `R` has been max-filtered along the frequency axis, similar to
the SuperFlux algorithm implemented in `onset.onset_strength`:
R[f, t] = max(S[f - max_size//2: f + max_size//2, t])
This can be used to perform automatic gain control on signals that cross
    or span multiple frequency bands, which may be desirable for spectrograms
with high frequency resolution.
.. [1] Wang, Y., Getreuer, P., Hughes, T., Lyon, R. F., & Saurous, R. A.
(2017, March). Trainable frontend for robust and far-field keyword spotting.
In Acoustics, Speech and Signal Processing (ICASSP), 2017
IEEE International Conference on (pp. 5670-5674). IEEE.
Parameters
----------
S : np.ndarray (non-negative)
The input (magnitude) spectrogram
sr : number > 0 [scalar]
The audio sampling rate
hop_length : int > 0 [scalar]
The hop length of `S`, expressed in samples
gain : number >= 0 [scalar]
The gain factor. Typical values should be slightly less than 1.
bias : number >= 0 [scalar]
The bias point of the nonlinear compression (default: 2)
power : number > 0 [scalar]
The compression exponent. Typical values should be between 0 and 1.
Smaller values of `power` result in stronger compression.
time_constant : number > 0 [scalar]
The time constant for IIR filtering, measured in seconds.
eps : number > 0 [scalar]
A small constant used to ensure numerical stability of the filter.
b : number in [0, 1] [scalar]
The filter coefficient for the low-pass filter.
If not provided, it will be inferred from `time_constant`.
max_size : int > 0 [scalar]
The width of the max filter applied to the frequency axis.
If left as `1`, no filtering is performed.
ref : None or np.ndarray (shape=S.shape)
An optional pre-computed reference spectrum (`R` in the above).
If not provided it will be computed from `S`.
axis : int [scalar]
The (time) axis of the input spectrogram.
max_axis : None or int [scalar]
The frequency axis of the input spectrogram.
If `None`, and `S` is two-dimensional, it will be inferred
as the opposite from `axis`.
If `S` is not two-dimensional, and `max_size > 1`, an error
will be raised.
zi : np.ndarray
The initial filter delay values.
This may be the `zf` (final delay values) of a previous call to `pcen`, or
computed by `scipy.signal.lfilter_zi`.
return_zf : bool
If `True`, return the final filter delay values along with the PCEN output `P`.
This is primarily useful in streaming contexts, where the final state of one
block of processing should be used to initialize the next block.
If `False` (default) only the PCEN values `P` are returned.
Returns
-------
P : np.ndarray, non-negative [shape=(n, m)]
The per-channel energy normalized version of `S`.
zf : np.ndarray (optional)
The final filter delay values. Only returned if `return_zf=True`.
See Also
--------
amplitude_to_db
librosa.onset.onset_strength
Examples
--------
Compare PCEN to log amplitude (dB) scaling on Mel spectra
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=10, duration=10)
>>> # We'll use power=1 to get a magnitude spectrum
>>> # instead of a power spectrum
>>> S = librosa.feature.melspectrogram(y, sr=sr, power=1)
>>> log_S = librosa.amplitude_to_db(S, ref=np.max)
>>> pcen_S = librosa.pcen(S)
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(log_S, x_axis='time', y_axis='mel')
>>> plt.title('log amplitude (dB)')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(pcen_S, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization')
>>> plt.colorbar()
>>> plt.tight_layout()
Compare PCEN with and without max-filtering
>>> pcen_max = librosa.pcen(S, max_size=3)
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(pcen_S, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization (no max-filter)')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(pcen_max, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization (max_size=3)')
>>> plt.colorbar()
>>> plt.tight_layout()
|
librosa/core/spectrum.py
|
def pcen(S, sr=22050, hop_length=512, gain=0.98, bias=2, power=0.5,
time_constant=0.400, eps=1e-6, b=None, max_size=1, ref=None,
axis=-1, max_axis=None, zi=None, return_zf=False):
'''Per-channel energy normalization (PCEN) [1]_
This function normalizes a time-frequency representation `S` by
performing automatic gain control, followed by nonlinear compression:
P[f, t] = (S[f, t] / (eps + M[f, t])**gain + bias)**power - bias**power
where `M` is the result of applying a low-pass, temporal IIR filter
to `S`:
M[f, t] = (1 - b) * M[f, t - 1] + b * S[f, t]
If `b` is not provided, it is calculated as:
b = (sqrt(1 + 4 * T**2) - 1) / (2 * T**2)
where `T = time_constant * sr / hop_length`.
This normalization is designed to suppress background noise and
emphasize foreground signals, and can be used as an alternative to
decibel scaling (`amplitude_to_db`).
This implementation also supports smoothing across frequency bins
by specifying `max_size > 1`. If this option is used, the filtered
spectrogram `M` is computed as
M[f, t] = (1 - b) * M[f, t - 1] + b * R[f, t]
where `R` has been max-filtered along the frequency axis, similar to
the SuperFlux algorithm implemented in `onset.onset_strength`:
R[f, t] = max(S[f - max_size//2: f + max_size//2, t])
This can be used to perform automatic gain control on signals that cross
or span multiple frequency bands, which may be desirable for spectrograms
with high frequency resolution.
.. [1] Wang, Y., Getreuer, P., Hughes, T., Lyon, R. F., & Saurous, R. A.
(2017, March). Trainable frontend for robust and far-field keyword spotting.
In Acoustics, Speech and Signal Processing (ICASSP), 2017
IEEE International Conference on (pp. 5670-5674). IEEE.
Parameters
----------
S : np.ndarray (non-negative)
The input (magnitude) spectrogram
sr : number > 0 [scalar]
The audio sampling rate
hop_length : int > 0 [scalar]
The hop length of `S`, expressed in samples
gain : number >= 0 [scalar]
The gain factor. Typical values should be slightly less than 1.
bias : number >= 0 [scalar]
The bias point of the nonlinear compression (default: 2)
power : number > 0 [scalar]
The compression exponent. Typical values should be between 0 and 1.
Smaller values of `power` result in stronger compression.
time_constant : number > 0 [scalar]
The time constant for IIR filtering, measured in seconds.
eps : number > 0 [scalar]
A small constant used to ensure numerical stability of the filter.
b : number in [0, 1] [scalar]
The filter coefficient for the low-pass filter.
If not provided, it will be inferred from `time_constant`.
max_size : int > 0 [scalar]
The width of the max filter applied to the frequency axis.
If left as `1`, no filtering is performed.
ref : None or np.ndarray (shape=S.shape)
An optional pre-computed reference spectrum (`R` in the above).
If not provided it will be computed from `S`.
axis : int [scalar]
The (time) axis of the input spectrogram.
max_axis : None or int [scalar]
The frequency axis of the input spectrogram.
If `None`, and `S` is two-dimensional, it will be inferred
as the opposite from `axis`.
If `S` is not two-dimensional, and `max_size > 1`, an error
will be raised.
zi : np.ndarray
The initial filter delay values.
This may be the `zf` (final delay values) of a previous call to `pcen`, or
computed by `scipy.signal.lfilter_zi`.
return_zf : bool
If `True`, return the final filter delay values along with the PCEN output `P`.
This is primarily useful in streaming contexts, where the final state of one
block of processing should be used to initialize the next block.
If `False` (default) only the PCEN values `P` are returned.
Returns
-------
P : np.ndarray, non-negative [shape=(n, m)]
The per-channel energy normalized version of `S`.
zf : np.ndarray (optional)
The final filter delay values. Only returned if `return_zf=True`.
See Also
--------
amplitude_to_db
librosa.onset.onset_strength
Examples
--------
Compare PCEN to log amplitude (dB) scaling on Mel spectra
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=10, duration=10)
>>> # We'll use power=1 to get a magnitude spectrum
>>> # instead of a power spectrum
>>> S = librosa.feature.melspectrogram(y, sr=sr, power=1)
>>> log_S = librosa.amplitude_to_db(S, ref=np.max)
>>> pcen_S = librosa.pcen(S)
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(log_S, x_axis='time', y_axis='mel')
>>> plt.title('log amplitude (dB)')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(pcen_S, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization')
>>> plt.colorbar()
>>> plt.tight_layout()
Compare PCEN with and without max-filtering
>>> pcen_max = librosa.pcen(S, max_size=3)
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(pcen_S, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization (no max-filter)')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(pcen_max, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization (max_size=3)')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
if power <= 0:
raise ParameterError('power={} must be strictly positive'.format(power))
if gain < 0:
raise ParameterError('gain={} must be non-negative'.format(gain))
if bias < 0:
raise ParameterError('bias={} must be non-negative'.format(bias))
if eps <= 0:
raise ParameterError('eps={} must be strictly positive'.format(eps))
if time_constant <= 0:
raise ParameterError('time_constant={} must be strictly positive'.format(time_constant))
if max_size < 1 or not isinstance(max_size, int):
raise ParameterError('max_size={} must be a positive integer'.format(max_size))
if b is None:
t_frames = time_constant * sr / float(hop_length)
# By default, this solves the equation for b:
# b**2 + (1 - b) / t_frames - 2 = 0
# which approximates the full-width half-max of the
# squared frequency response of the IIR low-pass filter
b = (np.sqrt(1 + 4 * t_frames**2) - 1) / (2 * t_frames**2)
if not 0 <= b <= 1:
raise ParameterError('b={} must be between 0 and 1'.format(b))
if np.issubdtype(S.dtype, np.complexfloating):
warnings.warn('pcen was called on complex input so phase '
'information will be discarded. To suppress this warning, '
'call pcen(np.abs(D)) instead.')
S = np.abs(S)
if ref is None:
if max_size == 1:
ref = S
elif S.ndim == 1:
raise ParameterError('Max-filtering cannot be applied to 1-dimensional input')
else:
if max_axis is None:
if S.ndim != 2:
raise ParameterError('Max-filtering a {:d}-dimensional spectrogram '
'requires you to specify max_axis'.format(S.ndim))
# if axis = 0, max_axis=1
# if axis = +- 1, max_axis = 0
max_axis = np.mod(1 - axis, 2)
ref = scipy.ndimage.maximum_filter1d(S, max_size, axis=max_axis)
if zi is None:
# Make sure zi matches dimension to input
shape = tuple([1] * ref.ndim)
zi = np.empty(shape)
zi[:] = scipy.signal.lfilter_zi([b], [1, b - 1])[:]
S_smooth, zf = scipy.signal.lfilter([b], [1, b - 1], ref, zi=zi,
axis=axis)
# Working in log-space gives us some stability, and a slight speedup
smooth = np.exp(-gain * (np.log(eps) + np.log1p(S_smooth / eps)))
S_out = (S * smooth + bias)**power - bias**power
if return_zf:
return S_out, zf
else:
return S_out
|
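The docstring above documents a block-streaming mode via `zi`/`return_zf`, and a closed form for the default smoothing coefficient `b`. The following minimal sketch (not part of the dataset entry; it assumes the 0.6-era `pcen` signature shown above, and the random matrix is a hypothetical stand-in for a real mel spectrogram) reproduces the default `b` and shows that block-wise filtering with carried state matches a single full-signal call.

# Minimal sketch of PCEN streaming with zi/return_zf (assumed toy input).
import numpy as np
import librosa

sr, hop_length, time_constant = 22050, 512, 0.400

# Default smoothing coefficient, as given in the docstring:
#   b = (sqrt(1 + 4 * T**2) - 1) / (2 * T**2), with T = time_constant * sr / hop_length
T = time_constant * sr / hop_length
b = (np.sqrt(1 + 4 * T**2) - 1) / (2 * T**2)
print('default filter coefficient b ~= {:.4f}'.format(b))

# Block-wise (streaming) use of pcen: carry the final filter state zf between blocks.
S = np.abs(np.random.randn(128, 400))      # toy non-negative "spectrogram"
blocks = np.array_split(S, 4, axis=1)      # four consecutive time blocks

P_blocks = []
zi = None
for block in blocks:
    P, zi = librosa.pcen(block, sr=sr, hop_length=hop_length,
                         zi=zi, return_zf=True)
    P_blocks.append(P)

P_stream = np.concatenate(P_blocks, axis=1)
P_full = librosa.pcen(S, sr=sr, hop_length=hop_length)
print('max deviation between streamed and full PCEN:',
      np.max(np.abs(P_stream - P_full)))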
def pcen(S, sr=22050, hop_length=512, gain=0.98, bias=2, power=0.5,
time_constant=0.400, eps=1e-6, b=None, max_size=1, ref=None,
axis=-1, max_axis=None, zi=None, return_zf=False):
'''Per-channel energy normalization (PCEN) [1]_
This function normalizes a time-frequency representation `S` by
performing automatic gain control, followed by nonlinear compression:
P[f, t] = (S[f, t] / (eps + M[f, t])**gain + bias)**power - bias**power
where `M` is the result of applying a low-pass, temporal IIR filter
to `S`:
M[f, t] = (1 - b) * M[f, t - 1] + b * S[f, t]
If `b` is not provided, it is calculated as:
b = (sqrt(1 + 4 * T**2) - 1) / (2 * T**2)
where `T = time_constant * sr / hop_length`.
This normalization is designed to suppress background noise and
emphasize foreground signals, and can be used as an alternative to
decibel scaling (`amplitude_to_db`).
This implementation also supports smoothing across frequency bins
by specifying `max_size > 1`. If this option is used, the filtered
spectrogram `M` is computed as
M[f, t] = (1 - b) * M[f, t - 1] + b * R[f, t]
where `R` has been max-filtered along the frequency axis, similar to
the SuperFlux algorithm implemented in `onset.onset_strength`:
R[f, t] = max(S[f - max_size//2: f + max_size//2, t])
This can be used to perform automatic gain control on signals that cross
or span multiple frequency bands, which may be desirable for spectrograms
with high frequency resolution.
.. [1] Wang, Y., Getreuer, P., Hughes, T., Lyon, R. F., & Saurous, R. A.
(2017, March). Trainable frontend for robust and far-field keyword spotting.
In Acoustics, Speech and Signal Processing (ICASSP), 2017
IEEE International Conference on (pp. 5670-5674). IEEE.
Parameters
----------
S : np.ndarray (non-negative)
The input (magnitude) spectrogram
sr : number > 0 [scalar]
The audio sampling rate
hop_length : int > 0 [scalar]
The hop length of `S`, expressed in samples
gain : number >= 0 [scalar]
The gain factor. Typical values should be slightly less than 1.
bias : number >= 0 [scalar]
The bias point of the nonlinear compression (default: 2)
power : number > 0 [scalar]
The compression exponent. Typical values should be between 0 and 1.
Smaller values of `power` result in stronger compression.
time_constant : number > 0 [scalar]
The time constant for IIR filtering, measured in seconds.
eps : number > 0 [scalar]
A small constant used to ensure numerical stability of the filter.
b : number in [0, 1] [scalar]
The filter coefficient for the low-pass filter.
If not provided, it will be inferred from `time_constant`.
max_size : int > 0 [scalar]
The width of the max filter applied to the frequency axis.
If left as `1`, no filtering is performed.
ref : None or np.ndarray (shape=S.shape)
An optional pre-computed reference spectrum (`R` in the above).
If not provided it will be computed from `S`.
axis : int [scalar]
The (time) axis of the input spectrogram.
max_axis : None or int [scalar]
The frequency axis of the input spectrogram.
If `None`, and `S` is two-dimensional, it will be inferred
as the opposite from `axis`.
If `S` is not two-dimensional, and `max_size > 1`, an error
will be raised.
zi : np.ndarray
The initial filter delay values.
This may be the `zf` (final delay values) of a previous call to `pcen`, or
computed by `scipy.signal.lfilter_zi`.
return_zf : bool
If `True`, return the final filter delay values along with the PCEN output `P`.
This is primarily useful in streaming contexts, where the final state of one
block of processing should be used to initialize the next block.
If `False` (default) only the PCEN values `P` are returned.
Returns
-------
P : np.ndarray, non-negative [shape=(n, m)]
The per-channel energy normalized version of `S`.
zf : np.ndarray (optional)
The final filter delay values. Only returned if `return_zf=True`.
See Also
--------
amplitude_to_db
librosa.onset.onset_strength
Examples
--------
Compare PCEN to log amplitude (dB) scaling on Mel spectra
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=10, duration=10)
>>> # We'll use power=1 to get a magnitude spectrum
>>> # instead of a power spectrum
>>> S = librosa.feature.melspectrogram(y, sr=sr, power=1)
>>> log_S = librosa.amplitude_to_db(S, ref=np.max)
>>> pcen_S = librosa.pcen(S)
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(log_S, x_axis='time', y_axis='mel')
>>> plt.title('log amplitude (dB)')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(pcen_S, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization')
>>> plt.colorbar()
>>> plt.tight_layout()
Compare PCEN with and without max-filtering
>>> pcen_max = librosa.pcen(S, max_size=3)
>>> plt.figure()
>>> plt.subplot(2,1,1)
>>> librosa.display.specshow(pcen_S, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization (no max-filter)')
>>> plt.colorbar()
>>> plt.subplot(2,1,2)
>>> librosa.display.specshow(pcen_max, x_axis='time', y_axis='mel')
>>> plt.title('Per-channel energy normalization (max_size=3)')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
if power <= 0:
raise ParameterError('power={} must be strictly positive'.format(power))
if gain < 0:
raise ParameterError('gain={} must be non-negative'.format(gain))
if bias < 0:
raise ParameterError('bias={} must be non-negative'.format(bias))
if eps <= 0:
raise ParameterError('eps={} must be strictly positive'.format(eps))
if time_constant <= 0:
raise ParameterError('time_constant={} must be strictly positive'.format(time_constant))
if max_size < 1 or not isinstance(max_size, int):
raise ParameterError('max_size={} must be a positive integer'.format(max_size))
if b is None:
t_frames = time_constant * sr / float(hop_length)
# By default, this solves the equation for b:
# b**2 + (1 - b) / t_frames - 2 = 0
# which approximates the full-width half-max of the
# squared frequency response of the IIR low-pass filter
b = (np.sqrt(1 + 4 * t_frames**2) - 1) / (2 * t_frames**2)
if not 0 <= b <= 1:
raise ParameterError('b={} must be between 0 and 1'.format(b))
if np.issubdtype(S.dtype, np.complexfloating):
warnings.warn('pcen was called on complex input so phase '
'information will be discarded. To suppress this warning, '
'call pcen(np.abs(D)) instead.')
S = np.abs(S)
if ref is None:
if max_size == 1:
ref = S
elif S.ndim == 1:
raise ParameterError('Max-filtering cannot be applied to 1-dimensional input')
else:
if max_axis is None:
if S.ndim != 2:
raise ParameterError('Max-filtering a {:d}-dimensional spectrogram '
'requires you to specify max_axis'.format(S.ndim))
# if axis = 0, max_axis=1
# if axis = +- 1, max_axis = 0
max_axis = np.mod(1 - axis, 2)
ref = scipy.ndimage.maximum_filter1d(S, max_size, axis=max_axis)
if zi is None:
# Make sure zi matches dimension to input
shape = tuple([1] * ref.ndim)
zi = np.empty(shape)
zi[:] = scipy.signal.lfilter_zi([b], [1, b - 1])[:]
S_smooth, zf = scipy.signal.lfilter([b], [1, b - 1], ref, zi=zi,
axis=axis)
# Working in log-space gives us some stability, and a slight speedup
smooth = np.exp(-gain * (np.log(eps) + np.log1p(S_smooth / eps)))
S_out = (S * smooth + bias)**power - bias**power
if return_zf:
return S_out, zf
else:
return S_out
|
[
"Per",
"-",
"channel",
"energy",
"normalization",
"(",
"PCEN",
")",
"[",
"1",
"]",
"_"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L1332-L1562
|
[
"def",
"pcen",
"(",
"S",
",",
"sr",
"=",
"22050",
",",
"hop_length",
"=",
"512",
",",
"gain",
"=",
"0.98",
",",
"bias",
"=",
"2",
",",
"power",
"=",
"0.5",
",",
"time_constant",
"=",
"0.400",
",",
"eps",
"=",
"1e-6",
",",
"b",
"=",
"None",
",",
"max_size",
"=",
"1",
",",
"ref",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
",",
"max_axis",
"=",
"None",
",",
"zi",
"=",
"None",
",",
"return_zf",
"=",
"False",
")",
":",
"if",
"power",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'power={} must be strictly positive'",
".",
"format",
"(",
"power",
")",
")",
"if",
"gain",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"'gain={} must be non-negative'",
".",
"format",
"(",
"gain",
")",
")",
"if",
"bias",
"<",
"0",
":",
"raise",
"ParameterError",
"(",
"'bias={} must be non-negative'",
".",
"format",
"(",
"bias",
")",
")",
"if",
"eps",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'eps={} must be strictly positive'",
".",
"format",
"(",
"eps",
")",
")",
"if",
"time_constant",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'time_constant={} must be strictly positive'",
".",
"format",
"(",
"time_constant",
")",
")",
"if",
"max_size",
"<",
"1",
"or",
"not",
"isinstance",
"(",
"max_size",
",",
"int",
")",
":",
"raise",
"ParameterError",
"(",
"'max_size={} must be a positive integer'",
".",
"format",
"(",
"max_size",
")",
")",
"if",
"b",
"is",
"None",
":",
"t_frames",
"=",
"time_constant",
"*",
"sr",
"/",
"float",
"(",
"hop_length",
")",
"# By default, this solves the equation for b:",
"# b**2 + (1 - b) / t_frames - 2 = 0",
"# which approximates the full-width half-max of the",
"# squared frequency response of the IIR low-pass filter",
"b",
"=",
"(",
"np",
".",
"sqrt",
"(",
"1",
"+",
"4",
"*",
"t_frames",
"**",
"2",
")",
"-",
"1",
")",
"/",
"(",
"2",
"*",
"t_frames",
"**",
"2",
")",
"if",
"not",
"0",
"<=",
"b",
"<=",
"1",
":",
"raise",
"ParameterError",
"(",
"'b={} must be between 0 and 1'",
".",
"format",
"(",
"b",
")",
")",
"if",
"np",
".",
"issubdtype",
"(",
"S",
".",
"dtype",
",",
"np",
".",
"complexfloating",
")",
":",
"warnings",
".",
"warn",
"(",
"'pcen was called on complex input so phase '",
"'information will be discarded. To suppress this warning, '",
"'call pcen(np.abs(D)) instead.'",
")",
"S",
"=",
"np",
".",
"abs",
"(",
"S",
")",
"if",
"ref",
"is",
"None",
":",
"if",
"max_size",
"==",
"1",
":",
"ref",
"=",
"S",
"elif",
"S",
".",
"ndim",
"==",
"1",
":",
"raise",
"ParameterError",
"(",
"'Max-filtering cannot be applied to 1-dimensional input'",
")",
"else",
":",
"if",
"max_axis",
"is",
"None",
":",
"if",
"S",
".",
"ndim",
"!=",
"2",
":",
"raise",
"ParameterError",
"(",
"'Max-filtering a {:d}-dimensional spectrogram '",
"'requires you to specify max_axis'",
".",
"format",
"(",
"S",
".",
"ndim",
")",
")",
"# if axis = 0, max_axis=1",
"# if axis = +- 1, max_axis = 0",
"max_axis",
"=",
"np",
".",
"mod",
"(",
"1",
"-",
"axis",
",",
"2",
")",
"ref",
"=",
"scipy",
".",
"ndimage",
".",
"maximum_filter1d",
"(",
"S",
",",
"max_size",
",",
"axis",
"=",
"max_axis",
")",
"if",
"zi",
"is",
"None",
":",
"# Make sure zi matches dimension to input",
"shape",
"=",
"tuple",
"(",
"[",
"1",
"]",
"*",
"ref",
".",
"ndim",
")",
"zi",
"=",
"np",
".",
"empty",
"(",
"shape",
")",
"zi",
"[",
":",
"]",
"=",
"scipy",
".",
"signal",
".",
"lfilter_zi",
"(",
"[",
"b",
"]",
",",
"[",
"1",
",",
"b",
"-",
"1",
"]",
")",
"[",
":",
"]",
"S_smooth",
",",
"zf",
"=",
"scipy",
".",
"signal",
".",
"lfilter",
"(",
"[",
"b",
"]",
",",
"[",
"1",
",",
"b",
"-",
"1",
"]",
",",
"ref",
",",
"zi",
"=",
"zi",
",",
"axis",
"=",
"axis",
")",
"# Working in log-space gives us some stability, and a slight speedup",
"smooth",
"=",
"np",
".",
"exp",
"(",
"-",
"gain",
"*",
"(",
"np",
".",
"log",
"(",
"eps",
")",
"+",
"np",
".",
"log1p",
"(",
"S_smooth",
"/",
"eps",
")",
")",
")",
"S_out",
"=",
"(",
"S",
"*",
"smooth",
"+",
"bias",
")",
"**",
"power",
"-",
"bias",
"**",
"power",
"if",
"return_zf",
":",
"return",
"S_out",
",",
"zf",
"else",
":",
"return",
"S_out"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
_spectrogram
|
Helper function to retrieve a magnitude spectrogram.
This is primarily used in feature extraction functions that can operate on
either audio time-series or spectrogram input.
Parameters
----------
y : None or np.ndarray [ndim=1]
If provided, an audio time series
S : None or np.ndarray
Spectrogram input, optional
n_fft : int > 0
STFT window size
hop_length : int > 0
STFT hop length
power : float > 0
Exponent for the magnitude spectrogram,
e.g., 1 for energy, 2 for power, etc.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
S_out : np.ndarray [dtype=np.float32]
- If `S` is provided as input, then `S_out == S`
- Else, `S_out = |stft(y, ...)|**power`
n_fft : int > 0
- If `S` is provided, then `n_fft` is inferred from `S`
- Else, copied from input
|
librosa/core/spectrum.py
|
def _spectrogram(y=None, S=None, n_fft=2048, hop_length=512, power=1,
win_length=None, window='hann', center=True, pad_mode='reflect'):
'''Helper function to retrieve a magnitude spectrogram.
This is primarily used in feature extraction functions that can operate on
either audio time-series or spectrogram input.
Parameters
----------
y : None or np.ndarray [ndim=1]
If provided, an audio time series
S : None or np.ndarray
Spectrogram input, optional
n_fft : int > 0
STFT window size
hop_length : int > 0
STFT hop length
power : float > 0
Exponent for the magnitude spectrogram,
e.g., 1 for energy, 2 for power, etc.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
S_out : np.ndarray [dtype=np.float32]
- If `S` is provided as input, then `S_out == S`
- Else, `S_out = |stft(y, ...)|**power`
n_fft : int > 0
- If `S` is provided, then `n_fft` is inferred from `S`
- Else, copied from input
'''
if S is not None:
# Infer n_fft from spectrogram shape
n_fft = 2 * (S.shape[0] - 1)
else:
# Otherwise, compute a magnitude spectrogram from input
S = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, center=center,
window=window, pad_mode=pad_mode))**power
return S, n_fft
|
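As a quick illustration of the `n_fft` inference rule used above (`n_fft = 2 * (S.shape[0] - 1)`), here is a minimal sketch using the public `librosa.stft`; the sine tone is a hypothetical stand-in for real audio.

# Minimal sketch: a spectrogram with (1 + n_fft//2) rows implies n_fft = 2 * (S.shape[0] - 1).
import numpy as np
import librosa

sr = 22050
y = np.sin(2 * np.pi * 440.0 * np.arange(sr) / sr)   # 1 second of a 440 Hz tone

n_fft = 1024
S = np.abs(librosa.stft(y, n_fft=n_fft, hop_length=512))**2   # power spectrogram

print(S.shape[0])              # 513 == 1 + n_fft // 2
print(2 * (S.shape[0] - 1))    # 1024, recovering n_fft from S alone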
def _spectrogram(y=None, S=None, n_fft=2048, hop_length=512, power=1,
win_length=None, window='hann', center=True, pad_mode='reflect'):
'''Helper function to retrieve a magnitude spectrogram.
This is primarily used in feature extraction functions that can operate on
either audio time-series or spectrogram input.
Parameters
----------
y : None or np.ndarray [ndim=1]
If provided, an audio time series
S : None or np.ndarray
Spectrogram input, optional
n_fft : int > 0
STFT window size
hop_length : int > 0
STFT hop length
power : float > 0
Exponent for the magnitude spectrogram,
e.g., 1 for energy, 2 for power, etc.
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
S_out : np.ndarray [dtype=np.float32]
- If `S` is provided as input, then `S_out == S`
- Else, `S_out = |stft(y, ...)|**power`
n_fft : int > 0
- If `S` is provided, then `n_fft` is inferred from `S`
- Else, copied from input
'''
if S is not None:
# Infer n_fft from spectrogram shape
n_fft = 2 * (S.shape[0] - 1)
else:
# Otherwise, compute a magnitude spectrogram from input
S = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, center=center,
window=window, pad_mode=pad_mode))**power
return S, n_fft
|
[
"Helper",
"function",
"to",
"retrieve",
"a",
"magnitude",
"spectrogram",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/core/spectrum.py#L1565-L1636
|
[
"def",
"_spectrogram",
"(",
"y",
"=",
"None",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"power",
"=",
"1",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
")",
":",
"if",
"S",
"is",
"not",
"None",
":",
"# Infer n_fft from spectrogram shape",
"n_fft",
"=",
"2",
"*",
"(",
"S",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
")",
"else",
":",
"# Otherwise, compute a magnitude spectrogram from input",
"S",
"=",
"np",
".",
"abs",
"(",
"stft",
"(",
"y",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"center",
"=",
"center",
",",
"window",
"=",
"window",
",",
"pad_mode",
"=",
"pad_mode",
")",
")",
"**",
"power",
"return",
"S",
",",
"n_fft"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
hpss_beats
|
HPSS beat tracking
:parameters:
- input_file : str
Path to input audio file (wav, mp3, m4a, flac, etc.)
- output_csv : str
Path to save beat event timestamps as a CSV file
|
examples/hpss_beats.py
|
def hpss_beats(input_file, output_csv):
'''HPSS beat tracking
:parameters:
- input_file : str
Path to input audio file (wav, mp3, m4a, flac, etc.)
- output_csv : str
Path to save beat event timestamps as a CSV file
'''
# Load the file
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# Do HPSS
print('Harmonic-percussive separation ... ')
y = librosa.effects.percussive(y)
# Construct onset envelope from percussive component
print('Tracking beats on percussive component')
onset_env = librosa.onset.onset_strength(y=y,
sr=sr,
hop_length=HOP_LENGTH,
n_fft=N_FFT,
aggregate=np.median)
# Track the beats
tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env,
sr=sr,
hop_length=HOP_LENGTH)
beat_times = librosa.frames_to_time(beats,
sr=sr,
hop_length=HOP_LENGTH)
# Save the output
print('Saving beats to ', output_csv)
librosa.output.times_csv(output_csv, beat_times)
|
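The script above relies on module-level constants `HOP_LENGTH` and `N_FFT` defined elsewhere in `examples/hpss_beats.py`, and on the old `librosa.output.times_csv` helper. A self-contained sketch of the same pipeline, with assumed values for those constants and `np.savetxt` standing in for the CSV writer, might look like this:

# Self-contained variant of the percussive beat-tracking pipeline (assumed constants).
import numpy as np
import librosa

HOP_LENGTH = 512   # assumed value of the script's module-level constant
N_FFT = 2048       # assumed value of the script's module-level constant

y, sr = librosa.load(librosa.util.example_audio_file(), duration=30)

# Keep only the percussive component before onset detection
y_perc = librosa.effects.percussive(y)

onset_env = librosa.onset.onset_strength(y=y_perc, sr=sr,
                                         hop_length=HOP_LENGTH,
                                         n_fft=N_FFT,
                                         aggregate=np.median)

tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env, sr=sr,
                                       hop_length=HOP_LENGTH)
beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=HOP_LENGTH)

print('Estimated tempo: {:.1f} BPM'.format(tempo))
np.savetxt('beats.csv', beat_times, delimiter=',')   # CSV output without librosa.output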
def hpss_beats(input_file, output_csv):
'''HPSS beat tracking
:parameters:
- input_file : str
Path to input audio file (wav, mp3, m4a, flac, etc.)
- output_csv : str
Path to save beat event timestamps as a CSV file
'''
# Load the file
print('Loading ', input_file)
y, sr = librosa.load(input_file)
# Do HPSS
print('Harmonic-percussive separation ... ')
y = librosa.effects.percussive(y)
# Construct onset envelope from percussive component
print('Tracking beats on percussive component')
onset_env = librosa.onset.onset_strength(y=y,
sr=sr,
hop_length=HOP_LENGTH,
n_fft=N_FFT,
aggregate=np.median)
# Track the beats
tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env,
sr=sr,
hop_length=HOP_LENGTH)
beat_times = librosa.frames_to_time(beats,
sr=sr,
hop_length=HOP_LENGTH)
# Save the output
print('Saving beats to ', output_csv)
librosa.output.times_csv(output_csv, beat_times)
|
[
"HPSS",
"beat",
"tracking"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/examples/hpss_beats.py#L24-L62
|
[
"def",
"hpss_beats",
"(",
"input_file",
",",
"output_csv",
")",
":",
"# Load the file",
"print",
"(",
"'Loading '",
",",
"input_file",
")",
"y",
",",
"sr",
"=",
"librosa",
".",
"load",
"(",
"input_file",
")",
"# Do HPSS",
"print",
"(",
"'Harmonic-percussive separation ... '",
")",
"y",
"=",
"librosa",
".",
"effects",
".",
"percussive",
"(",
"y",
")",
"# Construct onset envelope from percussive component",
"print",
"(",
"'Tracking beats on percussive component'",
")",
"onset_env",
"=",
"librosa",
".",
"onset",
".",
"onset_strength",
"(",
"y",
"=",
"y",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"HOP_LENGTH",
",",
"n_fft",
"=",
"N_FFT",
",",
"aggregate",
"=",
"np",
".",
"median",
")",
"# Track the beats",
"tempo",
",",
"beats",
"=",
"librosa",
".",
"beat",
".",
"beat_track",
"(",
"onset_envelope",
"=",
"onset_env",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"HOP_LENGTH",
")",
"beat_times",
"=",
"librosa",
".",
"frames_to_time",
"(",
"beats",
",",
"sr",
"=",
"sr",
",",
"hop_length",
"=",
"HOP_LENGTH",
")",
"# Save the output",
"print",
"(",
"'Saving beats to '",
",",
"output_csv",
")",
"librosa",
".",
"output",
".",
"times_csv",
"(",
"output_csv",
",",
"beat_times",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
decompose
|
Decompose a feature matrix.
Given a spectrogram `S`, produce a decomposition into `components`
and `activations` such that `S ~= components.dot(activations)`.
By default, this is done with non-negative matrix factorization (NMF),
but any `sklearn.decomposition`-type object will work.
Parameters
----------
S : np.ndarray [shape=(n_features, n_samples), dtype=float]
The input feature matrix (e.g., magnitude spectrogram)
n_components : int > 0 [scalar] or None
number of desired components
if None, then `n_features` components are used
transformer : None or object
If None, use `sklearn.decomposition.NMF`
Otherwise, any object with a similar interface to NMF should work.
`transformer` must follow the scikit-learn convention, where
input data is `(n_samples, n_features)`.
`transformer.fit_transform()` will be run on `S.T` (not `S`),
the return value of which is stored (transposed) as `activations`
The components will be retrieved as `transformer.components_.T`
`S ~= np.dot(activations, transformer.components_).T`
or equivalently:
`S ~= np.dot(transformer.components_.T, activations.T)`
sort : bool
If `True`, components are sorted by ascending peak frequency.
.. note:: If used with `transformer`, sorting is applied to copies
of the decomposition parameters, and not to `transformer`'s
internal parameters.
fit : bool
If `True`, components are estimated from the input ``S``.
If `False`, components are assumed to be pre-computed and stored
in ``transformer``, and are not changed.
kwargs : Additional keyword arguments to the default transformer
`sklearn.decomposition.NMF`
Returns
-------
components: np.ndarray [shape=(n_features, n_components)]
matrix of components (basis elements).
activations: np.ndarray [shape=(n_components, n_samples)]
transformed matrix/activation matrix
Raises
------
ParameterError
if `fit` is False and no `transformer` object is provided.
See Also
--------
sklearn.decomposition : SciKit-Learn matrix decomposition modules
Examples
--------
Decompose a magnitude spectrogram into 8 components with NMF
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> comps, acts = librosa.decompose.decompose(S, n_components=8)
>>> comps
array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],
[ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],
...,
[ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],
[ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])
>>> acts
array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],
[ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],
...,
[ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],
[ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])
Sort components by ascending peak frequency
>>> comps, acts = librosa.decompose.decompose(S, n_components=16,
... sort=True)
Or with sparse dictionary learning
>>> import sklearn.decomposition
>>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)
>>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10,8))
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Input spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(3, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(comps,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Components')
>>> plt.subplot(3, 2, 4)
>>> librosa.display.specshow(acts, x_axis='time')
>>> plt.ylabel('Components')
>>> plt.title('Activations')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> S_approx = comps.dot(acts)
>>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Reconstructed spectrogram')
>>> plt.tight_layout()
|
librosa/decompose.py
|
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
"""Decompose a feature matrix.
Given a spectrogram `S`, produce a decomposition into `components`
and `activations` such that `S ~= components.dot(activations)`.
By default, this is done with non-negative matrix factorization (NMF),
but any `sklearn.decomposition`-type object will work.
Parameters
----------
S : np.ndarray [shape=(n_features, n_samples), dtype=float]
The input feature matrix (e.g., magnitude spectrogram)
n_components : int > 0 [scalar] or None
number of desired components
if None, then `n_features` components are used
transformer : None or object
If None, use `sklearn.decomposition.NMF`
Otherwise, any object with a similar interface to NMF should work.
`transformer` must follow the scikit-learn convention, where
input data is `(n_samples, n_features)`.
`transformer.fit_transform()` will be run on `S.T` (not `S`),
the return value of which is stored (transposed) as `activations`
The components will be retrieved as `transformer.components_.T`
`S ~= np.dot(activations, transformer.components_).T`
or equivalently:
`S ~= np.dot(transformer.components_.T, activations.T)`
sort : bool
If `True`, components are sorted by ascending peak frequency.
.. note:: If used with `transformer`, sorting is applied to copies
of the decomposition parameters, and not to `transformer`'s
internal parameters.
fit : bool
If `True`, components are estimated from the input ``S``.
If `False`, components are assumed to be pre-computed and stored
in ``transformer``, and are not changed.
kwargs : Additional keyword arguments to the default transformer
`sklearn.decomposition.NMF`
Returns
-------
components: np.ndarray [shape=(n_features, n_components)]
matrix of components (basis elements).
activations: np.ndarray [shape=(n_components, n_samples)]
transformed matrix/activation matrix
Raises
------
ParameterError
if `fit` is False and no `transformer` object is provided.
See Also
--------
sklearn.decomposition : SciKit-Learn matrix decomposition modules
Examples
--------
Decompose a magnitude spectrogram into 8 components with NMF
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> comps, acts = librosa.decompose.decompose(S, n_components=8)
>>> comps
array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],
[ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],
...,
[ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],
[ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])
>>> acts
array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],
[ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],
...,
[ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],
[ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])
Sort components by ascending peak frequency
>>> comps, acts = librosa.decompose.decompose(S, n_components=16,
... sort=True)
Or with sparse dictionary learning
>>> import sklearn.decomposition
>>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)
>>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10,8))
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Input spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(3, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(comps,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Components')
>>> plt.subplot(3, 2, 4)
>>> librosa.display.specshow(acts, x_axis='time')
>>> plt.ylabel('Components')
>>> plt.title('Activations')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> S_approx = comps.dot(acts)
>>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Reconstructed spectrogram')
>>> plt.tight_layout()
"""
if transformer is None:
if fit is False:
raise ParameterError('fit must be True if transformer is None')
transformer = sklearn.decomposition.NMF(n_components=n_components,
**kwargs)
if n_components is None:
n_components = S.shape[0]
if fit:
activations = transformer.fit_transform(S.T).T
else:
activations = transformer.transform(S.T).T
components = transformer.components_.T
if sort:
components, idx = util.axis_sort(components, index=True)
activations = activations[idx]
return components, activations
|
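A minimal sketch of the decomposition contract described above, `S ~= components.dot(activations)`, plus reuse of a pre-fit transformer with `fit=False`. The random non-negative matrix is a hypothetical stand-in for a real magnitude spectrogram.

# Minimal sketch of decompose(): reconstruction check and fit=False reuse.
import numpy as np
import sklearn.decomposition
import librosa

rng = np.random.RandomState(0)
S = rng.rand(64, 200)                      # toy (n_features, n_samples) matrix

comps, acts = librosa.decompose.decompose(S, n_components=8)
print(comps.shape, acts.shape)             # (64, 8), (8, 200)
print(np.linalg.norm(S - comps.dot(acts)) / np.linalg.norm(S))   # relative error

# Re-apply an already-fit transformer to new data without re-estimating components
nmf = sklearn.decomposition.NMF(n_components=8)
nmf.fit(S.T)                               # scikit-learn convention: (n_samples, n_features)
S_new = rng.rand(64, 50)
comps2, acts2 = librosa.decompose.decompose(S_new, transformer=nmf, fit=False)
print(np.allclose(comps2, nmf.components_.T))   # components come straight from the transformer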
def decompose(S, n_components=None, transformer=None, sort=False, fit=True, **kwargs):
"""Decompose a feature matrix.
Given a spectrogram `S`, produce a decomposition into `components`
and `activations` such that `S ~= components.dot(activations)`.
By default, this is done with non-negative matrix factorization (NMF),
but any `sklearn.decomposition`-type object will work.
Parameters
----------
S : np.ndarray [shape=(n_features, n_samples), dtype=float]
The input feature matrix (e.g., magnitude spectrogram)
n_components : int > 0 [scalar] or None
number of desired components
if None, then `n_features` components are used
transformer : None or object
If None, use `sklearn.decomposition.NMF`
Otherwise, any object with a similar interface to NMF should work.
`transformer` must follow the scikit-learn convention, where
input data is `(n_samples, n_features)`.
`transformer.fit_transform()` will be run on `S.T` (not `S`),
the return value of which is stored (transposed) as `activations`
The components will be retrieved as `transformer.components_.T`
`S ~= np.dot(activations, transformer.components_).T`
or equivalently:
`S ~= np.dot(transformer.components_.T, activations.T)`
sort : bool
If `True`, components are sorted by ascending peak frequency.
.. note:: If used with `transformer`, sorting is applied to copies
of the decomposition parameters, and not to `transformer`'s
internal parameters.
fit : bool
If `True`, components are estimated from the input ``S``.
If `False`, components are assumed to be pre-computed and stored
in ``transformer``, and are not changed.
kwargs : Additional keyword arguments to the default transformer
`sklearn.decomposition.NMF`
Returns
-------
components: np.ndarray [shape=(n_features, n_components)]
matrix of components (basis elements).
activations: np.ndarray [shape=(n_components, n_samples)]
transformed matrix/activation matrix
Raises
------
ParameterError
if `fit` is False and no `transformer` object is provided.
See Also
--------
sklearn.decomposition : SciKit-Learn matrix decomposition modules
Examples
--------
Decompose a magnitude spectrogram into 8 components with NMF
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> S = np.abs(librosa.stft(y))
>>> comps, acts = librosa.decompose.decompose(S, n_components=8)
>>> comps
array([[ 1.876e-01, 5.559e-02, ..., 1.687e-01, 4.907e-02],
[ 3.148e-01, 1.719e-01, ..., 2.314e-01, 9.493e-02],
...,
[ 1.561e-07, 8.564e-08, ..., 7.167e-08, 4.997e-08],
[ 1.531e-07, 7.880e-08, ..., 5.632e-08, 4.028e-08]])
>>> acts
array([[ 4.197e-05, 8.512e-03, ..., 3.056e-05, 9.159e-06],
[ 9.568e-06, 1.718e-02, ..., 3.322e-05, 7.869e-06],
...,
[ 5.982e-05, 1.311e-02, ..., -0.000e+00, 6.323e-06],
[ 3.782e-05, 7.056e-03, ..., 3.290e-05, -0.000e+00]])
Sort components by ascending peak frequency
>>> comps, acts = librosa.decompose.decompose(S, n_components=16,
... sort=True)
Or with sparse dictionary learning
>>> import sklearn.decomposition
>>> T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=16)
>>> scomps, sacts = librosa.decompose.decompose(S, transformer=T, sort=True)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10,8))
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(S,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('Input spectrogram')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(3, 2, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(comps,
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Components')
>>> plt.subplot(3, 2, 4)
>>> librosa.display.specshow(acts, x_axis='time')
>>> plt.ylabel('Components')
>>> plt.title('Activations')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> S_approx = comps.dot(acts)
>>> librosa.display.specshow(librosa.amplitude_to_db(S_approx,
... ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Reconstructed spectrogram')
>>> plt.tight_layout()
"""
if transformer is None:
if fit is False:
raise ParameterError('fit must be True if transformer is None')
transformer = sklearn.decomposition.NMF(n_components=n_components,
**kwargs)
if n_components is None:
n_components = S.shape[0]
if fit:
activations = transformer.fit_transform(S.T).T
else:
activations = transformer.transform(S.T).T
components = transformer.components_.T
if sort:
components, idx = util.axis_sort(components, index=True)
activations = activations[idx]
return components, activations
|
[
"Decompose",
"a",
"feature",
"matrix",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/decompose.py#L30-L187
|
[
"def",
"decompose",
"(",
"S",
",",
"n_components",
"=",
"None",
",",
"transformer",
"=",
"None",
",",
"sort",
"=",
"False",
",",
"fit",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"transformer",
"is",
"None",
":",
"if",
"fit",
"is",
"False",
":",
"raise",
"ParameterError",
"(",
"'fit must be True if transformer is None'",
")",
"transformer",
"=",
"sklearn",
".",
"decomposition",
".",
"NMF",
"(",
"n_components",
"=",
"n_components",
",",
"*",
"*",
"kwargs",
")",
"if",
"n_components",
"is",
"None",
":",
"n_components",
"=",
"S",
".",
"shape",
"[",
"0",
"]",
"if",
"fit",
":",
"activations",
"=",
"transformer",
".",
"fit_transform",
"(",
"S",
".",
"T",
")",
".",
"T",
"else",
":",
"activations",
"=",
"transformer",
".",
"transform",
"(",
"S",
".",
"T",
")",
".",
"T",
"components",
"=",
"transformer",
".",
"components_",
".",
"T",
"if",
"sort",
":",
"components",
",",
"idx",
"=",
"util",
".",
"axis_sort",
"(",
"components",
",",
"index",
"=",
"True",
")",
"activations",
"=",
"activations",
"[",
"idx",
"]",
"return",
"components",
",",
"activations"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
hpss
|
Median-filtering harmonic percussive source separation (HPSS).
If `margin = 1.0`, decomposes an input spectrogram `S = H + P`
where `H` contains the harmonic components,
and `P` contains the percussive components.
If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`
where `R` contains residual components not included in `H` or `P`.
This implementation is based upon the algorithm described by [1]_ and [2]_.
.. [1] Fitzgerald, Derry.
"Harmonic/percussive separation using median filtering."
13th International Conference on Digital Audio Effects (DAFX10),
Graz, Austria, 2010.
.. [2] Driedger, Müller, Disch.
"Extending harmonic-percussive separation of audio."
15th International Society for Music Information Retrieval Conference (ISMIR 2014),
Taipei, Taiwan, 2014.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input spectrogram. May be real (magnitude) or complex.
kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
kernel size(s) for the median filters.
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the width of the
harmonic filter, and the second value specifies the width
of the percussive filter.
power : float > 0 [scalar]
Exponent for the Wiener filter when constructing soft mask matrices.
mask : bool
Return the masking matrices instead of components.
Masking matrices contain non-negative real values that
can be used to measure the assignment of energy from `S`
into harmonic or percussive components.
Components can be recovered by multiplying `S * mask_H`
or `S * mask_P`.
margin : float or tuple (margin_harmonic, margin_percussive)
margin size(s) for the masks (as described in [2]_)
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the margin of the
harmonic mask, and the second value specifies the margin
of the percussive mask.
Returns
-------
harmonic : np.ndarray [shape=(d, n)]
harmonic component (or mask)
percussive : np.ndarray [shape=(d, n)]
percussive component (or mask)
See Also
--------
util.softmask
Notes
-----
This function caches at level 30.
Examples
--------
Separate into harmonic and percussive
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> D = librosa.stft(y)
>>> H, P = librosa.decompose.hpss(D)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(D),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Full power spectrogram')
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(H),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Harmonic power spectrogram')
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(P),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Percussive power spectrogram')
>>> plt.tight_layout()
Or with a narrower horizontal filter
>>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))
Just get harmonic/percussive masks, not the spectra
>>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)
>>> mask_H
array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],
[ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],
...,
[ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],
[ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)
>>> mask_P
array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],
[ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],
...,
[ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],
[ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)
Separate into harmonic/percussive/residual components by using a margin > 1.0
>>> H, P = librosa.decompose.hpss(D, margin=3.0)
>>> R = D - (H+P)
>>> y_harm = librosa.core.istft(H)
>>> y_perc = librosa.core.istft(P)
>>> y_resi = librosa.core.istft(R)
Get a more isolated percussive component by widening its margin
>>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))
|
librosa/decompose.py
|
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
"""Median-filtering harmonic percussive source separation (HPSS).
If `margin = 1.0`, decomposes an input spectrogram `S = H + P`
where `H` contains the harmonic components,
and `P` contains the percussive components.
If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`
where `R` contains residual components not included in `H` or `P`.
This implementation is based upon the algorithm described by [1]_ and [2]_.
.. [1] Fitzgerald, Derry.
"Harmonic/percussive separation using median filtering."
13th International Conference on Digital Audio Effects (DAFX10),
Graz, Austria, 2010.
.. [2] Driedger, Müller, Disch.
"Extending harmonic-percussive separation of audio."
15th International Society for Music Information Retrieval Conference (ISMIR 2014),
Taipei, Taiwan, 2014.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input spectrogram. May be real (magnitude) or complex.
kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
kernel size(s) for the median filters.
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the width of the
harmonic filter, and the second value specifies the width
of the percussive filter.
power : float > 0 [scalar]
Exponent for the Wiener filter when constructing soft mask matrices.
mask : bool
Return the masking matrices instead of components.
Masking matrices contain non-negative real values that
can be used to measure the assignment of energy from `S`
into harmonic or percussive components.
Components can be recovered by multiplying `S * mask_H`
or `S * mask_P`.
margin : float or tuple (margin_harmonic, margin_percussive)
margin size(s) for the masks (as described in [2]_)
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the margin of the
harmonic mask, and the second value specifies the margin
of the percussive mask.
Returns
-------
harmonic : np.ndarray [shape=(d, n)]
harmonic component (or mask)
percussive : np.ndarray [shape=(d, n)]
percussive component (or mask)
See Also
--------
util.softmask
Notes
-----
This function caches at level 30.
Examples
--------
Separate into harmonic and percussive
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> D = librosa.stft(y)
>>> H, P = librosa.decompose.hpss(D)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(D),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Full power spectrogram')
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(H),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Harmonic power spectrogram')
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(P),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Percussive power spectrogram')
>>> plt.tight_layout()
Or with a narrower horizontal filter
>>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))
Just get harmonic/percussive masks, not the spectra
>>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)
>>> mask_H
array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],
[ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],
...,
[ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],
[ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)
>>> mask_P
array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],
[ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],
...,
[ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],
[ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)
Separate into harmonic/percussive/residual components by using a margin > 1.0
>>> H, P = librosa.decompose.hpss(D, margin=3.0)
>>> R = D - (H+P)
>>> y_harm = librosa.core.istft(H)
>>> y_perc = librosa.core.istft(P)
>>> y_resi = librosa.core.istft(R)
Get a more isolated percussive component by widening its margin
>>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))
"""
if np.iscomplexobj(S):
S, phase = core.magphase(S)
else:
phase = 1
if np.isscalar(kernel_size):
win_harm = kernel_size
win_perc = kernel_size
else:
win_harm = kernel_size[0]
win_perc = kernel_size[1]
if np.isscalar(margin):
margin_harm = margin
margin_perc = margin
else:
margin_harm = margin[0]
margin_perc = margin[1]
# margin minimum is 1.0
if margin_harm < 1 or margin_perc < 1:
raise ParameterError("Margins must be >= 1.0. "
"A typical range is between 1 and 10.")
# Compute median filters. Pre-allocation here preserves memory layout.
harm = np.empty_like(S)
harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')
perc = np.empty_like(S)
perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')
split_zeros = (margin_harm == 1 and margin_perc == 1)
mask_harm = util.softmask(harm, perc * margin_harm,
power=power,
split_zeros=split_zeros)
mask_perc = util.softmask(perc, harm * margin_perc,
power=power,
split_zeros=split_zeros)
if mask:
return mask_harm, mask_perc
return ((S * mask_harm) * phase, (S * mask_perc) * phase)
|
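A minimal sketch relating the `mask=True` and component outputs of `hpss`, and the residual left behind when `margin > 1`, assuming the 0.6-era API shown above; a short excerpt of the bundled example clip keeps it quick.

# Minimal sketch: masks vs. components, exactness at margin=1, residual at margin>1.
import numpy as np
import librosa

y, sr = librosa.load(librosa.util.example_audio_file(), duration=5)
D = librosa.stft(y)

# Components and the corresponding soft masks
H, P = librosa.decompose.hpss(D)
mask_H, mask_P = librosa.decompose.hpss(D, mask=True)

# Components are just the masked input: H == D * mask_H, P == D * mask_P
print(np.allclose(H, D * mask_H), np.allclose(P, D * mask_P))

# With the default margin = 1 the decomposition is exact: D == H + P
print(np.max(np.abs(D - (H + P))))

# With margin > 1 some energy is left in a residual R = D - (H + P)
H3, P3 = librosa.decompose.hpss(D, margin=3.0)
R = D - (H3 + P3)
print(np.linalg.norm(R) / np.linalg.norm(D))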
def hpss(S, kernel_size=31, power=2.0, mask=False, margin=1.0):
"""Median-filtering harmonic percussive source separation (HPSS).
If `margin = 1.0`, decomposes an input spectrogram `S = H + P`
where `H` contains the harmonic components,
and `P` contains the percussive components.
If `margin > 1.0`, decomposes an input spectrogram `S = H + P + R`
where `R` contains residual components not included in `H` or `P`.
This implementation is based upon the algorithm described by [1]_ and [2]_.
.. [1] Fitzgerald, Derry.
"Harmonic/percussive separation using median filtering."
13th International Conference on Digital Audio Effects (DAFX10),
Graz, Austria, 2010.
.. [2] Driedger, Müller, Disch.
"Extending harmonic-percussive separation of audio."
15th International Society for Music Information Retrieval Conference (ISMIR 2014),
Taipei, Taiwan, 2014.
Parameters
----------
S : np.ndarray [shape=(d, n)]
input spectrogram. May be real (magnitude) or complex.
kernel_size : int or tuple (kernel_harmonic, kernel_percussive)
kernel size(s) for the median filters.
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the width of the
harmonic filter, and the second value specifies the width
of the percussive filter.
power : float > 0 [scalar]
Exponent for the Wiener filter when constructing soft mask matrices.
mask : bool
Return the masking matrices instead of components.
Masking matrices contain non-negative real values that
can be used to measure the assignment of energy from `S`
into harmonic or percussive components.
Components can be recovered by multiplying `S * mask_H`
or `S * mask_P`.
margin : float or tuple (margin_harmonic, margin_percussive)
margin size(s) for the masks (as described in [2]_)
- If scalar, the same size is used for both harmonic and percussive.
- If tuple, the first value specifies the margin of the
harmonic mask, and the second value specifies the margin
of the percussive mask.
Returns
-------
harmonic : np.ndarray [shape=(d, n)]
harmonic component (or mask)
percussive : np.ndarray [shape=(d, n)]
percussive component (or mask)
See Also
--------
util.softmask
Notes
-----
This function caches at level 30.
Examples
--------
Separate into harmonic and percussive
>>> y, sr = librosa.load(librosa.util.example_audio_file(), duration=15)
>>> D = librosa.stft(y)
>>> H, P = librosa.decompose.hpss(D)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(D),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Full power spectrogram')
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(H),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Harmonic power spectrogram')
>>> plt.subplot(3, 1, 3)
>>> librosa.display.specshow(librosa.amplitude_to_db(np.abs(P),
... ref=np.max),
... y_axis='log')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.title('Percussive power spectrogram')
>>> plt.tight_layout()
Or with a narrower horizontal filter
>>> H, P = librosa.decompose.hpss(D, kernel_size=(13, 31))
Just get harmonic/percussive masks, not the spectra
>>> mask_H, mask_P = librosa.decompose.hpss(D, mask=True)
>>> mask_H
array([[ 1.000e+00, 1.469e-01, ..., 2.648e-03, 2.164e-03],
[ 1.000e+00, 2.368e-01, ..., 9.413e-03, 7.703e-03],
...,
[ 8.869e-01, 5.673e-02, ..., 4.603e-02, 1.247e-05],
[ 7.068e-01, 2.194e-02, ..., 4.453e-02, 1.205e-05]], dtype=float32)
>>> mask_P
array([[ 2.858e-05, 8.531e-01, ..., 9.974e-01, 9.978e-01],
[ 1.586e-05, 7.632e-01, ..., 9.906e-01, 9.923e-01],
...,
[ 1.131e-01, 9.433e-01, ..., 9.540e-01, 1.000e+00],
[ 2.932e-01, 9.781e-01, ..., 9.555e-01, 1.000e+00]], dtype=float32)
Separate into harmonic/percussive/residual components by using a margin > 1.0
>>> H, P = librosa.decompose.hpss(D, margin=3.0)
>>> R = D - (H+P)
>>> y_harm = librosa.core.istft(H)
>>> y_perc = librosa.core.istft(P)
>>> y_resi = librosa.core.istft(R)
Get a more isolated percussive component by widening its margin
>>> H, P = librosa.decompose.hpss(D, margin=(1.0,5.0))
"""
if np.iscomplexobj(S):
S, phase = core.magphase(S)
else:
phase = 1
if np.isscalar(kernel_size):
win_harm = kernel_size
win_perc = kernel_size
else:
win_harm = kernel_size[0]
win_perc = kernel_size[1]
if np.isscalar(margin):
margin_harm = margin
margin_perc = margin
else:
margin_harm = margin[0]
margin_perc = margin[1]
# margin minimum is 1.0
if margin_harm < 1 or margin_perc < 1:
raise ParameterError("Margins must be >= 1.0. "
"A typical range is between 1 and 10.")
# Compute median filters. Pre-allocation here preserves memory layout.
harm = np.empty_like(S)
harm[:] = median_filter(S, size=(1, win_harm), mode='reflect')
perc = np.empty_like(S)
perc[:] = median_filter(S, size=(win_perc, 1), mode='reflect')
split_zeros = (margin_harm == 1 and margin_perc == 1)
mask_harm = util.softmask(harm, perc * margin_harm,
power=power,
split_zeros=split_zeros)
mask_perc = util.softmask(perc, harm * margin_perc,
power=power,
split_zeros=split_zeros)
if mask:
return mask_harm, mask_perc
return ((S * mask_harm) * phase, (S * mask_perc) * phase)
|
[
"Median",
"-",
"filtering",
"harmonic",
"percussive",
"source",
"separation",
"(",
"HPSS",
")",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/decompose.py#L191-L375
|
[
"def",
"hpss",
"(",
"S",
",",
"kernel_size",
"=",
"31",
",",
"power",
"=",
"2.0",
",",
"mask",
"=",
"False",
",",
"margin",
"=",
"1.0",
")",
":",
"if",
"np",
".",
"iscomplexobj",
"(",
"S",
")",
":",
"S",
",",
"phase",
"=",
"core",
".",
"magphase",
"(",
"S",
")",
"else",
":",
"phase",
"=",
"1",
"if",
"np",
".",
"isscalar",
"(",
"kernel_size",
")",
":",
"win_harm",
"=",
"kernel_size",
"win_perc",
"=",
"kernel_size",
"else",
":",
"win_harm",
"=",
"kernel_size",
"[",
"0",
"]",
"win_perc",
"=",
"kernel_size",
"[",
"1",
"]",
"if",
"np",
".",
"isscalar",
"(",
"margin",
")",
":",
"margin_harm",
"=",
"margin",
"margin_perc",
"=",
"margin",
"else",
":",
"margin_harm",
"=",
"margin",
"[",
"0",
"]",
"margin_perc",
"=",
"margin",
"[",
"1",
"]",
"# margin minimum is 1.0",
"if",
"margin_harm",
"<",
"1",
"or",
"margin_perc",
"<",
"1",
":",
"raise",
"ParameterError",
"(",
"\"Margins must be >= 1.0. \"",
"\"A typical range is between 1 and 10.\"",
")",
"# Compute median filters. Pre-allocation here preserves memory layout.",
"harm",
"=",
"np",
".",
"empty_like",
"(",
"S",
")",
"harm",
"[",
":",
"]",
"=",
"median_filter",
"(",
"S",
",",
"size",
"=",
"(",
"1",
",",
"win_harm",
")",
",",
"mode",
"=",
"'reflect'",
")",
"perc",
"=",
"np",
".",
"empty_like",
"(",
"S",
")",
"perc",
"[",
":",
"]",
"=",
"median_filter",
"(",
"S",
",",
"size",
"=",
"(",
"win_perc",
",",
"1",
")",
",",
"mode",
"=",
"'reflect'",
")",
"split_zeros",
"=",
"(",
"margin_harm",
"==",
"1",
"and",
"margin_perc",
"==",
"1",
")",
"mask_harm",
"=",
"util",
".",
"softmask",
"(",
"harm",
",",
"perc",
"*",
"margin_harm",
",",
"power",
"=",
"power",
",",
"split_zeros",
"=",
"split_zeros",
")",
"mask_perc",
"=",
"util",
".",
"softmask",
"(",
"perc",
",",
"harm",
"*",
"margin_perc",
",",
"power",
"=",
"power",
",",
"split_zeros",
"=",
"split_zeros",
")",
"if",
"mask",
":",
"return",
"mask_harm",
",",
"mask_perc",
"return",
"(",
"(",
"S",
"*",
"mask_harm",
")",
"*",
"phase",
",",
"(",
"S",
"*",
"mask_perc",
")",
"*",
"phase",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
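The hpss entry above boils down to two orthogonal median filters followed by a Wiener-style soft mask. As a rough standalone sketch (not librosa's implementation), the snippet below applies the same idea to a made-up magnitude spectrogram `S_mag`; the kernel width 31 mirrors the default `kernel_size`, and the hand-written masks stand in for `util.softmask` with `power=2`.

import numpy as np
from scipy.ndimage import median_filter

# Hypothetical magnitude spectrogram (frequency bins x frames), for illustration only.
rng = np.random.default_rng(0)
S_mag = rng.random((1025, 200))

# Median-filter along time for the harmonic estimate,
# and along frequency for the percussive estimate.
harm = median_filter(S_mag, size=(1, 31), mode='reflect')
perc = median_filter(S_mag, size=(31, 1), mode='reflect')

# Wiener-style soft masks (power = 2), a stand-in for util.softmask.
eps = np.finfo(float).tiny
mask_h = harm**2 / (harm**2 + perc**2 + eps)
mask_p = perc**2 / (harm**2 + perc**2 + eps)

H = S_mag * mask_h  # harmonic component
P = S_mag * mask_p  # percussive component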
test
|
nn_filter
|
Filtering by nearest-neighbors.
Each data point (e.g., a spectrogram column) is replaced
by aggregating its nearest neighbors in feature space.
This can be useful for de-noising a spectrogram or feature matrix.
The non-local means method [1]_ can be recovered by providing a
weighted recurrence matrix as input and specifying `aggregate=np.average`.
Similarly, setting `aggregate=np.median` produces sparse de-noising
as in REPET-SIM [2]_.
.. [1] Buades, A., Coll, B., & Morel, J. M.
(2005, June). A non-local algorithm for image denoising.
In Computer Vision and Pattern Recognition, 2005.
CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.
.. [2] Rafii, Z., & Pardo, B.
(2012, October). "Music/Voice Separation Using the Similarity Matrix."
International Society for Music Information Retrieval Conference, 2012.
Parameters
----------
S : np.ndarray
The input data (spectrogram) to filter
rec : (optional) scipy.sparse.spmatrix or np.ndarray
Optionally, a pre-computed nearest-neighbor matrix
as provided by `librosa.segment.recurrence_matrix`
aggregate : function
aggregation function (default: `np.mean`)
If `aggregate=np.average`, then a weighted average is
computed according to the (per-row) weights in `rec`.
For all other aggregation functions, all neighbors
are treated equally.
axis : int
The axis along which to filter (by default, columns)
kwargs
Additional keyword arguments provided to
`librosa.segment.recurrence_matrix` if `rec` is not provided
Returns
-------
S_filtered : np.ndarray
The filtered data
Raises
------
ParameterError
if `rec` is provided and its shape is incompatible with `S`.
See also
--------
decompose
hpss
librosa.segment.recurrence_matrix
Notes
-----
This function caches at level 30.
Examples
--------
De-noise a chromagram by non-local median filtering.
By default, this would use Euclidean distance to select neighbors,
but this can be overridden directly by setting the `metric` parameter.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=10)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> chroma_med = librosa.decompose.nn_filter(chroma,
... aggregate=np.median,
... metric='cosine')
To use non-local means, provide an affinity matrix and `aggregate=np.average`.
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',
... metric='cosine', sparse=True)
>>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,
... aggregate=np.average)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 8))
>>> plt.subplot(5, 1, 1)
>>> librosa.display.specshow(chroma, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Unfiltered')
>>> plt.subplot(5, 1, 2)
>>> librosa.display.specshow(chroma_med, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Median-filtered')
>>> plt.subplot(5, 1, 3)
>>> librosa.display.specshow(chroma_nlm, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Non-local means')
>>> plt.subplot(5, 1, 4)
>>> librosa.display.specshow(chroma - chroma_med,
... y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Original - median')
>>> plt.subplot(5, 1, 5)
>>> librosa.display.specshow(chroma - chroma_nlm,
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original - NLM')
>>> plt.tight_layout()
|
librosa/decompose.py
|
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
'''Filtering by nearest-neighbors.
Each data point (e.g., a spectrogram column) is replaced
by aggregating its nearest neighbors in feature space.
This can be useful for de-noising a spectrogram or feature matrix.
The non-local means method [1]_ can be recovered by providing a
weighted recurrence matrix as input and specifying `aggregate=np.average`.
Similarly, setting `aggregate=np.median` produces sparse de-noising
as in REPET-SIM [2]_.
.. [1] Buades, A., Coll, B., & Morel, J. M.
(2005, June). A non-local algorithm for image denoising.
In Computer Vision and Pattern Recognition, 2005.
CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.
.. [2] Rafii, Z., & Pardo, B.
(2012, October). "Music/Voice Separation Using the Similarity Matrix."
International Society for Music Information Retrieval Conference, 2012.
Parameters
----------
S : np.ndarray
The input data (spectrogram) to filter
rec : (optional) scipy.sparse.spmatrix or np.ndarray
Optionally, a pre-computed nearest-neighbor matrix
as provided by `librosa.segment.recurrence_matrix`
aggregate : function
aggregation function (default: `np.mean`)
If `aggregate=np.average`, then a weighted average is
computed according to the (per-row) weights in `rec`.
For all other aggregation functions, all neighbors
are treated equally.
axis : int
The axis along which to filter (by default, columns)
kwargs
Additional keyword arguments provided to
`librosa.segment.recurrence_matrix` if `rec` is not provided
Returns
-------
S_filtered : np.ndarray
The filtered data
Raises
------
ParameterError
if `rec` is provided and its shape is incompatible with `S`.
See also
--------
decompose
hpss
librosa.segment.recurrence_matrix
Notes
-----
This function caches at level 30.
Examples
--------
De-noise a chromagram by non-local median filtering.
By default, this would use Euclidean distance to select neighbors,
but this can be overridden directly by setting the `metric` parameter.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=10)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> chroma_med = librosa.decompose.nn_filter(chroma,
... aggregate=np.median,
... metric='cosine')
To use non-local means, provide an affinity matrix and `aggregate=np.average`.
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',
... metric='cosine', sparse=True)
>>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,
... aggregate=np.average)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 8))
>>> plt.subplot(5, 1, 1)
>>> librosa.display.specshow(chroma, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Unfiltered')
>>> plt.subplot(5, 1, 2)
>>> librosa.display.specshow(chroma_med, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Median-filtered')
>>> plt.subplot(5, 1, 3)
>>> librosa.display.specshow(chroma_nlm, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Non-local means')
>>> plt.subplot(5, 1, 4)
>>> librosa.display.specshow(chroma - chroma_med,
... y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Original - median')
>>> plt.subplot(5, 1, 5)
>>> librosa.display.specshow(chroma - chroma_nlm,
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original - NLM')
>>> plt.tight_layout()
'''
if aggregate is None:
aggregate = np.mean
if rec is None:
kwargs = dict(kwargs)
kwargs['sparse'] = True
rec = segment.recurrence_matrix(S, axis=axis, **kwargs)
elif not scipy.sparse.issparse(rec):
rec = scipy.sparse.csr_matrix(rec)
if rec.shape[0] != S.shape[axis] or rec.shape[0] != rec.shape[1]:
raise ParameterError('Invalid self-similarity matrix shape '
'rec.shape={} for S.shape={}'.format(rec.shape,
S.shape))
return __nn_filter_helper(rec.data, rec.indices, rec.indptr,
S.swapaxes(0, axis), aggregate).swapaxes(0, axis)
|
def nn_filter(S, rec=None, aggregate=None, axis=-1, **kwargs):
'''Filtering by nearest-neighbors.
Each data point (e.g., a spectrogram column) is replaced
by aggregating its nearest neighbors in feature space.
This can be useful for de-noising a spectrogram or feature matrix.
The non-local means method [1]_ can be recovered by providing a
weighted recurrence matrix as input and specifying `aggregate=np.average`.
Similarly, setting `aggregate=np.median` produces sparse de-noising
as in REPET-SIM [2]_.
.. [1] Buades, A., Coll, B., & Morel, J. M.
(2005, June). A non-local algorithm for image denoising.
In Computer Vision and Pattern Recognition, 2005.
CVPR 2005. IEEE Computer Society Conference on (Vol. 2, pp. 60-65). IEEE.
.. [2] Rafii, Z., & Pardo, B.
(2012, October). "Music/Voice Separation Using the Similarity Matrix."
International Society for Music Information Retrieval Conference, 2012.
Parameters
----------
S : np.ndarray
The input data (spectrogram) to filter
rec : (optional) scipy.sparse.spmatrix or np.ndarray
Optionally, a pre-computed nearest-neighbor matrix
as provided by `librosa.segment.recurrence_matrix`
aggregate : function
aggregation function (default: `np.mean`)
If `aggregate=np.average`, then a weighted average is
computed according to the (per-row) weights in `rec`.
For all other aggregation functions, all neighbors
are treated equally.
axis : int
The axis along which to filter (by default, columns)
kwargs
Additional keyword arguments provided to
`librosa.segment.recurrence_matrix` if `rec` is not provided
Returns
-------
S_filtered : np.ndarray
The filtered data
Raises
------
ParameterError
if `rec` is provided and its shape is incompatible with `S`.
See also
--------
decompose
hpss
librosa.segment.recurrence_matrix
Notes
-----
This function caches at level 30.
Examples
--------
De-noise a chromagram by non-local median filtering.
By default, this would use Euclidean distance to select neighbors,
but this can be overridden directly by setting the `metric` parameter.
>>> y, sr = librosa.load(librosa.util.example_audio_file(),
... offset=30, duration=10)
>>> chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
>>> chroma_med = librosa.decompose.nn_filter(chroma,
... aggregate=np.median,
... metric='cosine')
To use non-local means, provide an affinity matrix and `aggregate=np.average`.
>>> rec = librosa.segment.recurrence_matrix(chroma, mode='affinity',
... metric='cosine', sparse=True)
>>> chroma_nlm = librosa.decompose.nn_filter(chroma, rec=rec,
... aggregate=np.average)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 8))
>>> plt.subplot(5, 1, 1)
>>> librosa.display.specshow(chroma, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Unfiltered')
>>> plt.subplot(5, 1, 2)
>>> librosa.display.specshow(chroma_med, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Median-filtered')
>>> plt.subplot(5, 1, 3)
>>> librosa.display.specshow(chroma_nlm, y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Non-local means')
>>> plt.subplot(5, 1, 4)
>>> librosa.display.specshow(chroma - chroma_med,
... y_axis='chroma')
>>> plt.colorbar()
>>> plt.title('Original - median')
>>> plt.subplot(5, 1, 5)
>>> librosa.display.specshow(chroma - chroma_nlm,
... y_axis='chroma', x_axis='time')
>>> plt.colorbar()
>>> plt.title('Original - NLM')
>>> plt.tight_layout()
'''
if aggregate is None:
aggregate = np.mean
if rec is None:
kwargs = dict(kwargs)
kwargs['sparse'] = True
rec = segment.recurrence_matrix(S, axis=axis, **kwargs)
elif not scipy.sparse.issparse(rec):
rec = scipy.sparse.csr_matrix(rec)
if rec.shape[0] != S.shape[axis] or rec.shape[0] != rec.shape[1]:
raise ParameterError('Invalid self-similarity matrix shape '
'rec.shape={} for S.shape={}'.format(rec.shape,
S.shape))
return __nn_filter_helper(rec.data, rec.indices, rec.indptr,
S.swapaxes(0, axis), aggregate).swapaxes(0, axis)
|
[
"Filtering",
"by",
"nearest",
"-",
"neighbors",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/decompose.py#L379-L513
|
[
"def",
"nn_filter",
"(",
"S",
",",
"rec",
"=",
"None",
",",
"aggregate",
"=",
"None",
",",
"axis",
"=",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"aggregate",
"is",
"None",
":",
"aggregate",
"=",
"np",
".",
"mean",
"if",
"rec",
"is",
"None",
":",
"kwargs",
"=",
"dict",
"(",
"kwargs",
")",
"kwargs",
"[",
"'sparse'",
"]",
"=",
"True",
"rec",
"=",
"segment",
".",
"recurrence_matrix",
"(",
"S",
",",
"axis",
"=",
"axis",
",",
"*",
"*",
"kwargs",
")",
"elif",
"not",
"scipy",
".",
"sparse",
".",
"issparse",
"(",
"rec",
")",
":",
"rec",
"=",
"scipy",
".",
"sparse",
".",
"csr_matrix",
"(",
"rec",
")",
"if",
"rec",
".",
"shape",
"[",
"0",
"]",
"!=",
"S",
".",
"shape",
"[",
"axis",
"]",
"or",
"rec",
".",
"shape",
"[",
"0",
"]",
"!=",
"rec",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ParameterError",
"(",
"'Invalid self-similarity matrix shape '",
"'rec.shape={} for S.shape={}'",
".",
"format",
"(",
"rec",
".",
"shape",
",",
"S",
".",
"shape",
")",
")",
"return",
"__nn_filter_helper",
"(",
"rec",
".",
"data",
",",
"rec",
".",
"indices",
",",
"rec",
".",
"indptr",
",",
"S",
".",
"swapaxes",
"(",
"0",
",",
"axis",
")",
",",
"aggregate",
")",
".",
"swapaxes",
"(",
"0",
",",
"axis",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
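To make the nn_filter entry above more concrete without going through librosa.segment, here is a small dense sketch of the same idea: build a toy recurrence matrix from pairwise distances and replace each frame by the median of its neighbors. The feature matrix `X`, the 10th-percentile threshold, and the shapes are all invented for illustration.

import numpy as np

rng = np.random.default_rng(1)
X = rng.random((12, 50))  # toy feature matrix (features x frames)

# Toy recurrence matrix: frames i and j are neighbors if their feature
# vectors are close in Euclidean distance (self-links removed).
d = np.linalg.norm(X[:, :, None] - X[:, None, :], axis=0)
rec = d < np.percentile(d, 10)
np.fill_diagonal(rec, False)

# Sparse de-noising: replace each frame by the median of its neighbors.
X_filtered = X.copy()
for i in range(X.shape[1]):
    idx = np.flatnonzero(rec[:, i])
    if idx.size:
        X_filtered[:, i] = np.median(X[:, idx], axis=1)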
test
|
__nn_filter_helper
|
Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
|
librosa/decompose.py
|
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
'''Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
'''
s_out = np.empty_like(S)
for i in range(len(R_ptr)-1):
# Get the non-zeros out of the recurrence matrix
targets = R_indices[R_ptr[i]:R_ptr[i+1]]
if not len(targets):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if aggregate is np.average:
weights = R_data[R_ptr[i]:R_ptr[i+1]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
|
def __nn_filter_helper(R_data, R_indices, R_ptr, S, aggregate):
'''Nearest-neighbor filter helper function.
This is an internal function, not for use outside of the decompose module.
It applies the nearest-neighbor filter to S, assuming that the first index
corresponds to observations.
Parameters
----------
R_data, R_indices, R_ptr : np.ndarrays
The `data`, `indices`, and `indptr` of a scipy.sparse matrix
S : np.ndarray
The observation data to filter
aggregate : callable
The aggregation operator
Returns
-------
S_out : np.ndarray like S
The filtered data array
'''
s_out = np.empty_like(S)
for i in range(len(R_ptr)-1):
# Get the non-zeros out of the recurrence matrix
targets = R_indices[R_ptr[i]:R_ptr[i+1]]
if not len(targets):
s_out[i] = S[i]
continue
neighbors = np.take(S, targets, axis=0)
if aggregate is np.average:
weights = R_data[R_ptr[i]:R_ptr[i+1]]
s_out[i] = aggregate(neighbors, axis=0, weights=weights)
else:
s_out[i] = aggregate(neighbors, axis=0)
return s_out
|
[
"Nearest",
"-",
"neighbor",
"filter",
"helper",
"function",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/decompose.py#L516-L560
|
[
"def",
"__nn_filter_helper",
"(",
"R_data",
",",
"R_indices",
",",
"R_ptr",
",",
"S",
",",
"aggregate",
")",
":",
"s_out",
"=",
"np",
".",
"empty_like",
"(",
"S",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"R_ptr",
")",
"-",
"1",
")",
":",
"# Get the non-zeros out of the recurrence matrix",
"targets",
"=",
"R_indices",
"[",
"R_ptr",
"[",
"i",
"]",
":",
"R_ptr",
"[",
"i",
"+",
"1",
"]",
"]",
"if",
"not",
"len",
"(",
"targets",
")",
":",
"s_out",
"[",
"i",
"]",
"=",
"S",
"[",
"i",
"]",
"continue",
"neighbors",
"=",
"np",
".",
"take",
"(",
"S",
",",
"targets",
",",
"axis",
"=",
"0",
")",
"if",
"aggregate",
"is",
"np",
".",
"average",
":",
"weights",
"=",
"R_data",
"[",
"R_ptr",
"[",
"i",
"]",
":",
"R_ptr",
"[",
"i",
"+",
"1",
"]",
"]",
"s_out",
"[",
"i",
"]",
"=",
"aggregate",
"(",
"neighbors",
",",
"axis",
"=",
"0",
",",
"weights",
"=",
"weights",
")",
"else",
":",
"s_out",
"[",
"i",
"]",
"=",
"aggregate",
"(",
"neighbors",
",",
"axis",
"=",
"0",
")",
"return",
"s_out"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
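The CSR slicing in __nn_filter_helper above is easy to lose track of, so here is a tiny self-contained walk-through with an invented 4x4 affinity matrix and a 4x2 observation array; it mimics the helper's weighted-average branch but is not the librosa code itself.

import numpy as np
import scipy.sparse

# Toy affinity matrix: row i holds the weights of observation i's neighbors.
R = scipy.sparse.csr_matrix(np.array([[0.0, 0.8, 0.0, 0.2],
                                      [0.8, 0.0, 0.5, 0.0],
                                      [0.0, 0.5, 0.0, 0.0],
                                      [0.2, 0.0, 0.0, 0.0]]))
S = np.arange(8.0).reshape(4, 2)  # 4 observations, 2 features each

S_out = np.empty_like(S)
for i in range(R.shape[0]):
    sl = slice(R.indptr[i], R.indptr[i + 1])
    targets = R.indices[sl]  # neighbor indices of observation i
    if targets.size == 0:
        S_out[i] = S[i]      # no neighbors: pass the row through unchanged
        continue
    weights = R.data[sl]     # matching affinity weights
    S_out[i] = np.average(S[targets], axis=0, weights=weights)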
test
|
mel
|
Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
norm : {None, 1, np.inf} [scalar]
if 1, divide the triangular mel weights by the width of the mel band
(area normalization). Otherwise, leave all the triangles aiming for
a peak value of 1.0
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8 kHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
|
librosa/filters.py
|
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False,
norm=1, dtype=np.float32):
"""Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
norm : {None, 1, np.inf} [scalar]
if 1, divide the triangular mel weights by the width of the mel band
(area normalization). Otherwise, leave all the triangles aiming for
a peak value of 1.0
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8 kHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if fmax is None:
fmax = float(sr) / 2
if norm is not None and norm != 1 and norm != np.inf:
raise ParameterError('Unsupported norm: {}'.format(repr(norm)))
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)
fdiff = np.diff(mel_f)
ramps = np.subtract.outer(mel_f, fftfreqs)
for i in range(n_mels):
# lower and upper slopes for all bins
lower = -ramps[i] / fdiff[i]
upper = ramps[i+2] / fdiff[i+1]
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper))
if norm == 1:
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (mel_f[2:n_mels+2] - mel_f[:n_mels])
weights *= enorm[:, np.newaxis]
# Only check weights if f_mel[0] is positive
if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
# This means we have an empty channel somewhere
warnings.warn('Empty filters detected in mel frequency basis. '
'Some channels will produce empty responses. '
'Try increasing your sampling rate (and fmax) or '
'reducing n_mels.')
return weights
|
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False,
norm=1, dtype=np.float32):
"""Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : number > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
norm : {None, 1, np.inf} [scalar]
if 1, divide the triangular mel weights by the width of the mel band
(area normalization). Otherwise, leave all the triangles aiming for
a peak value of 1.0
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Notes
-----
This function caches at level 10.
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8 kHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if fmax is None:
fmax = float(sr) / 2
if norm is not None and norm != 1 and norm != np.inf:
raise ParameterError('Unsupported norm: {}'.format(repr(norm)))
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype)
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
mel_f = mel_frequencies(n_mels + 2, fmin=fmin, fmax=fmax, htk=htk)
fdiff = np.diff(mel_f)
ramps = np.subtract.outer(mel_f, fftfreqs)
for i in range(n_mels):
# lower and upper slopes for all bins
lower = -ramps[i] / fdiff[i]
upper = ramps[i+2] / fdiff[i+1]
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper))
if norm == 1:
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (mel_f[2:n_mels+2] - mel_f[:n_mels])
weights *= enorm[:, np.newaxis]
# Only check weights if f_mel[0] is positive
if not np.all((mel_f[:-2] == 0) | (weights.max(axis=1) > 0)):
# This means we have an empty channel somewhere
warnings.warn('Empty filters detected in mel frequency basis. '
'Some channels will produce empty responses. '
'Try increasing your sampling rate (and fmax) or '
'reducing n_mels.')
return weights
|
[
"Create",
"a",
"Filterbank",
"matrix",
"to",
"combine",
"FFT",
"bins",
"into",
"Mel",
"-",
"frequency",
"bins"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L112-L225
|
[
"def",
"mel",
"(",
"sr",
",",
"n_fft",
",",
"n_mels",
"=",
"128",
",",
"fmin",
"=",
"0.0",
",",
"fmax",
"=",
"None",
",",
"htk",
"=",
"False",
",",
"norm",
"=",
"1",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"if",
"fmax",
"is",
"None",
":",
"fmax",
"=",
"float",
"(",
"sr",
")",
"/",
"2",
"if",
"norm",
"is",
"not",
"None",
"and",
"norm",
"!=",
"1",
"and",
"norm",
"!=",
"np",
".",
"inf",
":",
"raise",
"ParameterError",
"(",
"'Unsupported norm: {}'",
".",
"format",
"(",
"repr",
"(",
"norm",
")",
")",
")",
"# Initialize the weights",
"n_mels",
"=",
"int",
"(",
"n_mels",
")",
"weights",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_mels",
",",
"int",
"(",
"1",
"+",
"n_fft",
"//",
"2",
")",
")",
",",
"dtype",
"=",
"dtype",
")",
"# Center freqs of each FFT bin",
"fftfreqs",
"=",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"# 'Center freqs' of mel bands - uniformly spaced between limits",
"mel_f",
"=",
"mel_frequencies",
"(",
"n_mels",
"+",
"2",
",",
"fmin",
"=",
"fmin",
",",
"fmax",
"=",
"fmax",
",",
"htk",
"=",
"htk",
")",
"fdiff",
"=",
"np",
".",
"diff",
"(",
"mel_f",
")",
"ramps",
"=",
"np",
".",
"subtract",
".",
"outer",
"(",
"mel_f",
",",
"fftfreqs",
")",
"for",
"i",
"in",
"range",
"(",
"n_mels",
")",
":",
"# lower and upper slopes for all bins",
"lower",
"=",
"-",
"ramps",
"[",
"i",
"]",
"/",
"fdiff",
"[",
"i",
"]",
"upper",
"=",
"ramps",
"[",
"i",
"+",
"2",
"]",
"/",
"fdiff",
"[",
"i",
"+",
"1",
"]",
"# .. then intersect them with each other and zero",
"weights",
"[",
"i",
"]",
"=",
"np",
".",
"maximum",
"(",
"0",
",",
"np",
".",
"minimum",
"(",
"lower",
",",
"upper",
")",
")",
"if",
"norm",
"==",
"1",
":",
"# Slaney-style mel is scaled to be approx constant energy per channel",
"enorm",
"=",
"2.0",
"/",
"(",
"mel_f",
"[",
"2",
":",
"n_mels",
"+",
"2",
"]",
"-",
"mel_f",
"[",
":",
"n_mels",
"]",
")",
"weights",
"*=",
"enorm",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"# Only check weights if f_mel[0] is positive",
"if",
"not",
"np",
".",
"all",
"(",
"(",
"mel_f",
"[",
":",
"-",
"2",
"]",
"==",
"0",
")",
"|",
"(",
"weights",
".",
"max",
"(",
"axis",
"=",
"1",
")",
">",
"0",
")",
")",
":",
"# This means we have an empty channel somewhere",
"warnings",
".",
"warn",
"(",
"'Empty filters detected in mel frequency basis. '",
"'Some channels will produce empty responses. '",
"'Try increasing your sampling rate (and fmax) or '",
"'reducing n_mels.'",
")",
"return",
"weights"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
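For the mel entry above, the usual next step is projecting a power spectrogram onto the basis, which is a single matrix product because the basis has shape (n_mels, 1 + n_fft // 2). The spectrogram below is a random placeholder rather than real audio, and the frame count of 100 is arbitrary; this projection is roughly what librosa.feature.melspectrogram performs after computing the STFT.

import numpy as np
import librosa

sr, n_fft = 22050, 2048
mel_basis = librosa.filters.mel(sr, n_fft, n_mels=128)  # shape (128, 1025)

rng = np.random.default_rng(2)
S_power = rng.random((1 + n_fft // 2, 100))  # stand-in for np.abs(librosa.stft(y))**2

mel_spec = mel_basis @ S_power  # (128, 100) mel-scaled spectrogram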
test
|
chroma
|
Create a Filterbank matrix to convert STFT to chroma
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
A440 : float > 0 [scalar]
Reference frequency for A440
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
`ctroct` and `octwidth` specify a dominance window -
a Gaussian weighting centered on `ctroct` (in octaves, A0 = 27.5 Hz)
and with a Gaussian half-width of `octwidth`.
Set `octwidth` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
util.normalize
feature.chroma_stft
Notes
-----
This function caches at level 10.
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
|
librosa/filters.py
|
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
octwidth=2, norm=2, base_c=True, dtype=np.float32):
"""Create a Filterbank matrix to convert STFT to chroma
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
A440 : float > 0 [scalar]
Reference frequency for A440
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
`ctroct` and `octwidth` specify a dominance window -
a Gaussian weighting centered on `ctroct` (in octaves, A0 = 27.5 Hz)
and with a Gaussian half-width of `octwidth`.
Set `octwidth` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
util.normalize
feature.chroma_stft
Notes
-----
This function caches at level 10.
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
wts = np.zeros((n_chroma, n_fft))
# Get the FFT bins, not counting the DC component
frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
frqbins = n_chroma * hz_to_octs(frequencies, A440)
# make up a value for the 0 Hz bin = 1.5 octaves below bin 1
# (so chroma is 50% rotated from bin 1, and bin width is broad)
frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
1.0), [1]))
D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T
n_chroma2 = np.round(float(n_chroma) / 2)
# Project into range -n_chroma/2 .. n_chroma/2
# add on fixed offset of 10*n_chroma to ensure all values passed to
# rem are positive
D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2
# Gaussian bumps - 2*D to make them narrower
wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)
# normalize each column
wts = util.normalize(wts, norm=norm, axis=0)
# Maybe apply scaling for fft bins
if octwidth is not None:
wts *= np.tile(
np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
(n_chroma, 1))
if base_c:
wts = np.roll(wts, -3, axis=0)
# remove aliasing columns, copy to ensure row-contiguity
return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)], dtype=dtype)
|
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
octwidth=2, norm=2, base_c=True, dtype=np.float32):
"""Create a Filterbank matrix to convert STFT to chroma
Parameters
----------
sr : number > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
A440 : float > 0 [scalar]
Reference frequency for A440
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
`ctroct` and `octwidth` specify a dominance window -
a Gaussian weighting centered on `ctroct` (in octaves, A0 = 27.5 Hz)
and with a Gaussian half-width of `octwidth`.
Set `octwidth` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
util.normalize
feature.chroma_stft
Notes
-----
This function caches at level 10.
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
wts = np.zeros((n_chroma, n_fft))
# Get the FFT bins, not counting the DC component
frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
frqbins = n_chroma * hz_to_octs(frequencies, A440)
# make up a value for the 0 Hz bin = 1.5 octaves below bin 1
# (so chroma is 50% rotated from bin 1, and bin width is broad)
frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
1.0), [1]))
D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T
n_chroma2 = np.round(float(n_chroma) / 2)
# Project into range -n_chroma/2 .. n_chroma/2
# add on fixed offset of 10*n_chroma to ensure all values passed to
# rem are positive
D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2
# Gaussian bumps - 2*D to make them narrower
wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)
# normalize each column
wts = util.normalize(wts, norm=norm, axis=0)
# Maybe apply scaling for fft bins
if octwidth is not None:
wts *= np.tile(
np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
(n_chroma, 1))
if base_c:
wts = np.roll(wts, -3, axis=0)
# remove aliasing columns, copy to ensure row-contiguity
return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)], dtype=dtype)
|
[
"Create",
"a",
"Filterbank",
"matrix",
"to",
"convert",
"STFT",
"to",
"chroma"
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L229-L359
|
[
"def",
"chroma",
"(",
"sr",
",",
"n_fft",
",",
"n_chroma",
"=",
"12",
",",
"A440",
"=",
"440.0",
",",
"ctroct",
"=",
"5.0",
",",
"octwidth",
"=",
"2",
",",
"norm",
"=",
"2",
",",
"base_c",
"=",
"True",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"wts",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_chroma",
",",
"n_fft",
")",
")",
"# Get the FFT bins, not counting the DC component",
"frequencies",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"sr",
",",
"n_fft",
",",
"endpoint",
"=",
"False",
")",
"[",
"1",
":",
"]",
"frqbins",
"=",
"n_chroma",
"*",
"hz_to_octs",
"(",
"frequencies",
",",
"A440",
")",
"# make up a value for the 0 Hz bin = 1.5 octaves below bin 1",
"# (so chroma is 50% rotated from bin 1, and bin width is broad)",
"frqbins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"[",
"frqbins",
"[",
"0",
"]",
"-",
"1.5",
"*",
"n_chroma",
"]",
",",
"frqbins",
")",
")",
"binwidthbins",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"maximum",
"(",
"frqbins",
"[",
"1",
":",
"]",
"-",
"frqbins",
"[",
":",
"-",
"1",
"]",
",",
"1.0",
")",
",",
"[",
"1",
"]",
")",
")",
"D",
"=",
"np",
".",
"subtract",
".",
"outer",
"(",
"frqbins",
",",
"np",
".",
"arange",
"(",
"0",
",",
"n_chroma",
",",
"dtype",
"=",
"'d'",
")",
")",
".",
"T",
"n_chroma2",
"=",
"np",
".",
"round",
"(",
"float",
"(",
"n_chroma",
")",
"/",
"2",
")",
"# Project into range -n_chroma/2 .. n_chroma/2",
"# add on fixed offset of 10*n_chroma to ensure all values passed to",
"# rem are positive",
"D",
"=",
"np",
".",
"remainder",
"(",
"D",
"+",
"n_chroma2",
"+",
"10",
"*",
"n_chroma",
",",
"n_chroma",
")",
"-",
"n_chroma2",
"# Gaussian bumps - 2*D to make them narrower",
"wts",
"=",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"2",
"*",
"D",
"/",
"np",
".",
"tile",
"(",
"binwidthbins",
",",
"(",
"n_chroma",
",",
"1",
")",
")",
")",
"**",
"2",
")",
"# normalize each column",
"wts",
"=",
"util",
".",
"normalize",
"(",
"wts",
",",
"norm",
"=",
"norm",
",",
"axis",
"=",
"0",
")",
"# Maybe apply scaling for fft bins",
"if",
"octwidth",
"is",
"not",
"None",
":",
"wts",
"*=",
"np",
".",
"tile",
"(",
"np",
".",
"exp",
"(",
"-",
"0.5",
"*",
"(",
"(",
"(",
"frqbins",
"/",
"n_chroma",
"-",
"ctroct",
")",
"/",
"octwidth",
")",
"**",
"2",
")",
")",
",",
"(",
"n_chroma",
",",
"1",
")",
")",
"if",
"base_c",
":",
"wts",
"=",
"np",
".",
"roll",
"(",
"wts",
",",
"-",
"3",
",",
"axis",
"=",
"0",
")",
"# remove aliasing columns, copy to ensure row-contiguity",
"return",
"np",
".",
"ascontiguousarray",
"(",
"wts",
"[",
":",
",",
":",
"int",
"(",
"1",
"+",
"n_fft",
"/",
"2",
")",
"]",
",",
"dtype",
"=",
"dtype",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
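In the same spirit, the chroma basis above maps an STFT magnitude onto twelve pitch classes with one matrix product. The magnitude array here is again a made-up placeholder; in practice librosa.feature.chroma_stft adds tuning estimation and normalization on top of this raw projection.

import numpy as np
import librosa

sr, n_fft = 22050, 4096
chroma_fb = librosa.filters.chroma(sr, n_fft)  # shape (12, 2049)

rng = np.random.default_rng(3)
S_mag = rng.random((1 + n_fft // 2, 80))  # stand-in for np.abs(librosa.stft(y, n_fft=4096))

chromagram = chroma_fb @ S_mag  # (12, 80), one row per pitch class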
test
|
__float_window
|
Decorator function for windows with fractional input.
This function guarantees that for fractional `x`, the following hold:
1. `__float_window(window_function)(x)` has length `np.ceil(x)`
2. all values from `np.floor(x)` are set to 0.
For integer-valued `x`, there should be no change in behavior.
|
librosa/filters.py
|
def __float_window(window_spec):
'''Decorator function for windows with fractional input.
This function guarantees that for fractional `x`, the following hold:
1. `__float_window(window_function)(x)` has length `np.ceil(x)`
2. all values from `np.floor(x)` are set to 0.
For integer-valued `x`, there should be no change in behavior.
'''
def _wrap(n, *args, **kwargs):
'''The wrapped window'''
n_min, n_max = int(np.floor(n)), int(np.ceil(n))
window = get_window(window_spec, n_min)
if len(window) < n_max:
window = np.pad(window, [(0, n_max - len(window))],
mode='constant')
window[n_min:] = 0.0
return window
return _wrap
|
def __float_window(window_spec):
'''Decorator function for windows with fractional input.
This function guarantees that for fractional `x`, the following hold:
1. `__float_window(window_function)(x)` has length `np.ceil(x)`
2. all values from `np.floor(x)` are set to 0.
For integer-valued `x`, there should be no change in behavior.
'''
def _wrap(n, *args, **kwargs):
'''The wrapped window'''
n_min, n_max = int(np.floor(n)), int(np.ceil(n))
window = get_window(window_spec, n_min)
if len(window) < n_max:
window = np.pad(window, [(0, n_max - len(window))],
mode='constant')
window[n_min:] = 0.0
return window
return _wrap
|
[
"Decorator",
"function",
"for",
"windows",
"with",
"fractional",
"input",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L362-L387
|
[
"def",
"__float_window",
"(",
"window_spec",
")",
":",
"def",
"_wrap",
"(",
"n",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"'''The wrapped window'''",
"n_min",
",",
"n_max",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"n",
")",
")",
",",
"int",
"(",
"np",
".",
"ceil",
"(",
"n",
")",
")",
"window",
"=",
"get_window",
"(",
"window_spec",
",",
"n_min",
")",
"if",
"len",
"(",
"window",
")",
"<",
"n_max",
":",
"window",
"=",
"np",
".",
"pad",
"(",
"window",
",",
"[",
"(",
"0",
",",
"n_max",
"-",
"len",
"(",
"window",
")",
")",
"]",
",",
"mode",
"=",
"'constant'",
")",
"window",
"[",
"n_min",
":",
"]",
"=",
"0.0",
"return",
"window",
"return",
"_wrap"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
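The contract promised by __float_window above (length ceil(n), zeros past floor(n)) can be reproduced directly with scipy, without touching the private helper; the function name `float_hann` below is made up for the demonstration.

import numpy as np
from scipy.signal import get_window

def float_hann(n):
    """Hann window of (possibly fractional) length n, zero-padded up to ceil(n)."""
    n_min, n_max = int(np.floor(n)), int(np.ceil(n))
    w = get_window('hann', n_min)
    if len(w) < n_max:
        w = np.pad(w, (0, n_max - len(w)), mode='constant')
    w[n_min:] = 0.0
    return w

assert len(float_hann(10.3)) == 11           # length is ceil(10.3)
assert np.all(float_hann(10.3)[10:] == 0.0)  # everything past floor(10.3) is zero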
test
|
constant_q
|
r'''Construct a constant-Q basis.
This uses the filter bank described by [1]_.
.. [1] McVicar, Matthew.
"A machine learning approach to automatic chord extraction."
Dissertation, University of Bristol. 2013.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : string, tuple, number, or function
Windowing function to apply to filters.
filter_scale : float > 0 [scalar]
Scale of filter windows.
Small values (<1) use shorter windows for higher temporal resolution.
pad_fft : boolean
Center-pad all filters up to the nearest integral power of 2.
By default, padding is done with zeros, but this can be overridden
by setting the `mode=` field in *kwargs*.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See librosa.util.normalize
dtype : np.dtype
The data type of the output basis.
By default, uses 64-bit (single precision) complex floating point.
kwargs : additional keyword arguments
Arguments to `np.pad()` when `pad_fft=True`.
Returns
-------
filters : np.ndarray, `len(filters) == n_bins`
`filters[i]` is `i`\ th time-domain CQT basis filter
lengths : np.ndarray, `len(lengths) == n_bins`
The (fractional) length of each filter
Notes
-----
This function caches at level 10.
See Also
--------
constant_q_lengths
librosa.core.cqt
librosa.util.normalize
Examples
--------
Use a shorter window for each filter
>>> basis, lengths = librosa.filters.constant_q(22050, filter_scale=0.5)
Plot one octave of filters in time and frequency
>>> import matplotlib.pyplot as plt
>>> basis, lengths = librosa.filters.constant_q(22050)
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2, 1, 1)
>>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
>>> for i, (f, n) in enumerate(zip(basis, notes[:12])):
... f_scale = librosa.util.normalize(f) / 2
... plt.plot(i + f_scale.real)
... plt.plot(i + f_scale.imag, linestyle=':')
>>> plt.axis('tight')
>>> plt.yticks(np.arange(len(notes[:12])), notes[:12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filters (one octave, time domain)')
>>> plt.xlabel('Time (samples at 22050 Hz)')
>>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)
>>> plt.subplot(2, 1, 2)
>>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
>>> # Keep only the positive frequencies
>>> F = F[:, :(1 + F.shape[1] // 2)]
>>> librosa.display.specshow(F, x_axis='linear')
>>> plt.yticks(np.arange(len(notes))[::12], notes[::12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filter magnitudes (frequency domain)')
>>> plt.tight_layout()
|
librosa/filters.py
|
def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0,
window='hann', filter_scale=1, pad_fft=True, norm=1,
dtype=np.complex64, **kwargs):
r'''Construct a constant-Q basis.
This uses the filter bank described by [1]_.
.. [1] McVicar, Matthew.
"A machine learning approach to automatic chord extraction."
Dissertation, University of Bristol. 2013.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : string, tuple, number, or function
Windowing function to apply to filters.
filter_scale : float > 0 [scalar]
Scale of filter windows.
Small values (<1) use shorter windows for higher temporal resolution.
pad_fft : boolean
Center-pad all filters up to the nearest integral power of 2.
By default, padding is done with zeros, but this can be overridden
by setting the `mode=` field in *kwargs*.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See librosa.util.normalize
dtype : np.dtype
The data type of the output basis.
By default, uses 64-bit (single precision) complex floating point.
kwargs : additional keyword arguments
Arguments to `np.pad()` when `pad_fft=True`.
Returns
-------
filters : np.ndarray, `len(filters) == n_bins`
`filters[i]` is `i`\ th time-domain CQT basis filter
lengths : np.ndarray, `len(lengths) == n_bins`
The (fractional) length of each filter
Notes
-----
This function caches at level 10.
See Also
--------
constant_q_lengths
librosa.core.cqt
librosa.util.normalize
Examples
--------
Use a shorter window for each filter
>>> basis, lengths = librosa.filters.constant_q(22050, filter_scale=0.5)
Plot one octave of filters in time and frequency
>>> import matplotlib.pyplot as plt
>>> basis, lengths = librosa.filters.constant_q(22050)
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2, 1, 1)
>>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
>>> for i, (f, n) in enumerate(zip(basis, notes[:12])):
... f_scale = librosa.util.normalize(f) / 2
... plt.plot(i + f_scale.real)
... plt.plot(i + f_scale.imag, linestyle=':')
>>> plt.axis('tight')
>>> plt.yticks(np.arange(len(notes[:12])), notes[:12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filters (one octave, time domain)')
>>> plt.xlabel('Time (samples at 22050 Hz)')
>>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)
>>> plt.subplot(2, 1, 2)
>>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
>>> # Keep only the positive frequencies
>>> F = F[:, :(1 + F.shape[1] // 2)]
>>> librosa.display.specshow(F, x_axis='linear')
>>> plt.yticks(np.arange(len(notes))[::12], notes[::12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filter magnitudes (frequency domain)')
>>> plt.tight_layout()
'''
if fmin is None:
fmin = note_to_hz('C1')
# Pass-through parameters to get the filter lengths
lengths = constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
filter_scale=filter_scale)
# Apply tuning correction
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
# Convert lengths back to frequencies
freqs = Q * sr / lengths
# Build the filters
filters = []
for ilen, freq in zip(lengths, freqs):
# Build the filter: note, length will be ceil(ilen)
sig = np.exp(np.arange(-ilen//2, ilen//2, dtype=float) * 1j * 2 * np.pi * freq / sr)
# Apply the windowing function
sig = sig * __float_window(window)(len(sig))
# Normalize
sig = util.normalize(sig, norm=norm)
filters.append(sig)
# Pad and stack
max_len = max(lengths)
if pad_fft:
max_len = int(2.0**(np.ceil(np.log2(max_len))))
else:
max_len = int(np.ceil(max_len))
filters = np.asarray([util.pad_center(filt, max_len, **kwargs)
for filt in filters], dtype=dtype)
return filters, np.asarray(lengths)
|
def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0,
window='hann', filter_scale=1, pad_fft=True, norm=1,
dtype=np.complex64, **kwargs):
r'''Construct a constant-Q basis.
This uses the filter bank described by [1]_.
.. [1] McVicar, Matthew.
"A machine learning approach to automatic chord extraction."
Dissertation, University of Bristol. 2013.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : string, tuple, number, or function
Windowing function to apply to filters.
filter_scale : float > 0 [scalar]
Scale of filter windows.
Small values (<1) use shorter windows for higher temporal resolution.
pad_fft : boolean
Center-pad all filters up to the nearest integral power of 2.
By default, padding is done with zeros, but this can be overridden
by setting the `mode=` field in *kwargs*.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See librosa.util.normalize
dtype : np.dtype
The data type of the output basis.
By default, uses 64-bit (single precision) complex floating point.
kwargs : additional keyword arguments
Arguments to `np.pad()` when `pad_fft=True`.
Returns
-------
filters : np.ndarray, `len(filters) == n_bins`
`filters[i]` is `i`\ th time-domain CQT basis filter
lengths : np.ndarray, `len(lengths) == n_bins`
The (fractional) length of each filter
Notes
-----
This function caches at level 10.
See Also
--------
constant_q_lengths
librosa.core.cqt
librosa.util.normalize
Examples
--------
Use a shorter window for each filter
>>> basis, lengths = librosa.filters.constant_q(22050, filter_scale=0.5)
Plot one octave of filters in time and frequency
>>> import matplotlib.pyplot as plt
>>> basis, lengths = librosa.filters.constant_q(22050)
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2, 1, 1)
>>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
>>> for i, (f, n) in enumerate(zip(basis, notes[:12])):
... f_scale = librosa.util.normalize(f) / 2
... plt.plot(i + f_scale.real)
... plt.plot(i + f_scale.imag, linestyle=':')
>>> plt.axis('tight')
>>> plt.yticks(np.arange(len(notes[:12])), notes[:12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filters (one octave, time domain)')
>>> plt.xlabel('Time (samples at 22050 Hz)')
>>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)
>>> plt.subplot(2, 1, 2)
>>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
>>> # Keep only the positive frequencies
>>> F = F[:, :(1 + F.shape[1] // 2)]
>>> librosa.display.specshow(F, x_axis='linear')
>>> plt.yticks(np.arange(len(notes))[::12], notes[::12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filter magnitudes (frequency domain)')
>>> plt.tight_layout()
'''
if fmin is None:
fmin = note_to_hz('C1')
# Pass-through parameters to get the filter lengths
lengths = constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
filter_scale=filter_scale)
# Apply tuning correction
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
# Convert lengths back to frequencies
freqs = Q * sr / lengths
# Build the filters
filters = []
for ilen, freq in zip(lengths, freqs):
# Build the filter: note, length will be ceil(ilen)
sig = np.exp(np.arange(-ilen//2, ilen//2, dtype=float) * 1j * 2 * np.pi * freq / sr)
# Apply the windowing function
sig = sig * __float_window(window)(len(sig))
# Normalize
sig = util.normalize(sig, norm=norm)
filters.append(sig)
# Pad and stack
max_len = max(lengths)
if pad_fft:
max_len = int(2.0**(np.ceil(np.log2(max_len))))
else:
max_len = int(np.ceil(max_len))
filters = np.asarray([util.pad_center(filt, max_len, **kwargs)
for filt in filters], dtype=dtype)
return filters, np.asarray(lengths)
|
[
"r",
"Construct",
"a",
"constant",
"-",
"Q",
"basis",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L391-L543
|
[
"def",
"constant_q",
"(",
"sr",
",",
"fmin",
"=",
"None",
",",
"n_bins",
"=",
"84",
",",
"bins_per_octave",
"=",
"12",
",",
"tuning",
"=",
"0.0",
",",
"window",
"=",
"'hann'",
",",
"filter_scale",
"=",
"1",
",",
"pad_fft",
"=",
"True",
",",
"norm",
"=",
"1",
",",
"dtype",
"=",
"np",
".",
"complex64",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"fmin",
"is",
"None",
":",
"fmin",
"=",
"note_to_hz",
"(",
"'C1'",
")",
"# Pass-through parameters to get the filter lengths",
"lengths",
"=",
"constant_q_lengths",
"(",
"sr",
",",
"fmin",
",",
"n_bins",
"=",
"n_bins",
",",
"bins_per_octave",
"=",
"bins_per_octave",
",",
"tuning",
"=",
"tuning",
",",
"window",
"=",
"window",
",",
"filter_scale",
"=",
"filter_scale",
")",
"# Apply tuning correction",
"correction",
"=",
"2.0",
"**",
"(",
"float",
"(",
"tuning",
")",
"/",
"bins_per_octave",
")",
"fmin",
"=",
"correction",
"*",
"fmin",
"# Q should be capitalized here, so we suppress the name warning",
"# pylint: disable=invalid-name",
"Q",
"=",
"float",
"(",
"filter_scale",
")",
"/",
"(",
"2.0",
"**",
"(",
"1.",
"/",
"bins_per_octave",
")",
"-",
"1",
")",
"# Convert lengths back to frequencies",
"freqs",
"=",
"Q",
"*",
"sr",
"/",
"lengths",
"# Build the filters",
"filters",
"=",
"[",
"]",
"for",
"ilen",
",",
"freq",
"in",
"zip",
"(",
"lengths",
",",
"freqs",
")",
":",
"# Build the filter: note, length will be ceil(ilen)",
"sig",
"=",
"np",
".",
"exp",
"(",
"np",
".",
"arange",
"(",
"-",
"ilen",
"//",
"2",
",",
"ilen",
"//",
"2",
",",
"dtype",
"=",
"float",
")",
"*",
"1j",
"*",
"2",
"*",
"np",
".",
"pi",
"*",
"freq",
"/",
"sr",
")",
"# Apply the windowing function",
"sig",
"=",
"sig",
"*",
"__float_window",
"(",
"window",
")",
"(",
"len",
"(",
"sig",
")",
")",
"# Normalize",
"sig",
"=",
"util",
".",
"normalize",
"(",
"sig",
",",
"norm",
"=",
"norm",
")",
"filters",
".",
"append",
"(",
"sig",
")",
"# Pad and stack",
"max_len",
"=",
"max",
"(",
"lengths",
")",
"if",
"pad_fft",
":",
"max_len",
"=",
"int",
"(",
"2.0",
"**",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"log2",
"(",
"max_len",
")",
")",
")",
")",
"else",
":",
"max_len",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"max_len",
")",
")",
"filters",
"=",
"np",
".",
"asarray",
"(",
"[",
"util",
".",
"pad_center",
"(",
"filt",
",",
"max_len",
",",
"*",
"*",
"kwargs",
")",
"for",
"filt",
"in",
"filters",
"]",
",",
"dtype",
"=",
"dtype",
")",
"return",
"filters",
",",
"np",
".",
"asarray",
"(",
"lengths",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
constant_q_lengths
|
r'''Return length of each filter in a constant-Q basis.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin.
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : str or callable
Window function to use on filters
filter_scale : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
Returns
-------
lengths : np.ndarray
The length of each filter.
Notes
-----
This function caches at level 10.
See Also
--------
constant_q
librosa.core.cqt
|
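To make the length formula concrete, a short worked sketch (the numbers in the comments follow from `Q = filter_scale / (2**(1/bins_per_octave) - 1)` and `length = Q * sr / freq` with the defaults above; they are computed here for illustration, not quoted from the record; `librosa.note_to_hz` is assumed to be the standard top-level helper):
>>> import librosa
>>> lengths = librosa.filters.constant_q_lengths(22050, librosa.note_to_hz('C1'))
>>> # With 12 bins per octave and filter_scale=1, Q is roughly 16.8, so the lowest
>>> # filter (C1, ~32.7 Hz) spans about 16.8 * 22050 / 32.7 ~ 11300 samples, and the
>>> # filter length halves with every octave increase in center frequency.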
librosa/filters.py
|
def constant_q_lengths(sr, fmin, n_bins=84, bins_per_octave=12,
tuning=0.0, window='hann', filter_scale=1):
r'''Return length of each filter in a constant-Q basis.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin.
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : str or callable
Window function to use on filters
filter_scale : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
Returns
-------
lengths : np.ndarray
The length of each filter.
Notes
-----
This function caches at level 10.
See Also
--------
constant_q
librosa.core.cqt
'''
if fmin <= 0:
raise ParameterError('fmin must be positive')
if bins_per_octave <= 0:
raise ParameterError('bins_per_octave must be positive')
if filter_scale <= 0:
raise ParameterError('filter_scale must be positive')
if n_bins <= 0 or not isinstance(n_bins, int):
raise ParameterError('n_bins must be a positive integer')
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
# Compute the frequencies
freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
if freq[-1] * (1 + 0.5 * window_bandwidth(window) / Q) > sr / 2.0:
raise ParameterError('Filter pass-band lies beyond Nyquist')
# Convert frequencies to filter lengths
lengths = Q * sr / freq
return lengths
|
def constant_q_lengths(sr, fmin, n_bins=84, bins_per_octave=12,
tuning=0.0, window='hann', filter_scale=1):
r'''Return length of each filter in a constant-Q basis.
Parameters
----------
sr : number > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin.
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : str or callable
Window function to use on filters
filter_scale : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
Returns
-------
lengths : np.ndarray
The length of each filter.
Notes
-----
This function caches at level 10.
See Also
--------
constant_q
librosa.core.cqt
'''
if fmin <= 0:
raise ParameterError('fmin must be positive')
if bins_per_octave <= 0:
raise ParameterError('bins_per_octave must be positive')
if filter_scale <= 0:
raise ParameterError('filter_scale must be positive')
if n_bins <= 0 or not isinstance(n_bins, int):
raise ParameterError('n_bins must be a positive integer')
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(filter_scale) / (2.0**(1. / bins_per_octave) - 1)
# Compute the frequencies
freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
if freq[-1] * (1 + 0.5 * window_bandwidth(window) / Q) > sr / 2.0:
raise ParameterError('Filter pass-band lies beyond Nyquist')
# Convert frequencies to filter lengths
lengths = Q * sr / freq
return lengths
|
[
"r",
"Return",
"length",
"of",
"each",
"filter",
"in",
"a",
"constant",
"-",
"Q",
"basis",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L547-L618
|
[
"def",
"constant_q_lengths",
"(",
"sr",
",",
"fmin",
",",
"n_bins",
"=",
"84",
",",
"bins_per_octave",
"=",
"12",
",",
"tuning",
"=",
"0.0",
",",
"window",
"=",
"'hann'",
",",
"filter_scale",
"=",
"1",
")",
":",
"if",
"fmin",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'fmin must be positive'",
")",
"if",
"bins_per_octave",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'bins_per_octave must be positive'",
")",
"if",
"filter_scale",
"<=",
"0",
":",
"raise",
"ParameterError",
"(",
"'filter_scale must be positive'",
")",
"if",
"n_bins",
"<=",
"0",
"or",
"not",
"isinstance",
"(",
"n_bins",
",",
"int",
")",
":",
"raise",
"ParameterError",
"(",
"'n_bins must be a positive integer'",
")",
"correction",
"=",
"2.0",
"**",
"(",
"float",
"(",
"tuning",
")",
"/",
"bins_per_octave",
")",
"fmin",
"=",
"correction",
"*",
"fmin",
"# Q should be capitalized here, so we suppress the name warning",
"# pylint: disable=invalid-name",
"Q",
"=",
"float",
"(",
"filter_scale",
")",
"/",
"(",
"2.0",
"**",
"(",
"1.",
"/",
"bins_per_octave",
")",
"-",
"1",
")",
"# Compute the frequencies",
"freq",
"=",
"fmin",
"*",
"(",
"2.0",
"**",
"(",
"np",
".",
"arange",
"(",
"n_bins",
",",
"dtype",
"=",
"float",
")",
"/",
"bins_per_octave",
")",
")",
"if",
"freq",
"[",
"-",
"1",
"]",
"*",
"(",
"1",
"+",
"0.5",
"*",
"window_bandwidth",
"(",
"window",
")",
"/",
"Q",
")",
">",
"sr",
"/",
"2.0",
":",
"raise",
"ParameterError",
"(",
"'Filter pass-band lies beyond Nyquist'",
")",
"# Convert frequencies to filter lengths",
"lengths",
"=",
"Q",
"*",
"sr",
"/",
"freq",
"return",
"lengths"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
cq_to_chroma
|
Convert a Constant-Q basis to Chroma.
Parameters
----------
n_input : int > 0 [scalar]
Number of input components (CQT bins)
bins_per_octave : int > 0 [scalar]
How many bins per octave in the CQT
n_chroma : int > 0 [scalar]
Number of output bins (per octave) in the chroma
fmin : None or float > 0
Center frequency of the first constant-Q channel.
Default: 'C1' ~= 32.7 Hz
window : None or np.ndarray
If provided, the cq_to_chroma filter bank will be
convolved with `window`.
base_c : bool
If True, the first chroma bin will start at 'C'
If False, the first chroma bin will start at 'A'
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`
Raises
------
ParameterError
If `n_input` is not an integer multiple of `n_chroma`
Notes
-----
This function caches at level 10.
Examples
--------
Get a CQT, and wrap bins to chroma
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> CQT = np.abs(librosa.cqt(y, sr=sr))
>>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
>>> chromagram = chroma_map.dot(CQT)
>>> # Max-normalize each time step
>>> chromagram = librosa.util.normalize(chromagram, axis=0)
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(CQT,
... ref=np.max),
... y_axis='cqt_note')
>>> plt.title('CQT Power')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(chromagram, y_axis='chroma')
>>> plt.title('Chroma (wrapped CQT)')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.title('librosa.feature.chroma_stft')
>>> plt.colorbar()
>>> plt.tight_layout()
|
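A minimal sketch of folding an 84-bin CQT down to 12 chroma bins with the matrix this function returns (the random matrix is a stand-in for a real CQT and is purely illustrative):
>>> import numpy as np
>>> import librosa
>>> chroma_map = librosa.filters.cq_to_chroma(84, bins_per_octave=12, n_chroma=12)
>>> chroma_map.shape
(12, 84)
>>> C = np.abs(np.random.randn(84, 100))   # stand-in for a (n_bins, n_frames) CQT magnitude
>>> chromagram = chroma_map.dot(C)         # shape (12, 100): one row per pitch class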
librosa/filters.py
|
def cq_to_chroma(n_input, bins_per_octave=12, n_chroma=12,
fmin=None, window=None, base_c=True, dtype=np.float32):
'''Convert a Constant-Q basis to Chroma.
Parameters
----------
n_input : int > 0 [scalar]
Number of input components (CQT bins)
bins_per_octave : int > 0 [scalar]
How many bins per octave in the CQT
n_chroma : int > 0 [scalar]
Number of output bins (per octave) in the chroma
fmin : None or float > 0
Center frequency of the first constant-Q channel.
Default: 'C1' ~= 32.7 Hz
window : None or np.ndarray
If provided, the cq_to_chroma filter bank will be
convolved with `window`.
base_c : bool
If True, the first chroma bin will start at 'C'
If False, the first chroma bin will start at 'A'
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`
Raises
------
ParameterError
If `n_input` is not an integer multiple of `n_chroma`
Notes
-----
This function caches at level 10.
Examples
--------
Get a CQT, and wrap bins to chroma
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> CQT = np.abs(librosa.cqt(y, sr=sr))
>>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
>>> chromagram = chroma_map.dot(CQT)
>>> # Max-normalize each time step
>>> chromagram = librosa.util.normalize(chromagram, axis=0)
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(CQT,
... ref=np.max),
... y_axis='cqt_note')
>>> plt.title('CQT Power')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(chromagram, y_axis='chroma')
>>> plt.title('Chroma (wrapped CQT)')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.title('librosa.feature.chroma_stft')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
# How many fractional bins are we merging?
n_merge = float(bins_per_octave) / n_chroma
if fmin is None:
fmin = note_to_hz('C1')
if np.mod(n_merge, 1) != 0:
raise ParameterError('Incompatible CQ merge: '
'input bins must be an '
'integer multiple of output bins.')
# Tile the identity to merge fractional bins
cq_to_ch = np.repeat(np.eye(n_chroma), n_merge, axis=1)
# Roll it left to center on the target bin
cq_to_ch = np.roll(cq_to_ch, - int(n_merge // 2), axis=1)
# How many octaves are we repeating?
n_octaves = np.ceil(np.float(n_input) / bins_per_octave)
# Repeat and trim
cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]
# What's the note number of the first bin in the CQT?
# midi uses 12 bins per octave here
midi_0 = np.mod(hz_to_midi(fmin), 12)
if base_c:
# rotate to C
roll = midi_0
else:
# rotate to A
roll = midi_0 - 9
# Adjust the roll in terms of how many chroma we want out
# We need to be careful with rounding here
roll = int(np.round(roll * (n_chroma / 12.)))
# Apply the roll
cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(dtype)
if window is not None:
cq_to_ch = scipy.signal.convolve(cq_to_ch,
np.atleast_2d(window),
mode='same')
return cq_to_ch
|
def cq_to_chroma(n_input, bins_per_octave=12, n_chroma=12,
fmin=None, window=None, base_c=True, dtype=np.float32):
'''Convert a Constant-Q basis to Chroma.
Parameters
----------
n_input : int > 0 [scalar]
Number of input components (CQT bins)
bins_per_octave : int > 0 [scalar]
How many bins per octave in the CQT
n_chroma : int > 0 [scalar]
Number of output bins (per octave) in the chroma
fmin : None or float > 0
Center frequency of the first constant-Q channel.
Default: 'C1' ~= 32.7 Hz
window : None or np.ndarray
If provided, the cq_to_chroma filter bank will be
convolved with `window`.
base_c : bool
If True, the first chroma bin will start at 'C'
If False, the first chroma bin will start at 'A'
dtype : np.dtype
The data type of the output basis.
By default, uses 32-bit (single-precision) floating point.
Returns
-------
cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`
Raises
------
ParameterError
If `n_input` is not an integer multiple of `n_chroma`
Notes
-----
This function caches at level 10.
Examples
--------
Get a CQT, and wrap bins to chroma
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> CQT = np.abs(librosa.cqt(y, sr=sr))
>>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
>>> chromagram = chroma_map.dot(CQT)
>>> # Max-normalize each time step
>>> chromagram = librosa.util.normalize(chromagram, axis=0)
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(CQT,
... ref=np.max),
... y_axis='cqt_note')
>>> plt.title('CQT Power')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(chromagram, y_axis='chroma')
>>> plt.title('Chroma (wrapped CQT)')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.title('librosa.feature.chroma_stft')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
# How many fractional bins are we merging?
n_merge = float(bins_per_octave) / n_chroma
if fmin is None:
fmin = note_to_hz('C1')
if np.mod(n_merge, 1) != 0:
raise ParameterError('Incompatible CQ merge: '
'input bins must be an '
'integer multiple of output bins.')
# Tile the identity to merge fractional bins
cq_to_ch = np.repeat(np.eye(n_chroma), n_merge, axis=1)
# Roll it left to center on the target bin
cq_to_ch = np.roll(cq_to_ch, - int(n_merge // 2), axis=1)
# How many octaves are we repeating?
n_octaves = np.ceil(np.float(n_input) / bins_per_octave)
# Repeat and trim
cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]
# What's the note number of the first bin in the CQT?
# midi uses 12 bins per octave here
midi_0 = np.mod(hz_to_midi(fmin), 12)
if base_c:
# rotate to C
roll = midi_0
else:
# rotate to A
roll = midi_0 - 9
# Adjust the roll in terms of how many chroma we want out
# We need to be careful with rounding here
roll = int(np.round(roll * (n_chroma / 12.)))
# Apply the roll
cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(dtype)
if window is not None:
cq_to_ch = scipy.signal.convolve(cq_to_ch,
np.atleast_2d(window),
mode='same')
return cq_to_ch
|
[
"Convert",
"a",
"Constant",
"-",
"Q",
"basis",
"to",
"Chroma",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L622-L746
|
[
"def",
"cq_to_chroma",
"(",
"n_input",
",",
"bins_per_octave",
"=",
"12",
",",
"n_chroma",
"=",
"12",
",",
"fmin",
"=",
"None",
",",
"window",
"=",
"None",
",",
"base_c",
"=",
"True",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
":",
"# How many fractional bins are we merging?",
"n_merge",
"=",
"float",
"(",
"bins_per_octave",
")",
"/",
"n_chroma",
"if",
"fmin",
"is",
"None",
":",
"fmin",
"=",
"note_to_hz",
"(",
"'C1'",
")",
"if",
"np",
".",
"mod",
"(",
"n_merge",
",",
"1",
")",
"!=",
"0",
":",
"raise",
"ParameterError",
"(",
"'Incompatible CQ merge: '",
"'input bins must be an '",
"'integer multiple of output bins.'",
")",
"# Tile the identity to merge fractional bins",
"cq_to_ch",
"=",
"np",
".",
"repeat",
"(",
"np",
".",
"eye",
"(",
"n_chroma",
")",
",",
"n_merge",
",",
"axis",
"=",
"1",
")",
"# Roll it left to center on the target bin",
"cq_to_ch",
"=",
"np",
".",
"roll",
"(",
"cq_to_ch",
",",
"-",
"int",
"(",
"n_merge",
"//",
"2",
")",
",",
"axis",
"=",
"1",
")",
"# How many octaves are we repeating?",
"n_octaves",
"=",
"np",
".",
"ceil",
"(",
"np",
".",
"float",
"(",
"n_input",
")",
"/",
"bins_per_octave",
")",
"# Repeat and trim",
"cq_to_ch",
"=",
"np",
".",
"tile",
"(",
"cq_to_ch",
",",
"int",
"(",
"n_octaves",
")",
")",
"[",
":",
",",
":",
"n_input",
"]",
"# What's the note number of the first bin in the CQT?",
"# midi uses 12 bins per octave here",
"midi_0",
"=",
"np",
".",
"mod",
"(",
"hz_to_midi",
"(",
"fmin",
")",
",",
"12",
")",
"if",
"base_c",
":",
"# rotate to C",
"roll",
"=",
"midi_0",
"else",
":",
"# rotate to A",
"roll",
"=",
"midi_0",
"-",
"9",
"# Adjust the roll in terms of how many chroma we want out",
"# We need to be careful with rounding here",
"roll",
"=",
"int",
"(",
"np",
".",
"round",
"(",
"roll",
"*",
"(",
"n_chroma",
"/",
"12.",
")",
")",
")",
"# Apply the roll",
"cq_to_ch",
"=",
"np",
".",
"roll",
"(",
"cq_to_ch",
",",
"roll",
",",
"axis",
"=",
"0",
")",
".",
"astype",
"(",
"dtype",
")",
"if",
"window",
"is",
"not",
"None",
":",
"cq_to_ch",
"=",
"scipy",
".",
"signal",
".",
"convolve",
"(",
"cq_to_ch",
",",
"np",
".",
"atleast_2d",
"(",
"window",
")",
",",
"mode",
"=",
"'same'",
")",
"return",
"cq_to_ch"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
window_bandwidth
|
Get the equivalent noise bandwidth of a window function.
Parameters
----------
window : callable or string
A window function, or the name of a window function.
Examples:
- scipy.signal.hann
- 'boxcar'
n : int > 0
The number of coefficients to use in estimating the
window bandwidth
Returns
-------
bandwidth : float
The equivalent noise bandwidth (in FFT bins) of the
given window function
Notes
-----
This function caches at level 10.
See Also
--------
get_window
|
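The cached value can be reproduced directly from the definition used in the code below; a hedged check for the Hann window (plain NumPy/SciPy, no librosa internals assumed):
>>> import numpy as np
>>> import scipy.signal
>>> win = scipy.signal.get_window('hann', 1000)
>>> enbw = len(win) * np.sum(win**2) / np.sum(np.abs(win))**2
>>> # enbw is ~1.5 FFT bins for a Hann window, which is what
>>> # librosa.filters.window_bandwidth('hann') should report as well.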
librosa/filters.py
|
def window_bandwidth(window, n=1000):
'''Get the equivalent noise bandwidth of a window function.
Parameters
----------
window : callable or string
A window function, or the name of a window function.
Examples:
- scipy.signal.hann
- 'boxcar'
n : int > 0
The number of coefficients to use in estimating the
window bandwidth
Returns
-------
bandwidth : float
The equivalent noise bandwidth (in FFT bins) of the
given window function
Notes
-----
This function caches at level 10.
See Also
--------
get_window
'''
if hasattr(window, '__name__'):
key = window.__name__
else:
key = window
if key not in WINDOW_BANDWIDTHS:
win = get_window(window, n)
WINDOW_BANDWIDTHS[key] = n * np.sum(win**2) / np.sum(np.abs(win))**2
return WINDOW_BANDWIDTHS[key]
|
def window_bandwidth(window, n=1000):
'''Get the equivalent noise bandwidth of a window function.
Parameters
----------
window : callable or string
A window function, or the name of a window function.
Examples:
- scipy.signal.hann
- 'boxcar'
n : int > 0
The number of coefficients to use in estimating the
window bandwidth
Returns
-------
bandwidth : float
The equivalent noise bandwidth (in FFT bins) of the
given window function
Notes
-----
This function caches at level 10.
See Also
--------
get_window
'''
if hasattr(window, '__name__'):
key = window.__name__
else:
key = window
if key not in WINDOW_BANDWIDTHS:
win = get_window(window, n)
WINDOW_BANDWIDTHS[key] = n * np.sum(win**2) / np.sum(np.abs(win))**2
return WINDOW_BANDWIDTHS[key]
|
[
"Get",
"the",
"equivalent",
"noise",
"bandwidth",
"of",
"a",
"window",
"function",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L750-L790
|
[
"def",
"window_bandwidth",
"(",
"window",
",",
"n",
"=",
"1000",
")",
":",
"if",
"hasattr",
"(",
"window",
",",
"'__name__'",
")",
":",
"key",
"=",
"window",
".",
"__name__",
"else",
":",
"key",
"=",
"window",
"if",
"key",
"not",
"in",
"WINDOW_BANDWIDTHS",
":",
"win",
"=",
"get_window",
"(",
"window",
",",
"n",
")",
"WINDOW_BANDWIDTHS",
"[",
"key",
"]",
"=",
"n",
"*",
"np",
".",
"sum",
"(",
"win",
"**",
"2",
")",
"/",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"win",
")",
")",
"**",
"2",
"return",
"WINDOW_BANDWIDTHS",
"[",
"key",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
get_window
|
Compute a window function.
This is a wrapper for `scipy.signal.get_window` that additionally
supports callable or pre-computed windows.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window specification:
- If string, it's the name of the window function (e.g., `'hann'`)
- If tuple, it's the name of the window function and any parameters
(e.g., `('kaiser', 4.0)`)
- If numeric, it is treated as the beta parameter of the `'kaiser'`
window, as in `scipy.signal.get_window`.
- If callable, it's a function that accepts one integer argument
(the window length)
- If list-like, it's a pre-computed window of the correct length `Nx`
Nx : int > 0
The length of the window
fftbins : bool, optional
If True (default), create a periodic window for use with FFT
If False, create a symmetric window for filter design applications.
Returns
-------
get_window : np.ndarray
A window of length `Nx` and type `window`
See Also
--------
scipy.signal.get_window
Notes
-----
This function caches at level 10.
Raises
------
ParameterError
If `window` is supplied as a vector of length != `Nx`,
or is otherwise mis-specified.
|
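A short sketch of the four accepted window specifications (all calls below are standard librosa/SciPy usage; the particular window choices, and the use of `scipy.signal.windows.hann` as the callable, are arbitrary examples):
>>> import numpy as np
>>> import scipy.signal
>>> import librosa
>>> w_name = librosa.filters.get_window('hann', 2048)                     # by name
>>> w_param = librosa.filters.get_window(('kaiser', 4.0), 2048)           # name plus parameter
>>> w_call = librosa.filters.get_window(scipy.signal.windows.hann, 2048)  # callable
>>> w_vec = librosa.filters.get_window(np.ones(2048), 2048)               # pre-computed vector
>>> all(len(w) == 2048 for w in (w_name, w_param, w_call, w_vec))
True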
librosa/filters.py
|
def get_window(window, Nx, fftbins=True):
'''Compute a window function.
This is a wrapper for `scipy.signal.get_window` that additionally
supports callable or pre-computed windows.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window specification:
- If string, it's the name of the window function (e.g., `'hann'`)
- If tuple, it's the name of the window function and any parameters
(e.g., `('kaiser', 4.0)`)
- If numeric, it is treated as the beta parameter of the `'kaiser'`
window, as in `scipy.signal.get_window`.
- If callable, it's a function that accepts one integer argument
(the window length)
- If list-like, it's a pre-computed window of the correct length `Nx`
Nx : int > 0
The length of the window
fftbins : bool, optional
If True (default), create a periodic window for use with FFT
If False, create a symmetric window for filter design applications.
Returns
-------
get_window : np.ndarray
A window of length `Nx` and type `window`
See Also
--------
scipy.signal.get_window
Notes
-----
This function caches at level 10.
Raises
------
ParameterError
If `window` is supplied as a vector of length != `Nx`,
or is otherwise mis-specified.
'''
if six.callable(window):
return window(Nx)
elif (isinstance(window, (six.string_types, tuple)) or
np.isscalar(window)):
# TODO: if we add custom window functions in librosa, call them here
return scipy.signal.get_window(window, Nx, fftbins=fftbins)
elif isinstance(window, (np.ndarray, list)):
if len(window) == Nx:
return np.asarray(window)
raise ParameterError('Window size mismatch: '
'{:d} != {:d}'.format(len(window), Nx))
else:
raise ParameterError('Invalid window specification: {}'.format(window))
|
def get_window(window, Nx, fftbins=True):
'''Compute a window function.
This is a wrapper for `scipy.signal.get_window` that additionally
supports callable or pre-computed windows.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window specification:
- If string, it's the name of the window function (e.g., `'hann'`)
- If tuple, it's the name of the window function and any parameters
(e.g., `('kaiser', 4.0)`)
- If numeric, it is treated as the beta parameter of the `'kaiser'`
window, as in `scipy.signal.get_window`.
- If callable, it's a function that accepts one integer argument
(the window length)
- If list-like, it's a pre-computed window of the correct length `Nx`
Nx : int > 0
The length of the window
fftbins : bool, optional
If True (default), create a periodic window for use with FFT
If False, create a symmetric window for filter design applications.
Returns
-------
get_window : np.ndarray
A window of length `Nx` and type `window`
See Also
--------
scipy.signal.get_window
Notes
-----
This function caches at level 10.
Raises
------
ParameterError
If `window` is supplied as a vector of length != `Nx`,
or is otherwise mis-specified.
'''
if six.callable(window):
return window(Nx)
elif (isinstance(window, (six.string_types, tuple)) or
np.isscalar(window)):
# TODO: if we add custom window functions in librosa, call them here
return scipy.signal.get_window(window, Nx, fftbins=fftbins)
elif isinstance(window, (np.ndarray, list)):
if len(window) == Nx:
return np.asarray(window)
raise ParameterError('Window size mismatch: '
'{:d} != {:d}'.format(len(window), Nx))
else:
raise ParameterError('Invalid window specification: {}'.format(window))
|
[
"Compute",
"a",
"window",
"function",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L794-L856
|
[
"def",
"get_window",
"(",
"window",
",",
"Nx",
",",
"fftbins",
"=",
"True",
")",
":",
"if",
"six",
".",
"callable",
"(",
"window",
")",
":",
"return",
"window",
"(",
"Nx",
")",
"elif",
"(",
"isinstance",
"(",
"window",
",",
"(",
"six",
".",
"string_types",
",",
"tuple",
")",
")",
"or",
"np",
".",
"isscalar",
"(",
"window",
")",
")",
":",
"# TODO: if we add custom window functions in librosa, call them here",
"return",
"scipy",
".",
"signal",
".",
"get_window",
"(",
"window",
",",
"Nx",
",",
"fftbins",
"=",
"fftbins",
")",
"elif",
"isinstance",
"(",
"window",
",",
"(",
"np",
".",
"ndarray",
",",
"list",
")",
")",
":",
"if",
"len",
"(",
"window",
")",
"==",
"Nx",
":",
"return",
"np",
".",
"asarray",
"(",
"window",
")",
"raise",
"ParameterError",
"(",
"'Window size mismatch: '",
"'{:d} != {:d}'",
".",
"format",
"(",
"len",
"(",
"window",
")",
",",
"Nx",
")",
")",
"else",
":",
"raise",
"ParameterError",
"(",
"'Invalid window specification: {}'",
".",
"format",
"(",
"window",
")",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
_multirate_fb
|
r'''Helper function to construct a multirate filterbank.
A filter bank consists of multiple band-pass filters which divide the input signal
into subbands. In the case of a multirate filter bank, the band-pass filters
operate with resampled versions of the input signal, e.g. to keep the length
of a filter constant while shifting its center frequency.
This implementation uses `scipy.signal.iirdesign` to design the filters.
Parameters
----------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Samplerate for each filter (used for multirate filterbank).
Q : float
Q factor (influences the filter bandwidth).
passband_ripple : float
The maximum loss in the passband (dB)
See `scipy.signal.iirdesign` for details.
stopband_attenuation : float
The minimum attenuation in the stopband (dB)
See `scipy.signal.iirdesign` for details.
ftype : str
The type of IIR filter to design
See `scipy.signal.iirdesign` for details.
flayout : string
Valid `output` argument for `scipy.signal.iirdesign`.
- If `ba`, returns numerators/denominators of the transfer functions,
used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, returns a series of second-order filters,
used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
- If `zpk`, returns zeros, poles, and system gains of the transfer functions.
Returns
-------
filterbank : list [shape=(n,), dtype=float]
Each list entry comprises the filter coefficients for a single filter.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Samplerate for each filter.
Notes
-----
This function caches at level 10.
See Also
--------
scipy.signal.iirdesign
Raises
------
ParameterError
If `center_freqs` is `None`.
If `sample_rates` is `None`.
If `center_freqs.shape` does not match `sample_rates.shape`.
|
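As a hedged illustration of what the helper computes per band: for a single filter centered at 1000 Hz with the default Q of 25 at a 22050 Hz sample rate, the pass band covers 1000 ± 20 Hz and the stop band 1000 ± 40 Hz, both normalized by Nyquist before being handed to `scipy.signal.iirdesign` (the numbers and variable names below are illustrative, not taken from this record):
>>> import scipy.signal
>>> center, sr, Q = 1000.0, 22050.0, 25.0
>>> bw = center / Q                  # 40 Hz total bandwidth
>>> nyq = 0.5 * sr
>>> wp = [(center - 0.5 * bw) / nyq, (center + 0.5 * bw) / nyq]
>>> ws = [(center - bw) / nyq, (center + bw) / nyq]
>>> b, a = scipy.signal.iirdesign(wp, ws, gpass=1, gstop=50, ftype='ellip', output='ba')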
librosa/filters.py
|
def _multirate_fb(center_freqs=None, sample_rates=None, Q=25.0,
passband_ripple=1, stopband_attenuation=50, ftype='ellip', flayout='ba'):
r'''Helper function to construct a multirate filterbank.
A filter bank consists of multiple band-pass filters which divide the input signal
into subbands. In the case of a multirate filter bank, the band-pass filters
operate with resampled versions of the input signal, e.g. to keep the length
of a filter constant while shifting its center frequency.
This implementation uses `scipy.signal.iirdesign` to design the filters.
Parameters
----------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Samplerate for each filter (used for multirate filterbank).
Q : float
Q factor (influences the filter bandwidth).
passband_ripple : float
The maximum loss in the passband (dB)
See `scipy.signal.iirdesign` for details.
stopband_attenuation : float
The minimum attenuation in the stopband (dB)
See `scipy.signal.iirdesign` for details.
ftype : str
The type of IIR filter to design
See `scipy.signal.iirdesign` for details.
flayout : string
Valid `output` argument for `scipy.signal.iirdesign`.
- If `ba`, returns numerators/denominators of the transfer functions,
used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, returns a series of second-order filters,
used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
- If `zpk`, returns zeros, poles, and system gains of the transfer functions.
Returns
-------
filterbank : list [shape=(n,), dtype=float]
Each list entry comprises the filter coefficients for a single filter.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Samplerate for each filter.
Notes
-----
This function caches at level 10.
See Also
--------
scipy.signal.iirdesign
Raises
------
ParameterError
If `center_freqs` is `None`.
If `sample_rates` is `None`.
If `center_freqs.shape` does not match `sample_rates.shape`.
'''
if center_freqs is None:
raise ParameterError('center_freqs must be provided.')
if sample_rates is None:
raise ParameterError('sample_rates must be provided.')
if center_freqs.shape != sample_rates.shape:
raise ParameterError('Number of provided center_freqs and sample_rates must be equal.')
nyquist = 0.5 * sample_rates
filter_bandwidths = center_freqs / float(Q)
filterbank = []
for cur_center_freq, cur_nyquist, cur_bw in zip(center_freqs, nyquist, filter_bandwidths):
passband_freqs = [cur_center_freq - 0.5 * cur_bw, cur_center_freq + 0.5 * cur_bw] / cur_nyquist
stopband_freqs = [cur_center_freq - cur_bw, cur_center_freq + cur_bw] / cur_nyquist
cur_filter = scipy.signal.iirdesign(passband_freqs, stopband_freqs,
passband_ripple, stopband_attenuation,
analog=False, ftype=ftype, output=flayout)
filterbank.append(cur_filter)
return filterbank, sample_rates
|
def _multirate_fb(center_freqs=None, sample_rates=None, Q=25.0,
passband_ripple=1, stopband_attenuation=50, ftype='ellip', flayout='ba'):
r'''Helper function to construct a multirate filterbank.
A filter bank consists of multiple band-pass filters which divide the input signal
into subbands. In the case of a multirate filter bank, the band-pass filters
operate with resampled versions of the input signal, e.g. to keep the length
of a filter constant while shifting its center frequency.
This implementation uses `scipy.signal.iirdesign` to design the filters.
Parameters
----------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Samplerate for each filter (used for multirate filterbank).
Q : float
Q factor (influences the filter bandwidth).
passband_ripple : float
The maximum loss in the passband (dB)
See `scipy.signal.iirdesign` for details.
stopband_attenuation : float
The minimum attenuation in the stopband (dB)
See `scipy.signal.iirdesign` for details.
ftype : str
The type of IIR filter to design
See `scipy.signal.iirdesign` for details.
flayout : string
Valid `output` argument for `scipy.signal.iirdesign`.
- If `ba`, returns numerators/denominators of the transfer functions,
used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, returns a series of second-order filters,
used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
- If `zpk`, returns zeros, poles, and system gains of the transfer functions.
Returns
-------
filterbank : list [shape=(n,), dtype=float]
Each list entry comprises the filter coefficients for a single filter.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Samplerate for each filter.
Notes
-----
This function caches at level 10.
See Also
--------
scipy.signal.iirdesign
Raises
------
ParameterError
If `center_freqs` is `None`.
If `sample_rates` is `None`.
If `center_freqs.shape` does not match `sample_rates.shape`.
'''
if center_freqs is None:
raise ParameterError('center_freqs must be provided.')
if sample_rates is None:
raise ParameterError('sample_rates must be provided.')
if center_freqs.shape != sample_rates.shape:
raise ParameterError('Number of provided center_freqs and sample_rates must be equal.')
nyquist = 0.5 * sample_rates
filter_bandwidths = center_freqs / float(Q)
filterbank = []
for cur_center_freq, cur_nyquist, cur_bw in zip(center_freqs, nyquist, filter_bandwidths):
passband_freqs = [cur_center_freq - 0.5 * cur_bw, cur_center_freq + 0.5 * cur_bw] / cur_nyquist
stopband_freqs = [cur_center_freq - cur_bw, cur_center_freq + cur_bw] / cur_nyquist
cur_filter = scipy.signal.iirdesign(passband_freqs, stopband_freqs,
passband_ripple, stopband_attenuation,
analog=False, ftype=ftype, output=flayout)
filterbank.append(cur_filter)
return filterbank, sample_rates
|
[
"r",
"Helper",
"function",
"to",
"construct",
"a",
"multirate",
"filterbank",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L860-L954
|
[
"def",
"_multirate_fb",
"(",
"center_freqs",
"=",
"None",
",",
"sample_rates",
"=",
"None",
",",
"Q",
"=",
"25.0",
",",
"passband_ripple",
"=",
"1",
",",
"stopband_attenuation",
"=",
"50",
",",
"ftype",
"=",
"'ellip'",
",",
"flayout",
"=",
"'ba'",
")",
":",
"if",
"center_freqs",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'center_freqs must be provided.'",
")",
"if",
"sample_rates",
"is",
"None",
":",
"raise",
"ParameterError",
"(",
"'sample_rates must be provided.'",
")",
"if",
"center_freqs",
".",
"shape",
"!=",
"sample_rates",
".",
"shape",
":",
"raise",
"ParameterError",
"(",
"'Number of provided center_freqs and sample_rates must be equal.'",
")",
"nyquist",
"=",
"0.5",
"*",
"sample_rates",
"filter_bandwidths",
"=",
"center_freqs",
"/",
"float",
"(",
"Q",
")",
"filterbank",
"=",
"[",
"]",
"for",
"cur_center_freq",
",",
"cur_nyquist",
",",
"cur_bw",
"in",
"zip",
"(",
"center_freqs",
",",
"nyquist",
",",
"filter_bandwidths",
")",
":",
"passband_freqs",
"=",
"[",
"cur_center_freq",
"-",
"0.5",
"*",
"cur_bw",
",",
"cur_center_freq",
"+",
"0.5",
"*",
"cur_bw",
"]",
"/",
"cur_nyquist",
"stopband_freqs",
"=",
"[",
"cur_center_freq",
"-",
"cur_bw",
",",
"cur_center_freq",
"+",
"cur_bw",
"]",
"/",
"cur_nyquist",
"cur_filter",
"=",
"scipy",
".",
"signal",
".",
"iirdesign",
"(",
"passband_freqs",
",",
"stopband_freqs",
",",
"passband_ripple",
",",
"stopband_attenuation",
",",
"analog",
"=",
"False",
",",
"ftype",
"=",
"ftype",
",",
"output",
"=",
"flayout",
")",
"filterbank",
".",
"append",
"(",
"cur_filter",
")",
"return",
"filterbank",
",",
"sample_rates"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
mr_frequencies
|
r'''Helper function for generating center frequency and sample rate pairs.
This function will return center frequency and corresponding sample rates
to obtain similar pitch filterbank settings as described in [1]_.
Instead of starting with MIDI pitch `A0`, we start with `C0`.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440, measured as a fraction of the equally
tempered semitone (1/12 of an octave).
Returns
-------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rate for each filter, used for multirate filterbank.
Notes
-----
This function caches at level 10.
See Also
--------
librosa.filters.semitone_filterbank
librosa.filters._multirate_fb
|
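A quick sketch of the values returned with default tuning (the pitch range and group sizes in the comments are read off the `np.arange` calls in the code below, not asserted beyond that):
>>> import librosa
>>> center_freqs, sample_rates = librosa.filters.mr_frequencies(0.0)
>>> len(center_freqs), len(sample_rates)
(85, 85)
>>> # Center frequencies cover MIDI notes 24 through 108; the lowest 36 filters
>>> # run at 882 Hz, the middle 34 at 4410 Hz, and the top 15 at 22050 Hz.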
librosa/filters.py
|
def mr_frequencies(tuning):
r'''Helper function for generating center frequency and sample rate pairs.
This function will return center frequency and corresponding sample rates
to obtain similar pitch filterbank settings as described in [1]_.
Instead of starting with MIDI pitch `A0`, we start with `C0`.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440, measured as a fraction of the equally
tempered semitone (1/12 of an octave).
Returns
-------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rate for each filter, used for multirate filterbank.
Notes
-----
This function caches at level 10.
See Also
--------
librosa.filters.semitone_filterbank
librosa.filters._multirate_fb
'''
center_freqs = midi_to_hz(np.arange(24 + tuning, 109 + tuning))
sample_rates = np.asarray(len(np.arange(0, 36)) * [882, ] +
len(np.arange(36, 70)) * [4410, ] +
len(np.arange(70, 85)) * [22050, ])
return center_freqs, sample_rates
|
def mr_frequencies(tuning):
r'''Helper function for generating center frequency and sample rate pairs.
This function will return center frequency and corresponding sample rates
to obtain similar pitch filterbank settings as described in [1]_.
Instead of starting with MIDI pitch `A0`, we start with `C0`.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440, measured as a fraction of the equally
tempered semitone (1/12 of an octave).
Returns
-------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rate for each filter, used for multirate filterbank.
Notes
-----
This function caches at level 10.
See Also
--------
librosa.filters.semitone_filterbank
librosa.filters._multirate_fb
'''
center_freqs = midi_to_hz(np.arange(24 + tuning, 109 + tuning))
sample_rates = np.asarray(len(np.arange(0, 36)) * [882, ] +
len(np.arange(36, 70)) * [4410, ] +
len(np.arange(70, 85)) * [22050, ])
return center_freqs, sample_rates
|
[
"r",
"Helper",
"function",
"for",
"generating",
"center",
"frequency",
"and",
"sample",
"rate",
"pairs",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L958-L1002
|
[
"def",
"mr_frequencies",
"(",
"tuning",
")",
":",
"center_freqs",
"=",
"midi_to_hz",
"(",
"np",
".",
"arange",
"(",
"24",
"+",
"tuning",
",",
"109",
"+",
"tuning",
")",
")",
"sample_rates",
"=",
"np",
".",
"asarray",
"(",
"len",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"36",
")",
")",
"*",
"[",
"882",
",",
"]",
"+",
"len",
"(",
"np",
".",
"arange",
"(",
"36",
",",
"70",
")",
")",
"*",
"[",
"4410",
",",
"]",
"+",
"len",
"(",
"np",
".",
"arange",
"(",
"70",
",",
"85",
")",
")",
"*",
"[",
"22050",
",",
"]",
")",
"return",
"center_freqs",
",",
"sample_rates"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
semitone_filterbank
|
r'''Constructs a multirate filterbank of infinite-impulse response (IIR)
band-pass filters at user-defined center frequencies and sample rates.
By default, these center frequencies are set equal to the 88 fundamental
frequencies of the grand piano keyboard, according to a pitch tuning standard
of A440, that is, note A above middle C set to 440 Hz. The center frequencies
are tuned to the twelve-tone equal temperament, which means that they grow
exponentially at a rate of 2**(1/12), that is, twelve notes per octave.
The A440 tuning can be changed by the user while keeping twelve-tone equal
temperament. While A440 is currently the international standard in the music
industry (ISO 16), some orchestras tune to A441-A445, whereas baroque musicians
tune to A415.
See [1]_ for details.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 as a fraction of a semitone (1/12 of an octave
in equal temperament).
sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rates of each filter in the multirate filterbank.
flayout : string
- If `ba`, the standard difference equation is used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, a series of second-order filters is used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
kwargs : additional keyword arguments
Additional arguments to the private function `_multirate_fb()`.
Returns
-------
filterbank : list [shape=(n,), dtype=float]
Each list entry contains the filter coefficients for a single filter.
fb_sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rate for each filter.
See Also
--------
librosa.core.cqt
librosa.core.iirt
librosa.filters._multirate_fb
librosa.filters.mr_frequencies
scipy.signal.iirdesign
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import scipy.signal
>>> semitone_filterbank, sample_rates = librosa.filters.semitone_filterbank()
>>> plt.figure(figsize=(10, 6))
>>> for cur_sr, cur_filter in zip(sample_rates, semitone_filterbank):
... w, h = scipy.signal.freqz(cur_filter[0], cur_filter[1], worN=2000)
... plt.plot((cur_sr / (2 * np.pi)) * w, 20 * np.log10(abs(h)))
>>> plt.semilogx()
>>> plt.xlim([20, 10e3])
>>> plt.ylim([-60, 3])
>>> plt.title('Magnitude Responses of the Pitch Filterbank')
>>> plt.xlabel('Log-Frequency (Hz)')
>>> plt.ylabel('Magnitude (dB)')
>>> plt.tight_layout()
|
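A minimal filtering sketch using one band from the default (`flayout='ba'`) filterbank. The band index, the band-to-pitch relation (band `i` centered on MIDI note `24 + i`, per `mr_frequencies` above), and the synthetic tone are illustrative choices, not part of this record:
>>> import numpy as np
>>> import scipy.signal
>>> import librosa
>>> fb, fb_srs = librosa.filters.semitone_filterbank()
>>> idx = 50                              # a mid-range band
>>> b, a = fb[idx]
>>> sr = fb_srs[idx]                      # the sample rate this band expects
>>> t = np.arange(int(sr)) / sr           # one second of audio at that rate
>>> tone = np.sin(2 * np.pi * librosa.midi_to_hz(24 + idx) * t)
>>> y_band = scipy.signal.filtfilt(b, a, tone)   # near-unity gain at the band center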
librosa/filters.py
|
def semitone_filterbank(center_freqs=None, tuning=0.0, sample_rates=None, flayout='ba', **kwargs):
r'''Constructs a multirate filterbank of infinite-impulse response (IIR)
band-pass filters at user-defined center frequencies and sample rates.
By default, these center frequencies are set equal to the 88 fundamental
frequencies of the grand piano keyboard, according to a pitch tuning standard
of A440, that is, note A above middle C set to 440 Hz. The center frequencies
are tuned to the twelve-tone equal temperament, which means that they grow
exponentially at a rate of 2**(1/12), that is, twelve notes per octave.
The A440 tuning can be changed by the user while keeping twelve-tone equal
temperament. While A440 is currently the international standard in the music
industry (ISO 16), some orchestras tune to A441-A445, whereas baroque musicians
tune to A415.
See [1]_ for details.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 as a fraction of a semitone (1/12 of an octave
in equal temperament).
sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rates of each filter in the multirate filterbank.
flayout : string
- If `ba`, the standard difference equation is used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, a series of second-order filters is used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
kwargs : additional keyword arguments
Additional arguments to the private function `_multirate_fb()`.
Returns
-------
filterbank : list [shape=(n,), dtype=float]
Each list entry contains the filter coefficients for a single filter.
fb_sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rate for each filter.
See Also
--------
librosa.core.cqt
librosa.core.iirt
librosa.filters._multirate_fb
librosa.filters.mr_frequencies
scipy.signal.iirdesign
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import scipy.signal
>>> semitone_filterbank, sample_rates = librosa.filters.semitone_filterbank()
>>> plt.figure(figsize=(10, 6))
>>> for cur_sr, cur_filter in zip(sample_rates, semitone_filterbank):
... w, h = scipy.signal.freqz(cur_filter[0], cur_filter[1], worN=2000)
... plt.plot((cur_sr / (2 * np.pi)) * w, 20 * np.log10(abs(h)))
>>> plt.semilogx()
>>> plt.xlim([20, 10e3])
>>> plt.ylim([-60, 3])
>>> plt.title('Magnitude Responses of the Pitch Filterbank')
>>> plt.xlabel('Log-Frequency (Hz)')
>>> plt.ylabel('Magnitude (dB)')
>>> plt.tight_layout()
'''
if (center_freqs is None) and (sample_rates is None):
center_freqs, sample_rates = mr_frequencies(tuning)
filterbank, fb_sample_rates = _multirate_fb(center_freqs=center_freqs, sample_rates=sample_rates,
flayout=flayout, **kwargs)
return filterbank, fb_sample_rates
|
def semitone_filterbank(center_freqs=None, tuning=0.0, sample_rates=None, flayout='ba', **kwargs):
r'''Constructs a multirate filterbank of infinite-impulse response (IIR)
band-pass filters at user-defined center frequencies and sample rates.
By default, these center frequencies are set equal to the 88 fundamental
frequencies of the grand piano keyboard, according to a pitch tuning standard
of A440, that is, note A above middle C set to 440 Hz. The center frequencies
are tuned to the twelve-tone equal temperament, which means that they grow
exponentially at a rate of 2**(1/12), that is, twelve notes per octave.
The A440 tuning can be changed by the user while keeping twelve-tone equal
temperament. While A440 is currently the international standard in the music
industry (ISO 16), some orchestras tune to A441-A445, whereas baroque musicians
tune to A415.
See [1]_ for details.
.. [1] Müller, Meinard.
"Information Retrieval for Music and Motion."
Springer Verlag. 2007.
Parameters
----------
center_freqs : np.ndarray [shape=(n,), dtype=float]
Center frequencies of the filter kernels.
Also defines the number of filters in the filterbank.
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 as a fraction of a semitone (1/12 of an octave
in equal temperament).
sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rates of each filter in the multirate filterbank.
flayout : string
- If `ba`, the standard difference equation is used for filtering with `scipy.signal.filtfilt`.
Can be unstable for high-order filters.
- If `sos`, a series of second-order filters is used for filtering with `scipy.signal.sosfiltfilt`.
Minimizes numerical precision errors for high-order filters, but is slower.
kwargs : additional keyword arguments
Additional arguments to the private function `_multirate_fb()`.
Returns
-------
filterbank : list [shape=(n,), dtype=float]
Each list entry contains the filter coefficients for a single filter.
fb_sample_rates : np.ndarray [shape=(n,), dtype=float]
Sample rate for each filter.
See Also
--------
librosa.core.cqt
librosa.core.iirt
librosa.filters._multirate_fb
librosa.filters.mr_frequencies
scipy.signal.iirdesign
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import scipy.signal
>>> semitone_filterbank, sample_rates = librosa.filters.semitone_filterbank()
>>> plt.figure(figsize=(10, 6))
>>> for cur_sr, cur_filter in zip(sample_rates, semitone_filterbank):
... w, h = scipy.signal.freqz(cur_filter[0], cur_filter[1], worN=2000)
... plt.plot((cur_sr / (2 * np.pi)) * w, 20 * np.log10(abs(h)))
>>> plt.semilogx()
>>> plt.xlim([20, 10e3])
>>> plt.ylim([-60, 3])
>>> plt.title('Magnitude Responses of the Pitch Filterbank')
>>> plt.xlabel('Log-Frequency (Hz)')
>>> plt.ylabel('Magnitude (dB)')
>>> plt.tight_layout()
'''
if (center_freqs is None) and (sample_rates is None):
center_freqs, sample_rates = mr_frequencies(tuning)
filterbank, fb_sample_rates = _multirate_fb(center_freqs=center_freqs, sample_rates=sample_rates,
flayout=flayout, **kwargs)
return filterbank, fb_sample_rates
|
[
"r",
"Constructs",
"a",
"multirate",
"filterbank",
"of",
"infinite",
"-",
"impulse",
"response",
"(",
"IIR",
")",
"band",
"-",
"pass",
"filters",
"at",
"user",
"-",
"defined",
"center",
"frequencies",
"and",
"sample",
"rates",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L1005-L1090
|
[
"def",
"semitone_filterbank",
"(",
"center_freqs",
"=",
"None",
",",
"tuning",
"=",
"0.0",
",",
"sample_rates",
"=",
"None",
",",
"flayout",
"=",
"'ba'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"center_freqs",
"is",
"None",
")",
"and",
"(",
"sample_rates",
"is",
"None",
")",
":",
"center_freqs",
",",
"sample_rates",
"=",
"mr_frequencies",
"(",
"tuning",
")",
"filterbank",
",",
"fb_sample_rates",
"=",
"_multirate_fb",
"(",
"center_freqs",
"=",
"center_freqs",
",",
"sample_rates",
"=",
"sample_rates",
",",
"flayout",
"=",
"flayout",
",",
"*",
"*",
"kwargs",
")",
"return",
"filterbank",
",",
"fb_sample_rates"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
__window_ss_fill
|
Helper function for window sum-square calculation.
|
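The helper simply overlap-adds a squared window; a tiny standalone sketch of the same accumulation (plain NumPy, illustrative values only):
>>> import numpy as np
>>> win_sq = np.hanning(8) ** 2
>>> hop, n_frames = 4, 3
>>> x = np.zeros(len(win_sq) + hop * (n_frames - 1))
>>> for i in range(n_frames):
...     start = i * hop
...     x[start:start + len(win_sq)] += win_sq
>>> # x now holds the overlap-added squared window, as used by window_sumsquare below.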
librosa/filters.py
|
def __window_ss_fill(x, win_sq, n_frames, hop_length): # pragma: no cover
'''Helper function for window sum-square calculation.'''
n = len(x)
n_fft = len(win_sq)
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
|
def __window_ss_fill(x, win_sq, n_frames, hop_length): # pragma: no cover
'''Helper function for window sum-square calculation.'''
n = len(x)
n_fft = len(win_sq)
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
|
[
"Helper",
"function",
"for",
"window",
"sum",
"-",
"square",
"calculation",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L1094-L1101
|
[
"def",
"__window_ss_fill",
"(",
"x",
",",
"win_sq",
",",
"n_frames",
",",
"hop_length",
")",
":",
"# pragma: no cover",
"n",
"=",
"len",
"(",
"x",
")",
"n_fft",
"=",
"len",
"(",
"win_sq",
")",
"for",
"i",
"in",
"range",
"(",
"n_frames",
")",
":",
"sample",
"=",
"i",
"*",
"hop_length",
"x",
"[",
"sample",
":",
"min",
"(",
"n",
",",
"sample",
"+",
"n_fft",
")",
"]",
"+=",
"win_sq",
"[",
":",
"max",
"(",
"0",
",",
"min",
"(",
"n_fft",
",",
"n",
"-",
"sample",
")",
")",
"]"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
window_sumsquare
|
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing observations
in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
Examples
--------
For a fixed frame length (2048), compare modulation effects for a Hann window
at different hop lengths:
>>> n_frames = 50
>>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
>>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
>>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3,1,1)
>>> plt.plot(wss_256)
>>> plt.title('hop_length=256')
>>> plt.subplot(3,1,2)
>>> plt.plot(wss_512)
>>> plt.title('hop_length=512')
>>> plt.subplot(3,1,3)
>>> plt.plot(wss_1024)
>>> plt.title('hop_length=1024')
>>> plt.tight_layout()
|
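A short usage sketch (the expected length follows directly from the shape formula in the docstring above; the constant-overlap remark in the comment is a property of the Hann window at 75% overlap, stated here only for illustration):
>>> import librosa
>>> wss = librosa.filters.window_sumsquare('hann', 50, hop_length=512, n_fft=2048)
>>> wss.shape
(27136,)
>>> # Away from the edges, a Hann window with hop_length = n_fft / 4 sums to a
>>> # constant (~1.5), so the envelope is flat over the fully overlapped region.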
librosa/filters.py
|
def window_sumsquare(window, n_frames, hop_length=512, win_length=None, n_fft=2048,
dtype=np.float32, norm=None):
'''
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing observations
in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
Examples
--------
For a fixed frame length (2048), compare modulation effects for a Hann window
at different hop lengths:
>>> n_frames = 50
>>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
>>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
>>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3,1,1)
>>> plt.plot(wss_256)
>>> plt.title('hop_length=256')
>>> plt.subplot(3,1,2)
>>> plt.plot(wss_512)
>>> plt.title('hop_length=512')
>>> plt.subplot(3,1,3)
>>> plt.plot(wss_1024)
>>> plt.title('hop_length=1024')
>>> plt.tight_layout()
'''
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length)
win_sq = util.normalize(win_sq, norm=norm)**2
win_sq = util.pad_center(win_sq, n_fft)
# Fill the envelope
__window_ss_fill(x, win_sq, n_frames, hop_length)
return x
|
def window_sumsquare(window, n_frames, hop_length=512, win_length=None, n_fft=2048,
dtype=np.float32, norm=None):
'''
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing observations
in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
Examples
--------
For a fixed frame length (2048), compare modulation effects for a Hann window
at different hop lengths:
>>> n_frames = 50
>>> wss_256 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=256)
>>> wss_512 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=512)
>>> wss_1024 = librosa.filters.window_sumsquare('hann', n_frames, hop_length=1024)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(3,1,1)
>>> plt.plot(wss_256)
>>> plt.title('hop_length=256')
>>> plt.subplot(3,1,2)
>>> plt.plot(wss_512)
>>> plt.title('hop_length=512')
>>> plt.subplot(3,1,3)
>>> plt.plot(wss_1024)
>>> plt.title('hop_length=1024')
>>> plt.tight_layout()
'''
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length)
win_sq = util.normalize(win_sq, norm=norm)**2
win_sq = util.pad_center(win_sq, n_fft)
# Fill the envelope
__window_ss_fill(x, win_sq, n_frames, hop_length)
return x
|
[
"Compute",
"the",
"sum",
"-",
"square",
"envelope",
"of",
"a",
"window",
"function",
"at",
"a",
"given",
"hop",
"length",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L1104-L1175
|
[
"def",
"window_sumsquare",
"(",
"window",
",",
"n_frames",
",",
"hop_length",
"=",
"512",
",",
"win_length",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"dtype",
"=",
"np",
".",
"float32",
",",
"norm",
"=",
"None",
")",
":",
"if",
"win_length",
"is",
"None",
":",
"win_length",
"=",
"n_fft",
"n",
"=",
"n_fft",
"+",
"hop_length",
"*",
"(",
"n_frames",
"-",
"1",
")",
"x",
"=",
"np",
".",
"zeros",
"(",
"n",
",",
"dtype",
"=",
"dtype",
")",
"# Compute the squared window at the desired length",
"win_sq",
"=",
"get_window",
"(",
"window",
",",
"win_length",
")",
"win_sq",
"=",
"util",
".",
"normalize",
"(",
"win_sq",
",",
"norm",
"=",
"norm",
")",
"**",
"2",
"win_sq",
"=",
"util",
".",
"pad_center",
"(",
"win_sq",
",",
"n_fft",
")",
"# Fill the envelope",
"__window_ss_fill",
"(",
"x",
",",
"win_sq",
",",
"n_frames",
",",
"hop_length",
")",
"return",
"x"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
diagonal_filter
|
Build a two-dimensional diagonal filter.
This is primarily used for smoothing recurrence or self-similarity matrices.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window function to use for the filter.
See `get_window` for details.
Note that the window used here should be non-negative.
n : int > 0
the length of the filter
slope : float
The slope of the diagonal filter to produce
angle : float or None
If given, the slope parameter is ignored,
and angle directly sets the orientation of the filter (in radians).
Otherwise, angle is inferred as `arctan(slope)`.
zero_mean : bool
If True, a zero-mean filter is used.
Otherwise, a non-negative averaging filter is used.
This should be enabled if you want to enhance paths and suppress
blocks.
Returns
-------
kernel : np.ndarray, shape=[(m, m)]
The 2-dimensional filter kernel
Notes
-----
This function caches at level 10.
|
librosa/filters.py
|
def diagonal_filter(window, n, slope=1.0, angle=None, zero_mean=False):
'''Build a two-dimensional diagonal filter.
This is primarily used for smoothing recurrence or self-similarity matrices.
Parameters
----------
window : string, tuple, number, callable, or list-like
The window function to use for the filter.
See `get_window` for details.
Note that the window used here should be non-negative.
n : int > 0
the length of the filter
slope : float
The slope of the diagonal filter to produce
angle : float or None
If given, the slope parameter is ignored,
and angle directly sets the orientation of the filter (in radians).
Otherwise, angle is inferred as `arctan(slope)`.
zero_mean : bool
If True, a zero-mean filter is used.
Otherwise, a non-negative averaging filter is used.
This should be enabled if you want to enhance paths and suppress
blocks.
Returns
-------
kernel : np.ndarray, shape=[(m, m)]
The 2-dimensional filter kernel
Notes
-----
This function caches at level 10.
'''
if angle is None:
angle = np.arctan(slope)
win = np.diag(get_window(window, n, fftbins=False))
if not np.isclose(angle, np.pi/4):
win = scipy.ndimage.rotate(win, 45 - angle * 180 / np.pi,
order=5, prefilter=False)
np.clip(win, 0, None, out=win)
win /= win.sum()
if zero_mean:
win -= win.mean()
return win
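As a hedged illustration of how such a kernel is typically used, the sketch below hand-builds a small diagonal Hann kernel (the un-rotated `slope=1` case from the code above) and smooths a synthetic self-similarity matrix with `scipy.ndimage.convolve`. The matrix, the sizes, and the variable names are made up for the demonstration.

import numpy as np
import scipy.ndimage
from scipy.signal import get_window

# Diagonal smoothing kernel: a symmetric window placed along the main
# diagonal, normalized to sum to one (slope = 1, i.e. a 45-degree path).
n = 9
kernel = np.diag(get_window('hann', n, fftbins=False))
kernel /= kernel.sum()

# Toy self-similarity matrix: a diagonal "path" plus background noise.
rng = np.random.default_rng(0)
R = 0.1 * rng.random((64, 64))
np.fill_diagonal(R, 1.0)

# Convolving with the diagonal kernel reinforces diagonal structure
# while averaging away isolated off-diagonal noise.
R_smooth = scipy.ndimage.convolve(R, kernel, mode='constant')
print(R_smooth.shape)  # (64, 64)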
|
[
"Build",
"a",
"two",
"-",
"dimensional",
"diagonal",
"filter",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/filters.py#L1179-L1238
|
[
"def",
"diagonal_filter",
"(",
"window",
",",
"n",
",",
"slope",
"=",
"1.0",
",",
"angle",
"=",
"None",
",",
"zero_mean",
"=",
"False",
")",
":",
"if",
"angle",
"is",
"None",
":",
"angle",
"=",
"np",
".",
"arctan",
"(",
"slope",
")",
"win",
"=",
"np",
".",
"diag",
"(",
"get_window",
"(",
"window",
",",
"n",
",",
"fftbins",
"=",
"False",
")",
")",
"if",
"not",
"np",
".",
"isclose",
"(",
"angle",
",",
"np",
".",
"pi",
"/",
"4",
")",
":",
"win",
"=",
"scipy",
".",
"ndimage",
".",
"rotate",
"(",
"win",
",",
"45",
"-",
"angle",
"*",
"180",
"/",
"np",
".",
"pi",
",",
"order",
"=",
"5",
",",
"prefilter",
"=",
"False",
")",
"np",
".",
"clip",
"(",
"win",
",",
"0",
",",
"None",
",",
"out",
"=",
"win",
")",
"win",
"/=",
"win",
".",
"sum",
"(",
")",
"if",
"zero_mean",
":",
"win",
"-=",
"win",
".",
"mean",
"(",
")",
"return",
"win"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|
test
|
spectral_centroid
|
Compute the spectral centroid.
Each frame of a magnitude spectrogram is normalized and treated as a
distribution over frequency bins, from which the mean (centroid) is
extracted per frame.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies,
or a matrix of center frequencies as constructed by
`librosa.core.ifgram`
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
centroid : np.ndarray [shape=(1, t)]
centroid frequencies
See Also
--------
librosa.core.stft
Short-time Fourier Transform
librosa.core.ifgram
Instantaneous-frequency spectrogram
Examples
--------
From time-series input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> cent = librosa.feature.spectral_centroid(y=y, sr=sr)
>>> cent
array([[ 4382.894, 626.588, ..., 5037.07 , 5413.398]])
From spectrogram input:
>>> S, phase = librosa.magphase(librosa.stft(y=y))
>>> librosa.feature.spectral_centroid(S=S)
array([[ 4382.894, 626.588, ..., 5037.07 , 5413.398]])
Using variable bin center frequencies:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> if_gram, D = librosa.ifgram(y)
>>> librosa.feature.spectral_centroid(S=np.abs(D), freq=if_gram)
array([[ 4420.719, 625.769, ..., 5011.86 , 5221.492]])
Plot the result
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> plt.semilogy(cent.T, label='Spectral centroid')
>>> plt.ylabel('Hz')
>>> plt.xticks([])
>>> plt.xlim([0, cent.shape[-1]])
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('log Power spectrogram')
>>> plt.tight_layout()
|
librosa/feature/spectral.py
|
def spectral_centroid(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
freq=None, win_length=None, window='hann', center=True,
pad_mode='reflect'):
'''Compute the spectral centroid.
Each frame of a magnitude spectrogram is normalized and treated as a
distribution over frequency bins, from which the mean (centroid) is
extracted per frame.
Parameters
----------
y : np.ndarray [shape=(n,)] or None
audio time series
sr : number > 0 [scalar]
audio sampling rate of `y`
S : np.ndarray [shape=(d, t)] or None
(optional) spectrogram magnitude
n_fft : int > 0 [scalar]
FFT window size
hop_length : int > 0 [scalar]
hop length for STFT. See `librosa.core.stft` for details.
freq : None or np.ndarray [shape=(d,) or shape=(d, t)]
Center frequencies for spectrogram bins.
If `None`, then FFT bin center frequencies are used.
Otherwise, it can be a single array of `d` center frequencies,
or a matrix of center frequencies as constructed by
`librosa.core.ifgram`
win_length : int <= n_fft [scalar]
Each frame of audio is windowed by `window()`.
The window will be of length `win_length` and then padded
with zeros to match `n_fft`.
If unspecified, defaults to ``win_length = n_fft``.
window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
- a window specification (string, tuple, or number);
see `scipy.signal.get_window`
- a window function, such as `scipy.signal.hanning`
- a vector or array of length `n_fft`
.. see also:: `filters.get_window`
center : boolean
- If `True`, the signal `y` is padded so that frame
`t` is centered at `y[t * hop_length]`.
- If `False`, then frame `t` begins at `y[t * hop_length]`
pad_mode : string
If `center=True`, the padding mode to use at the edges of the signal.
By default, STFT uses reflection padding.
Returns
-------
centroid : np.ndarray [shape=(1, t)]
centroid frequencies
See Also
--------
librosa.core.stft
Short-time Fourier Transform
librosa.core.ifgram
Instantaneous-frequency spectrogram
Examples
--------
From time-series input:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> cent = librosa.feature.spectral_centroid(y=y, sr=sr)
>>> cent
array([[ 4382.894, 626.588, ..., 5037.07 , 5413.398]])
From spectrogram input:
>>> S, phase = librosa.magphase(librosa.stft(y=y))
>>> librosa.feature.spectral_centroid(S=S)
array([[ 4382.894, 626.588, ..., 5037.07 , 5413.398]])
Using variable bin center frequencies:
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> if_gram, D = librosa.ifgram(y)
>>> librosa.feature.spectral_centroid(S=np.abs(D), freq=if_gram)
array([[ 4420.719, 625.769, ..., 5011.86 , 5221.492]])
Plot the result
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> plt.semilogy(cent.T, label='Spectral centroid')
>>> plt.ylabel('Hz')
>>> plt.xticks([])
>>> plt.xlim([0, cent.shape[-1]])
>>> plt.legend()
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log', x_axis='time')
>>> plt.title('log Power spectrogram')
>>> plt.tight_layout()
'''
S, n_fft = _spectrogram(y=y, S=S, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode)
if not np.isrealobj(S):
raise ParameterError('Spectral centroid is only defined '
'with real-valued input')
elif np.any(S < 0):
raise ParameterError('Spectral centroid is only defined '
'with non-negative energies')
# Compute the center frequencies of each bin
if freq is None:
freq = fft_frequencies(sr=sr, n_fft=n_fft)
if freq.ndim == 1:
freq = freq.reshape((-1, 1))
# Column-normalize S
return np.sum(freq * util.normalize(S, norm=1, axis=0),
axis=0, keepdims=True)
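The return statement above is just a frequency-weighted mean: centroid[t] = sum_k freq[k] * S[k, t] / sum_k S[k, t]. A minimal sketch of that computation, using plain NumPy in place of `util.normalize` and a synthetic spectrogram, might look like this (`centroid_sketch` is a hypothetical name, not a librosa function):

import numpy as np

def centroid_sketch(S, freq):
    # S    : non-negative magnitude spectrogram, shape (d, t)
    # freq : bin center frequencies, shape (d,)
    # Normalize each column to a distribution over frequency bins,
    # guarding against silent (all-zero) frames.
    col_sums = np.maximum(S.sum(axis=0, keepdims=True), np.finfo(S.dtype).tiny)
    return np.sum(freq[:, np.newaxis] * (S / col_sums), axis=0, keepdims=True)

# Synthetic check: putting all energy in a single bin places the
# centroid at that bin's center frequency.
sr, n_fft = 22050, 2048
freq = np.fft.rfftfreq(n_fft, d=1.0 / sr)  # same bins as fft_frequencies(sr, n_fft)
S = np.zeros((len(freq), 4))
S[100, :] = 1.0
print(centroid_sketch(S, freq))  # each frame ~ freq[100]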
|
[
"Compute",
"the",
"spectral",
"centroid",
"."
] |
librosa/librosa
|
python
|
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/feature/spectral.py#L38-L168
|
[
"def",
"spectral_centroid",
"(",
"y",
"=",
"None",
",",
"sr",
"=",
"22050",
",",
"S",
"=",
"None",
",",
"n_fft",
"=",
"2048",
",",
"hop_length",
"=",
"512",
",",
"freq",
"=",
"None",
",",
"win_length",
"=",
"None",
",",
"window",
"=",
"'hann'",
",",
"center",
"=",
"True",
",",
"pad_mode",
"=",
"'reflect'",
")",
":",
"S",
",",
"n_fft",
"=",
"_spectrogram",
"(",
"y",
"=",
"y",
",",
"S",
"=",
"S",
",",
"n_fft",
"=",
"n_fft",
",",
"hop_length",
"=",
"hop_length",
",",
"win_length",
"=",
"win_length",
",",
"window",
"=",
"window",
",",
"center",
"=",
"center",
",",
"pad_mode",
"=",
"pad_mode",
")",
"if",
"not",
"np",
".",
"isrealobj",
"(",
"S",
")",
":",
"raise",
"ParameterError",
"(",
"'Spectral centroid is only defined '",
"'with real-valued input'",
")",
"elif",
"np",
".",
"any",
"(",
"S",
"<",
"0",
")",
":",
"raise",
"ParameterError",
"(",
"'Spectral centroid is only defined '",
"'with non-negative energies'",
")",
"# Compute the center frequencies of each bin",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"fft_frequencies",
"(",
"sr",
"=",
"sr",
",",
"n_fft",
"=",
"n_fft",
")",
"if",
"freq",
".",
"ndim",
"==",
"1",
":",
"freq",
"=",
"freq",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"# Column-normalize S",
"return",
"np",
".",
"sum",
"(",
"freq",
"*",
"util",
".",
"normalize",
"(",
"S",
",",
"norm",
"=",
"1",
",",
"axis",
"=",
"0",
")",
",",
"axis",
"=",
"0",
",",
"keepdims",
"=",
"True",
")"
] |
180e8e6eb8f958fa6b20b8cba389f7945d508247
|