partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
test
align_times
Aligns the times to the closest frame times (e.g. beats). Parameters ---------- times: np.ndarray Times in seconds to be aligned. frames: np.ndarray Frame times in seconds. Returns ------- aligned_times: np.ndarray Aligned times.
msaf/input_output.py
def align_times(times, frames): """Aligns the times to the closest frame times (e.g. beats). Parameters ---------- times: np.ndarray Times in seconds to be aligned. frames: np.ndarray Frame times in seconds. Returns ------- aligned_times: np.ndarray Aligned times. """ dist = np.minimum.outer(times, frames) bound_frames = np.argmax(np.maximum(0, dist), axis=1) aligned_times = np.unique(bound_frames) return aligned_times
def align_times(times, frames): """Aligns the times to the closest frame times (e.g. beats). Parameters ---------- times: np.ndarray Times in seconds to be aligned. frames: np.ndarray Frame times in seconds. Returns ------- aligned_times: np.ndarray Aligned times. """ dist = np.minimum.outer(times, frames) bound_frames = np.argmax(np.maximum(0, dist), axis=1) aligned_times = np.unique(bound_frames) return aligned_times
[ "Aligns", "the", "times", "to", "the", "closest", "frame", "times", "(", "e", ".", "g", ".", "beats", ")", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L141-L159
[ "def", "align_times", "(", "times", ",", "frames", ")", ":", "dist", "=", "np", ".", "minimum", ".", "outer", "(", "times", ",", "frames", ")", "bound_frames", "=", "np", ".", "argmax", "(", "np", ".", "maximum", "(", "0", ",", "dist", ")", ",", "axis", "=", "1", ")", "aligned_times", "=", "np", ".", "unique", "(", "bound_frames", ")", "return", "aligned_times" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
find_estimation
Finds the correct estimation from all the estimations contained in a JAMS file given the specified arguments. Parameters ---------- jam : jams.JAMS JAMS object. boundaries_id : str Identifier of the algorithm used to compute the boundaries. labels_id : str Identifier of the algorithm used to compute the labels. params : dict Additional search parameters. E.g. {"feature" : "pcp"}. Returns ------- ann : jams.Annotation Found estimation. `None` if it couldn't be found.
msaf/input_output.py
def find_estimation(jam, boundaries_id, labels_id, params): """Finds the correct estimation from all the estimations contained in a JAMS file given the specified arguments. Parameters ---------- jam : jams.JAMS JAMS object. boundaries_id : str Identifier of the algorithm used to compute the boundaries. labels_id : str Identifier of the algorithm used to compute the labels. params : dict Additional search parameters. E.g. {"feature" : "pcp"}. Returns ------- ann : jams.Annotation Found estimation. `None` if it couldn't be found. """ # Use handy JAMS search interface namespace = "multi_segment" if params["hier"] else "segment_open" # TODO: This is a workaround to issue in JAMS. Should be # resolved in JAMS 0.2.3, but for now, this works too. ann = jam.search(namespace=namespace).\ search(**{"Sandbox.boundaries_id": boundaries_id}).\ search(**{"Sandbox.labels_id": lambda x: (isinstance(x, six.string_types) and re.match(labels_id, x) is not None) or x is None}) for key, val in zip(params.keys(), params.values()): if isinstance(val, six.string_types): ann = ann.search(**{"Sandbox.%s" % key: val}) else: ann = ann.search(**{"Sandbox.%s" % key: lambda x: x == val}) # Check estimations found if len(ann) > 1: logging.warning("More than one estimation with same parameters.") if len(ann) > 0: ann = ann[0] # If we couldn't find anything, let's return None if not ann: ann = None return ann
def find_estimation(jam, boundaries_id, labels_id, params): """Finds the correct estimation from all the estimations contained in a JAMS file given the specified arguments. Parameters ---------- jam : jams.JAMS JAMS object. boundaries_id : str Identifier of the algorithm used to compute the boundaries. labels_id : str Identifier of the algorithm used to compute the labels. params : dict Additional search parameters. E.g. {"feature" : "pcp"}. Returns ------- ann : jams.Annotation Found estimation. `None` if it couldn't be found. """ # Use handy JAMS search interface namespace = "multi_segment" if params["hier"] else "segment_open" # TODO: This is a workaround to issue in JAMS. Should be # resolved in JAMS 0.2.3, but for now, this works too. ann = jam.search(namespace=namespace).\ search(**{"Sandbox.boundaries_id": boundaries_id}).\ search(**{"Sandbox.labels_id": lambda x: (isinstance(x, six.string_types) and re.match(labels_id, x) is not None) or x is None}) for key, val in zip(params.keys(), params.values()): if isinstance(val, six.string_types): ann = ann.search(**{"Sandbox.%s" % key: val}) else: ann = ann.search(**{"Sandbox.%s" % key: lambda x: x == val}) # Check estimations found if len(ann) > 1: logging.warning("More than one estimation with same parameters.") if len(ann) > 0: ann = ann[0] # If we couldn't find anything, let's return None if not ann: ann = None return ann
[ "Finds", "the", "correct", "estimation", "from", "all", "the", "estimations", "contained", "in", "a", "JAMS", "file", "given", "the", "specified", "arguments", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L162-L209
[ "def", "find_estimation", "(", "jam", ",", "boundaries_id", ",", "labels_id", ",", "params", ")", ":", "# Use handy JAMS search interface", "namespace", "=", "\"multi_segment\"", "if", "params", "[", "\"hier\"", "]", "else", "\"segment_open\"", "# TODO: This is a workaround to issue in JAMS. Should be", "# resolved in JAMS 0.2.3, but for now, this works too.", "ann", "=", "jam", ".", "search", "(", "namespace", "=", "namespace", ")", ".", "search", "(", "*", "*", "{", "\"Sandbox.boundaries_id\"", ":", "boundaries_id", "}", ")", ".", "search", "(", "*", "*", "{", "\"Sandbox.labels_id\"", ":", "lambda", "x", ":", "(", "isinstance", "(", "x", ",", "six", ".", "string_types", ")", "and", "re", ".", "match", "(", "labels_id", ",", "x", ")", "is", "not", "None", ")", "or", "x", "is", "None", "}", ")", "for", "key", ",", "val", "in", "zip", "(", "params", ".", "keys", "(", ")", ",", "params", ".", "values", "(", ")", ")", ":", "if", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ":", "ann", "=", "ann", ".", "search", "(", "*", "*", "{", "\"Sandbox.%s\"", "%", "key", ":", "val", "}", ")", "else", ":", "ann", "=", "ann", ".", "search", "(", "*", "*", "{", "\"Sandbox.%s\"", "%", "key", ":", "lambda", "x", ":", "x", "==", "val", "}", ")", "# Check estimations found", "if", "len", "(", "ann", ")", ">", "1", ":", "logging", ".", "warning", "(", "\"More than one estimation with same parameters.\"", ")", "if", "len", "(", "ann", ")", ">", "0", ":", "ann", "=", "ann", "[", "0", "]", "# If we couldn't find anything, let's return None", "if", "not", "ann", ":", "ann", "=", "None", "return", "ann" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
save_estimations
Saves the segment estimations in a JAMS file. Parameters ---------- file_struct : FileStruct Object with the different file paths of the current file. times : np.array or list Estimated boundary times. If `list`, estimated hierarchical boundaries. labels : np.array(N, 2) Estimated labels (None in case we are only storing boundary evaluations). boundaries_id : str Boundary algorithm identifier. labels_id : str Labels algorithm identifier. params : dict Dictionary with additional parameters for both algorithms.
msaf/input_output.py
def save_estimations(file_struct, times, labels, boundaries_id, labels_id, **params): """Saves the segment estimations in a JAMS file. Parameters ---------- file_struct : FileStruct Object with the different file paths of the current file. times : np.array or list Estimated boundary times. If `list`, estimated hierarchical boundaries. labels : np.array(N, 2) Estimated labels (None in case we are only storing boundary evaluations). boundaries_id : str Boundary algorithm identifier. labels_id : str Labels algorithm identifier. params : dict Dictionary with additional parameters for both algorithms. """ # Remove features if they exist params.pop("features", None) # Get duration dur = get_duration(file_struct.features_file) # Convert to intervals and sanity check if 'numpy' in str(type(times)): # Flat check inters = utils.times_to_intervals(times) assert len(inters) == len(labels), "Number of boundary intervals " \ "(%d) and labels (%d) do not match" % (len(inters), len(labels)) # Put into lists to simplify the writing process later inters = [inters] labels = [labels] else: # Hierarchical check inters = [] for level in range(len(times)): est_inters = utils.times_to_intervals(times[level]) inters.append(est_inters) assert len(inters[level]) == len(labels[level]), \ "Number of boundary intervals (%d) and labels (%d) do not " \ "match in level %d" % (len(inters[level]), len(labels[level]), level) # Create new estimation namespace = "multi_segment" if params["hier"] else "segment_open" ann = jams.Annotation(namespace=namespace) # Find estimation in file if os.path.isfile(file_struct.est_file): jam = jams.load(file_struct.est_file, validate=False) curr_ann = find_estimation(jam, boundaries_id, labels_id, params) if curr_ann is not None: curr_ann.data = ann.data # cleanup all data ann = curr_ann # This will overwrite the existing estimation else: jam.annotations.append(ann) else: # Create new JAMS if it doesn't exist jam = jams.JAMS() jam.file_metadata.duration = dur jam.annotations.append(ann) # Save metadata and parameters ann.annotation_metadata.version = msaf.__version__ ann.annotation_metadata.data_source = "MSAF" sandbox = {} sandbox["boundaries_id"] = boundaries_id sandbox["labels_id"] = labels_id sandbox["timestamp"] = \ datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S") for key in params: sandbox[key] = params[key] ann.sandbox = sandbox # Save actual data for i, (level_inters, level_labels) in enumerate(zip(inters, labels)): for bound_inter, label in zip(level_inters, level_labels): dur = float(bound_inter[1]) - float(bound_inter[0]) label = chr(int(label) + 65) if params["hier"]: value = {"label": label, "level": i} else: value = label ann.append(time=bound_inter[0], duration=dur, value=value) # Write results jam.save(file_struct.est_file)
def save_estimations(file_struct, times, labels, boundaries_id, labels_id, **params): """Saves the segment estimations in a JAMS file. Parameters ---------- file_struct : FileStruct Object with the different file paths of the current file. times : np.array or list Estimated boundary times. If `list`, estimated hierarchical boundaries. labels : np.array(N, 2) Estimated labels (None in case we are only storing boundary evaluations). boundaries_id : str Boundary algorithm identifier. labels_id : str Labels algorithm identifier. params : dict Dictionary with additional parameters for both algorithms. """ # Remove features if they exist params.pop("features", None) # Get duration dur = get_duration(file_struct.features_file) # Convert to intervals and sanity check if 'numpy' in str(type(times)): # Flat check inters = utils.times_to_intervals(times) assert len(inters) == len(labels), "Number of boundary intervals " \ "(%d) and labels (%d) do not match" % (len(inters), len(labels)) # Put into lists to simplify the writing process later inters = [inters] labels = [labels] else: # Hierarchical check inters = [] for level in range(len(times)): est_inters = utils.times_to_intervals(times[level]) inters.append(est_inters) assert len(inters[level]) == len(labels[level]), \ "Number of boundary intervals (%d) and labels (%d) do not " \ "match in level %d" % (len(inters[level]), len(labels[level]), level) # Create new estimation namespace = "multi_segment" if params["hier"] else "segment_open" ann = jams.Annotation(namespace=namespace) # Find estimation in file if os.path.isfile(file_struct.est_file): jam = jams.load(file_struct.est_file, validate=False) curr_ann = find_estimation(jam, boundaries_id, labels_id, params) if curr_ann is not None: curr_ann.data = ann.data # cleanup all data ann = curr_ann # This will overwrite the existing estimation else: jam.annotations.append(ann) else: # Create new JAMS if it doesn't exist jam = jams.JAMS() jam.file_metadata.duration = dur jam.annotations.append(ann) # Save metadata and parameters ann.annotation_metadata.version = msaf.__version__ ann.annotation_metadata.data_source = "MSAF" sandbox = {} sandbox["boundaries_id"] = boundaries_id sandbox["labels_id"] = labels_id sandbox["timestamp"] = \ datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S") for key in params: sandbox[key] = params[key] ann.sandbox = sandbox # Save actual data for i, (level_inters, level_labels) in enumerate(zip(inters, labels)): for bound_inter, label in zip(level_inters, level_labels): dur = float(bound_inter[1]) - float(bound_inter[0]) label = chr(int(label) + 65) if params["hier"]: value = {"label": label, "level": i} else: value = label ann.append(time=bound_inter[0], duration=dur, value=value) # Write results jam.save(file_struct.est_file)
[ "Saves", "the", "segment", "estimations", "in", "a", "JAMS", "file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L212-L303
[ "def", "save_estimations", "(", "file_struct", ",", "times", ",", "labels", ",", "boundaries_id", ",", "labels_id", ",", "*", "*", "params", ")", ":", "# Remove features if they exist", "params", ".", "pop", "(", "\"features\"", ",", "None", ")", "# Get duration", "dur", "=", "get_duration", "(", "file_struct", ".", "features_file", ")", "# Convert to intervals and sanity check", "if", "'numpy'", "in", "str", "(", "type", "(", "times", ")", ")", ":", "# Flat check", "inters", "=", "utils", ".", "times_to_intervals", "(", "times", ")", "assert", "len", "(", "inters", ")", "==", "len", "(", "labels", ")", ",", "\"Number of boundary intervals \"", "\"(%d) and labels (%d) do not match\"", "%", "(", "len", "(", "inters", ")", ",", "len", "(", "labels", ")", ")", "# Put into lists to simplify the writing process later", "inters", "=", "[", "inters", "]", "labels", "=", "[", "labels", "]", "else", ":", "# Hierarchical check", "inters", "=", "[", "]", "for", "level", "in", "range", "(", "len", "(", "times", ")", ")", ":", "est_inters", "=", "utils", ".", "times_to_intervals", "(", "times", "[", "level", "]", ")", "inters", ".", "append", "(", "est_inters", ")", "assert", "len", "(", "inters", "[", "level", "]", ")", "==", "len", "(", "labels", "[", "level", "]", ")", ",", "\"Number of boundary intervals (%d) and labels (%d) do not \"", "\"match in level %d\"", "%", "(", "len", "(", "inters", "[", "level", "]", ")", ",", "len", "(", "labels", "[", "level", "]", ")", ",", "level", ")", "# Create new estimation", "namespace", "=", "\"multi_segment\"", "if", "params", "[", "\"hier\"", "]", "else", "\"segment_open\"", "ann", "=", "jams", ".", "Annotation", "(", "namespace", "=", "namespace", ")", "# Find estimation in file", "if", "os", ".", "path", ".", "isfile", "(", "file_struct", ".", "est_file", ")", ":", "jam", "=", "jams", ".", "load", "(", "file_struct", ".", "est_file", ",", "validate", "=", "False", ")", "curr_ann", "=", "find_estimation", "(", "jam", ",", "boundaries_id", ",", "labels_id", ",", "params", ")", "if", "curr_ann", "is", "not", "None", ":", "curr_ann", ".", "data", "=", "ann", ".", "data", "# cleanup all data", "ann", "=", "curr_ann", "# This will overwrite the existing estimation", "else", ":", "jam", ".", "annotations", ".", "append", "(", "ann", ")", "else", ":", "# Create new JAMS if it doesn't exist", "jam", "=", "jams", ".", "JAMS", "(", ")", "jam", ".", "file_metadata", ".", "duration", "=", "dur", "jam", ".", "annotations", ".", "append", "(", "ann", ")", "# Save metadata and parameters", "ann", ".", "annotation_metadata", ".", "version", "=", "msaf", ".", "__version__", "ann", ".", "annotation_metadata", ".", "data_source", "=", "\"MSAF\"", "sandbox", "=", "{", "}", "sandbox", "[", "\"boundaries_id\"", "]", "=", "boundaries_id", "sandbox", "[", "\"labels_id\"", "]", "=", "labels_id", "sandbox", "[", "\"timestamp\"", "]", "=", "datetime", ".", "datetime", ".", "today", "(", ")", ".", "strftime", "(", "\"%Y/%m/%d %H:%M:%S\"", ")", "for", "key", "in", "params", ":", "sandbox", "[", "key", "]", "=", "params", "[", "key", "]", "ann", ".", "sandbox", "=", "sandbox", "# Save actual data", "for", "i", ",", "(", "level_inters", ",", "level_labels", ")", "in", "enumerate", "(", "zip", "(", "inters", ",", "labels", ")", ")", ":", "for", "bound_inter", ",", "label", "in", "zip", "(", "level_inters", ",", "level_labels", ")", ":", "dur", "=", "float", "(", "bound_inter", "[", "1", "]", ")", "-", "float", "(", "bound_inter", "[", "0", "]", ")", "label", "=", "chr", "(", "int", "(", "label", ")", "+", "65", ")", "if", "params", "[", "\"hier\"", "]", ":", "value", "=", "{", "\"label\"", ":", "label", ",", "\"level\"", ":", "i", "}", "else", ":", "value", "=", "label", "ann", ".", "append", "(", "time", "=", "bound_inter", "[", "0", "]", ",", "duration", "=", "dur", ",", "value", "=", "value", ")", "# Write results", "jam", ".", "save", "(", "file_struct", ".", "est_file", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_all_boundary_algorithms
Gets all the possible boundary algorithms in MSAF. Returns ------- algo_ids : list List of all the IDs of boundary algorithms (strings).
msaf/input_output.py
def get_all_boundary_algorithms(): """Gets all the possible boundary algorithms in MSAF. Returns ------- algo_ids : list List of all the IDs of boundary algorithms (strings). """ algo_ids = [] for name in msaf.algorithms.__all__: module = eval(msaf.algorithms.__name__ + "." + name) if module.is_boundary_type: algo_ids.append(module.algo_id) return algo_ids
def get_all_boundary_algorithms(): """Gets all the possible boundary algorithms in MSAF. Returns ------- algo_ids : list List of all the IDs of boundary algorithms (strings). """ algo_ids = [] for name in msaf.algorithms.__all__: module = eval(msaf.algorithms.__name__ + "." + name) if module.is_boundary_type: algo_ids.append(module.algo_id) return algo_ids
[ "Gets", "all", "the", "possible", "boundary", "algorithms", "in", "MSAF", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L306-L319
[ "def", "get_all_boundary_algorithms", "(", ")", ":", "algo_ids", "=", "[", "]", "for", "name", "in", "msaf", ".", "algorithms", ".", "__all__", ":", "module", "=", "eval", "(", "msaf", ".", "algorithms", ".", "__name__", "+", "\".\"", "+", "name", ")", "if", "module", ".", "is_boundary_type", ":", "algo_ids", ".", "append", "(", "module", ".", "algo_id", ")", "return", "algo_ids" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_all_label_algorithms
Gets all the possible label (structural grouping) algorithms in MSAF. Returns ------- algo_ids : list List of all the IDs of label algorithms (strings).
msaf/input_output.py
def get_all_label_algorithms(): """Gets all the possible label (structural grouping) algorithms in MSAF. Returns ------- algo_ids : list List of all the IDs of label algorithms (strings). """ algo_ids = [] for name in msaf.algorithms.__all__: module = eval(msaf.algorithms.__name__ + "." + name) if module.is_label_type: algo_ids.append(module.algo_id) return algo_ids
def get_all_label_algorithms(): """Gets all the possible label (structural grouping) algorithms in MSAF. Returns ------- algo_ids : list List of all the IDs of label algorithms (strings). """ algo_ids = [] for name in msaf.algorithms.__all__: module = eval(msaf.algorithms.__name__ + "." + name) if module.is_label_type: algo_ids.append(module.algo_id) return algo_ids
[ "Gets", "all", "the", "possible", "label", "(", "structural", "grouping", ")", "algorithms", "in", "MSAF", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L322-L335
[ "def", "get_all_label_algorithms", "(", ")", ":", "algo_ids", "=", "[", "]", "for", "name", "in", "msaf", ".", "algorithms", ".", "__all__", ":", "module", "=", "eval", "(", "msaf", ".", "algorithms", ".", "__name__", "+", "\".\"", "+", "name", ")", "if", "module", ".", "is_label_type", ":", "algo_ids", ".", "append", "(", "module", ".", "algo_id", ")", "return", "algo_ids" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_configuration
Gets the configuration dictionary from the current parameters of the algorithms to be evaluated.
msaf/input_output.py
def get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id): """Gets the configuration dictionary from the current parameters of the algorithms to be evaluated.""" config = {} config["annot_beats"] = annot_beats config["feature"] = feature config["framesync"] = framesync bound_config = {} if boundaries_id != "gt": bound_config = \ eval(msaf.algorithms.__name__ + "." + boundaries_id).config config.update(bound_config) if labels_id is not None: label_config = \ eval(msaf.algorithms.__name__ + "." + labels_id).config # Make sure we don't have parameter name duplicates if labels_id != boundaries_id: overlap = set(bound_config.keys()). \ intersection(set(label_config.keys())) assert len(overlap) == 0, \ "Parameter %s must not exist both in %s and %s algorithms" % \ (overlap, boundaries_id, labels_id) config.update(label_config) return config
def get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id): """Gets the configuration dictionary from the current parameters of the algorithms to be evaluated.""" config = {} config["annot_beats"] = annot_beats config["feature"] = feature config["framesync"] = framesync bound_config = {} if boundaries_id != "gt": bound_config = \ eval(msaf.algorithms.__name__ + "." + boundaries_id).config config.update(bound_config) if labels_id is not None: label_config = \ eval(msaf.algorithms.__name__ + "." + labels_id).config # Make sure we don't have parameter name duplicates if labels_id != boundaries_id: overlap = set(bound_config.keys()). \ intersection(set(label_config.keys())) assert len(overlap) == 0, \ "Parameter %s must not exist both in %s and %s algorithms" % \ (overlap, boundaries_id, labels_id) config.update(label_config) return config
[ "Gets", "the", "configuration", "dictionary", "from", "the", "current", "parameters", "of", "the", "algorithms", "to", "be", "evaluated", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L338-L363
[ "def", "get_configuration", "(", "feature", ",", "annot_beats", ",", "framesync", ",", "boundaries_id", ",", "labels_id", ")", ":", "config", "=", "{", "}", "config", "[", "\"annot_beats\"", "]", "=", "annot_beats", "config", "[", "\"feature\"", "]", "=", "feature", "config", "[", "\"framesync\"", "]", "=", "framesync", "bound_config", "=", "{", "}", "if", "boundaries_id", "!=", "\"gt\"", ":", "bound_config", "=", "eval", "(", "msaf", ".", "algorithms", ".", "__name__", "+", "\".\"", "+", "boundaries_id", ")", ".", "config", "config", ".", "update", "(", "bound_config", ")", "if", "labels_id", "is", "not", "None", ":", "label_config", "=", "eval", "(", "msaf", ".", "algorithms", ".", "__name__", "+", "\".\"", "+", "labels_id", ")", ".", "config", "# Make sure we don't have parameter name duplicates", "if", "labels_id", "!=", "boundaries_id", ":", "overlap", "=", "set", "(", "bound_config", ".", "keys", "(", ")", ")", ".", "intersection", "(", "set", "(", "label_config", ".", "keys", "(", ")", ")", ")", "assert", "len", "(", "overlap", ")", "==", "0", ",", "\"Parameter %s must not exist both in %s and %s algorithms\"", "%", "(", "overlap", ",", "boundaries_id", ",", "labels_id", ")", "config", ".", "update", "(", "label_config", ")", "return", "config" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_dataset_files
Gets the files of the given dataset.
msaf/input_output.py
def get_dataset_files(in_path): """Gets the files of the given dataset.""" # Get audio files audio_files = [] for ext in ds_config.audio_exts: audio_files += glob.glob( os.path.join(in_path, ds_config.audio_dir, "*" + ext)) # Make sure directories exist utils.ensure_dir(os.path.join(in_path, ds_config.features_dir)) utils.ensure_dir(os.path.join(in_path, ds_config.estimations_dir)) utils.ensure_dir(os.path.join(in_path, ds_config.references_dir)) # Get the file structs file_structs = [] for audio_file in audio_files: file_structs.append(FileStruct(audio_file)) # Sort by audio file name file_structs = sorted(file_structs, key=lambda file_struct: file_struct.audio_file) return file_structs
def get_dataset_files(in_path): """Gets the files of the given dataset.""" # Get audio files audio_files = [] for ext in ds_config.audio_exts: audio_files += glob.glob( os.path.join(in_path, ds_config.audio_dir, "*" + ext)) # Make sure directories exist utils.ensure_dir(os.path.join(in_path, ds_config.features_dir)) utils.ensure_dir(os.path.join(in_path, ds_config.estimations_dir)) utils.ensure_dir(os.path.join(in_path, ds_config.references_dir)) # Get the file structs file_structs = [] for audio_file in audio_files: file_structs.append(FileStruct(audio_file)) # Sort by audio file name file_structs = sorted(file_structs, key=lambda file_struct: file_struct.audio_file) return file_structs
[ "Gets", "the", "files", "of", "the", "given", "dataset", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L366-L388
[ "def", "get_dataset_files", "(", "in_path", ")", ":", "# Get audio files", "audio_files", "=", "[", "]", "for", "ext", "in", "ds_config", ".", "audio_exts", ":", "audio_files", "+=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "in_path", ",", "ds_config", ".", "audio_dir", ",", "\"*\"", "+", "ext", ")", ")", "# Make sure directories exist", "utils", ".", "ensure_dir", "(", "os", ".", "path", ".", "join", "(", "in_path", ",", "ds_config", ".", "features_dir", ")", ")", "utils", ".", "ensure_dir", "(", "os", ".", "path", ".", "join", "(", "in_path", ",", "ds_config", ".", "estimations_dir", ")", ")", "utils", ".", "ensure_dir", "(", "os", ".", "path", ".", "join", "(", "in_path", ",", "ds_config", ".", "references_dir", ")", ")", "# Get the file structs", "file_structs", "=", "[", "]", "for", "audio_file", "in", "audio_files", ":", "file_structs", ".", "append", "(", "FileStruct", "(", "audio_file", ")", ")", "# Sort by audio file name", "file_structs", "=", "sorted", "(", "file_structs", ",", "key", "=", "lambda", "file_struct", ":", "file_struct", ".", "audio_file", ")", "return", "file_structs" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
read_hier_references
Reads hierarchical references from a jams file. Parameters ---------- jams_file : str Path to the jams file. annotation_id : int > 0 Identifier of the annotator to read from. exclude_levels: list List of levels to exclude. Empty list to include all levels. Returns ------- hier_bounds : list List of the segment boundary times in seconds for each level. hier_labels : list List of the segment labels for each level. hier_levels : list List of strings for the level identifiers.
msaf/input_output.py
def read_hier_references(jams_file, annotation_id=0, exclude_levels=[]): """Reads hierarchical references from a jams file. Parameters ---------- jams_file : str Path to the jams file. annotation_id : int > 0 Identifier of the annotator to read from. exclude_levels: list List of levels to exclude. Empty list to include all levels. Returns ------- hier_bounds : list List of the segment boundary times in seconds for each level. hier_labels : list List of the segment labels for each level. hier_levels : list List of strings for the level identifiers. """ hier_bounds = [] hier_labels = [] hier_levels = [] jam = jams.load(jams_file) namespaces = ["segment_salami_upper", "segment_salami_function", "segment_open", "segment_tut", "segment_salami_lower"] # Remove levels if needed for exclude in exclude_levels: if exclude in namespaces: namespaces.remove(exclude) # Build hierarchy references for ns in namespaces: ann = jam.search(namespace=ns) if not ann: continue ref_inters, ref_labels = ann[annotation_id].to_interval_values() hier_bounds.append(utils.intervals_to_times(ref_inters)) hier_labels.append(ref_labels) hier_levels.append(ns) return hier_bounds, hier_labels, hier_levels
def read_hier_references(jams_file, annotation_id=0, exclude_levels=[]): """Reads hierarchical references from a jams file. Parameters ---------- jams_file : str Path to the jams file. annotation_id : int > 0 Identifier of the annotator to read from. exclude_levels: list List of levels to exclude. Empty list to include all levels. Returns ------- hier_bounds : list List of the segment boundary times in seconds for each level. hier_labels : list List of the segment labels for each level. hier_levels : list List of strings for the level identifiers. """ hier_bounds = [] hier_labels = [] hier_levels = [] jam = jams.load(jams_file) namespaces = ["segment_salami_upper", "segment_salami_function", "segment_open", "segment_tut", "segment_salami_lower"] # Remove levels if needed for exclude in exclude_levels: if exclude in namespaces: namespaces.remove(exclude) # Build hierarchy references for ns in namespaces: ann = jam.search(namespace=ns) if not ann: continue ref_inters, ref_labels = ann[annotation_id].to_interval_values() hier_bounds.append(utils.intervals_to_times(ref_inters)) hier_labels.append(ref_labels) hier_levels.append(ns) return hier_bounds, hier_labels, hier_levels
[ "Reads", "hierarchical", "references", "from", "a", "jams", "file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L391-L434
[ "def", "read_hier_references", "(", "jams_file", ",", "annotation_id", "=", "0", ",", "exclude_levels", "=", "[", "]", ")", ":", "hier_bounds", "=", "[", "]", "hier_labels", "=", "[", "]", "hier_levels", "=", "[", "]", "jam", "=", "jams", ".", "load", "(", "jams_file", ")", "namespaces", "=", "[", "\"segment_salami_upper\"", ",", "\"segment_salami_function\"", ",", "\"segment_open\"", ",", "\"segment_tut\"", ",", "\"segment_salami_lower\"", "]", "# Remove levels if needed", "for", "exclude", "in", "exclude_levels", ":", "if", "exclude", "in", "namespaces", ":", "namespaces", ".", "remove", "(", "exclude", ")", "# Build hierarchy references", "for", "ns", "in", "namespaces", ":", "ann", "=", "jam", ".", "search", "(", "namespace", "=", "ns", ")", "if", "not", "ann", ":", "continue", "ref_inters", ",", "ref_labels", "=", "ann", "[", "annotation_id", "]", ".", "to_interval_values", "(", ")", "hier_bounds", ".", "append", "(", "utils", ".", "intervals_to_times", "(", "ref_inters", ")", ")", "hier_labels", ".", "append", "(", "ref_labels", ")", "hier_levels", ".", "append", "(", "ns", ")", "return", "hier_bounds", ",", "hier_labels", ",", "hier_levels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_duration
Reads the duration of a given features file. Parameters ---------- features_file: str Path to the JSON file containing the features. Returns ------- dur: float Duration of the analyzed file.
msaf/input_output.py
def get_duration(features_file): """Reads the duration of a given features file. Parameters ---------- features_file: str Path to the JSON file containing the features. Returns ------- dur: float Duration of the analyzed file. """ with open(features_file) as f: feats = json.load(f) return float(feats["globals"]["dur"])
def get_duration(features_file): """Reads the duration of a given features file. Parameters ---------- features_file: str Path to the JSON file containing the features. Returns ------- dur: float Duration of the analyzed file. """ with open(features_file) as f: feats = json.load(f) return float(feats["globals"]["dur"])
[ "Reads", "the", "duration", "of", "a", "given", "features", "file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L437-L452
[ "def", "get_duration", "(", "features_file", ")", ":", "with", "open", "(", "features_file", ")", "as", "f", ":", "feats", "=", "json", ".", "load", "(", "f", ")", "return", "float", "(", "feats", "[", "\"globals\"", "]", "[", "\"dur\"", "]", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
write_mirex
Writes results to file using the standard MIREX format. Parameters ---------- times: np.array Times in seconds of the boundaries. labels: np.array Labels associated to the segments defined by the boundaries. out_file: str Output file path to save the results.
msaf/input_output.py
def write_mirex(times, labels, out_file): """Writes results to file using the standard MIREX format. Parameters ---------- times: np.array Times in seconds of the boundaries. labels: np.array Labels associated to the segments defined by the boundaries. out_file: str Output file path to save the results. """ inters = msaf.utils.times_to_intervals(times) assert len(inters) == len(labels) out_str = "" for inter, label in zip(inters, labels): out_str += "%.3f\t%.3f\t%s\n" % (inter[0], inter[1], label) with open(out_file, "w") as f: f.write(out_str[:-1])
def write_mirex(times, labels, out_file): """Writes results to file using the standard MIREX format. Parameters ---------- times: np.array Times in seconds of the boundaries. labels: np.array Labels associated to the segments defined by the boundaries. out_file: str Output file path to save the results. """ inters = msaf.utils.times_to_intervals(times) assert len(inters) == len(labels) out_str = "" for inter, label in zip(inters, labels): out_str += "%.3f\t%.3f\t%s\n" % (inter[0], inter[1], label) with open(out_file, "w") as f: f.write(out_str[:-1])
[ "Writes", "results", "to", "file", "using", "the", "standard", "MIREX", "format", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L455-L473
[ "def", "write_mirex", "(", "times", ",", "labels", ",", "out_file", ")", ":", "inters", "=", "msaf", ".", "utils", ".", "times_to_intervals", "(", "times", ")", "assert", "len", "(", "inters", ")", "==", "len", "(", "labels", ")", "out_str", "=", "\"\"", "for", "inter", ",", "label", "in", "zip", "(", "inters", ",", "labels", ")", ":", "out_str", "+=", "\"%.3f\\t%.3f\\t%s\\n\"", "%", "(", "inter", "[", "0", "]", ",", "inter", "[", "1", "]", ",", "label", ")", "with", "open", "(", "out_file", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "out_str", "[", ":", "-", "1", "]", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
FileStruct._get_dataset_file
Gets the desired dataset file.
msaf/input_output.py
def _get_dataset_file(self, dir, ext): """Gets the desired dataset file.""" audio_file_ext = "." + self.audio_file.split(".")[-1] base_file = os.path.basename(self.audio_file).replace( audio_file_ext, ext) return os.path.join(self.ds_path, dir, base_file)
def _get_dataset_file(self, dir, ext): """Gets the desired dataset file.""" audio_file_ext = "." + self.audio_file.split(".")[-1] base_file = os.path.basename(self.audio_file).replace( audio_file_ext, ext) return os.path.join(self.ds_path, dir, base_file)
[ "Gets", "the", "desired", "dataset", "file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L38-L43
[ "def", "_get_dataset_file", "(", "self", ",", "dir", ",", "ext", ")", ":", "audio_file_ext", "=", "\".\"", "+", "self", ".", "audio_file", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "base_file", "=", "os", ".", "path", ".", "basename", "(", "self", ".", "audio_file", ")", ".", "replace", "(", "audio_file_ext", ",", "ext", ")", "return", "os", ".", "path", ".", "join", "(", "self", ".", "ds_path", ",", "dir", ",", "base_file", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Segmenter.processFlat
Main process. Returns ------- est_idxs : np.array(N) Estimated indeces the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments.
msaf/algorithms/example/segmenter.py
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated indeces the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments. """ # Preprocess to obtain features (array(n_frames, n_features)) F = self._preprocess() # Do something with the default parameters # (these are defined in the in the config.py file). assert self.config["my_param1"] == 1.0 # Identify boundaries in frame indeces with the new algorithm my_bounds = np.array([0, F.shape[0] - 1]) # Label the segments (use -1 to have empty segments) my_labels = np.ones(len(my_bounds) - 1) * -1 # Post process estimations est_idxs, est_labels = self._postprocess(my_bounds, my_labels) # We're done! return est_idxs, est_labels
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated indeces the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments. """ # Preprocess to obtain features (array(n_frames, n_features)) F = self._preprocess() # Do something with the default parameters # (these are defined in the in the config.py file). assert self.config["my_param1"] == 1.0 # Identify boundaries in frame indeces with the new algorithm my_bounds = np.array([0, F.shape[0] - 1]) # Label the segments (use -1 to have empty segments) my_labels = np.ones(len(my_bounds) - 1) * -1 # Post process estimations est_idxs, est_labels = self._postprocess(my_bounds, my_labels) # We're done! return est_idxs, est_labels
[ "Main", "process", ".", "Returns", "-------", "est_idxs", ":", "np", ".", "array", "(", "N", ")", "Estimated", "indeces", "the", "segment", "boundaries", "in", "frame", "indeces", ".", "est_labels", ":", "np", ".", "array", "(", "N", "-", "1", ")", "Estimated", "labels", "for", "the", "segments", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/example/segmenter.py#L9-L35
[ "def", "processFlat", "(", "self", ")", ":", "# Preprocess to obtain features (array(n_frames, n_features))", "F", "=", "self", ".", "_preprocess", "(", ")", "# Do something with the default parameters", "# (these are defined in the in the config.py file).", "assert", "self", ".", "config", "[", "\"my_param1\"", "]", "==", "1.0", "# Identify boundaries in frame indeces with the new algorithm", "my_bounds", "=", "np", ".", "array", "(", "[", "0", ",", "F", ".", "shape", "[", "0", "]", "-", "1", "]", ")", "# Label the segments (use -1 to have empty segments)", "my_labels", "=", "np", ".", "ones", "(", "len", "(", "my_bounds", ")", "-", "1", ")", "*", "-", "1", "# Post process estimations", "est_idxs", ",", "est_labels", "=", "self", ".", "_postprocess", "(", "my_bounds", ",", "my_labels", ")", "# We're done!", "return", "est_idxs", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
align_segmentation
Load a ground-truth segmentation, and align times to the nearest detected beats. Arguments: beat_times -- array song -- path to the audio file Returns: segment_beats -- array beat-aligned segment boundaries segment_times -- array true segment times segment_labels -- array list of segment labels
msaf/algorithms/olda/make_train.py
def align_segmentation(beat_times, song): '''Load a ground-truth segmentation, and align times to the nearest detected beats. Arguments: beat_times -- array song -- path to the audio file Returns: segment_beats -- array beat-aligned segment boundaries segment_times -- array true segment times segment_labels -- array list of segment labels ''' try: segment_times, segment_labels = msaf.io.read_references(song) except: return None, None, None segment_times = np.asarray(segment_times) # Map to intervals segment_intervals = msaf.utils.times_to_intervals(segment_times) # Map beats to intervals beat_intervals = np.asarray(zip(beat_times[:-1], beat_times[1:])) # Map beats to segments beat_segment_ids = librosa.util.match_intervals(beat_intervals, segment_intervals) segment_beats = [] segment_times_out = [] segment_labels_out = [] # print segment_times, beat_segment_ids, len(beat_times), # len(beat_segment_ids) for i in range(segment_times.shape[0]): hits = np.argwhere(beat_segment_ids == i) if len(hits) > 0 and i < len(segment_intervals) and \ i < len(segment_labels): segment_beats.extend(hits[0]) segment_times_out.append(segment_intervals[i, :]) segment_labels_out.append(segment_labels[i]) # Pull out the segment start times segment_beats = list(segment_beats) # segment_times_out = np.asarray( # segment_times_out)[:, 0].squeeze().reshape((-1, 1)) # if segment_times_out.ndim == 0: # segment_times_out = segment_times_out[np.newaxis] segment_times_out = segment_times return segment_beats, segment_times_out, segment_labels_out
def align_segmentation(beat_times, song): '''Load a ground-truth segmentation, and align times to the nearest detected beats. Arguments: beat_times -- array song -- path to the audio file Returns: segment_beats -- array beat-aligned segment boundaries segment_times -- array true segment times segment_labels -- array list of segment labels ''' try: segment_times, segment_labels = msaf.io.read_references(song) except: return None, None, None segment_times = np.asarray(segment_times) # Map to intervals segment_intervals = msaf.utils.times_to_intervals(segment_times) # Map beats to intervals beat_intervals = np.asarray(zip(beat_times[:-1], beat_times[1:])) # Map beats to segments beat_segment_ids = librosa.util.match_intervals(beat_intervals, segment_intervals) segment_beats = [] segment_times_out = [] segment_labels_out = [] # print segment_times, beat_segment_ids, len(beat_times), # len(beat_segment_ids) for i in range(segment_times.shape[0]): hits = np.argwhere(beat_segment_ids == i) if len(hits) > 0 and i < len(segment_intervals) and \ i < len(segment_labels): segment_beats.extend(hits[0]) segment_times_out.append(segment_intervals[i, :]) segment_labels_out.append(segment_labels[i]) # Pull out the segment start times segment_beats = list(segment_beats) # segment_times_out = np.asarray( # segment_times_out)[:, 0].squeeze().reshape((-1, 1)) # if segment_times_out.ndim == 0: # segment_times_out = segment_times_out[np.newaxis] segment_times_out = segment_times return segment_beats, segment_times_out, segment_labels_out
[ "Load", "a", "ground", "-", "truth", "segmentation", "and", "align", "times", "to", "the", "nearest", "detected", "beats", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/olda/make_train.py#L18-L75
[ "def", "align_segmentation", "(", "beat_times", ",", "song", ")", ":", "try", ":", "segment_times", ",", "segment_labels", "=", "msaf", ".", "io", ".", "read_references", "(", "song", ")", "except", ":", "return", "None", ",", "None", ",", "None", "segment_times", "=", "np", ".", "asarray", "(", "segment_times", ")", "# Map to intervals", "segment_intervals", "=", "msaf", ".", "utils", ".", "times_to_intervals", "(", "segment_times", ")", "# Map beats to intervals", "beat_intervals", "=", "np", ".", "asarray", "(", "zip", "(", "beat_times", "[", ":", "-", "1", "]", ",", "beat_times", "[", "1", ":", "]", ")", ")", "# Map beats to segments", "beat_segment_ids", "=", "librosa", ".", "util", ".", "match_intervals", "(", "beat_intervals", ",", "segment_intervals", ")", "segment_beats", "=", "[", "]", "segment_times_out", "=", "[", "]", "segment_labels_out", "=", "[", "]", "# print segment_times, beat_segment_ids, len(beat_times),", "# len(beat_segment_ids)", "for", "i", "in", "range", "(", "segment_times", ".", "shape", "[", "0", "]", ")", ":", "hits", "=", "np", ".", "argwhere", "(", "beat_segment_ids", "==", "i", ")", "if", "len", "(", "hits", ")", ">", "0", "and", "i", "<", "len", "(", "segment_intervals", ")", "and", "i", "<", "len", "(", "segment_labels", ")", ":", "segment_beats", ".", "extend", "(", "hits", "[", "0", "]", ")", "segment_times_out", ".", "append", "(", "segment_intervals", "[", "i", ",", ":", "]", ")", "segment_labels_out", ".", "append", "(", "segment_labels", "[", "i", "]", ")", "# Pull out the segment start times", "segment_beats", "=", "list", "(", "segment_beats", ")", "# segment_times_out = np.asarray(", "# segment_times_out)[:, 0].squeeze().reshape((-1, 1))", "# if segment_times_out.ndim == 0:", "# segment_times_out = segment_times_out[np.newaxis]", "segment_times_out", "=", "segment_times", "return", "segment_beats", ",", "segment_times_out", ",", "segment_labels_out" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.estimate_beats
Estimates the beats using librosa. Returns ------- times: np.array Times of estimated beats in seconds. frames: np.array Frame indeces of estimated beats.
msaf/base.py
def estimate_beats(self): """Estimates the beats using librosa. Returns ------- times: np.array Times of estimated beats in seconds. frames: np.array Frame indeces of estimated beats. """ # Compute harmonic-percussive source separation if needed if self._audio_percussive is None: self._audio_harmonic, self._audio_percussive = self.compute_HPSS() # Compute beats tempo, frames = librosa.beat.beat_track( y=self._audio_percussive, sr=self.sr, hop_length=self.hop_length) # To times times = librosa.frames_to_time(frames, sr=self.sr, hop_length=self.hop_length) # TODO: Is this really necessary? if len(times) > 0 and times[0] == 0: times = times[1:] frames = frames[1:] return times, frames
def estimate_beats(self): """Estimates the beats using librosa. Returns ------- times: np.array Times of estimated beats in seconds. frames: np.array Frame indeces of estimated beats. """ # Compute harmonic-percussive source separation if needed if self._audio_percussive is None: self._audio_harmonic, self._audio_percussive = self.compute_HPSS() # Compute beats tempo, frames = librosa.beat.beat_track( y=self._audio_percussive, sr=self.sr, hop_length=self.hop_length) # To times times = librosa.frames_to_time(frames, sr=self.sr, hop_length=self.hop_length) # TODO: Is this really necessary? if len(times) > 0 and times[0] == 0: times = times[1:] frames = frames[1:] return times, frames
[ "Estimates", "the", "beats", "using", "librosa", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L112-L140
[ "def", "estimate_beats", "(", "self", ")", ":", "# Compute harmonic-percussive source separation if needed", "if", "self", ".", "_audio_percussive", "is", "None", ":", "self", ".", "_audio_harmonic", ",", "self", ".", "_audio_percussive", "=", "self", ".", "compute_HPSS", "(", ")", "# Compute beats", "tempo", ",", "frames", "=", "librosa", ".", "beat", ".", "beat_track", "(", "y", "=", "self", ".", "_audio_percussive", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "# To times", "times", "=", "librosa", ".", "frames_to_time", "(", "frames", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "# TODO: Is this really necessary?", "if", "len", "(", "times", ")", ">", "0", "and", "times", "[", "0", "]", "==", "0", ":", "times", "=", "times", "[", "1", ":", "]", "frames", "=", "frames", "[", "1", ":", "]", "return", "times", ",", "frames" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.read_ann_beats
Reads the annotated beats if available. Returns ------- times: np.array Times of annotated beats in seconds. frames: np.array Frame indeces of annotated beats.
msaf/base.py
def read_ann_beats(self): """Reads the annotated beats if available. Returns ------- times: np.array Times of annotated beats in seconds. frames: np.array Frame indeces of annotated beats. """ times, frames = (None, None) # Read annotations if they exist in correct folder if os.path.isfile(self.file_struct.ref_file): try: jam = jams.load(self.file_struct.ref_file) except TypeError: logging.warning( "Can't read JAMS file %s. Maybe it's not " "compatible with current JAMS version?" % self.file_struct.ref_file) return times, frames beat_annot = jam.search(namespace="beat.*") # If beat annotations exist, get times and frames if len(beat_annot) > 0: beats_inters, _ = beat_annot[0].to_interval_values() times = beats_inters[:, 0] frames = librosa.time_to_frames(times, sr=self.sr, hop_length=self.hop_length) return times, frames
def read_ann_beats(self): """Reads the annotated beats if available. Returns ------- times: np.array Times of annotated beats in seconds. frames: np.array Frame indeces of annotated beats. """ times, frames = (None, None) # Read annotations if they exist in correct folder if os.path.isfile(self.file_struct.ref_file): try: jam = jams.load(self.file_struct.ref_file) except TypeError: logging.warning( "Can't read JAMS file %s. Maybe it's not " "compatible with current JAMS version?" % self.file_struct.ref_file) return times, frames beat_annot = jam.search(namespace="beat.*") # If beat annotations exist, get times and frames if len(beat_annot) > 0: beats_inters, _ = beat_annot[0].to_interval_values() times = beats_inters[:, 0] frames = librosa.time_to_frames(times, sr=self.sr, hop_length=self.hop_length) return times, frames
[ "Reads", "the", "annotated", "beats", "if", "available", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L142-L172
[ "def", "read_ann_beats", "(", "self", ")", ":", "times", ",", "frames", "=", "(", "None", ",", "None", ")", "# Read annotations if they exist in correct folder", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "file_struct", ".", "ref_file", ")", ":", "try", ":", "jam", "=", "jams", ".", "load", "(", "self", ".", "file_struct", ".", "ref_file", ")", "except", "TypeError", ":", "logging", ".", "warning", "(", "\"Can't read JAMS file %s. Maybe it's not \"", "\"compatible with current JAMS version?\"", "%", "self", ".", "file_struct", ".", "ref_file", ")", "return", "times", ",", "frames", "beat_annot", "=", "jam", ".", "search", "(", "namespace", "=", "\"beat.*\"", ")", "# If beat annotations exist, get times and frames", "if", "len", "(", "beat_annot", ")", ">", "0", ":", "beats_inters", ",", "_", "=", "beat_annot", "[", "0", "]", ".", "to_interval_values", "(", ")", "times", "=", "beats_inters", "[", ":", ",", "0", "]", "frames", "=", "librosa", ".", "time_to_frames", "(", "times", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "return", "times", ",", "frames" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.compute_beat_sync_features
Make the features beat-synchronous. Parameters ---------- beat_frames: np.array The frame indeces of the beat positions. beat_times: np.array The time points of the beat positions (in seconds). pad: boolean If `True`, `beat_frames` is padded to span the full range. Returns ------- beatsync_feats: np.array The beat-synchronized features. `None` if the beat_frames was `None`. beatsync_times: np.array The beat-synchronized times. `None` if the beat_frames was `None`.
msaf/base.py
def compute_beat_sync_features(self, beat_frames, beat_times, pad): """Make the features beat-synchronous. Parameters ---------- beat_frames: np.array The frame indeces of the beat positions. beat_times: np.array The time points of the beat positions (in seconds). pad: boolean If `True`, `beat_frames` is padded to span the full range. Returns ------- beatsync_feats: np.array The beat-synchronized features. `None` if the beat_frames was `None`. beatsync_times: np.array The beat-synchronized times. `None` if the beat_frames was `None`. """ if beat_frames is None: return None, None # Make beat synchronous beatsync_feats = librosa.util.utils.sync(self._framesync_features.T, beat_frames, pad=pad).T # Assign times (and add last time if padded) beatsync_times = np.copy(beat_times) if beatsync_times.shape[0] != beatsync_feats.shape[0]: beatsync_times = np.concatenate((beatsync_times, [self._framesync_times[-1]])) return beatsync_feats, beatsync_times
def compute_beat_sync_features(self, beat_frames, beat_times, pad): """Make the features beat-synchronous. Parameters ---------- beat_frames: np.array The frame indeces of the beat positions. beat_times: np.array The time points of the beat positions (in seconds). pad: boolean If `True`, `beat_frames` is padded to span the full range. Returns ------- beatsync_feats: np.array The beat-synchronized features. `None` if the beat_frames was `None`. beatsync_times: np.array The beat-synchronized times. `None` if the beat_frames was `None`. """ if beat_frames is None: return None, None # Make beat synchronous beatsync_feats = librosa.util.utils.sync(self._framesync_features.T, beat_frames, pad=pad).T # Assign times (and add last time if padded) beatsync_times = np.copy(beat_times) if beatsync_times.shape[0] != beatsync_feats.shape[0]: beatsync_times = np.concatenate((beatsync_times, [self._framesync_times[-1]])) return beatsync_feats, beatsync_times
[ "Make", "the", "features", "beat", "-", "synchronous", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L174-L207
[ "def", "compute_beat_sync_features", "(", "self", ",", "beat_frames", ",", "beat_times", ",", "pad", ")", ":", "if", "beat_frames", "is", "None", ":", "return", "None", ",", "None", "# Make beat synchronous", "beatsync_feats", "=", "librosa", ".", "util", ".", "utils", ".", "sync", "(", "self", ".", "_framesync_features", ".", "T", ",", "beat_frames", ",", "pad", "=", "pad", ")", ".", "T", "# Assign times (and add last time if padded)", "beatsync_times", "=", "np", ".", "copy", "(", "beat_times", ")", "if", "beatsync_times", ".", "shape", "[", "0", "]", "!=", "beatsync_feats", ".", "shape", "[", "0", "]", ":", "beatsync_times", "=", "np", ".", "concatenate", "(", "(", "beatsync_times", ",", "[", "self", ".", "_framesync_times", "[", "-", "1", "]", "]", ")", ")", "return", "beatsync_feats", ",", "beatsync_times" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.read_features
Reads the features from a file and stores them in the current object. Parameters ---------- tol: float Tolerance level to detect duration of audio.
msaf/base.py
def read_features(self, tol=1e-3): """Reads the features from a file and stores them in the current object. Parameters ---------- tol: float Tolerance level to detect duration of audio. """ try: # Read JSON file with open(self.file_struct.features_file) as f: feats = json.load(f) # Store duration if self.dur is None: self.dur = float(feats["globals"]["dur"]) # Check that we have the correct global parameters assert(np.isclose( self.dur, float(feats["globals"]["dur"]), rtol=tol)) assert(self.sr == int(feats["globals"]["sample_rate"])) assert(self.hop_length == int(feats["globals"]["hop_length"])) assert(os.path.basename(self.file_struct.audio_file) == os.path.basename(feats["globals"]["audio_file"])) # Check for specific features params feat_params_err = FeatureParamsError( "Couldn't find features for %s id in file %s" % (self.get_id(), self.file_struct.features_file)) if self.get_id() not in feats.keys(): raise feat_params_err for param_name in self.get_param_names(): value = getattr(self, param_name) if hasattr(value, '__call__'): # Special case of functions if value.__name__ != \ feats[self.get_id()]["params"][param_name]: raise feat_params_err else: if str(value) != \ feats[self.get_id()]["params"][param_name]: raise feat_params_err # Store actual features self._est_beats_times = np.array(feats["est_beats"]) self._est_beatsync_times = np.array(feats["est_beatsync_times"]) self._est_beats_frames = librosa.core.time_to_frames( self._est_beats_times, sr=self.sr, hop_length=self.hop_length) self._framesync_features = \ np.array(feats[self.get_id()]["framesync"]) self._est_beatsync_features = \ np.array(feats[self.get_id()]["est_beatsync"]) # Read annotated beats if available if "ann_beats" in feats.keys(): self._ann_beats_times = np.array(feats["ann_beats"]) self._ann_beatsync_times = np.array(feats["ann_beatsync_times"]) self._ann_beats_frames = librosa.core.time_to_frames( self._ann_beats_times, sr=self.sr, hop_length=self.hop_length) self._ann_beatsync_features = \ np.array(feats[self.get_id()]["ann_beatsync"]) except KeyError: raise WrongFeaturesFormatError( "The features file %s is not correctly formatted" % self.file_struct.features_file) except AssertionError: raise FeaturesNotFound( "The features for the given parameters were not found in " "features file %s" % self.file_struct.features_file) except IOError: raise NoFeaturesFileError("Could not find features file %s", self.file_struct.features_file)
def read_features(self, tol=1e-3): """Reads the features from a file and stores them in the current object. Parameters ---------- tol: float Tolerance level to detect duration of audio. """ try: # Read JSON file with open(self.file_struct.features_file) as f: feats = json.load(f) # Store duration if self.dur is None: self.dur = float(feats["globals"]["dur"]) # Check that we have the correct global parameters assert(np.isclose( self.dur, float(feats["globals"]["dur"]), rtol=tol)) assert(self.sr == int(feats["globals"]["sample_rate"])) assert(self.hop_length == int(feats["globals"]["hop_length"])) assert(os.path.basename(self.file_struct.audio_file) == os.path.basename(feats["globals"]["audio_file"])) # Check for specific features params feat_params_err = FeatureParamsError( "Couldn't find features for %s id in file %s" % (self.get_id(), self.file_struct.features_file)) if self.get_id() not in feats.keys(): raise feat_params_err for param_name in self.get_param_names(): value = getattr(self, param_name) if hasattr(value, '__call__'): # Special case of functions if value.__name__ != \ feats[self.get_id()]["params"][param_name]: raise feat_params_err else: if str(value) != \ feats[self.get_id()]["params"][param_name]: raise feat_params_err # Store actual features self._est_beats_times = np.array(feats["est_beats"]) self._est_beatsync_times = np.array(feats["est_beatsync_times"]) self._est_beats_frames = librosa.core.time_to_frames( self._est_beats_times, sr=self.sr, hop_length=self.hop_length) self._framesync_features = \ np.array(feats[self.get_id()]["framesync"]) self._est_beatsync_features = \ np.array(feats[self.get_id()]["est_beatsync"]) # Read annotated beats if available if "ann_beats" in feats.keys(): self._ann_beats_times = np.array(feats["ann_beats"]) self._ann_beatsync_times = np.array(feats["ann_beatsync_times"]) self._ann_beats_frames = librosa.core.time_to_frames( self._ann_beats_times, sr=self.sr, hop_length=self.hop_length) self._ann_beatsync_features = \ np.array(feats[self.get_id()]["ann_beatsync"]) except KeyError: raise WrongFeaturesFormatError( "The features file %s is not correctly formatted" % self.file_struct.features_file) except AssertionError: raise FeaturesNotFound( "The features for the given parameters were not found in " "features file %s" % self.file_struct.features_file) except IOError: raise NoFeaturesFileError("Could not find features file %s", self.file_struct.features_file)
[ "Reads", "the", "features", "from", "a", "file", "and", "stores", "them", "in", "the", "current", "object", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L209-L282
[ "def", "read_features", "(", "self", ",", "tol", "=", "1e-3", ")", ":", "try", ":", "# Read JSON file", "with", "open", "(", "self", ".", "file_struct", ".", "features_file", ")", "as", "f", ":", "feats", "=", "json", ".", "load", "(", "f", ")", "# Store duration", "if", "self", ".", "dur", "is", "None", ":", "self", ".", "dur", "=", "float", "(", "feats", "[", "\"globals\"", "]", "[", "\"dur\"", "]", ")", "# Check that we have the correct global parameters", "assert", "(", "np", ".", "isclose", "(", "self", ".", "dur", ",", "float", "(", "feats", "[", "\"globals\"", "]", "[", "\"dur\"", "]", ")", ",", "rtol", "=", "tol", ")", ")", "assert", "(", "self", ".", "sr", "==", "int", "(", "feats", "[", "\"globals\"", "]", "[", "\"sample_rate\"", "]", ")", ")", "assert", "(", "self", ".", "hop_length", "==", "int", "(", "feats", "[", "\"globals\"", "]", "[", "\"hop_length\"", "]", ")", ")", "assert", "(", "os", ".", "path", ".", "basename", "(", "self", ".", "file_struct", ".", "audio_file", ")", "==", "os", ".", "path", ".", "basename", "(", "feats", "[", "\"globals\"", "]", "[", "\"audio_file\"", "]", ")", ")", "# Check for specific features params", "feat_params_err", "=", "FeatureParamsError", "(", "\"Couldn't find features for %s id in file %s\"", "%", "(", "self", ".", "get_id", "(", ")", ",", "self", ".", "file_struct", ".", "features_file", ")", ")", "if", "self", ".", "get_id", "(", ")", "not", "in", "feats", ".", "keys", "(", ")", ":", "raise", "feat_params_err", "for", "param_name", "in", "self", ".", "get_param_names", "(", ")", ":", "value", "=", "getattr", "(", "self", ",", "param_name", ")", "if", "hasattr", "(", "value", ",", "'__call__'", ")", ":", "# Special case of functions", "if", "value", ".", "__name__", "!=", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"params\"", "]", "[", "param_name", "]", ":", "raise", "feat_params_err", "else", ":", "if", "str", "(", "value", ")", "!=", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"params\"", "]", "[", "param_name", "]", ":", "raise", "feat_params_err", "# Store actual features", "self", ".", "_est_beats_times", "=", "np", ".", "array", "(", "feats", "[", "\"est_beats\"", "]", ")", "self", ".", "_est_beatsync_times", "=", "np", ".", "array", "(", "feats", "[", "\"est_beatsync_times\"", "]", ")", "self", ".", "_est_beats_frames", "=", "librosa", ".", "core", ".", "time_to_frames", "(", "self", ".", "_est_beats_times", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "self", ".", "_framesync_features", "=", "np", ".", "array", "(", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"framesync\"", "]", ")", "self", ".", "_est_beatsync_features", "=", "np", ".", "array", "(", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"est_beatsync\"", "]", ")", "# Read annotated beats if available", "if", "\"ann_beats\"", "in", "feats", ".", "keys", "(", ")", ":", "self", ".", "_ann_beats_times", "=", "np", ".", "array", "(", "feats", "[", "\"ann_beats\"", "]", ")", "self", ".", "_ann_beatsync_times", "=", "np", ".", "array", "(", "feats", "[", "\"ann_beatsync_times\"", "]", ")", "self", ".", "_ann_beats_frames", "=", "librosa", ".", "core", ".", "time_to_frames", "(", "self", ".", "_ann_beats_times", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "self", ".", "_ann_beatsync_features", "=", "np", ".", "array", "(", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"ann_beatsync\"", "]", ")", "except", "KeyError", ":", "raise", "WrongFeaturesFormatError", "(", "\"The features file %s is not correctly formatted\"", "%", "self", ".", "file_struct", ".", "features_file", ")", "except", "AssertionError", ":", "raise", "FeaturesNotFound", "(", "\"The features for the given parameters were not found in \"", "\"features file %s\"", "%", "self", ".", "file_struct", ".", "features_file", ")", "except", "IOError", ":", "raise", "NoFeaturesFileError", "(", "\"Could not find features file %s\"", ",", "self", ".", "file_struct", ".", "features_file", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.write_features
Saves features to file.
msaf/base.py
def write_features(self): """Saves features to file.""" out_json = collections.OrderedDict() try: # Only save the necessary information self.read_features() except (WrongFeaturesFormatError, FeaturesNotFound, NoFeaturesFileError): # We need to create the file or overwite it # Metadata out_json = collections.OrderedDict({"metadata": { "versions": {"librosa": librosa.__version__, "msaf": msaf.__version__, "numpy": np.__version__}, "timestamp": datetime.datetime.today().strftime( "%Y/%m/%d %H:%M:%S")}}) # Global parameters out_json["globals"] = { "dur": self.dur, "sample_rate": self.sr, "hop_length": self.hop_length, "audio_file": self.file_struct.audio_file } # Beats out_json["est_beats"] = self._est_beats_times.tolist() out_json["est_beatsync_times"] = self._est_beatsync_times.tolist() if self._ann_beats_times is not None: out_json["ann_beats"] = self._ann_beats_times.tolist() out_json["ann_beatsync_times"] = self._ann_beatsync_times.tolist() except FeatureParamsError: # We have other features in the file, simply add these ones with open(self.file_struct.features_file) as f: out_json = json.load(f) finally: # Specific parameters of the current features out_json[self.get_id()] = {} out_json[self.get_id()]["params"] = {} for param_name in self.get_param_names(): value = getattr(self, param_name) # Check for special case of functions if hasattr(value, '__call__'): value = value.__name__ else: value = str(value) out_json[self.get_id()]["params"][param_name] = value # Actual features out_json[self.get_id()]["framesync"] = \ self._framesync_features.tolist() out_json[self.get_id()]["est_beatsync"] = \ self._est_beatsync_features.tolist() if self._ann_beatsync_features is not None: out_json[self.get_id()]["ann_beatsync"] = \ self._ann_beatsync_features.tolist() # Save it with open(self.file_struct.features_file, "w") as f: json.dump(out_json, f, indent=2)
def write_features(self): """Saves features to file.""" out_json = collections.OrderedDict() try: # Only save the necessary information self.read_features() except (WrongFeaturesFormatError, FeaturesNotFound, NoFeaturesFileError): # We need to create the file or overwite it # Metadata out_json = collections.OrderedDict({"metadata": { "versions": {"librosa": librosa.__version__, "msaf": msaf.__version__, "numpy": np.__version__}, "timestamp": datetime.datetime.today().strftime( "%Y/%m/%d %H:%M:%S")}}) # Global parameters out_json["globals"] = { "dur": self.dur, "sample_rate": self.sr, "hop_length": self.hop_length, "audio_file": self.file_struct.audio_file } # Beats out_json["est_beats"] = self._est_beats_times.tolist() out_json["est_beatsync_times"] = self._est_beatsync_times.tolist() if self._ann_beats_times is not None: out_json["ann_beats"] = self._ann_beats_times.tolist() out_json["ann_beatsync_times"] = self._ann_beatsync_times.tolist() except FeatureParamsError: # We have other features in the file, simply add these ones with open(self.file_struct.features_file) as f: out_json = json.load(f) finally: # Specific parameters of the current features out_json[self.get_id()] = {} out_json[self.get_id()]["params"] = {} for param_name in self.get_param_names(): value = getattr(self, param_name) # Check for special case of functions if hasattr(value, '__call__'): value = value.__name__ else: value = str(value) out_json[self.get_id()]["params"][param_name] = value # Actual features out_json[self.get_id()]["framesync"] = \ self._framesync_features.tolist() out_json[self.get_id()]["est_beatsync"] = \ self._est_beatsync_features.tolist() if self._ann_beatsync_features is not None: out_json[self.get_id()]["ann_beatsync"] = \ self._ann_beatsync_features.tolist() # Save it with open(self.file_struct.features_file, "w") as f: json.dump(out_json, f, indent=2)
[ "Saves", "features", "to", "file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L284-L343
[ "def", "write_features", "(", "self", ")", ":", "out_json", "=", "collections", ".", "OrderedDict", "(", ")", "try", ":", "# Only save the necessary information", "self", ".", "read_features", "(", ")", "except", "(", "WrongFeaturesFormatError", ",", "FeaturesNotFound", ",", "NoFeaturesFileError", ")", ":", "# We need to create the file or overwite it", "# Metadata", "out_json", "=", "collections", ".", "OrderedDict", "(", "{", "\"metadata\"", ":", "{", "\"versions\"", ":", "{", "\"librosa\"", ":", "librosa", ".", "__version__", ",", "\"msaf\"", ":", "msaf", ".", "__version__", ",", "\"numpy\"", ":", "np", ".", "__version__", "}", ",", "\"timestamp\"", ":", "datetime", ".", "datetime", ".", "today", "(", ")", ".", "strftime", "(", "\"%Y/%m/%d %H:%M:%S\"", ")", "}", "}", ")", "# Global parameters", "out_json", "[", "\"globals\"", "]", "=", "{", "\"dur\"", ":", "self", ".", "dur", ",", "\"sample_rate\"", ":", "self", ".", "sr", ",", "\"hop_length\"", ":", "self", ".", "hop_length", ",", "\"audio_file\"", ":", "self", ".", "file_struct", ".", "audio_file", "}", "# Beats", "out_json", "[", "\"est_beats\"", "]", "=", "self", ".", "_est_beats_times", ".", "tolist", "(", ")", "out_json", "[", "\"est_beatsync_times\"", "]", "=", "self", ".", "_est_beatsync_times", ".", "tolist", "(", ")", "if", "self", ".", "_ann_beats_times", "is", "not", "None", ":", "out_json", "[", "\"ann_beats\"", "]", "=", "self", ".", "_ann_beats_times", ".", "tolist", "(", ")", "out_json", "[", "\"ann_beatsync_times\"", "]", "=", "self", ".", "_ann_beatsync_times", ".", "tolist", "(", ")", "except", "FeatureParamsError", ":", "# We have other features in the file, simply add these ones", "with", "open", "(", "self", ".", "file_struct", ".", "features_file", ")", "as", "f", ":", "out_json", "=", "json", ".", "load", "(", "f", ")", "finally", ":", "# Specific parameters of the current features", "out_json", "[", "self", ".", "get_id", "(", ")", "]", "=", "{", "}", "out_json", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"params\"", "]", "=", "{", "}", "for", "param_name", "in", "self", ".", "get_param_names", "(", ")", ":", "value", "=", "getattr", "(", "self", ",", "param_name", ")", "# Check for special case of functions", "if", "hasattr", "(", "value", ",", "'__call__'", ")", ":", "value", "=", "value", ".", "__name__", "else", ":", "value", "=", "str", "(", "value", ")", "out_json", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"params\"", "]", "[", "param_name", "]", "=", "value", "# Actual features", "out_json", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"framesync\"", "]", "=", "self", ".", "_framesync_features", ".", "tolist", "(", ")", "out_json", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"est_beatsync\"", "]", "=", "self", ".", "_est_beatsync_features", ".", "tolist", "(", ")", "if", "self", ".", "_ann_beatsync_features", "is", "not", "None", ":", "out_json", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"ann_beatsync\"", "]", "=", "self", ".", "_ann_beatsync_features", ".", "tolist", "(", ")", "# Save it", "with", "open", "(", "self", ".", "file_struct", ".", "features_file", ",", "\"w\"", ")", "as", "f", ":", "json", ".", "dump", "(", "out_json", ",", "f", ",", "indent", "=", "2", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.get_param_names
Returns the parameter names for these features, avoiding the global parameters.
msaf/base.py
def get_param_names(self): """Returns the parameter names for these features, avoiding the global parameters.""" return [name for name in vars(self) if not name.startswith('_') and name not in self._global_param_names]
def get_param_names(self): """Returns the parameter names for these features, avoiding the global parameters.""" return [name for name in vars(self) if not name.startswith('_') and name not in self._global_param_names]
[ "Returns", "the", "parameter", "names", "for", "these", "features", "avoiding", "the", "global", "parameters", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L345-L349
[ "def", "get_param_names", "(", "self", ")", ":", "return", "[", "name", "for", "name", "in", "vars", "(", "self", ")", "if", "not", "name", ".", "startswith", "(", "'_'", ")", "and", "name", "not", "in", "self", ".", "_global_param_names", "]" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features._compute_framesync_times
Computes the framesync times based on the framesync features.
msaf/base.py
def _compute_framesync_times(self): """Computes the framesync times based on the framesync features.""" self._framesync_times = librosa.core.frames_to_time( np.arange(self._framesync_features.shape[0]), self.sr, self.hop_length)
def _compute_framesync_times(self): """Computes the framesync times based on the framesync features.""" self._framesync_times = librosa.core.frames_to_time( np.arange(self._framesync_features.shape[0]), self.sr, self.hop_length)
[ "Computes", "the", "framesync", "times", "based", "on", "the", "framesync", "features", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L351-L355
[ "def", "_compute_framesync_times", "(", "self", ")", ":", "self", ".", "_framesync_times", "=", "librosa", ".", "core", ".", "frames_to_time", "(", "np", ".", "arange", "(", "self", ".", "_framesync_features", ".", "shape", "[", "0", "]", ")", ",", "self", ".", "sr", ",", "self", ".", "hop_length", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features._compute_all_features
Computes all the features (beatsync, framesync) from the audio.
msaf/base.py
def _compute_all_features(self): """Computes all the features (beatsync, framesync) from the audio.""" # Read actual audio waveform self._audio, _ = librosa.load(self.file_struct.audio_file, sr=self.sr) # Get duration of audio file self.dur = len(self._audio) / float(self.sr) # Compute actual features self._framesync_features = self.compute_features() # Compute framesync times self._compute_framesync_times() # Compute/Read beats self._est_beats_times, self._est_beats_frames = self.estimate_beats() self._ann_beats_times, self._ann_beats_frames = self.read_ann_beats() # Beat-Synchronize pad = True # Always append to the end of the features self._est_beatsync_features, self._est_beatsync_times = \ self.compute_beat_sync_features(self._est_beats_frames, self._est_beats_times, pad) self._ann_beatsync_features, self._ann_beatsync_times = \ self.compute_beat_sync_features(self._ann_beats_frames, self._ann_beats_times, pad)
def _compute_all_features(self): """Computes all the features (beatsync, framesync) from the audio.""" # Read actual audio waveform self._audio, _ = librosa.load(self.file_struct.audio_file, sr=self.sr) # Get duration of audio file self.dur = len(self._audio) / float(self.sr) # Compute actual features self._framesync_features = self.compute_features() # Compute framesync times self._compute_framesync_times() # Compute/Read beats self._est_beats_times, self._est_beats_frames = self.estimate_beats() self._ann_beats_times, self._ann_beats_frames = self.read_ann_beats() # Beat-Synchronize pad = True # Always append to the end of the features self._est_beatsync_features, self._est_beatsync_times = \ self.compute_beat_sync_features(self._est_beats_frames, self._est_beats_times, pad) self._ann_beatsync_features, self._ann_beatsync_times = \ self.compute_beat_sync_features(self._ann_beats_frames, self._ann_beats_times, pad)
[ "Computes", "all", "the", "features", "(", "beatsync", "framesync", ")", "from", "the", "audio", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L357-L383
[ "def", "_compute_all_features", "(", "self", ")", ":", "# Read actual audio waveform", "self", ".", "_audio", ",", "_", "=", "librosa", ".", "load", "(", "self", ".", "file_struct", ".", "audio_file", ",", "sr", "=", "self", ".", "sr", ")", "# Get duration of audio file", "self", ".", "dur", "=", "len", "(", "self", ".", "_audio", ")", "/", "float", "(", "self", ".", "sr", ")", "# Compute actual features", "self", ".", "_framesync_features", "=", "self", ".", "compute_features", "(", ")", "# Compute framesync times", "self", ".", "_compute_framesync_times", "(", ")", "# Compute/Read beats", "self", ".", "_est_beats_times", ",", "self", ".", "_est_beats_frames", "=", "self", ".", "estimate_beats", "(", ")", "self", ".", "_ann_beats_times", ",", "self", ".", "_ann_beats_frames", "=", "self", ".", "read_ann_beats", "(", ")", "# Beat-Synchronize", "pad", "=", "True", "# Always append to the end of the features", "self", ".", "_est_beatsync_features", ",", "self", ".", "_est_beatsync_times", "=", "self", ".", "compute_beat_sync_features", "(", "self", ".", "_est_beats_frames", ",", "self", ".", "_est_beats_times", ",", "pad", ")", "self", ".", "_ann_beatsync_features", ",", "self", ".", "_ann_beatsync_times", "=", "self", ".", "compute_beat_sync_features", "(", "self", ".", "_ann_beats_frames", ",", "self", ".", "_ann_beats_times", ",", "pad", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.frame_times
This getter returns the frame times, for the corresponding type of features.
msaf/base.py
def frame_times(self): """This getter returns the frame times, for the corresponding type of features.""" frame_times = None # Make sure we have already computed the features self.features if self.feat_type is FeatureTypes.framesync: self._compute_framesync_times() frame_times = self._framesync_times elif self.feat_type is FeatureTypes.est_beatsync: frame_times = self._est_beatsync_times elif self.feat_type is FeatureTypes.ann_beatsync: frame_times = self._ann_beatsync_times return frame_times
def frame_times(self): """This getter returns the frame times, for the corresponding type of features.""" frame_times = None # Make sure we have already computed the features self.features if self.feat_type is FeatureTypes.framesync: self._compute_framesync_times() frame_times = self._framesync_times elif self.feat_type is FeatureTypes.est_beatsync: frame_times = self._est_beatsync_times elif self.feat_type is FeatureTypes.ann_beatsync: frame_times = self._ann_beatsync_times return frame_times
[ "This", "getter", "returns", "the", "frame", "times", "for", "the", "corresponding", "type", "of", "features", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L386-L400
[ "def", "frame_times", "(", "self", ")", ":", "frame_times", "=", "None", "# Make sure we have already computed the features", "self", ".", "features", "if", "self", ".", "feat_type", "is", "FeatureTypes", ".", "framesync", ":", "self", ".", "_compute_framesync_times", "(", ")", "frame_times", "=", "self", ".", "_framesync_times", "elif", "self", ".", "feat_type", "is", "FeatureTypes", ".", "est_beatsync", ":", "frame_times", "=", "self", ".", "_est_beatsync_times", "elif", "self", ".", "feat_type", "is", "FeatureTypes", ".", "ann_beatsync", ":", "frame_times", "=", "self", ".", "_ann_beatsync_times", "return", "frame_times" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.features
This getter will compute the actual features if they haven't been computed yet. Returns ------- features: np.array The actual features. Each row corresponds to a feature vector.
msaf/base.py
def features(self): """This getter will compute the actual features if they haven't been computed yet. Returns ------- features: np.array The actual features. Each row corresponds to a feature vector. """ # Compute features if needed if self._features is None: try: self.read_features() except (NoFeaturesFileError, FeaturesNotFound, WrongFeaturesFormatError, FeatureParamsError) as e: try: self._compute_all_features() self.write_features() except IOError: if isinstance(e, FeaturesNotFound) or \ isinstance(e, FeatureParamsError): msg = "Computation of the features is needed for " \ "current parameters but no audio file was found." \ "Please, change your parameters or add the audio" \ " file in %s" else: msg = "Couldn't find audio file in %s" raise NoAudioFileError(msg % self.file_struct.audio_file) # Choose features based on type if self.feat_type is FeatureTypes.framesync: self._features = self._framesync_features elif self.feat_type is FeatureTypes.est_beatsync: self._features = self._est_beatsync_features elif self.feat_type is FeatureTypes.ann_beatsync: if self._ann_beatsync_features is None: raise FeatureTypeNotFound( "Feature type %s is not valid because no annotated beats " "were found" % self.feat_type) self._features = self._ann_beatsync_features else: raise FeatureTypeNotFound("Feature type %s is not valid." % self.feat_type) return self._features
def features(self): """This getter will compute the actual features if they haven't been computed yet. Returns ------- features: np.array The actual features. Each row corresponds to a feature vector. """ # Compute features if needed if self._features is None: try: self.read_features() except (NoFeaturesFileError, FeaturesNotFound, WrongFeaturesFormatError, FeatureParamsError) as e: try: self._compute_all_features() self.write_features() except IOError: if isinstance(e, FeaturesNotFound) or \ isinstance(e, FeatureParamsError): msg = "Computation of the features is needed for " \ "current parameters but no audio file was found." \ "Please, change your parameters or add the audio" \ " file in %s" else: msg = "Couldn't find audio file in %s" raise NoAudioFileError(msg % self.file_struct.audio_file) # Choose features based on type if self.feat_type is FeatureTypes.framesync: self._features = self._framesync_features elif self.feat_type is FeatureTypes.est_beatsync: self._features = self._est_beatsync_features elif self.feat_type is FeatureTypes.ann_beatsync: if self._ann_beatsync_features is None: raise FeatureTypeNotFound( "Feature type %s is not valid because no annotated beats " "were found" % self.feat_type) self._features = self._ann_beatsync_features else: raise FeatureTypeNotFound("Feature type %s is not valid." % self.feat_type) return self._features
[ "This", "getter", "will", "compute", "the", "actual", "features", "if", "they", "haven", "t", "been", "computed", "yet", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L403-L447
[ "def", "features", "(", "self", ")", ":", "# Compute features if needed", "if", "self", ".", "_features", "is", "None", ":", "try", ":", "self", ".", "read_features", "(", ")", "except", "(", "NoFeaturesFileError", ",", "FeaturesNotFound", ",", "WrongFeaturesFormatError", ",", "FeatureParamsError", ")", "as", "e", ":", "try", ":", "self", ".", "_compute_all_features", "(", ")", "self", ".", "write_features", "(", ")", "except", "IOError", ":", "if", "isinstance", "(", "e", ",", "FeaturesNotFound", ")", "or", "isinstance", "(", "e", ",", "FeatureParamsError", ")", ":", "msg", "=", "\"Computation of the features is needed for \"", "\"current parameters but no audio file was found.\"", "\"Please, change your parameters or add the audio\"", "\" file in %s\"", "else", ":", "msg", "=", "\"Couldn't find audio file in %s\"", "raise", "NoAudioFileError", "(", "msg", "%", "self", ".", "file_struct", ".", "audio_file", ")", "# Choose features based on type", "if", "self", ".", "feat_type", "is", "FeatureTypes", ".", "framesync", ":", "self", ".", "_features", "=", "self", ".", "_framesync_features", "elif", "self", ".", "feat_type", "is", "FeatureTypes", ".", "est_beatsync", ":", "self", ".", "_features", "=", "self", ".", "_est_beatsync_features", "elif", "self", ".", "feat_type", "is", "FeatureTypes", ".", "ann_beatsync", ":", "if", "self", ".", "_ann_beatsync_features", "is", "None", ":", "raise", "FeatureTypeNotFound", "(", "\"Feature type %s is not valid because no annotated beats \"", "\"were found\"", "%", "self", ".", "feat_type", ")", "self", ".", "_features", "=", "self", ".", "_ann_beatsync_features", "else", ":", "raise", "FeatureTypeNotFound", "(", "\"Feature type %s is not valid.\"", "%", "self", ".", "feat_type", ")", "return", "self", ".", "_features" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Features.select_features
Selects the features from the given parameters. Parameters ---------- features_id: str The identifier of the features (it must be a key inside the `features_registry`) file_struct: msaf.io.FileStruct The file struct containing the files to extract the features from annot_beats: boolean Whether to use annotated (`True`) or estimated (`False`) beats framesync: boolean Whether to use framesync (`True`) or beatsync (`False`) features Returns ------- features: obj The actual features object that inherits from `msaf.Features`
msaf/base.py
def select_features(cls, features_id, file_struct, annot_beats, framesync): """Selects the features from the given parameters. Parameters ---------- features_id: str The identifier of the features (it must be a key inside the `features_registry`) file_struct: msaf.io.FileStruct The file struct containing the files to extract the features from annot_beats: boolean Whether to use annotated (`True`) or estimated (`False`) beats framesync: boolean Whether to use framesync (`True`) or beatsync (`False`) features Returns ------- features: obj The actual features object that inherits from `msaf.Features` """ if not annot_beats and framesync: feat_type = FeatureTypes.framesync elif annot_beats and not framesync: feat_type = FeatureTypes.ann_beatsync elif not annot_beats and not framesync: feat_type = FeatureTypes.est_beatsync else: raise FeatureTypeNotFound("Type of features not valid.") # Select features with default parameters if features_id not in features_registry.keys(): raise FeaturesNotFound( "The features '%s' are invalid (valid features are %s)" % (features_id, features_registry.keys())) return features_registry[features_id](file_struct, feat_type)
def select_features(cls, features_id, file_struct, annot_beats, framesync): """Selects the features from the given parameters. Parameters ---------- features_id: str The identifier of the features (it must be a key inside the `features_registry`) file_struct: msaf.io.FileStruct The file struct containing the files to extract the features from annot_beats: boolean Whether to use annotated (`True`) or estimated (`False`) beats framesync: boolean Whether to use framesync (`True`) or beatsync (`False`) features Returns ------- features: obj The actual features object that inherits from `msaf.Features` """ if not annot_beats and framesync: feat_type = FeatureTypes.framesync elif annot_beats and not framesync: feat_type = FeatureTypes.ann_beatsync elif not annot_beats and not framesync: feat_type = FeatureTypes.est_beatsync else: raise FeatureTypeNotFound("Type of features not valid.") # Select features with default parameters if features_id not in features_registry.keys(): raise FeaturesNotFound( "The features '%s' are invalid (valid features are %s)" % (features_id, features_registry.keys())) return features_registry[features_id](file_struct, feat_type)
[ "Selects", "the", "features", "from", "the", "given", "parameters", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/base.py#L450-L485
[ "def", "select_features", "(", "cls", ",", "features_id", ",", "file_struct", ",", "annot_beats", ",", "framesync", ")", ":", "if", "not", "annot_beats", "and", "framesync", ":", "feat_type", "=", "FeatureTypes", ".", "framesync", "elif", "annot_beats", "and", "not", "framesync", ":", "feat_type", "=", "FeatureTypes", ".", "ann_beatsync", "elif", "not", "annot_beats", "and", "not", "framesync", ":", "feat_type", "=", "FeatureTypes", ".", "est_beatsync", "else", ":", "raise", "FeatureTypeNotFound", "(", "\"Type of features not valid.\"", ")", "# Select features with default parameters", "if", "features_id", "not", "in", "features_registry", ".", "keys", "(", ")", ":", "raise", "FeaturesNotFound", "(", "\"The features '%s' are invalid (valid features are %s)\"", "%", "(", "features_id", ",", "features_registry", ".", "keys", "(", ")", ")", ")", "return", "features_registry", "[", "features_id", "]", "(", "file_struct", ",", "feat_type", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
SegmenterInterface._preprocess
This method obtains the actual features.
msaf/algorithms/interface.py
def _preprocess(self, valid_features=["pcp", "tonnetz", "mfcc", "cqt", "tempogram"]): """This method obtains the actual features.""" # Use specific feature if self.feature_str not in valid_features: raise RuntimeError("Feature %s in not valid for algorithm: %s " "(valid features are %s)." % (self.feature_str, __name__, valid_features)) else: try: F = self.features.features except KeyError: raise RuntimeError("Feature %s in not supported by MSAF" % (self.feature_str)) return F
def _preprocess(self, valid_features=["pcp", "tonnetz", "mfcc", "cqt", "tempogram"]): """This method obtains the actual features.""" # Use specific feature if self.feature_str not in valid_features: raise RuntimeError("Feature %s in not valid for algorithm: %s " "(valid features are %s)." % (self.feature_str, __name__, valid_features)) else: try: F = self.features.features except KeyError: raise RuntimeError("Feature %s in not supported by MSAF" % (self.feature_str)) return F
[ "This", "method", "obtains", "the", "actual", "features", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/interface.py#L85-L100
[ "def", "_preprocess", "(", "self", ",", "valid_features", "=", "[", "\"pcp\"", ",", "\"tonnetz\"", ",", "\"mfcc\"", ",", "\"cqt\"", ",", "\"tempogram\"", "]", ")", ":", "# Use specific feature", "if", "self", ".", "feature_str", "not", "in", "valid_features", ":", "raise", "RuntimeError", "(", "\"Feature %s in not valid for algorithm: %s \"", "\"(valid features are %s).\"", "%", "(", "self", ".", "feature_str", ",", "__name__", ",", "valid_features", ")", ")", "else", ":", "try", ":", "F", "=", "self", ".", "features", ".", "features", "except", "KeyError", ":", "raise", "RuntimeError", "(", "\"Feature %s in not supported by MSAF\"", "%", "(", "self", ".", "feature_str", ")", ")", "return", "F" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
SegmenterInterface._postprocess
Post processes the estimations from the algorithm, removing empty segments and making sure the lenghts of the boundaries and labels match.
msaf/algorithms/interface.py
def _postprocess(self, est_idxs, est_labels): """Post processes the estimations from the algorithm, removing empty segments and making sure the lenghts of the boundaries and labels match.""" # Make sure we are using the previously input bounds, if any if self.in_bound_idxs is not None: F = self._preprocess() est_labels = U.synchronize_labels(self.in_bound_idxs, est_idxs, est_labels, F.shape[0]) est_idxs = self.in_bound_idxs # Remove empty segments if needed est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels) assert len(est_idxs) - 1 == len(est_labels), "Number of boundaries " \ "(%d) and number of labels(%d) don't match" % (len(est_idxs), len(est_labels)) # Make sure the indeces are integers est_idxs = np.asarray(est_idxs, dtype=int) return est_idxs, est_labels
def _postprocess(self, est_idxs, est_labels): """Post processes the estimations from the algorithm, removing empty segments and making sure the lenghts of the boundaries and labels match.""" # Make sure we are using the previously input bounds, if any if self.in_bound_idxs is not None: F = self._preprocess() est_labels = U.synchronize_labels(self.in_bound_idxs, est_idxs, est_labels, F.shape[0]) est_idxs = self.in_bound_idxs # Remove empty segments if needed est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels) assert len(est_idxs) - 1 == len(est_labels), "Number of boundaries " \ "(%d) and number of labels(%d) don't match" % (len(est_idxs), len(est_labels)) # Make sure the indeces are integers est_idxs = np.asarray(est_idxs, dtype=int) return est_idxs, est_labels
[ "Post", "processes", "the", "estimations", "from", "the", "algorithm", "removing", "empty", "segments", "and", "making", "sure", "the", "lenghts", "of", "the", "boundaries", "and", "labels", "match", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/interface.py#L102-L123
[ "def", "_postprocess", "(", "self", ",", "est_idxs", ",", "est_labels", ")", ":", "# Make sure we are using the previously input bounds, if any", "if", "self", ".", "in_bound_idxs", "is", "not", "None", ":", "F", "=", "self", ".", "_preprocess", "(", ")", "est_labels", "=", "U", ".", "synchronize_labels", "(", "self", ".", "in_bound_idxs", ",", "est_idxs", ",", "est_labels", ",", "F", ".", "shape", "[", "0", "]", ")", "est_idxs", "=", "self", ".", "in_bound_idxs", "# Remove empty segments if needed", "est_idxs", ",", "est_labels", "=", "U", ".", "remove_empty_segments", "(", "est_idxs", ",", "est_labels", ")", "assert", "len", "(", "est_idxs", ")", "-", "1", "==", "len", "(", "est_labels", ")", ",", "\"Number of boundaries \"", "\"(%d) and number of labels(%d) don't match\"", "%", "(", "len", "(", "est_idxs", ")", ",", "len", "(", "est_labels", ")", ")", "# Make sure the indeces are integers", "est_idxs", "=", "np", ".", "asarray", "(", "est_idxs", ",", "dtype", "=", "int", ")", "return", "est_idxs", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
process
Sweeps parameters across the specified algorithm.
examples/run_sweep.py
def process(in_path, annot_beats=False, feature="mfcc", framesync=False, boundaries_id="gt", labels_id=None, n_jobs=4, config=None): """Sweeps parameters across the specified algorithm.""" results_file = "results_sweep_boundsE%s_labelsE%s.csv" % (boundaries_id, labels_id) if labels_id == "cnmf3" or boundaries_id == "cnmf3": config = io.get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id) hh = range(15, 33) RR = range(15, 40) ranks = range(3, 6) RR_labels = range(11, 12) ranks_labels = range(6, 7) all_results = pd.DataFrame() for rank in ranks: for h in hh: for R in RR: for rank_labels in ranks_labels: for R_labels in RR_labels: config["h"] = h config["R"] = R config["rank"] = rank config["rank_labels"] = rank_labels config["R_labels"] = R_labels config["features"] = None # Run process msaf.run.process( in_path, n_jobs=n_jobs, boundaries_id=boundaries_id, labels_id=labels_id, config=config) # Compute evaluations results = msaf.eval.process( in_path, boundaries_id, labels_id, save=True, n_jobs=n_jobs, config=config) # Save avg results new_columns = {"config_h": h, "config_R": R, "config_rank": rank, "config_R_labels": R_labels, "config_rank_labels": rank_labels} results = results.append([new_columns], ignore_index=True) all_results = all_results.append(results.mean(), ignore_index=True) all_results.to_csv(results_file) elif labels_id is None and boundaries_id == "sf": config = io.get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id) MM = range(20, 32) mm = range(3, 4) kk = np.arange(0.03, 0.1, 0.01) Mpp = range(16, 32) ott = np.arange(0.02, 0.1, 0.01) all_results = pd.DataFrame() for k in kk: for ot in ott: for m in mm: for M in MM: for Mp in Mpp: config["M_gaussian"] = M config["m_embedded"] = m config["k_nearest"] = k config["Mp_adaptive"] = Mp config["offset_thres"] = ot config["features"] = None # Run process msaf.run.process( in_path, n_jobs=n_jobs, boundaries_id=boundaries_id, labels_id=labels_id, config=config) # Compute evaluations results = msaf.eval.process( in_path, boundaries_id, labels_id, save=True, n_jobs=n_jobs, config=config) # Save avg results new_columns = {"config_M": M, "config_m": m, "config_k": k, "config_Mp": Mp, "config_ot": ot} results = results.append([new_columns], ignore_index=True) all_results = all_results.append(results.mean(), ignore_index=True) all_results.to_csv(results_file) else: logging.error("Can't sweep parameters for %s algorithm. " "Implement me! :D")
def process(in_path, annot_beats=False, feature="mfcc", framesync=False, boundaries_id="gt", labels_id=None, n_jobs=4, config=None): """Sweeps parameters across the specified algorithm.""" results_file = "results_sweep_boundsE%s_labelsE%s.csv" % (boundaries_id, labels_id) if labels_id == "cnmf3" or boundaries_id == "cnmf3": config = io.get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id) hh = range(15, 33) RR = range(15, 40) ranks = range(3, 6) RR_labels = range(11, 12) ranks_labels = range(6, 7) all_results = pd.DataFrame() for rank in ranks: for h in hh: for R in RR: for rank_labels in ranks_labels: for R_labels in RR_labels: config["h"] = h config["R"] = R config["rank"] = rank config["rank_labels"] = rank_labels config["R_labels"] = R_labels config["features"] = None # Run process msaf.run.process( in_path, n_jobs=n_jobs, boundaries_id=boundaries_id, labels_id=labels_id, config=config) # Compute evaluations results = msaf.eval.process( in_path, boundaries_id, labels_id, save=True, n_jobs=n_jobs, config=config) # Save avg results new_columns = {"config_h": h, "config_R": R, "config_rank": rank, "config_R_labels": R_labels, "config_rank_labels": rank_labels} results = results.append([new_columns], ignore_index=True) all_results = all_results.append(results.mean(), ignore_index=True) all_results.to_csv(results_file) elif labels_id is None and boundaries_id == "sf": config = io.get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id) MM = range(20, 32) mm = range(3, 4) kk = np.arange(0.03, 0.1, 0.01) Mpp = range(16, 32) ott = np.arange(0.02, 0.1, 0.01) all_results = pd.DataFrame() for k in kk: for ot in ott: for m in mm: for M in MM: for Mp in Mpp: config["M_gaussian"] = M config["m_embedded"] = m config["k_nearest"] = k config["Mp_adaptive"] = Mp config["offset_thres"] = ot config["features"] = None # Run process msaf.run.process( in_path, n_jobs=n_jobs, boundaries_id=boundaries_id, labels_id=labels_id, config=config) # Compute evaluations results = msaf.eval.process( in_path, boundaries_id, labels_id, save=True, n_jobs=n_jobs, config=config) # Save avg results new_columns = {"config_M": M, "config_m": m, "config_k": k, "config_Mp": Mp, "config_ot": ot} results = results.append([new_columns], ignore_index=True) all_results = all_results.append(results.mean(), ignore_index=True) all_results.to_csv(results_file) else: logging.error("Can't sweep parameters for %s algorithm. " "Implement me! :D")
[ "Sweeps", "parameters", "across", "the", "specified", "algorithm", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/examples/run_sweep.py#L13-L109
[ "def", "process", "(", "in_path", ",", "annot_beats", "=", "False", ",", "feature", "=", "\"mfcc\"", ",", "framesync", "=", "False", ",", "boundaries_id", "=", "\"gt\"", ",", "labels_id", "=", "None", ",", "n_jobs", "=", "4", ",", "config", "=", "None", ")", ":", "results_file", "=", "\"results_sweep_boundsE%s_labelsE%s.csv\"", "%", "(", "boundaries_id", ",", "labels_id", ")", "if", "labels_id", "==", "\"cnmf3\"", "or", "boundaries_id", "==", "\"cnmf3\"", ":", "config", "=", "io", ".", "get_configuration", "(", "feature", ",", "annot_beats", ",", "framesync", ",", "boundaries_id", ",", "labels_id", ")", "hh", "=", "range", "(", "15", ",", "33", ")", "RR", "=", "range", "(", "15", ",", "40", ")", "ranks", "=", "range", "(", "3", ",", "6", ")", "RR_labels", "=", "range", "(", "11", ",", "12", ")", "ranks_labels", "=", "range", "(", "6", ",", "7", ")", "all_results", "=", "pd", ".", "DataFrame", "(", ")", "for", "rank", "in", "ranks", ":", "for", "h", "in", "hh", ":", "for", "R", "in", "RR", ":", "for", "rank_labels", "in", "ranks_labels", ":", "for", "R_labels", "in", "RR_labels", ":", "config", "[", "\"h\"", "]", "=", "h", "config", "[", "\"R\"", "]", "=", "R", "config", "[", "\"rank\"", "]", "=", "rank", "config", "[", "\"rank_labels\"", "]", "=", "rank_labels", "config", "[", "\"R_labels\"", "]", "=", "R_labels", "config", "[", "\"features\"", "]", "=", "None", "# Run process", "msaf", ".", "run", ".", "process", "(", "in_path", ",", "n_jobs", "=", "n_jobs", ",", "boundaries_id", "=", "boundaries_id", ",", "labels_id", "=", "labels_id", ",", "config", "=", "config", ")", "# Compute evaluations", "results", "=", "msaf", ".", "eval", ".", "process", "(", "in_path", ",", "boundaries_id", ",", "labels_id", ",", "save", "=", "True", ",", "n_jobs", "=", "n_jobs", ",", "config", "=", "config", ")", "# Save avg results", "new_columns", "=", "{", "\"config_h\"", ":", "h", ",", "\"config_R\"", ":", "R", ",", "\"config_rank\"", ":", "rank", ",", "\"config_R_labels\"", ":", "R_labels", ",", "\"config_rank_labels\"", ":", "rank_labels", "}", "results", "=", "results", ".", "append", "(", "[", "new_columns", "]", ",", "ignore_index", "=", "True", ")", "all_results", "=", "all_results", ".", "append", "(", "results", ".", "mean", "(", ")", ",", "ignore_index", "=", "True", ")", "all_results", ".", "to_csv", "(", "results_file", ")", "elif", "labels_id", "is", "None", "and", "boundaries_id", "==", "\"sf\"", ":", "config", "=", "io", ".", "get_configuration", "(", "feature", ",", "annot_beats", ",", "framesync", ",", "boundaries_id", ",", "labels_id", ")", "MM", "=", "range", "(", "20", ",", "32", ")", "mm", "=", "range", "(", "3", ",", "4", ")", "kk", "=", "np", ".", "arange", "(", "0.03", ",", "0.1", ",", "0.01", ")", "Mpp", "=", "range", "(", "16", ",", "32", ")", "ott", "=", "np", ".", "arange", "(", "0.02", ",", "0.1", ",", "0.01", ")", "all_results", "=", "pd", ".", "DataFrame", "(", ")", "for", "k", "in", "kk", ":", "for", "ot", "in", "ott", ":", "for", "m", "in", "mm", ":", "for", "M", "in", "MM", ":", "for", "Mp", "in", "Mpp", ":", "config", "[", "\"M_gaussian\"", "]", "=", "M", "config", "[", "\"m_embedded\"", "]", "=", "m", "config", "[", "\"k_nearest\"", "]", "=", "k", "config", "[", "\"Mp_adaptive\"", "]", "=", "Mp", "config", "[", "\"offset_thres\"", "]", "=", "ot", "config", "[", "\"features\"", "]", "=", "None", "# Run process", "msaf", ".", "run", ".", "process", "(", "in_path", ",", "n_jobs", "=", "n_jobs", ",", "boundaries_id", "=", "boundaries_id", ",", "labels_id", "=", "labels_id", ",", "config", "=", "config", ")", "# Compute evaluations", "results", "=", "msaf", ".", "eval", ".", "process", "(", "in_path", ",", "boundaries_id", ",", "labels_id", ",", "save", "=", "True", ",", "n_jobs", "=", "n_jobs", ",", "config", "=", "config", ")", "# Save avg results", "new_columns", "=", "{", "\"config_M\"", ":", "M", ",", "\"config_m\"", ":", "m", ",", "\"config_k\"", ":", "k", ",", "\"config_Mp\"", ":", "Mp", ",", "\"config_ot\"", ":", "ot", "}", "results", "=", "results", ".", "append", "(", "[", "new_columns", "]", ",", "ignore_index", "=", "True", ")", "all_results", "=", "all_results", ".", "append", "(", "results", ".", "mean", "(", ")", ",", "ignore_index", "=", "True", ")", "all_results", ".", "to_csv", "(", "results_file", ")", "else", ":", "logging", ".", "error", "(", "\"Can't sweep parameters for %s algorithm. \"", "\"Implement me! :D\"", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
main
Main function to sweep parameters of a certain algorithm.
examples/run_sweep.py
def main(): """Main function to sweep parameters of a certain algorithm.""" parser = argparse.ArgumentParser( description="Runs the speficied algorithm(s) on the MSAF " "formatted dataset.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("in_path", action="store", help="Input dataset") parser.add_argument("-f", action="store", dest="feature", default="pcp", type=str, help="Type of features", choices=["pcp", "tonnetz", "mfcc", "cqt", "tempogram"]) parser.add_argument("-b", action="store_true", dest="annot_beats", help="Use annotated beats", default=False) parser.add_argument("-fs", action="store_true", dest="framesync", help="Use frame-synchronous features", default=False) parser.add_argument("-bid", action="store", help="Boundary algorithm identifier", dest="boundaries_id", default="gt", choices=["gt"] + io.get_all_boundary_algorithms()) parser.add_argument("-lid", action="store", help="Label algorithm identifier", dest="labels_id", default=None, choices=io.get_all_label_algorithms()) parser.add_argument("-j", action="store", dest="n_jobs", default=4, type=int, help="The number of threads to use") args = parser.parse_args() start_time = time.time() # Run the algorithm(s) process(args.in_path, annot_beats=args.annot_beats, feature=args.feature, framesync=args.framesync, boundaries_id=args.boundaries_id, labels_id=args.labels_id, n_jobs=args.n_jobs) # Done! logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
def main(): """Main function to sweep parameters of a certain algorithm.""" parser = argparse.ArgumentParser( description="Runs the speficied algorithm(s) on the MSAF " "formatted dataset.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("in_path", action="store", help="Input dataset") parser.add_argument("-f", action="store", dest="feature", default="pcp", type=str, help="Type of features", choices=["pcp", "tonnetz", "mfcc", "cqt", "tempogram"]) parser.add_argument("-b", action="store_true", dest="annot_beats", help="Use annotated beats", default=False) parser.add_argument("-fs", action="store_true", dest="framesync", help="Use frame-synchronous features", default=False) parser.add_argument("-bid", action="store", help="Boundary algorithm identifier", dest="boundaries_id", default="gt", choices=["gt"] + io.get_all_boundary_algorithms()) parser.add_argument("-lid", action="store", help="Label algorithm identifier", dest="labels_id", default=None, choices=io.get_all_label_algorithms()) parser.add_argument("-j", action="store", dest="n_jobs", default=4, type=int, help="The number of threads to use") args = parser.parse_args() start_time = time.time() # Run the algorithm(s) process(args.in_path, annot_beats=args.annot_beats, feature=args.feature, framesync=args.framesync, boundaries_id=args.boundaries_id, labels_id=args.labels_id, n_jobs=args.n_jobs) # Done! logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
[ "Main", "function", "to", "sweep", "parameters", "of", "a", "certain", "algorithm", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/examples/run_sweep.py#L112-L166
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Runs the speficied algorithm(s) on the MSAF \"", "\"formatted dataset.\"", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter", ")", "parser", ".", "add_argument", "(", "\"in_path\"", ",", "action", "=", "\"store\"", ",", "help", "=", "\"Input dataset\"", ")", "parser", ".", "add_argument", "(", "\"-f\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"feature\"", ",", "default", "=", "\"pcp\"", ",", "type", "=", "str", ",", "help", "=", "\"Type of features\"", ",", "choices", "=", "[", "\"pcp\"", ",", "\"tonnetz\"", ",", "\"mfcc\"", ",", "\"cqt\"", ",", "\"tempogram\"", "]", ")", "parser", ".", "add_argument", "(", "\"-b\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"annot_beats\"", ",", "help", "=", "\"Use annotated beats\"", ",", "default", "=", "False", ")", "parser", ".", "add_argument", "(", "\"-fs\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"framesync\"", ",", "help", "=", "\"Use frame-synchronous features\"", ",", "default", "=", "False", ")", "parser", ".", "add_argument", "(", "\"-bid\"", ",", "action", "=", "\"store\"", ",", "help", "=", "\"Boundary algorithm identifier\"", ",", "dest", "=", "\"boundaries_id\"", ",", "default", "=", "\"gt\"", ",", "choices", "=", "[", "\"gt\"", "]", "+", "io", ".", "get_all_boundary_algorithms", "(", ")", ")", "parser", ".", "add_argument", "(", "\"-lid\"", ",", "action", "=", "\"store\"", ",", "help", "=", "\"Label algorithm identifier\"", ",", "dest", "=", "\"labels_id\"", ",", "default", "=", "None", ",", "choices", "=", "io", ".", "get_all_label_algorithms", "(", ")", ")", "parser", ".", "add_argument", "(", "\"-j\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"n_jobs\"", ",", "default", "=", "4", ",", "type", "=", "int", ",", "help", "=", "\"The number of threads to use\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "start_time", "=", "time", ".", "time", "(", ")", "# Run the algorithm(s)", "process", "(", "args", ".", "in_path", ",", "annot_beats", "=", "args", ".", "annot_beats", ",", "feature", "=", "args", ".", "feature", ",", "framesync", "=", "args", ".", "framesync", ",", "boundaries_id", "=", "args", ".", "boundaries_id", ",", "labels_id", "=", "args", ".", "labels_id", ",", "n_jobs", "=", "args", ".", "n_jobs", ")", "# Done!", "logging", ".", "info", "(", "\"Done! Took %.2f seconds.\"", "%", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
main
Main function to parse the arguments and call the main process.
examples/run_mirex.py
def main(): """Main function to parse the arguments and call the main process.""" parser = argparse.ArgumentParser( description="Runs the speficied algorithm(s) on the input file and " "the results using the MIREX format.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("-bid", action="store", help="Boundary algorithm identifier", dest="boundaries_id", default=msaf.config.default_bound_id, choices=["gt"] + msaf.io.get_all_boundary_algorithms()) parser.add_argument("-lid", action="store", help="Label algorithm identifier", dest="labels_id", default=msaf.config.default_label_id, choices=msaf.io.get_all_label_algorithms()) parser.add_argument("-i", action="store", dest="in_file", help="Input audio file") parser.add_argument("-o", action="store", dest="out_file", help="Output file with the results", default="out.txt") args = parser.parse_args() start_time = time.time() # Setup the logger logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO) # Run MSAF params = { "annot_beats": False, "feature": "cqt", "framesync": False, "boundaries_id": args.boundaries_id, "labels_id": args.labels_id, "n_jobs": 1, "hier": False, "sonify_bounds": False, "plot": False } res = msaf.run.process(args.in_file, **params) msaf.io.write_mirex(res[0], res[1], args.out_file) # Done! logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
def main(): """Main function to parse the arguments and call the main process.""" parser = argparse.ArgumentParser( description="Runs the speficied algorithm(s) on the input file and " "the results using the MIREX format.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("-bid", action="store", help="Boundary algorithm identifier", dest="boundaries_id", default=msaf.config.default_bound_id, choices=["gt"] + msaf.io.get_all_boundary_algorithms()) parser.add_argument("-lid", action="store", help="Label algorithm identifier", dest="labels_id", default=msaf.config.default_label_id, choices=msaf.io.get_all_label_algorithms()) parser.add_argument("-i", action="store", dest="in_file", help="Input audio file") parser.add_argument("-o", action="store", dest="out_file", help="Output file with the results", default="out.txt") args = parser.parse_args() start_time = time.time() # Setup the logger logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO) # Run MSAF params = { "annot_beats": False, "feature": "cqt", "framesync": False, "boundaries_id": args.boundaries_id, "labels_id": args.labels_id, "n_jobs": 1, "hier": False, "sonify_bounds": False, "plot": False } res = msaf.run.process(args.in_file, **params) msaf.io.write_mirex(res[0], res[1], args.out_file) # Done! logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
[ "Main", "function", "to", "parse", "the", "arguments", "and", "call", "the", "main", "process", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/examples/run_mirex.py#L14-L66
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Runs the speficied algorithm(s) on the input file and \"", "\"the results using the MIREX format.\"", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter", ")", "parser", ".", "add_argument", "(", "\"-bid\"", ",", "action", "=", "\"store\"", ",", "help", "=", "\"Boundary algorithm identifier\"", ",", "dest", "=", "\"boundaries_id\"", ",", "default", "=", "msaf", ".", "config", ".", "default_bound_id", ",", "choices", "=", "[", "\"gt\"", "]", "+", "msaf", ".", "io", ".", "get_all_boundary_algorithms", "(", ")", ")", "parser", ".", "add_argument", "(", "\"-lid\"", ",", "action", "=", "\"store\"", ",", "help", "=", "\"Label algorithm identifier\"", ",", "dest", "=", "\"labels_id\"", ",", "default", "=", "msaf", ".", "config", ".", "default_label_id", ",", "choices", "=", "msaf", ".", "io", ".", "get_all_label_algorithms", "(", ")", ")", "parser", ".", "add_argument", "(", "\"-i\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"in_file\"", ",", "help", "=", "\"Input audio file\"", ")", "parser", ".", "add_argument", "(", "\"-o\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"out_file\"", ",", "help", "=", "\"Output file with the results\"", ",", "default", "=", "\"out.txt\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "start_time", "=", "time", ".", "time", "(", ")", "# Setup the logger", "logging", ".", "basicConfig", "(", "format", "=", "'%(asctime)s: %(levelname)s: %(message)s'", ",", "level", "=", "logging", ".", "INFO", ")", "# Run MSAF", "params", "=", "{", "\"annot_beats\"", ":", "False", ",", "\"feature\"", ":", "\"cqt\"", ",", "\"framesync\"", ":", "False", ",", "\"boundaries_id\"", ":", "args", ".", "boundaries_id", ",", "\"labels_id\"", ":", "args", ".", "labels_id", ",", "\"n_jobs\"", ":", "1", ",", "\"hier\"", ":", "False", ",", "\"sonify_bounds\"", ":", "False", ",", "\"plot\"", ":", "False", "}", "res", "=", "msaf", ".", "run", ".", "process", "(", "args", ".", "in_file", ",", "*", "*", "params", ")", "msaf", ".", "io", ".", "write_mirex", "(", "res", "[", "0", "]", ",", "res", "[", "1", "]", ",", "args", ".", "out_file", ")", "# Done!", "logging", ".", "info", "(", "\"Done! Took %.2f seconds.\"", "%", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
print_results
Print all the results. Parameters ---------- results: pd.DataFrame Dataframe with all the results
msaf/eval.py
def print_results(results): """Print all the results. Parameters ---------- results: pd.DataFrame Dataframe with all the results """ if len(results) == 0: logging.warning("No results to print!") return res = results.mean() logging.info("Results:\n%s" % res)
def print_results(results): """Print all the results. Parameters ---------- results: pd.DataFrame Dataframe with all the results """ if len(results) == 0: logging.warning("No results to print!") return res = results.mean() logging.info("Results:\n%s" % res)
[ "Print", "all", "the", "results", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/eval.py#L26-L38
[ "def", "print_results", "(", "results", ")", ":", "if", "len", "(", "results", ")", "==", "0", ":", "logging", ".", "warning", "(", "\"No results to print!\"", ")", "return", "res", "=", "results", ".", "mean", "(", ")", "logging", ".", "info", "(", "\"Results:\\n%s\"", "%", "res", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
compute_results
Compute the results using all the available evaluations. Parameters ---------- ann_inter : np.array Annotated intervals in seconds. est_inter : np.array Estimated intervals in seconds. ann_labels : np.array Annotated labels. est_labels : np.array Estimated labels bins : int Number of bins for the information gain. est_file : str Path to the output file to store results. weight: float Weight the Precision and Recall values of the hit rate boundaries differently (<1 will weight Precision higher, >1 will weight Recall higher). The default parameter (0.58) is the one proposed in (Nieto et al. 2014) Return ------ results : dict Contains the results of all the evaluations for the given file. Keys are the following: track_id: Name of the track HitRate_3F: F-measure of hit rate at 3 seconds HitRate_3P: Precision of hit rate at 3 seconds HitRate_3R: Recall of hit rate at 3 seconds HitRate_0.5F: F-measure of hit rate at 0.5 seconds HitRate_0.5P: Precision of hit rate at 0.5 seconds HitRate_0.5R: Recall of hit rate at 0.5 seconds HitRate_w3F: F-measure of hit rate at 3 seconds weighted HitRate_w0.5F: F-measure of hit rate at 0.5 seconds weighted HitRate_wt3F: F-measure of hit rate at 3 seconds weighted and trimmed HitRate_wt0.5F: F-measure of hit rate at 0.5 seconds weighted and trimmed HitRate_t3F: F-measure of hit rate at 3 seconds (trimmed) HitRate_t3P: Precision of hit rate at 3 seconds (trimmed) HitRate_t3F: Recall of hit rate at 3 seconds (trimmed) HitRate_t0.5F: F-measure of hit rate at 0.5 seconds (trimmed) HitRate_t0.5P: Precision of hit rate at 0.5 seconds (trimmed) HitRate_t0.5R: Recall of hit rate at 0.5 seconds (trimmed) DevA2E: Median deviation of annotation to estimation DevE2A: Median deviation of estimation to annotation D: Information gain PWF: F-measure of pair-wise frame clustering PWP: Precision of pair-wise frame clustering PWR: Recall of pair-wise frame clustering Sf: F-measure normalized entropy score So: Oversegmentation normalized entropy score Su: Undersegmentation normalized entropy score
msaf/eval.py
def compute_results(ann_inter, est_inter, ann_labels, est_labels, bins, est_file, weight=0.58): """Compute the results using all the available evaluations. Parameters ---------- ann_inter : np.array Annotated intervals in seconds. est_inter : np.array Estimated intervals in seconds. ann_labels : np.array Annotated labels. est_labels : np.array Estimated labels bins : int Number of bins for the information gain. est_file : str Path to the output file to store results. weight: float Weight the Precision and Recall values of the hit rate boundaries differently (<1 will weight Precision higher, >1 will weight Recall higher). The default parameter (0.58) is the one proposed in (Nieto et al. 2014) Return ------ results : dict Contains the results of all the evaluations for the given file. Keys are the following: track_id: Name of the track HitRate_3F: F-measure of hit rate at 3 seconds HitRate_3P: Precision of hit rate at 3 seconds HitRate_3R: Recall of hit rate at 3 seconds HitRate_0.5F: F-measure of hit rate at 0.5 seconds HitRate_0.5P: Precision of hit rate at 0.5 seconds HitRate_0.5R: Recall of hit rate at 0.5 seconds HitRate_w3F: F-measure of hit rate at 3 seconds weighted HitRate_w0.5F: F-measure of hit rate at 0.5 seconds weighted HitRate_wt3F: F-measure of hit rate at 3 seconds weighted and trimmed HitRate_wt0.5F: F-measure of hit rate at 0.5 seconds weighted and trimmed HitRate_t3F: F-measure of hit rate at 3 seconds (trimmed) HitRate_t3P: Precision of hit rate at 3 seconds (trimmed) HitRate_t3F: Recall of hit rate at 3 seconds (trimmed) HitRate_t0.5F: F-measure of hit rate at 0.5 seconds (trimmed) HitRate_t0.5P: Precision of hit rate at 0.5 seconds (trimmed) HitRate_t0.5R: Recall of hit rate at 0.5 seconds (trimmed) DevA2E: Median deviation of annotation to estimation DevE2A: Median deviation of estimation to annotation D: Information gain PWF: F-measure of pair-wise frame clustering PWP: Precision of pair-wise frame clustering PWR: Recall of pair-wise frame clustering Sf: F-measure normalized entropy score So: Oversegmentation normalized entropy score Su: Undersegmentation normalized entropy score """ res = {} # --Boundaries-- # # Hit Rate standard res["HitRate_3P"], res["HitRate_3R"], res["HitRate_3F"] = \ mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=False) res["HitRate_0.5P"], res["HitRate_0.5R"], res["HitRate_0.5F"] = \ mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=False) # Hit rate trimmed res["HitRate_t3P"], res["HitRate_t3R"], res["HitRate_t3F"] = \ mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=True) res["HitRate_t0.5P"], res["HitRate_t0.5R"], res["HitRate_t0.5F"] = \ mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=True) # Hit rate weighted _, _, res["HitRate_w3F"] = mir_eval.segment.detection( ann_inter, est_inter, window=3, trim=False, beta=weight) _, _, res["HitRate_w0.5F"] = mir_eval.segment.detection( ann_inter, est_inter, window=.5, trim=False, beta=weight) # Hit rate weighted and trimmed _, _, res["HitRate_wt3F"] = mir_eval.segment.detection( ann_inter, est_inter, window=3, trim=True, beta=weight) _, _, res["HitRate_wt0.5F"] = mir_eval.segment.detection( ann_inter, est_inter, window=.5, trim=True, beta=weight) # Information gain res["D"] = compute_information_gain(ann_inter, est_inter, est_file, bins=bins) # Median Deviations res["DevR2E"], res["DevE2R"] = mir_eval.segment.deviation( ann_inter, est_inter, trim=False) res["DevtR2E"], res["DevtE2R"] = mir_eval.segment.deviation( ann_inter, est_inter, trim=True) # --Labels-- # if est_labels is not None and ("-1" in est_labels or "@" in est_labels): est_labels = None if est_labels is not None and len(est_labels) != 0: # Align labels with intervals ann_labels = list(ann_labels) est_labels = list(est_labels) ann_inter, ann_labels = mir_eval.util.adjust_intervals(ann_inter, ann_labels) est_inter, est_labels = mir_eval.util.adjust_intervals( est_inter, est_labels, t_min=0.0, t_max=ann_inter.max()) # Pair-wise frame clustering res["PWP"], res["PWR"], res["PWF"] = mir_eval.segment.pairwise( ann_inter, ann_labels, est_inter, est_labels) # Normalized Conditional Entropies res["So"], res["Su"], res["Sf"] = mir_eval.segment.nce( ann_inter, ann_labels, est_inter, est_labels) # Names base = os.path.basename(est_file) res["track_id"] = base[:-5] res["ds_name"] = base.split("_")[0] return res
def compute_results(ann_inter, est_inter, ann_labels, est_labels, bins, est_file, weight=0.58): """Compute the results using all the available evaluations. Parameters ---------- ann_inter : np.array Annotated intervals in seconds. est_inter : np.array Estimated intervals in seconds. ann_labels : np.array Annotated labels. est_labels : np.array Estimated labels bins : int Number of bins for the information gain. est_file : str Path to the output file to store results. weight: float Weight the Precision and Recall values of the hit rate boundaries differently (<1 will weight Precision higher, >1 will weight Recall higher). The default parameter (0.58) is the one proposed in (Nieto et al. 2014) Return ------ results : dict Contains the results of all the evaluations for the given file. Keys are the following: track_id: Name of the track HitRate_3F: F-measure of hit rate at 3 seconds HitRate_3P: Precision of hit rate at 3 seconds HitRate_3R: Recall of hit rate at 3 seconds HitRate_0.5F: F-measure of hit rate at 0.5 seconds HitRate_0.5P: Precision of hit rate at 0.5 seconds HitRate_0.5R: Recall of hit rate at 0.5 seconds HitRate_w3F: F-measure of hit rate at 3 seconds weighted HitRate_w0.5F: F-measure of hit rate at 0.5 seconds weighted HitRate_wt3F: F-measure of hit rate at 3 seconds weighted and trimmed HitRate_wt0.5F: F-measure of hit rate at 0.5 seconds weighted and trimmed HitRate_t3F: F-measure of hit rate at 3 seconds (trimmed) HitRate_t3P: Precision of hit rate at 3 seconds (trimmed) HitRate_t3F: Recall of hit rate at 3 seconds (trimmed) HitRate_t0.5F: F-measure of hit rate at 0.5 seconds (trimmed) HitRate_t0.5P: Precision of hit rate at 0.5 seconds (trimmed) HitRate_t0.5R: Recall of hit rate at 0.5 seconds (trimmed) DevA2E: Median deviation of annotation to estimation DevE2A: Median deviation of estimation to annotation D: Information gain PWF: F-measure of pair-wise frame clustering PWP: Precision of pair-wise frame clustering PWR: Recall of pair-wise frame clustering Sf: F-measure normalized entropy score So: Oversegmentation normalized entropy score Su: Undersegmentation normalized entropy score """ res = {} # --Boundaries-- # # Hit Rate standard res["HitRate_3P"], res["HitRate_3R"], res["HitRate_3F"] = \ mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=False) res["HitRate_0.5P"], res["HitRate_0.5R"], res["HitRate_0.5F"] = \ mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=False) # Hit rate trimmed res["HitRate_t3P"], res["HitRate_t3R"], res["HitRate_t3F"] = \ mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=True) res["HitRate_t0.5P"], res["HitRate_t0.5R"], res["HitRate_t0.5F"] = \ mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=True) # Hit rate weighted _, _, res["HitRate_w3F"] = mir_eval.segment.detection( ann_inter, est_inter, window=3, trim=False, beta=weight) _, _, res["HitRate_w0.5F"] = mir_eval.segment.detection( ann_inter, est_inter, window=.5, trim=False, beta=weight) # Hit rate weighted and trimmed _, _, res["HitRate_wt3F"] = mir_eval.segment.detection( ann_inter, est_inter, window=3, trim=True, beta=weight) _, _, res["HitRate_wt0.5F"] = mir_eval.segment.detection( ann_inter, est_inter, window=.5, trim=True, beta=weight) # Information gain res["D"] = compute_information_gain(ann_inter, est_inter, est_file, bins=bins) # Median Deviations res["DevR2E"], res["DevE2R"] = mir_eval.segment.deviation( ann_inter, est_inter, trim=False) res["DevtR2E"], res["DevtE2R"] = mir_eval.segment.deviation( ann_inter, est_inter, trim=True) # --Labels-- # if est_labels is not None and ("-1" in est_labels or "@" in est_labels): est_labels = None if est_labels is not None and len(est_labels) != 0: # Align labels with intervals ann_labels = list(ann_labels) est_labels = list(est_labels) ann_inter, ann_labels = mir_eval.util.adjust_intervals(ann_inter, ann_labels) est_inter, est_labels = mir_eval.util.adjust_intervals( est_inter, est_labels, t_min=0.0, t_max=ann_inter.max()) # Pair-wise frame clustering res["PWP"], res["PWR"], res["PWF"] = mir_eval.segment.pairwise( ann_inter, ann_labels, est_inter, est_labels) # Normalized Conditional Entropies res["So"], res["Su"], res["Sf"] = mir_eval.segment.nce( ann_inter, ann_labels, est_inter, est_labels) # Names base = os.path.basename(est_file) res["track_id"] = base[:-5] res["ds_name"] = base.split("_")[0] return res
[ "Compute", "the", "results", "using", "all", "the", "available", "evaluations", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/eval.py#L41-L161
[ "def", "compute_results", "(", "ann_inter", ",", "est_inter", ",", "ann_labels", ",", "est_labels", ",", "bins", ",", "est_file", ",", "weight", "=", "0.58", ")", ":", "res", "=", "{", "}", "# --Boundaries-- #", "# Hit Rate standard", "res", "[", "\"HitRate_3P\"", "]", ",", "res", "[", "\"HitRate_3R\"", "]", ",", "res", "[", "\"HitRate_3F\"", "]", "=", "mir_eval", ".", "segment", ".", "detection", "(", "ann_inter", ",", "est_inter", ",", "window", "=", "3", ",", "trim", "=", "False", ")", "res", "[", "\"HitRate_0.5P\"", "]", ",", "res", "[", "\"HitRate_0.5R\"", "]", ",", "res", "[", "\"HitRate_0.5F\"", "]", "=", "mir_eval", ".", "segment", ".", "detection", "(", "ann_inter", ",", "est_inter", ",", "window", "=", ".5", ",", "trim", "=", "False", ")", "# Hit rate trimmed", "res", "[", "\"HitRate_t3P\"", "]", ",", "res", "[", "\"HitRate_t3R\"", "]", ",", "res", "[", "\"HitRate_t3F\"", "]", "=", "mir_eval", ".", "segment", ".", "detection", "(", "ann_inter", ",", "est_inter", ",", "window", "=", "3", ",", "trim", "=", "True", ")", "res", "[", "\"HitRate_t0.5P\"", "]", ",", "res", "[", "\"HitRate_t0.5R\"", "]", ",", "res", "[", "\"HitRate_t0.5F\"", "]", "=", "mir_eval", ".", "segment", ".", "detection", "(", "ann_inter", ",", "est_inter", ",", "window", "=", ".5", ",", "trim", "=", "True", ")", "# Hit rate weighted", "_", ",", "_", ",", "res", "[", "\"HitRate_w3F\"", "]", "=", "mir_eval", ".", "segment", ".", "detection", "(", "ann_inter", ",", "est_inter", ",", "window", "=", "3", ",", "trim", "=", "False", ",", "beta", "=", "weight", ")", "_", ",", "_", ",", "res", "[", "\"HitRate_w0.5F\"", "]", "=", "mir_eval", ".", "segment", ".", "detection", "(", "ann_inter", ",", "est_inter", ",", "window", "=", ".5", ",", "trim", "=", "False", ",", "beta", "=", "weight", ")", "# Hit rate weighted and trimmed", "_", ",", "_", ",", "res", "[", "\"HitRate_wt3F\"", "]", "=", "mir_eval", ".", "segment", ".", "detection", "(", "ann_inter", ",", "est_inter", ",", "window", "=", "3", ",", "trim", "=", "True", ",", "beta", "=", "weight", ")", "_", ",", "_", ",", "res", "[", "\"HitRate_wt0.5F\"", "]", "=", "mir_eval", ".", "segment", ".", "detection", "(", "ann_inter", ",", "est_inter", ",", "window", "=", ".5", ",", "trim", "=", "True", ",", "beta", "=", "weight", ")", "# Information gain", "res", "[", "\"D\"", "]", "=", "compute_information_gain", "(", "ann_inter", ",", "est_inter", ",", "est_file", ",", "bins", "=", "bins", ")", "# Median Deviations", "res", "[", "\"DevR2E\"", "]", ",", "res", "[", "\"DevE2R\"", "]", "=", "mir_eval", ".", "segment", ".", "deviation", "(", "ann_inter", ",", "est_inter", ",", "trim", "=", "False", ")", "res", "[", "\"DevtR2E\"", "]", ",", "res", "[", "\"DevtE2R\"", "]", "=", "mir_eval", ".", "segment", ".", "deviation", "(", "ann_inter", ",", "est_inter", ",", "trim", "=", "True", ")", "# --Labels-- #", "if", "est_labels", "is", "not", "None", "and", "(", "\"-1\"", "in", "est_labels", "or", "\"@\"", "in", "est_labels", ")", ":", "est_labels", "=", "None", "if", "est_labels", "is", "not", "None", "and", "len", "(", "est_labels", ")", "!=", "0", ":", "# Align labels with intervals", "ann_labels", "=", "list", "(", "ann_labels", ")", "est_labels", "=", "list", "(", "est_labels", ")", "ann_inter", ",", "ann_labels", "=", "mir_eval", ".", "util", ".", "adjust_intervals", "(", "ann_inter", ",", "ann_labels", ")", "est_inter", ",", "est_labels", "=", "mir_eval", ".", "util", ".", "adjust_intervals", "(", "est_inter", ",", "est_labels", ",", "t_min", "=", "0.0", ",", "t_max", "=", "ann_inter", ".", "max", "(", ")", ")", "# Pair-wise frame clustering", "res", "[", "\"PWP\"", "]", ",", "res", "[", "\"PWR\"", "]", ",", "res", "[", "\"PWF\"", "]", "=", "mir_eval", ".", "segment", ".", "pairwise", "(", "ann_inter", ",", "ann_labels", ",", "est_inter", ",", "est_labels", ")", "# Normalized Conditional Entropies", "res", "[", "\"So\"", "]", ",", "res", "[", "\"Su\"", "]", ",", "res", "[", "\"Sf\"", "]", "=", "mir_eval", ".", "segment", ".", "nce", "(", "ann_inter", ",", "ann_labels", ",", "est_inter", ",", "est_labels", ")", "# Names", "base", "=", "os", ".", "path", ".", "basename", "(", "est_file", ")", "res", "[", "\"track_id\"", "]", "=", "base", "[", ":", "-", "5", "]", "res", "[", "\"ds_name\"", "]", "=", "base", ".", "split", "(", "\"_\"", ")", "[", "0", "]", "return", "res" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
compute_gt_results
Computes the results by using the ground truth dataset identified by the annotator parameter. Return ------ results : dict Dictionary of the results (see function compute_results).
msaf/eval.py
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config, bins=251, annotator_id=0): """Computes the results by using the ground truth dataset identified by the annotator parameter. Return ------ results : dict Dictionary of the results (see function compute_results). """ if config["hier"]: ref_times, ref_labels, ref_levels = \ msaf.io.read_hier_references( ref_file, annotation_id=annotator_id, exclude_levels=["segment_salami_function"]) else: jam = jams.load(ref_file, validate=False) ann = jam.search(namespace='segment_.*')[annotator_id] ref_inter, ref_labels = ann.to_interval_values() # Read estimations with correct configuration est_inter, est_labels = io.read_estimations(est_file, boundaries_id, labels_id, **config) # Compute the results and return logging.info("Evaluating %s" % os.path.basename(est_file)) if config["hier"]: # Hierarchical assert len(est_inter) == len(est_labels), "Same number of levels " \ "are required in the boundaries and labels for the hierarchical " \ "evaluation." est_times = [] est_labels = [] # Sort based on how many segments per level est_inter = sorted(est_inter, key=lambda level: len(level)) for inter in est_inter: est_times.append(msaf.utils.intervals_to_times(inter)) # Add fake labels (hierarchical eval does not use labels --yet--) est_labels.append(np.ones(len(est_times[-1]) - 1) * -1) # Align the times utils.align_end_hierarchies(est_times, ref_times, thres=1) # To intervals est_hier = [utils.times_to_intervals(times) for times in est_times] ref_hier = [utils.times_to_intervals(times) for times in ref_times] # Compute evaluations res = {} res["t_recall10"], res["t_precision10"], res["t_measure10"] = \ mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10) res["t_recall15"], res["t_precision15"], res["t_measure15"] = \ mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15) res["track_id"] = os.path.basename(est_file)[:-5] return res else: # Flat return compute_results(ref_inter, est_inter, ref_labels, est_labels, bins, est_file)
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config, bins=251, annotator_id=0): """Computes the results by using the ground truth dataset identified by the annotator parameter. Return ------ results : dict Dictionary of the results (see function compute_results). """ if config["hier"]: ref_times, ref_labels, ref_levels = \ msaf.io.read_hier_references( ref_file, annotation_id=annotator_id, exclude_levels=["segment_salami_function"]) else: jam = jams.load(ref_file, validate=False) ann = jam.search(namespace='segment_.*')[annotator_id] ref_inter, ref_labels = ann.to_interval_values() # Read estimations with correct configuration est_inter, est_labels = io.read_estimations(est_file, boundaries_id, labels_id, **config) # Compute the results and return logging.info("Evaluating %s" % os.path.basename(est_file)) if config["hier"]: # Hierarchical assert len(est_inter) == len(est_labels), "Same number of levels " \ "are required in the boundaries and labels for the hierarchical " \ "evaluation." est_times = [] est_labels = [] # Sort based on how many segments per level est_inter = sorted(est_inter, key=lambda level: len(level)) for inter in est_inter: est_times.append(msaf.utils.intervals_to_times(inter)) # Add fake labels (hierarchical eval does not use labels --yet--) est_labels.append(np.ones(len(est_times[-1]) - 1) * -1) # Align the times utils.align_end_hierarchies(est_times, ref_times, thres=1) # To intervals est_hier = [utils.times_to_intervals(times) for times in est_times] ref_hier = [utils.times_to_intervals(times) for times in ref_times] # Compute evaluations res = {} res["t_recall10"], res["t_precision10"], res["t_measure10"] = \ mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10) res["t_recall15"], res["t_precision15"], res["t_measure15"] = \ mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15) res["track_id"] = os.path.basename(est_file)[:-5] return res else: # Flat return compute_results(ref_inter, est_inter, ref_labels, est_labels, bins, est_file)
[ "Computes", "the", "results", "by", "using", "the", "ground", "truth", "dataset", "identified", "by", "the", "annotator", "parameter", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/eval.py#L164-L225
[ "def", "compute_gt_results", "(", "est_file", ",", "ref_file", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "bins", "=", "251", ",", "annotator_id", "=", "0", ")", ":", "if", "config", "[", "\"hier\"", "]", ":", "ref_times", ",", "ref_labels", ",", "ref_levels", "=", "msaf", ".", "io", ".", "read_hier_references", "(", "ref_file", ",", "annotation_id", "=", "annotator_id", ",", "exclude_levels", "=", "[", "\"segment_salami_function\"", "]", ")", "else", ":", "jam", "=", "jams", ".", "load", "(", "ref_file", ",", "validate", "=", "False", ")", "ann", "=", "jam", ".", "search", "(", "namespace", "=", "'segment_.*'", ")", "[", "annotator_id", "]", "ref_inter", ",", "ref_labels", "=", "ann", ".", "to_interval_values", "(", ")", "# Read estimations with correct configuration", "est_inter", ",", "est_labels", "=", "io", ".", "read_estimations", "(", "est_file", ",", "boundaries_id", ",", "labels_id", ",", "*", "*", "config", ")", "# Compute the results and return", "logging", ".", "info", "(", "\"Evaluating %s\"", "%", "os", ".", "path", ".", "basename", "(", "est_file", ")", ")", "if", "config", "[", "\"hier\"", "]", ":", "# Hierarchical", "assert", "len", "(", "est_inter", ")", "==", "len", "(", "est_labels", ")", ",", "\"Same number of levels \"", "\"are required in the boundaries and labels for the hierarchical \"", "\"evaluation.\"", "est_times", "=", "[", "]", "est_labels", "=", "[", "]", "# Sort based on how many segments per level", "est_inter", "=", "sorted", "(", "est_inter", ",", "key", "=", "lambda", "level", ":", "len", "(", "level", ")", ")", "for", "inter", "in", "est_inter", ":", "est_times", ".", "append", "(", "msaf", ".", "utils", ".", "intervals_to_times", "(", "inter", ")", ")", "# Add fake labels (hierarchical eval does not use labels --yet--)", "est_labels", ".", "append", "(", "np", ".", "ones", "(", "len", "(", "est_times", "[", "-", "1", "]", ")", "-", "1", ")", "*", "-", "1", ")", "# Align the times", "utils", ".", "align_end_hierarchies", "(", "est_times", ",", "ref_times", ",", "thres", "=", "1", ")", "# To intervals", "est_hier", "=", "[", "utils", ".", "times_to_intervals", "(", "times", ")", "for", "times", "in", "est_times", "]", "ref_hier", "=", "[", "utils", ".", "times_to_intervals", "(", "times", ")", "for", "times", "in", "ref_times", "]", "# Compute evaluations", "res", "=", "{", "}", "res", "[", "\"t_recall10\"", "]", ",", "res", "[", "\"t_precision10\"", "]", ",", "res", "[", "\"t_measure10\"", "]", "=", "mir_eval", ".", "hierarchy", ".", "tmeasure", "(", "ref_hier", ",", "est_hier", ",", "window", "=", "10", ")", "res", "[", "\"t_recall15\"", "]", ",", "res", "[", "\"t_precision15\"", "]", ",", "res", "[", "\"t_measure15\"", "]", "=", "mir_eval", ".", "hierarchy", ".", "tmeasure", "(", "ref_hier", ",", "est_hier", ",", "window", "=", "15", ")", "res", "[", "\"track_id\"", "]", "=", "os", ".", "path", ".", "basename", "(", "est_file", ")", "[", ":", "-", "5", "]", "return", "res", "else", ":", "# Flat", "return", "compute_results", "(", "ref_inter", ",", "est_inter", ",", "ref_labels", ",", "est_labels", ",", "bins", ",", "est_file", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
compute_information_gain
Computes the information gain of the est_file from the annotated intervals and the estimated intervals.
msaf/eval.py
def compute_information_gain(ann_inter, est_inter, est_file, bins): """Computes the information gain of the est_file from the annotated intervals and the estimated intervals.""" ann_times = utils.intervals_to_times(ann_inter) est_times = utils.intervals_to_times(est_inter) return mir_eval.beat.information_gain(ann_times, est_times, bins=bins)
def compute_information_gain(ann_inter, est_inter, est_file, bins): """Computes the information gain of the est_file from the annotated intervals and the estimated intervals.""" ann_times = utils.intervals_to_times(ann_inter) est_times = utils.intervals_to_times(est_inter) return mir_eval.beat.information_gain(ann_times, est_times, bins=bins)
[ "Computes", "the", "information", "gain", "of", "the", "est_file", "from", "the", "annotated", "intervals", "and", "the", "estimated", "intervals", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/eval.py#L228-L233
[ "def", "compute_information_gain", "(", "ann_inter", ",", "est_inter", ",", "est_file", ",", "bins", ")", ":", "ann_times", "=", "utils", ".", "intervals_to_times", "(", "ann_inter", ")", "est_times", "=", "utils", ".", "intervals_to_times", "(", "est_inter", ")", "return", "mir_eval", ".", "beat", ".", "information_gain", "(", "ann_times", ",", "est_times", ",", "bins", "=", "bins", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
process_track
Processes a single track. Parameters ---------- file_struct : object (FileStruct) or str File struct or full path of the audio file to be evaluated. boundaries_id : str Identifier of the boundaries algorithm. labels_id : str Identifier of the labels algorithm. config : dict Configuration of the algorithms to be evaluated. annotator_id : int Number identifiying the annotator. Returns ------- one_res : dict Dictionary of the results (see function compute_results).
msaf/eval.py
def process_track(file_struct, boundaries_id, labels_id, config, annotator_id=0): """Processes a single track. Parameters ---------- file_struct : object (FileStruct) or str File struct or full path of the audio file to be evaluated. boundaries_id : str Identifier of the boundaries algorithm. labels_id : str Identifier of the labels algorithm. config : dict Configuration of the algorithms to be evaluated. annotator_id : int Number identifiying the annotator. Returns ------- one_res : dict Dictionary of the results (see function compute_results). """ # Convert to file_struct if string is passed if isinstance(file_struct, six.string_types): file_struct = io.FileStruct(file_struct) est_file = file_struct.est_file ref_file = file_struct.ref_file # Sanity check assert os.path.basename(est_file)[:-4] == \ os.path.basename(ref_file)[:-4], "File names are different %s --- %s" \ % (os.path.basename(est_file)[:-4], os.path.basename(ref_file)[:-4]) if not os.path.isfile(ref_file): raise NoReferencesError("Reference file %s does not exist. You must " "have annotated references to run " "evaluations." % ref_file) one_res = compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config, annotator_id=annotator_id) return one_res
def process_track(file_struct, boundaries_id, labels_id, config, annotator_id=0): """Processes a single track. Parameters ---------- file_struct : object (FileStruct) or str File struct or full path of the audio file to be evaluated. boundaries_id : str Identifier of the boundaries algorithm. labels_id : str Identifier of the labels algorithm. config : dict Configuration of the algorithms to be evaluated. annotator_id : int Number identifiying the annotator. Returns ------- one_res : dict Dictionary of the results (see function compute_results). """ # Convert to file_struct if string is passed if isinstance(file_struct, six.string_types): file_struct = io.FileStruct(file_struct) est_file = file_struct.est_file ref_file = file_struct.ref_file # Sanity check assert os.path.basename(est_file)[:-4] == \ os.path.basename(ref_file)[:-4], "File names are different %s --- %s" \ % (os.path.basename(est_file)[:-4], os.path.basename(ref_file)[:-4]) if not os.path.isfile(ref_file): raise NoReferencesError("Reference file %s does not exist. You must " "have annotated references to run " "evaluations." % ref_file) one_res = compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config, annotator_id=annotator_id) return one_res
[ "Processes", "a", "single", "track", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/eval.py#L236-L278
[ "def", "process_track", "(", "file_struct", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "0", ")", ":", "# Convert to file_struct if string is passed", "if", "isinstance", "(", "file_struct", ",", "six", ".", "string_types", ")", ":", "file_struct", "=", "io", ".", "FileStruct", "(", "file_struct", ")", "est_file", "=", "file_struct", ".", "est_file", "ref_file", "=", "file_struct", ".", "ref_file", "# Sanity check", "assert", "os", ".", "path", ".", "basename", "(", "est_file", ")", "[", ":", "-", "4", "]", "==", "os", ".", "path", ".", "basename", "(", "ref_file", ")", "[", ":", "-", "4", "]", ",", "\"File names are different %s --- %s\"", "%", "(", "os", ".", "path", ".", "basename", "(", "est_file", ")", "[", ":", "-", "4", "]", ",", "os", ".", "path", ".", "basename", "(", "ref_file", ")", "[", ":", "-", "4", "]", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "ref_file", ")", ":", "raise", "NoReferencesError", "(", "\"Reference file %s does not exist. You must \"", "\"have annotated references to run \"", "\"evaluations.\"", "%", "ref_file", ")", "one_res", "=", "compute_gt_results", "(", "est_file", ",", "ref_file", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "annotator_id", ")", "return", "one_res" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_results_file_name
Based on the config and the dataset, get the file name to store the results.
msaf/eval.py
def get_results_file_name(boundaries_id, labels_id, config, annotator_id): """Based on the config and the dataset, get the file name to store the results.""" utils.ensure_dir(msaf.config.results_dir) file_name = os.path.join(msaf.config.results_dir, "results") file_name += "_boundsE%s_labelsE%s" % (boundaries_id, labels_id) file_name += "_annotatorE%d" % (annotator_id) sorted_keys = sorted(config.keys(), key=str.lower) for key in sorted_keys: file_name += "_%sE%s" % (key, str(config[key]).replace("/", "_")) # Check for max file length if len(file_name) > 255 - len(msaf.config.results_ext): file_name = file_name[:255 - len(msaf.config.results_ext)] return file_name + msaf.config.results_ext
def get_results_file_name(boundaries_id, labels_id, config, annotator_id): """Based on the config and the dataset, get the file name to store the results.""" utils.ensure_dir(msaf.config.results_dir) file_name = os.path.join(msaf.config.results_dir, "results") file_name += "_boundsE%s_labelsE%s" % (boundaries_id, labels_id) file_name += "_annotatorE%d" % (annotator_id) sorted_keys = sorted(config.keys(), key=str.lower) for key in sorted_keys: file_name += "_%sE%s" % (key, str(config[key]).replace("/", "_")) # Check for max file length if len(file_name) > 255 - len(msaf.config.results_ext): file_name = file_name[:255 - len(msaf.config.results_ext)] return file_name + msaf.config.results_ext
[ "Based", "on", "the", "config", "and", "the", "dataset", "get", "the", "file", "name", "to", "store", "the", "results", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/eval.py#L281-L297
[ "def", "get_results_file_name", "(", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", ")", ":", "utils", ".", "ensure_dir", "(", "msaf", ".", "config", ".", "results_dir", ")", "file_name", "=", "os", ".", "path", ".", "join", "(", "msaf", ".", "config", ".", "results_dir", ",", "\"results\"", ")", "file_name", "+=", "\"_boundsE%s_labelsE%s\"", "%", "(", "boundaries_id", ",", "labels_id", ")", "file_name", "+=", "\"_annotatorE%d\"", "%", "(", "annotator_id", ")", "sorted_keys", "=", "sorted", "(", "config", ".", "keys", "(", ")", ",", "key", "=", "str", ".", "lower", ")", "for", "key", "in", "sorted_keys", ":", "file_name", "+=", "\"_%sE%s\"", "%", "(", "key", ",", "str", "(", "config", "[", "key", "]", ")", ".", "replace", "(", "\"/\"", ",", "\"_\"", ")", ")", "# Check for max file length", "if", "len", "(", "file_name", ")", ">", "255", "-", "len", "(", "msaf", ".", "config", ".", "results_ext", ")", ":", "file_name", "=", "file_name", "[", ":", "255", "-", "len", "(", "msaf", ".", "config", ".", "results_ext", ")", "]", "return", "file_name", "+", "msaf", ".", "config", ".", "results_ext" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
process
Main process to evaluate algorithms' results. Parameters ---------- in_path : str Path to the dataset root folder. boundaries_id : str Boundaries algorithm identifier (e.g. siplca, cnmf) labels_id : str Labels algorithm identifier (e.g. siplca, cnmf) ds_name : str Name of the dataset to be evaluated (e.g. SALAMI). * stands for all. annot_beats : boolean Whether to use the annotated beats or not. framesync: str Whether to use framesync features or not (default: False -> beatsync) feature: str String representing the feature to be used (e.g. pcp, mfcc, tonnetz) hier : bool Whether to compute a hierarchical or flat segmentation. save: boolean Whether to save the results into the `out_file` csv file. out_file: str Path to the csv file to save the results (if `None` and `save = True` it will save the results in the default file name obtained by calling `get_results_file_name`). n_jobs: int Number of processes to run in parallel. Only available in collection mode. annotator_id : int Number identifiying the annotator. config: dict Dictionary containing custom configuration parameters for the algorithms. If None, the default parameters are used. Return ------ results : pd.DataFrame DataFrame containing the evaluations for each file.
msaf/eval.py
def process(in_path, boundaries_id=msaf.config.default_bound_id, labels_id=msaf.config.default_label_id, annot_beats=False, framesync=False, feature="pcp", hier=False, save=False, out_file=None, n_jobs=4, annotator_id=0, config=None): """Main process to evaluate algorithms' results. Parameters ---------- in_path : str Path to the dataset root folder. boundaries_id : str Boundaries algorithm identifier (e.g. siplca, cnmf) labels_id : str Labels algorithm identifier (e.g. siplca, cnmf) ds_name : str Name of the dataset to be evaluated (e.g. SALAMI). * stands for all. annot_beats : boolean Whether to use the annotated beats or not. framesync: str Whether to use framesync features or not (default: False -> beatsync) feature: str String representing the feature to be used (e.g. pcp, mfcc, tonnetz) hier : bool Whether to compute a hierarchical or flat segmentation. save: boolean Whether to save the results into the `out_file` csv file. out_file: str Path to the csv file to save the results (if `None` and `save = True` it will save the results in the default file name obtained by calling `get_results_file_name`). n_jobs: int Number of processes to run in parallel. Only available in collection mode. annotator_id : int Number identifiying the annotator. config: dict Dictionary containing custom configuration parameters for the algorithms. If None, the default parameters are used. Return ------ results : pd.DataFrame DataFrame containing the evaluations for each file. """ # Set up configuration based on algorithms parameters if config is None: config = io.get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id) # Hierarchical segmentation config["hier"] = hier # Remove actual features config.pop("features", None) # Get out file in case we want to save results if out_file is None: out_file = get_results_file_name(boundaries_id, labels_id, config, annotator_id) # If out_file already exists, read and return them if os.path.exists(out_file): logging.warning("Results already exists, reading from file %s" % out_file) results = pd.read_csv(out_file) print_results(results) return results # Perform actual evaluations if os.path.isfile(in_path): # Single File mode evals = [process_track(in_path, boundaries_id, labels_id, config, annotator_id=annotator_id)] else: # Collection mode # Get files file_structs = io.get_dataset_files(in_path) # Evaluate in parallel logging.info("Evaluating %d tracks..." % len(file_structs)) evals = Parallel(n_jobs=n_jobs)(delayed(process_track)( file_struct, boundaries_id, labels_id, config, annotator_id=annotator_id) for file_struct in file_structs[:]) # Aggregate evaluations in pandas format results = pd.DataFrame() for e in evals: if e != []: results = results.append(e, ignore_index=True) logging.info("%d tracks analyzed" % len(results)) # Print results print_results(results) # Save all results if save: logging.info("Writing results in %s" % out_file) results.to_csv(out_file) return results
def process(in_path, boundaries_id=msaf.config.default_bound_id, labels_id=msaf.config.default_label_id, annot_beats=False, framesync=False, feature="pcp", hier=False, save=False, out_file=None, n_jobs=4, annotator_id=0, config=None): """Main process to evaluate algorithms' results. Parameters ---------- in_path : str Path to the dataset root folder. boundaries_id : str Boundaries algorithm identifier (e.g. siplca, cnmf) labels_id : str Labels algorithm identifier (e.g. siplca, cnmf) ds_name : str Name of the dataset to be evaluated (e.g. SALAMI). * stands for all. annot_beats : boolean Whether to use the annotated beats or not. framesync: str Whether to use framesync features or not (default: False -> beatsync) feature: str String representing the feature to be used (e.g. pcp, mfcc, tonnetz) hier : bool Whether to compute a hierarchical or flat segmentation. save: boolean Whether to save the results into the `out_file` csv file. out_file: str Path to the csv file to save the results (if `None` and `save = True` it will save the results in the default file name obtained by calling `get_results_file_name`). n_jobs: int Number of processes to run in parallel. Only available in collection mode. annotator_id : int Number identifiying the annotator. config: dict Dictionary containing custom configuration parameters for the algorithms. If None, the default parameters are used. Return ------ results : pd.DataFrame DataFrame containing the evaluations for each file. """ # Set up configuration based on algorithms parameters if config is None: config = io.get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id) # Hierarchical segmentation config["hier"] = hier # Remove actual features config.pop("features", None) # Get out file in case we want to save results if out_file is None: out_file = get_results_file_name(boundaries_id, labels_id, config, annotator_id) # If out_file already exists, read and return them if os.path.exists(out_file): logging.warning("Results already exists, reading from file %s" % out_file) results = pd.read_csv(out_file) print_results(results) return results # Perform actual evaluations if os.path.isfile(in_path): # Single File mode evals = [process_track(in_path, boundaries_id, labels_id, config, annotator_id=annotator_id)] else: # Collection mode # Get files file_structs = io.get_dataset_files(in_path) # Evaluate in parallel logging.info("Evaluating %d tracks..." % len(file_structs)) evals = Parallel(n_jobs=n_jobs)(delayed(process_track)( file_struct, boundaries_id, labels_id, config, annotator_id=annotator_id) for file_struct in file_structs[:]) # Aggregate evaluations in pandas format results = pd.DataFrame() for e in evals: if e != []: results = results.append(e, ignore_index=True) logging.info("%d tracks analyzed" % len(results)) # Print results print_results(results) # Save all results if save: logging.info("Writing results in %s" % out_file) results.to_csv(out_file) return results
[ "Main", "process", "to", "evaluate", "algorithms", "results", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/eval.py#L300-L400
[ "def", "process", "(", "in_path", ",", "boundaries_id", "=", "msaf", ".", "config", ".", "default_bound_id", ",", "labels_id", "=", "msaf", ".", "config", ".", "default_label_id", ",", "annot_beats", "=", "False", ",", "framesync", "=", "False", ",", "feature", "=", "\"pcp\"", ",", "hier", "=", "False", ",", "save", "=", "False", ",", "out_file", "=", "None", ",", "n_jobs", "=", "4", ",", "annotator_id", "=", "0", ",", "config", "=", "None", ")", ":", "# Set up configuration based on algorithms parameters", "if", "config", "is", "None", ":", "config", "=", "io", ".", "get_configuration", "(", "feature", ",", "annot_beats", ",", "framesync", ",", "boundaries_id", ",", "labels_id", ")", "# Hierarchical segmentation", "config", "[", "\"hier\"", "]", "=", "hier", "# Remove actual features", "config", ".", "pop", "(", "\"features\"", ",", "None", ")", "# Get out file in case we want to save results", "if", "out_file", "is", "None", ":", "out_file", "=", "get_results_file_name", "(", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", ")", "# If out_file already exists, read and return them", "if", "os", ".", "path", ".", "exists", "(", "out_file", ")", ":", "logging", ".", "warning", "(", "\"Results already exists, reading from file %s\"", "%", "out_file", ")", "results", "=", "pd", ".", "read_csv", "(", "out_file", ")", "print_results", "(", "results", ")", "return", "results", "# Perform actual evaluations", "if", "os", ".", "path", ".", "isfile", "(", "in_path", ")", ":", "# Single File mode", "evals", "=", "[", "process_track", "(", "in_path", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "annotator_id", ")", "]", "else", ":", "# Collection mode", "# Get files", "file_structs", "=", "io", ".", "get_dataset_files", "(", "in_path", ")", "# Evaluate in parallel", "logging", ".", "info", "(", "\"Evaluating %d tracks...\"", "%", "len", "(", "file_structs", ")", ")", "evals", "=", "Parallel", "(", "n_jobs", "=", "n_jobs", ")", "(", "delayed", "(", "process_track", ")", "(", "file_struct", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "annotator_id", ")", "for", "file_struct", "in", "file_structs", "[", ":", "]", ")", "# Aggregate evaluations in pandas format", "results", "=", "pd", ".", "DataFrame", "(", ")", "for", "e", "in", "evals", ":", "if", "e", "!=", "[", "]", ":", "results", "=", "results", ".", "append", "(", "e", ",", "ignore_index", "=", "True", ")", "logging", ".", "info", "(", "\"%d tracks analyzed\"", "%", "len", "(", "results", ")", ")", "# Print results", "print_results", "(", "results", ")", "# Save all results", "if", "save", ":", "logging", ".", "info", "(", "\"Writing results in %s\"", "%", "out_file", ")", "results", ".", "to_csv", "(", "out_file", ")", "return", "results" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
parse_config_string
Parses a config string (comma-separated key=value components) into a dict.
msaf/configparser.py
def parse_config_string(config_string, issue_warnings=True): """ Parses a config string (comma-separated key=value components) into a dict. """ config_dict = {} my_splitter = shlex.shlex(config_string, posix=True) my_splitter.whitespace = ',' my_splitter.whitespace_split = True for kv_pair in my_splitter: kv_pair = kv_pair.strip() if not kv_pair: continue kv_tuple = kv_pair.split('=', 1) if len(kv_tuple) == 1: if issue_warnings: MsafConfigWarning.warn( ("Config key '%s' has no value, ignoring it" % kv_tuple[0]), stacklevel=1) else: k, v = kv_tuple # subsequent values for k will override earlier ones config_dict[k] = v return config_dict
def parse_config_string(config_string, issue_warnings=True): """ Parses a config string (comma-separated key=value components) into a dict. """ config_dict = {} my_splitter = shlex.shlex(config_string, posix=True) my_splitter.whitespace = ',' my_splitter.whitespace_split = True for kv_pair in my_splitter: kv_pair = kv_pair.strip() if not kv_pair: continue kv_tuple = kv_pair.split('=', 1) if len(kv_tuple) == 1: if issue_warnings: MsafConfigWarning.warn( ("Config key '%s' has no value, ignoring it" % kv_tuple[0]), stacklevel=1) else: k, v = kv_tuple # subsequent values for k will override earlier ones config_dict[k] = v return config_dict
[ "Parses", "a", "config", "string", "(", "comma", "-", "separated", "key", "=", "value", "components", ")", "into", "a", "dict", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/configparser.py#L34-L56
[ "def", "parse_config_string", "(", "config_string", ",", "issue_warnings", "=", "True", ")", ":", "config_dict", "=", "{", "}", "my_splitter", "=", "shlex", ".", "shlex", "(", "config_string", ",", "posix", "=", "True", ")", "my_splitter", ".", "whitespace", "=", "','", "my_splitter", ".", "whitespace_split", "=", "True", "for", "kv_pair", "in", "my_splitter", ":", "kv_pair", "=", "kv_pair", ".", "strip", "(", ")", "if", "not", "kv_pair", ":", "continue", "kv_tuple", "=", "kv_pair", ".", "split", "(", "'='", ",", "1", ")", "if", "len", "(", "kv_tuple", ")", "==", "1", ":", "if", "issue_warnings", ":", "MsafConfigWarning", ".", "warn", "(", "(", "\"Config key '%s' has no value, ignoring it\"", "%", "kv_tuple", "[", "0", "]", ")", ",", "stacklevel", "=", "1", ")", "else", ":", "k", ",", "v", "=", "kv_tuple", "# subsequent values for k will override earlier ones", "config_dict", "[", "k", "]", "=", "v", "return", "config_dict" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
fetch_val_for_key
Return the overriding config value for a key. A successful search returns a string value. An unsuccessful search raises a KeyError The (decreasing) priority order is: - MSAF_FLAGS - ~./msafrc
msaf/configparser.py
def fetch_val_for_key(key, delete_key=False): """Return the overriding config value for a key. A successful search returns a string value. An unsuccessful search raises a KeyError The (decreasing) priority order is: - MSAF_FLAGS - ~./msafrc """ # first try to find it in the FLAGS try: if delete_key: return MSAF_FLAGS_DICT.pop(key) return MSAF_FLAGS_DICT[key] except KeyError: pass # next try to find it in the config file # config file keys can be of form option, or section.option key_tokens = key.rsplit('.', 1) if len(key_tokens) == 2: section, option = key_tokens else: section, option = 'global', key try: try: return msaf_cfg.get(section, option) except InterpolationError: return msaf_raw_cfg.get(section, option) except (NoOptionError, NoSectionError): raise KeyError(key)
def fetch_val_for_key(key, delete_key=False): """Return the overriding config value for a key. A successful search returns a string value. An unsuccessful search raises a KeyError The (decreasing) priority order is: - MSAF_FLAGS - ~./msafrc """ # first try to find it in the FLAGS try: if delete_key: return MSAF_FLAGS_DICT.pop(key) return MSAF_FLAGS_DICT[key] except KeyError: pass # next try to find it in the config file # config file keys can be of form option, or section.option key_tokens = key.rsplit('.', 1) if len(key_tokens) == 2: section, option = key_tokens else: section, option = 'global', key try: try: return msaf_cfg.get(section, option) except InterpolationError: return msaf_raw_cfg.get(section, option) except (NoOptionError, NoSectionError): raise KeyError(key)
[ "Return", "the", "overriding", "config", "value", "for", "a", "key", ".", "A", "successful", "search", "returns", "a", "string", "value", ".", "An", "unsuccessful", "search", "raises", "a", "KeyError", "The", "(", "decreasing", ")", "priority", "order", "is", ":", "-", "MSAF_FLAGS", "-", "~", ".", "/", "msafrc" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/configparser.py#L92-L123
[ "def", "fetch_val_for_key", "(", "key", ",", "delete_key", "=", "False", ")", ":", "# first try to find it in the FLAGS", "try", ":", "if", "delete_key", ":", "return", "MSAF_FLAGS_DICT", ".", "pop", "(", "key", ")", "return", "MSAF_FLAGS_DICT", "[", "key", "]", "except", "KeyError", ":", "pass", "# next try to find it in the config file", "# config file keys can be of form option, or section.option", "key_tokens", "=", "key", ".", "rsplit", "(", "'.'", ",", "1", ")", "if", "len", "(", "key_tokens", ")", "==", "2", ":", "section", ",", "option", "=", "key_tokens", "else", ":", "section", ",", "option", "=", "'global'", ",", "key", "try", ":", "try", ":", "return", "msaf_cfg", ".", "get", "(", "section", ",", "option", ")", "except", "InterpolationError", ":", "return", "msaf_raw_cfg", ".", "get", "(", "section", ",", "option", ")", "except", "(", "NoOptionError", ",", "NoSectionError", ")", ":", "raise", "KeyError", "(", "key", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
AddConfigVar
Add a new variable to msaf.config Parameters ---------- name: str String of the form "[section0.[section1.[etc]]]option", containing the full name for this configuration variable. string: str What does this variable specify? configparam: `ConfigParam` An object for getting and setting this configuration parameter. root: object Used for recursive calls -- do not provide an argument for this parameter.
msaf/configparser.py
def AddConfigVar(name, doc, configparam, root=config): """Add a new variable to msaf.config Parameters ---------- name: str String of the form "[section0.[section1.[etc]]]option", containing the full name for this configuration variable. string: str What does this variable specify? configparam: `ConfigParam` An object for getting and setting this configuration parameter. root: object Used for recursive calls -- do not provide an argument for this parameter. """ # This method also performs some of the work of initializing ConfigParam # instances if root is config: # only set the name in the first call, not the recursive ones configparam.fullname = name sections = name.split('.') if len(sections) > 1: # set up a subobject if not hasattr(root, sections[0]): # every internal node in the config tree is an instance of its own # unique class class SubObj(object): _i_am_a_config_class = True setattr(root.__class__, sections[0], SubObj()) newroot = getattr(root, sections[0]) if (not getattr(newroot, '_i_am_a_config_class', False) or isinstance(newroot, type)): raise TypeError( 'Internal config nodes must be config class instances', newroot) return AddConfigVar('.'.join(sections[1:]), doc, configparam, root=newroot) else: if hasattr(root, name): raise AttributeError('This name is already taken', configparam.fullname) configparam.doc = doc # Trigger a read of the value from config files and env vars # This allow to filter wrong value from the user. if not callable(configparam.default): configparam.__get__(root, type(root), delete_key=True) else: # We do not want to evaluate now the default value # when it is a callable. try: fetch_val_for_key(configparam.fullname) # The user provided a value, filter it now. configparam.__get__(root, type(root), delete_key=True) except KeyError: pass setattr(root.__class__, sections[0], configparam) _config_var_list.append(configparam)
def AddConfigVar(name, doc, configparam, root=config): """Add a new variable to msaf.config Parameters ---------- name: str String of the form "[section0.[section1.[etc]]]option", containing the full name for this configuration variable. string: str What does this variable specify? configparam: `ConfigParam` An object for getting and setting this configuration parameter. root: object Used for recursive calls -- do not provide an argument for this parameter. """ # This method also performs some of the work of initializing ConfigParam # instances if root is config: # only set the name in the first call, not the recursive ones configparam.fullname = name sections = name.split('.') if len(sections) > 1: # set up a subobject if not hasattr(root, sections[0]): # every internal node in the config tree is an instance of its own # unique class class SubObj(object): _i_am_a_config_class = True setattr(root.__class__, sections[0], SubObj()) newroot = getattr(root, sections[0]) if (not getattr(newroot, '_i_am_a_config_class', False) or isinstance(newroot, type)): raise TypeError( 'Internal config nodes must be config class instances', newroot) return AddConfigVar('.'.join(sections[1:]), doc, configparam, root=newroot) else: if hasattr(root, name): raise AttributeError('This name is already taken', configparam.fullname) configparam.doc = doc # Trigger a read of the value from config files and env vars # This allow to filter wrong value from the user. if not callable(configparam.default): configparam.__get__(root, type(root), delete_key=True) else: # We do not want to evaluate now the default value # when it is a callable. try: fetch_val_for_key(configparam.fullname) # The user provided a value, filter it now. configparam.__get__(root, type(root), delete_key=True) except KeyError: pass setattr(root.__class__, sections[0], configparam) _config_var_list.append(configparam)
[ "Add", "a", "new", "variable", "to", "msaf", ".", "config" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/configparser.py#L162-L221
[ "def", "AddConfigVar", "(", "name", ",", "doc", ",", "configparam", ",", "root", "=", "config", ")", ":", "# This method also performs some of the work of initializing ConfigParam", "# instances", "if", "root", "is", "config", ":", "# only set the name in the first call, not the recursive ones", "configparam", ".", "fullname", "=", "name", "sections", "=", "name", ".", "split", "(", "'.'", ")", "if", "len", "(", "sections", ")", ">", "1", ":", "# set up a subobject", "if", "not", "hasattr", "(", "root", ",", "sections", "[", "0", "]", ")", ":", "# every internal node in the config tree is an instance of its own", "# unique class", "class", "SubObj", "(", "object", ")", ":", "_i_am_a_config_class", "=", "True", "setattr", "(", "root", ".", "__class__", ",", "sections", "[", "0", "]", ",", "SubObj", "(", ")", ")", "newroot", "=", "getattr", "(", "root", ",", "sections", "[", "0", "]", ")", "if", "(", "not", "getattr", "(", "newroot", ",", "'_i_am_a_config_class'", ",", "False", ")", "or", "isinstance", "(", "newroot", ",", "type", ")", ")", ":", "raise", "TypeError", "(", "'Internal config nodes must be config class instances'", ",", "newroot", ")", "return", "AddConfigVar", "(", "'.'", ".", "join", "(", "sections", "[", "1", ":", "]", ")", ",", "doc", ",", "configparam", ",", "root", "=", "newroot", ")", "else", ":", "if", "hasattr", "(", "root", ",", "name", ")", ":", "raise", "AttributeError", "(", "'This name is already taken'", ",", "configparam", ".", "fullname", ")", "configparam", ".", "doc", "=", "doc", "# Trigger a read of the value from config files and env vars", "# This allow to filter wrong value from the user.", "if", "not", "callable", "(", "configparam", ".", "default", ")", ":", "configparam", ".", "__get__", "(", "root", ",", "type", "(", "root", ")", ",", "delete_key", "=", "True", ")", "else", ":", "# We do not want to evaluate now the default value", "# when it is a callable.", "try", ":", "fetch_val_for_key", "(", "configparam", ".", "fullname", ")", "# The user provided a value, filter it now.", "configparam", ".", "__get__", "(", "root", ",", "type", "(", "root", ")", ",", "delete_key", "=", "True", ")", "except", "KeyError", ":", "pass", "setattr", "(", "root", ".", "__class__", ",", "sections", "[", "0", "]", ",", "configparam", ")", "_config_var_list", ".", "append", "(", "configparam", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Segmenter.processFlat
Main process. Returns ------- est_idxs : np.array(N) Estimated indeces the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments.
msaf/algorithms/vmo/segmenter.py
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated indeces the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments. """ # Preprocess to obtain features (array(n_frames, n_features)) F = self._preprocess() F = librosa.util.normalize(F, axis=0) F = librosa.feature.stack_memory(F.T).T self.config["hier"] = False my_bounds, my_labels, _ = main.scluster_segment(F, self.config, self.in_bound_idxs) # Post process estimations est_idxs, est_labels = self._postprocess(my_bounds, my_labels) assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1 # We're done! return est_idxs, est_labels
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated indeces the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments. """ # Preprocess to obtain features (array(n_frames, n_features)) F = self._preprocess() F = librosa.util.normalize(F, axis=0) F = librosa.feature.stack_memory(F.T).T self.config["hier"] = False my_bounds, my_labels, _ = main.scluster_segment(F, self.config, self.in_bound_idxs) # Post process estimations est_idxs, est_labels = self._postprocess(my_bounds, my_labels) assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1 # We're done! return est_idxs, est_labels
[ "Main", "process", ".", "Returns", "-------", "est_idxs", ":", "np", ".", "array", "(", "N", ")", "Estimated", "indeces", "the", "segment", "boundaries", "in", "frame", "indeces", ".", "est_labels", ":", "np", ".", "array", "(", "N", "-", "1", ")", "Estimated", "labels", "for", "the", "segments", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/vmo/segmenter.py#L21-L44
[ "def", "processFlat", "(", "self", ")", ":", "# Preprocess to obtain features (array(n_frames, n_features))", "F", "=", "self", ".", "_preprocess", "(", ")", "F", "=", "librosa", ".", "util", ".", "normalize", "(", "F", ",", "axis", "=", "0", ")", "F", "=", "librosa", ".", "feature", ".", "stack_memory", "(", "F", ".", "T", ")", ".", "T", "self", ".", "config", "[", "\"hier\"", "]", "=", "False", "my_bounds", ",", "my_labels", ",", "_", "=", "main", ".", "scluster_segment", "(", "F", ",", "self", ".", "config", ",", "self", ".", "in_bound_idxs", ")", "# Post process estimations", "est_idxs", ",", "est_labels", "=", "self", ".", "_postprocess", "(", "my_bounds", ",", "my_labels", ")", "assert", "est_idxs", "[", "0", "]", "==", "0", "and", "est_idxs", "[", "-", "1", "]", "==", "F", ".", "shape", "[", "0", "]", "-", "1", "# We're done!", "return", "est_idxs", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Segmenter.processHierarchical
Main process.for hierarchial segmentation. Returns ------- est_idxs : list List with np.arrays for each layer of segmentation containing the estimated indeces for the segment boundaries. est_labels : list List with np.arrays containing the labels for each layer of the hierarchical segmentation.
msaf/algorithms/vmo/segmenter.py
def processHierarchical(self): """Main process.for hierarchial segmentation. Returns ------- est_idxs : list List with np.arrays for each layer of segmentation containing the estimated indeces for the segment boundaries. est_labels : list List with np.arrays containing the labels for each layer of the hierarchical segmentation. """ F = self._preprocess() F = librosa.util.normalize(F, axis=0) F = librosa.feature.stack_memory(F.T).T self.config["hier"] = True est_idxs, est_labels, F = main.scluster_segment(F, self.config, self.in_bound_idxs) for layer in range(len(est_idxs)): assert est_idxs[layer][0] == 0 and \ est_idxs[layer][-1] == F.shape[1] - 1 est_idxs[layer], est_labels[layer] = \ self._postprocess(est_idxs[layer], est_labels[layer]) return est_idxs, est_labels
def processHierarchical(self): """Main process.for hierarchial segmentation. Returns ------- est_idxs : list List with np.arrays for each layer of segmentation containing the estimated indeces for the segment boundaries. est_labels : list List with np.arrays containing the labels for each layer of the hierarchical segmentation. """ F = self._preprocess() F = librosa.util.normalize(F, axis=0) F = librosa.feature.stack_memory(F.T).T self.config["hier"] = True est_idxs, est_labels, F = main.scluster_segment(F, self.config, self.in_bound_idxs) for layer in range(len(est_idxs)): assert est_idxs[layer][0] == 0 and \ est_idxs[layer][-1] == F.shape[1] - 1 est_idxs[layer], est_labels[layer] = \ self._postprocess(est_idxs[layer], est_labels[layer]) return est_idxs, est_labels
[ "Main", "process", ".", "for", "hierarchial", "segmentation", ".", "Returns", "-------", "est_idxs", ":", "list", "List", "with", "np", ".", "arrays", "for", "each", "layer", "of", "segmentation", "containing", "the", "estimated", "indeces", "for", "the", "segment", "boundaries", ".", "est_labels", ":", "list", "List", "with", "np", ".", "arrays", "containing", "the", "labels", "for", "each", "layer", "of", "the", "hierarchical", "segmentation", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/vmo/segmenter.py#L46-L68
[ "def", "processHierarchical", "(", "self", ")", ":", "F", "=", "self", ".", "_preprocess", "(", ")", "F", "=", "librosa", ".", "util", ".", "normalize", "(", "F", ",", "axis", "=", "0", ")", "F", "=", "librosa", ".", "feature", ".", "stack_memory", "(", "F", ".", "T", ")", ".", "T", "self", ".", "config", "[", "\"hier\"", "]", "=", "True", "est_idxs", ",", "est_labels", ",", "F", "=", "main", ".", "scluster_segment", "(", "F", ",", "self", ".", "config", ",", "self", ".", "in_bound_idxs", ")", "for", "layer", "in", "range", "(", "len", "(", "est_idxs", ")", ")", ":", "assert", "est_idxs", "[", "layer", "]", "[", "0", "]", "==", "0", "and", "est_idxs", "[", "layer", "]", "[", "-", "1", "]", "==", "F", ".", "shape", "[", "1", "]", "-", "1", "est_idxs", "[", "layer", "]", ",", "est_labels", "[", "layer", "]", "=", "self", ".", "_postprocess", "(", "est_idxs", "[", "layer", "]", ",", "est_labels", "[", "layer", "]", ")", "return", "est_idxs", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
NMF.frobenius_norm
Frobenius norm (||data - WH||) of a data matrix and a low rank approximation given by WH Returns: frobenius norm: F = ||data - WH||
msaf/pymf/nmf.py
def frobenius_norm(self): """ Frobenius norm (||data - WH||) of a data matrix and a low rank approximation given by WH Returns: frobenius norm: F = ||data - WH|| """ # check if W and H exist if hasattr(self,'H') and hasattr(self,'W') and not scipy.sparse.issparse(self.data): err = np.sqrt( np.sum((self.data[:,:] - np.dot(self.W, self.H))**2 )) else: err = -123456 return err
def frobenius_norm(self): """ Frobenius norm (||data - WH||) of a data matrix and a low rank approximation given by WH Returns: frobenius norm: F = ||data - WH|| """ # check if W and H exist if hasattr(self,'H') and hasattr(self,'W') and not scipy.sparse.issparse(self.data): err = np.sqrt( np.sum((self.data[:,:] - np.dot(self.W, self.H))**2 )) else: err = -123456 return err
[ "Frobenius", "norm", "(", "||data", "-", "WH||", ")", "of", "a", "data", "matrix", "and", "a", "low", "rank", "approximation", "given", "by", "WH" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/nmf.py#L100-L114
[ "def", "frobenius_norm", "(", "self", ")", ":", "# check if W and H exist", "if", "hasattr", "(", "self", ",", "'H'", ")", "and", "hasattr", "(", "self", ",", "'W'", ")", "and", "not", "scipy", ".", "sparse", ".", "issparse", "(", "self", ".", "data", ")", ":", "err", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "(", "self", ".", "data", "[", ":", ",", ":", "]", "-", "np", ".", "dot", "(", "self", ".", "W", ",", "self", ".", "H", ")", ")", "**", "2", ")", ")", "else", ":", "err", "=", "-", "123456", "return", "err" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
compute_all_features
Computes all features for the given file.
examples/compute_features.py
def compute_all_features(file_struct, framesync): """Computes all features for the given file.""" for feature_id in msaf.features_registry: logging.info("Computing %s for file %s" % (feature_id, file_struct.audio_file)) feats = Features.select_features(feature_id, file_struct, False, framesync) feats.features
def compute_all_features(file_struct, framesync): """Computes all features for the given file.""" for feature_id in msaf.features_registry: logging.info("Computing %s for file %s" % (feature_id, file_struct.audio_file)) feats = Features.select_features(feature_id, file_struct, False, framesync) feats.features
[ "Computes", "all", "features", "for", "the", "given", "file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/examples/compute_features.py#L28-L34
[ "def", "compute_all_features", "(", "file_struct", ",", "framesync", ")", ":", "for", "feature_id", "in", "msaf", ".", "features_registry", ":", "logging", ".", "info", "(", "\"Computing %s for file %s\"", "%", "(", "feature_id", ",", "file_struct", ".", "audio_file", ")", ")", "feats", "=", "Features", ".", "select_features", "(", "feature_id", ",", "file_struct", ",", "False", ",", "framesync", ")", "feats", ".", "features" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
process
Computes the features for the selected dataset or file.
examples/compute_features.py
def process(in_path, out_file, n_jobs, framesync): """Computes the features for the selected dataset or file.""" if os.path.isfile(in_path): # Single file mode # Get (if they exitst) or compute features file_struct = msaf.io.FileStruct(in_path) file_struct.features_file = out_file compute_all_features(file_struct, framesync) else: # Collection mode file_structs = msaf.io.get_dataset_files(in_path) # Call in parallel return Parallel(n_jobs=n_jobs)(delayed(compute_all_features)( file_struct, framesync) for file_struct in file_structs)
def process(in_path, out_file, n_jobs, framesync): """Computes the features for the selected dataset or file.""" if os.path.isfile(in_path): # Single file mode # Get (if they exitst) or compute features file_struct = msaf.io.FileStruct(in_path) file_struct.features_file = out_file compute_all_features(file_struct, framesync) else: # Collection mode file_structs = msaf.io.get_dataset_files(in_path) # Call in parallel return Parallel(n_jobs=n_jobs)(delayed(compute_all_features)( file_struct, framesync) for file_struct in file_structs)
[ "Computes", "the", "features", "for", "the", "selected", "dataset", "or", "file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/examples/compute_features.py#L37-L51
[ "def", "process", "(", "in_path", ",", "out_file", ",", "n_jobs", ",", "framesync", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "in_path", ")", ":", "# Single file mode", "# Get (if they exitst) or compute features", "file_struct", "=", "msaf", ".", "io", ".", "FileStruct", "(", "in_path", ")", "file_struct", ".", "features_file", "=", "out_file", "compute_all_features", "(", "file_struct", ",", "framesync", ")", "else", ":", "# Collection mode", "file_structs", "=", "msaf", ".", "io", ".", "get_dataset_files", "(", "in_path", ")", "# Call in parallel", "return", "Parallel", "(", "n_jobs", "=", "n_jobs", ")", "(", "delayed", "(", "compute_all_features", ")", "(", "file_struct", ",", "framesync", ")", "for", "file_struct", "in", "file_structs", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
main
Main function to parse the arguments and call the main process.
examples/compute_features.py
def main(): """Main function to parse the arguments and call the main process.""" parser = argparse.ArgumentParser( description="Extracts a set of features from a given dataset " "or audio file and saves them into the 'features' folder of " "the dataset or the specified single file.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("in_path", action="store", help="Input dataset dir or audio file") parser.add_argument("-j", action="store", dest="n_jobs", type=int, help="Number of jobs (only for collection mode)", default=4) parser.add_argument("-o", action="store", dest="out_file", type=str, help="Output file (only for single file mode)", default="out.json") parser.add_argument("-d", action="store", dest="ds_name", default="*", help="The prefix of the dataset to use " "(e.g. Isophonics, SALAMI)") parser.add_argument("-fs", action="store_true", dest="framesync", help="Use frame-synchronous features", default=False) args = parser.parse_args() start_time = time.time() # Setup the logger logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO) # Run the main process process(args.in_path, out_file=args.out_file, n_jobs=args.n_jobs, framesync=args.framesync) # Done! logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
def main(): """Main function to parse the arguments and call the main process.""" parser = argparse.ArgumentParser( description="Extracts a set of features from a given dataset " "or audio file and saves them into the 'features' folder of " "the dataset or the specified single file.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("in_path", action="store", help="Input dataset dir or audio file") parser.add_argument("-j", action="store", dest="n_jobs", type=int, help="Number of jobs (only for collection mode)", default=4) parser.add_argument("-o", action="store", dest="out_file", type=str, help="Output file (only for single file mode)", default="out.json") parser.add_argument("-d", action="store", dest="ds_name", default="*", help="The prefix of the dataset to use " "(e.g. Isophonics, SALAMI)") parser.add_argument("-fs", action="store_true", dest="framesync", help="Use frame-synchronous features", default=False) args = parser.parse_args() start_time = time.time() # Setup the logger logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', level=logging.INFO) # Run the main process process(args.in_path, out_file=args.out_file, n_jobs=args.n_jobs, framesync=args.framesync) # Done! logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
[ "Main", "function", "to", "parse", "the", "arguments", "and", "call", "the", "main", "process", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/examples/compute_features.py#L54-L98
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Extracts a set of features from a given dataset \"", "\"or audio file and saves them into the 'features' folder of \"", "\"the dataset or the specified single file.\"", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter", ")", "parser", ".", "add_argument", "(", "\"in_path\"", ",", "action", "=", "\"store\"", ",", "help", "=", "\"Input dataset dir or audio file\"", ")", "parser", ".", "add_argument", "(", "\"-j\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"n_jobs\"", ",", "type", "=", "int", ",", "help", "=", "\"Number of jobs (only for collection mode)\"", ",", "default", "=", "4", ")", "parser", ".", "add_argument", "(", "\"-o\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"out_file\"", ",", "type", "=", "str", ",", "help", "=", "\"Output file (only for single file mode)\"", ",", "default", "=", "\"out.json\"", ")", "parser", ".", "add_argument", "(", "\"-d\"", ",", "action", "=", "\"store\"", ",", "dest", "=", "\"ds_name\"", ",", "default", "=", "\"*\"", ",", "help", "=", "\"The prefix of the dataset to use \"", "\"(e.g. Isophonics, SALAMI)\"", ")", "parser", ".", "add_argument", "(", "\"-fs\"", ",", "action", "=", "\"store_true\"", ",", "dest", "=", "\"framesync\"", ",", "help", "=", "\"Use frame-synchronous features\"", ",", "default", "=", "False", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "start_time", "=", "time", ".", "time", "(", ")", "# Setup the logger", "logging", ".", "basicConfig", "(", "format", "=", "'%(asctime)s: %(levelname)s: %(message)s'", ",", "level", "=", "logging", ".", "INFO", ")", "# Run the main process", "process", "(", "args", ".", "in_path", ",", "out_file", "=", "args", ".", "out_file", ",", "n_jobs", "=", "args", ".", "n_jobs", ",", "framesync", "=", "args", ".", "framesync", ")", "# Done!", "logging", ".", "info", "(", "\"Done! Took %.2f seconds.\"", "%", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
features
Feature-extraction for audio segmentation Arguments: file_struct -- msaf.io.FileStruct paths to the input files in the Segmentation dataset Returns: - X -- ndarray beat-synchronous feature matrix: MFCC (mean-aggregated) Chroma (median-aggregated) Latent timbre repetition Latent chroma repetition Time index Beat index - dur -- float duration of the track in seconds
msaf/algorithms/olda/segmenter.py
def features(file_struct, annot_beats=False, framesync=False): '''Feature-extraction for audio segmentation Arguments: file_struct -- msaf.io.FileStruct paths to the input files in the Segmentation dataset Returns: - X -- ndarray beat-synchronous feature matrix: MFCC (mean-aggregated) Chroma (median-aggregated) Latent timbre repetition Latent chroma repetition Time index Beat index - dur -- float duration of the track in seconds ''' def compress_data(X, k): Xtemp = X.dot(X.T) if len(Xtemp) == 0: return None e_vals, e_vecs = np.linalg.eig(Xtemp) e_vals = np.maximum(0.0, np.real(e_vals)) e_vecs = np.real(e_vecs) idx = np.argsort(e_vals)[::-1] e_vals = e_vals[idx] e_vecs = e_vecs[:, idx] # Truncate to k dimensions if k < len(e_vals): e_vals = e_vals[:k] e_vecs = e_vecs[:, :k] # Normalize by the leading singular value of X Z = np.sqrt(e_vals.max()) if Z > 0: e_vecs = e_vecs / Z return e_vecs.T.dot(X) # Latent factor repetition features def repetition(X, metric='euclidean'): R = librosa.segment.recurrence_matrix( X, k=2 * int(np.ceil(np.sqrt(X.shape[1]))), width=REP_WIDTH, metric=metric, sym=False).astype(np.float32) P = scipy.signal.medfilt2d(librosa.segment.recurrence_to_lag(R), [1, REP_FILTER]) # Discard empty rows. # This should give an equivalent SVD, but resolves some numerical # instabilities. P = P[P.any(axis=1)] return compress_data(P, N_REP) ######### # '\tloading annotations and features of ', audio_path pcp_obj = Features.select_features("pcp", file_struct, annot_beats, framesync) mfcc_obj = Features.select_features("mfcc", file_struct, annot_beats, framesync) chroma = pcp_obj.features mfcc = mfcc_obj.features beats = pcp_obj.frame_times dur = pcp_obj.dur # Sampling Rate sr = msaf.config.sample_rate ########## # print '\treading beats' B = beats[:chroma.shape[0]] # beat_frames = librosa.time_to_frames(B, sr=sr, #hop_length=msaf.config.hop_size) #print beat_frames, len(beat_frames), uidx ######### M = mfcc.T #plt.imshow(M, interpolation="nearest", aspect="auto"); plt.show() ######### # Get the beat-sync chroma C = chroma.T C += C.min() + 0.1 C = C / C.max(axis=0) C = 80 * np.log10(C) # Normalize from -80 to 0 #plt.imshow(C, interpolation="nearest", aspect="auto"); plt.show() # Time-stamp features N = np.arange(float(chroma.shape[0])) ######### #print '\tgenerating structure features' # TODO: This might fail if audio file (or number of beats) is too small R_timbre = repetition(librosa.feature.stack_memory(M)) R_chroma = repetition(librosa.feature.stack_memory(C)) if R_timbre is None or R_chroma is None: return None, dur R_timbre += R_timbre.min() R_timbre /= R_timbre.max() R_chroma += R_chroma.min() R_chroma /= R_chroma.max() #plt.imshow(R_chroma, interpolation="nearest", aspect="auto"); plt.show() # Stack it all up #print M.shape, C.shape, R_timbre.shape, R_chroma.shape, len(B), len(N) X = np.vstack([M, C, R_timbre, R_chroma, B, B / dur, N, N / float(chroma.shape[0])]) #plt.imshow(X, interpolation="nearest", aspect="auto"); plt.show() return X, dur
def features(file_struct, annot_beats=False, framesync=False): '''Feature-extraction for audio segmentation Arguments: file_struct -- msaf.io.FileStruct paths to the input files in the Segmentation dataset Returns: - X -- ndarray beat-synchronous feature matrix: MFCC (mean-aggregated) Chroma (median-aggregated) Latent timbre repetition Latent chroma repetition Time index Beat index - dur -- float duration of the track in seconds ''' def compress_data(X, k): Xtemp = X.dot(X.T) if len(Xtemp) == 0: return None e_vals, e_vecs = np.linalg.eig(Xtemp) e_vals = np.maximum(0.0, np.real(e_vals)) e_vecs = np.real(e_vecs) idx = np.argsort(e_vals)[::-1] e_vals = e_vals[idx] e_vecs = e_vecs[:, idx] # Truncate to k dimensions if k < len(e_vals): e_vals = e_vals[:k] e_vecs = e_vecs[:, :k] # Normalize by the leading singular value of X Z = np.sqrt(e_vals.max()) if Z > 0: e_vecs = e_vecs / Z return e_vecs.T.dot(X) # Latent factor repetition features def repetition(X, metric='euclidean'): R = librosa.segment.recurrence_matrix( X, k=2 * int(np.ceil(np.sqrt(X.shape[1]))), width=REP_WIDTH, metric=metric, sym=False).astype(np.float32) P = scipy.signal.medfilt2d(librosa.segment.recurrence_to_lag(R), [1, REP_FILTER]) # Discard empty rows. # This should give an equivalent SVD, but resolves some numerical # instabilities. P = P[P.any(axis=1)] return compress_data(P, N_REP) ######### # '\tloading annotations and features of ', audio_path pcp_obj = Features.select_features("pcp", file_struct, annot_beats, framesync) mfcc_obj = Features.select_features("mfcc", file_struct, annot_beats, framesync) chroma = pcp_obj.features mfcc = mfcc_obj.features beats = pcp_obj.frame_times dur = pcp_obj.dur # Sampling Rate sr = msaf.config.sample_rate ########## # print '\treading beats' B = beats[:chroma.shape[0]] # beat_frames = librosa.time_to_frames(B, sr=sr, #hop_length=msaf.config.hop_size) #print beat_frames, len(beat_frames), uidx ######### M = mfcc.T #plt.imshow(M, interpolation="nearest", aspect="auto"); plt.show() ######### # Get the beat-sync chroma C = chroma.T C += C.min() + 0.1 C = C / C.max(axis=0) C = 80 * np.log10(C) # Normalize from -80 to 0 #plt.imshow(C, interpolation="nearest", aspect="auto"); plt.show() # Time-stamp features N = np.arange(float(chroma.shape[0])) ######### #print '\tgenerating structure features' # TODO: This might fail if audio file (or number of beats) is too small R_timbre = repetition(librosa.feature.stack_memory(M)) R_chroma = repetition(librosa.feature.stack_memory(C)) if R_timbre is None or R_chroma is None: return None, dur R_timbre += R_timbre.min() R_timbre /= R_timbre.max() R_chroma += R_chroma.min() R_chroma /= R_chroma.max() #plt.imshow(R_chroma, interpolation="nearest", aspect="auto"); plt.show() # Stack it all up #print M.shape, C.shape, R_timbre.shape, R_chroma.shape, len(B), len(N) X = np.vstack([M, C, R_timbre, R_chroma, B, B / dur, N, N / float(chroma.shape[0])]) #plt.imshow(X, interpolation="nearest", aspect="auto"); plt.show() return X, dur
[ "Feature", "-", "extraction", "for", "audio", "segmentation", "Arguments", ":", "file_struct", "--", "msaf", ".", "io", ".", "FileStruct", "paths", "to", "the", "input", "files", "in", "the", "Segmentation", "dataset" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/olda/segmenter.py#L29-L151
[ "def", "features", "(", "file_struct", ",", "annot_beats", "=", "False", ",", "framesync", "=", "False", ")", ":", "def", "compress_data", "(", "X", ",", "k", ")", ":", "Xtemp", "=", "X", ".", "dot", "(", "X", ".", "T", ")", "if", "len", "(", "Xtemp", ")", "==", "0", ":", "return", "None", "e_vals", ",", "e_vecs", "=", "np", ".", "linalg", ".", "eig", "(", "Xtemp", ")", "e_vals", "=", "np", ".", "maximum", "(", "0.0", ",", "np", ".", "real", "(", "e_vals", ")", ")", "e_vecs", "=", "np", ".", "real", "(", "e_vecs", ")", "idx", "=", "np", ".", "argsort", "(", "e_vals", ")", "[", ":", ":", "-", "1", "]", "e_vals", "=", "e_vals", "[", "idx", "]", "e_vecs", "=", "e_vecs", "[", ":", ",", "idx", "]", "# Truncate to k dimensions", "if", "k", "<", "len", "(", "e_vals", ")", ":", "e_vals", "=", "e_vals", "[", ":", "k", "]", "e_vecs", "=", "e_vecs", "[", ":", ",", ":", "k", "]", "# Normalize by the leading singular value of X", "Z", "=", "np", ".", "sqrt", "(", "e_vals", ".", "max", "(", ")", ")", "if", "Z", ">", "0", ":", "e_vecs", "=", "e_vecs", "/", "Z", "return", "e_vecs", ".", "T", ".", "dot", "(", "X", ")", "# Latent factor repetition features", "def", "repetition", "(", "X", ",", "metric", "=", "'euclidean'", ")", ":", "R", "=", "librosa", ".", "segment", ".", "recurrence_matrix", "(", "X", ",", "k", "=", "2", "*", "int", "(", "np", ".", "ceil", "(", "np", ".", "sqrt", "(", "X", ".", "shape", "[", "1", "]", ")", ")", ")", ",", "width", "=", "REP_WIDTH", ",", "metric", "=", "metric", ",", "sym", "=", "False", ")", ".", "astype", "(", "np", ".", "float32", ")", "P", "=", "scipy", ".", "signal", ".", "medfilt2d", "(", "librosa", ".", "segment", ".", "recurrence_to_lag", "(", "R", ")", ",", "[", "1", ",", "REP_FILTER", "]", ")", "# Discard empty rows.", "# This should give an equivalent SVD, but resolves some numerical", "# instabilities.", "P", "=", "P", "[", "P", ".", "any", "(", "axis", "=", "1", ")", "]", "return", "compress_data", "(", "P", ",", "N_REP", ")", "#########", "# '\\tloading annotations and features of ', audio_path", "pcp_obj", "=", "Features", ".", "select_features", "(", "\"pcp\"", ",", "file_struct", ",", "annot_beats", ",", "framesync", ")", "mfcc_obj", "=", "Features", ".", "select_features", "(", "\"mfcc\"", ",", "file_struct", ",", "annot_beats", ",", "framesync", ")", "chroma", "=", "pcp_obj", ".", "features", "mfcc", "=", "mfcc_obj", ".", "features", "beats", "=", "pcp_obj", ".", "frame_times", "dur", "=", "pcp_obj", ".", "dur", "# Sampling Rate", "sr", "=", "msaf", ".", "config", ".", "sample_rate", "##########", "# print '\\treading beats'", "B", "=", "beats", "[", ":", "chroma", ".", "shape", "[", "0", "]", "]", "# beat_frames = librosa.time_to_frames(B, sr=sr,", "#hop_length=msaf.config.hop_size)", "#print beat_frames, len(beat_frames), uidx", "#########", "M", "=", "mfcc", ".", "T", "#plt.imshow(M, interpolation=\"nearest\", aspect=\"auto\"); plt.show()", "#########", "# Get the beat-sync chroma", "C", "=", "chroma", ".", "T", "C", "+=", "C", ".", "min", "(", ")", "+", "0.1", "C", "=", "C", "/", "C", ".", "max", "(", "axis", "=", "0", ")", "C", "=", "80", "*", "np", ".", "log10", "(", "C", ")", "# Normalize from -80 to 0", "#plt.imshow(C, interpolation=\"nearest\", aspect=\"auto\"); plt.show()", "# Time-stamp features", "N", "=", "np", ".", "arange", "(", "float", "(", "chroma", ".", "shape", "[", "0", "]", ")", ")", "#########", "#print '\\tgenerating structure features'", "# TODO: This might fail if audio file (or number of beats) is too small", "R_timbre", "=", "repetition", "(", "librosa", ".", "feature", ".", "stack_memory", "(", "M", ")", ")", "R_chroma", "=", "repetition", "(", "librosa", ".", "feature", ".", "stack_memory", "(", "C", ")", ")", "if", "R_timbre", "is", "None", "or", "R_chroma", "is", "None", ":", "return", "None", ",", "dur", "R_timbre", "+=", "R_timbre", ".", "min", "(", ")", "R_timbre", "/=", "R_timbre", ".", "max", "(", ")", "R_chroma", "+=", "R_chroma", ".", "min", "(", ")", "R_chroma", "/=", "R_chroma", ".", "max", "(", ")", "#plt.imshow(R_chroma, interpolation=\"nearest\", aspect=\"auto\"); plt.show()", "# Stack it all up", "#print M.shape, C.shape, R_timbre.shape, R_chroma.shape, len(B), len(N)", "X", "=", "np", ".", "vstack", "(", "[", "M", ",", "C", ",", "R_timbre", ",", "R_chroma", ",", "B", ",", "B", "/", "dur", ",", "N", ",", "N", "/", "float", "(", "chroma", ".", "shape", "[", "0", "]", ")", "]", ")", "#plt.imshow(X, interpolation=\"nearest\", aspect=\"auto\"); plt.show()", "return", "X", ",", "dur" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
gaussian_cost
Return the average log-likelihood of data under a standard normal
msaf/algorithms/olda/segmenter.py
def gaussian_cost(X): '''Return the average log-likelihood of data under a standard normal ''' d, n = X.shape if n < 2: return 0 sigma = np.var(X, axis=1, ddof=1) cost = -0.5 * d * n * np.log(2. * np.pi) - 0.5 * (n - 1.) * np.sum(sigma) return cost
def gaussian_cost(X): '''Return the average log-likelihood of data under a standard normal ''' d, n = X.shape if n < 2: return 0 sigma = np.var(X, axis=1, ddof=1) cost = -0.5 * d * n * np.log(2. * np.pi) - 0.5 * (n - 1.) * np.sum(sigma) return cost
[ "Return", "the", "average", "log", "-", "likelihood", "of", "data", "under", "a", "standard", "normal" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/olda/segmenter.py#L154-L166
[ "def", "gaussian_cost", "(", "X", ")", ":", "d", ",", "n", "=", "X", ".", "shape", "if", "n", "<", "2", ":", "return", "0", "sigma", "=", "np", ".", "var", "(", "X", ",", "axis", "=", "1", ",", "ddof", "=", "1", ")", "cost", "=", "-", "0.5", "*", "d", "*", "n", "*", "np", ".", "log", "(", "2.", "*", "np", ".", "pi", ")", "-", "0.5", "*", "(", "n", "-", "1.", ")", "*", "np", ".", "sum", "(", "sigma", ")", "return", "cost" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Segmenter.processFlat
Main process for flat segmentation. Returns ------- est_idxs : np.array(N) Estimated times for the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments.
msaf/algorithms/olda/segmenter.py
def processFlat(self): """Main process for flat segmentation. Returns ------- est_idxs : np.array(N) Estimated times for the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments. """ # Preprocess to obtain features and duration F, dur = features(self.file_struct, self.annot_beats, self.framesync) try: # Load and apply transform W = load_transform(self.config["transform"]) F = W.dot(F) # Get Segments kmin, kmax = get_num_segs(dur) est_idxs = get_segments(F, kmin=kmin, kmax=kmax) except: # The audio file is too short, only beginning and end logging.warning("Audio file too short! " "Only start and end boundaries.") est_idxs = [0, F.shape[1] - 1] # Make sure that the first and last boundaries are included assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[1] - 1 # Empty labels est_labels = np.ones(len(est_idxs) - 1) * -1 # Post process estimations est_idxs, est_labels = self._postprocess(est_idxs, est_labels) return est_idxs, est_labels
def processFlat(self): """Main process for flat segmentation. Returns ------- est_idxs : np.array(N) Estimated times for the segment boundaries in frame indeces. est_labels : np.array(N-1) Estimated labels for the segments. """ # Preprocess to obtain features and duration F, dur = features(self.file_struct, self.annot_beats, self.framesync) try: # Load and apply transform W = load_transform(self.config["transform"]) F = W.dot(F) # Get Segments kmin, kmax = get_num_segs(dur) est_idxs = get_segments(F, kmin=kmin, kmax=kmax) except: # The audio file is too short, only beginning and end logging.warning("Audio file too short! " "Only start and end boundaries.") est_idxs = [0, F.shape[1] - 1] # Make sure that the first and last boundaries are included assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[1] - 1 # Empty labels est_labels = np.ones(len(est_idxs) - 1) * -1 # Post process estimations est_idxs, est_labels = self._postprocess(est_idxs, est_labels) return est_idxs, est_labels
[ "Main", "process", "for", "flat", "segmentation", ".", "Returns", "-------", "est_idxs", ":", "np", ".", "array", "(", "N", ")", "Estimated", "times", "for", "the", "segment", "boundaries", "in", "frame", "indeces", ".", "est_labels", ":", "np", ".", "array", "(", "N", "-", "1", ")", "Estimated", "labels", "for", "the", "segments", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/olda/segmenter.py#L265-L300
[ "def", "processFlat", "(", "self", ")", ":", "# Preprocess to obtain features and duration", "F", ",", "dur", "=", "features", "(", "self", ".", "file_struct", ",", "self", ".", "annot_beats", ",", "self", ".", "framesync", ")", "try", ":", "# Load and apply transform", "W", "=", "load_transform", "(", "self", ".", "config", "[", "\"transform\"", "]", ")", "F", "=", "W", ".", "dot", "(", "F", ")", "# Get Segments", "kmin", ",", "kmax", "=", "get_num_segs", "(", "dur", ")", "est_idxs", "=", "get_segments", "(", "F", ",", "kmin", "=", "kmin", ",", "kmax", "=", "kmax", ")", "except", ":", "# The audio file is too short, only beginning and end", "logging", ".", "warning", "(", "\"Audio file too short! \"", "\"Only start and end boundaries.\"", ")", "est_idxs", "=", "[", "0", ",", "F", ".", "shape", "[", "1", "]", "-", "1", "]", "# Make sure that the first and last boundaries are included", "assert", "est_idxs", "[", "0", "]", "==", "0", "and", "est_idxs", "[", "-", "1", "]", "==", "F", ".", "shape", "[", "1", "]", "-", "1", "# Empty labels", "est_labels", "=", "np", ".", "ones", "(", "len", "(", "est_idxs", ")", "-", "1", ")", "*", "-", "1", "# Post process estimations", "est_idxs", ",", "est_labels", "=", "self", ".", "_postprocess", "(", "est_idxs", ",", "est_labels", ")", "return", "est_idxs", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Segmenter.processHierarchical
Main process for hierarchical segmentation. Returns ------- est_idxs : list List containing estimated times for each layer in the hierarchy as np.arrays est_labels : list List containing estimated labels for each layer in the hierarchy as np.arrays
msaf/algorithms/olda/segmenter.py
def processHierarchical(self): """Main process for hierarchical segmentation. Returns ------- est_idxs : list List containing estimated times for each layer in the hierarchy as np.arrays est_labels : list List containing estimated labels for each layer in the hierarchy as np.arrays """ # Preprocess to obtain features, times, and input boundary indeces F, dur = features(self.file_struct, self.annot_beats, self.framesync) try: # Load and apply transform W = load_transform(self.config["transform"]) F = W.dot(F) # Get Segments kmin, kmax = get_num_segs(dur) # Run algorithm layer by layer est_idxs = [] est_labels = [] for k in range(kmin, kmax): S, cost = get_k_segments(F, k) est_idxs.append(S) est_labels.append(np.ones(len(S) - 1) * -1) # Make sure that the first and last boundaries are included assert est_idxs[-1][0] == 0 and \ est_idxs[-1][-1] == F.shape[1] - 1, "Layer %d does not " \ "start or end in the right frame(s)." % k # Post process layer est_idxs[-1], est_labels[-1] = \ self._postprocess(est_idxs[-1], est_labels[-1]) except: # The audio file is too short, only beginning and end logging.warning("Audio file too short! " "Only start and end boundaries.") est_idxs = [np.array([0, F.shape[1] - 1])] est_labels = [np.ones(1) * -1] return est_idxs, est_labels
def processHierarchical(self): """Main process for hierarchical segmentation. Returns ------- est_idxs : list List containing estimated times for each layer in the hierarchy as np.arrays est_labels : list List containing estimated labels for each layer in the hierarchy as np.arrays """ # Preprocess to obtain features, times, and input boundary indeces F, dur = features(self.file_struct, self.annot_beats, self.framesync) try: # Load and apply transform W = load_transform(self.config["transform"]) F = W.dot(F) # Get Segments kmin, kmax = get_num_segs(dur) # Run algorithm layer by layer est_idxs = [] est_labels = [] for k in range(kmin, kmax): S, cost = get_k_segments(F, k) est_idxs.append(S) est_labels.append(np.ones(len(S) - 1) * -1) # Make sure that the first and last boundaries are included assert est_idxs[-1][0] == 0 and \ est_idxs[-1][-1] == F.shape[1] - 1, "Layer %d does not " \ "start or end in the right frame(s)." % k # Post process layer est_idxs[-1], est_labels[-1] = \ self._postprocess(est_idxs[-1], est_labels[-1]) except: # The audio file is too short, only beginning and end logging.warning("Audio file too short! " "Only start and end boundaries.") est_idxs = [np.array([0, F.shape[1] - 1])] est_labels = [np.ones(1) * -1] return est_idxs, est_labels
[ "Main", "process", "for", "hierarchical", "segmentation", ".", "Returns", "-------", "est_idxs", ":", "list", "List", "containing", "estimated", "times", "for", "each", "layer", "in", "the", "hierarchy", "as", "np", ".", "arrays", "est_labels", ":", "list", "List", "containing", "estimated", "labels", "for", "each", "layer", "in", "the", "hierarchy", "as", "np", ".", "arrays" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/olda/segmenter.py#L302-L347
[ "def", "processHierarchical", "(", "self", ")", ":", "# Preprocess to obtain features, times, and input boundary indeces", "F", ",", "dur", "=", "features", "(", "self", ".", "file_struct", ",", "self", ".", "annot_beats", ",", "self", ".", "framesync", ")", "try", ":", "# Load and apply transform", "W", "=", "load_transform", "(", "self", ".", "config", "[", "\"transform\"", "]", ")", "F", "=", "W", ".", "dot", "(", "F", ")", "# Get Segments", "kmin", ",", "kmax", "=", "get_num_segs", "(", "dur", ")", "# Run algorithm layer by layer", "est_idxs", "=", "[", "]", "est_labels", "=", "[", "]", "for", "k", "in", "range", "(", "kmin", ",", "kmax", ")", ":", "S", ",", "cost", "=", "get_k_segments", "(", "F", ",", "k", ")", "est_idxs", ".", "append", "(", "S", ")", "est_labels", ".", "append", "(", "np", ".", "ones", "(", "len", "(", "S", ")", "-", "1", ")", "*", "-", "1", ")", "# Make sure that the first and last boundaries are included", "assert", "est_idxs", "[", "-", "1", "]", "[", "0", "]", "==", "0", "and", "est_idxs", "[", "-", "1", "]", "[", "-", "1", "]", "==", "F", ".", "shape", "[", "1", "]", "-", "1", ",", "\"Layer %d does not \"", "\"start or end in the right frame(s).\"", "%", "k", "# Post process layer", "est_idxs", "[", "-", "1", "]", ",", "est_labels", "[", "-", "1", "]", "=", "self", ".", "_postprocess", "(", "est_idxs", "[", "-", "1", "]", ",", "est_labels", "[", "-", "1", "]", ")", "except", ":", "# The audio file is too short, only beginning and end", "logging", ".", "warning", "(", "\"Audio file too short! \"", "\"Only start and end boundaries.\"", ")", "est_idxs", "=", "[", "np", ".", "array", "(", "[", "0", ",", "F", ".", "shape", "[", "1", "]", "-", "1", "]", ")", "]", "est_labels", "=", "[", "np", ".", "ones", "(", "1", ")", "*", "-", "1", "]", "return", "est_idxs", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
lognormalize
Log-normalizes features such that each vector is between min_db to 0.
msaf/utils.py
def lognormalize(F, floor=0.1, min_db=-80): """Log-normalizes features such that each vector is between min_db to 0.""" assert min_db < 0 F = min_max_normalize(F, floor=floor) F = np.abs(min_db) * np.log10(F) # Normalize from min_db to 0 return F
def lognormalize(F, floor=0.1, min_db=-80): """Log-normalizes features such that each vector is between min_db to 0.""" assert min_db < 0 F = min_max_normalize(F, floor=floor) F = np.abs(min_db) * np.log10(F) # Normalize from min_db to 0 return F
[ "Log", "-", "normalizes", "features", "such", "that", "each", "vector", "is", "between", "min_db", "to", "0", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L12-L17
[ "def", "lognormalize", "(", "F", ",", "floor", "=", "0.1", ",", "min_db", "=", "-", "80", ")", ":", "assert", "min_db", "<", "0", "F", "=", "min_max_normalize", "(", "F", ",", "floor", "=", "floor", ")", "F", "=", "np", ".", "abs", "(", "min_db", ")", "*", "np", ".", "log10", "(", "F", ")", "# Normalize from min_db to 0", "return", "F" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
min_max_normalize
Normalizes features such that each vector is between floor to 1.
msaf/utils.py
def min_max_normalize(F, floor=0.001): """Normalizes features such that each vector is between floor to 1.""" F += -F.min() + floor F = F / F.max(axis=0) return F
def min_max_normalize(F, floor=0.001): """Normalizes features such that each vector is between floor to 1.""" F += -F.min() + floor F = F / F.max(axis=0) return F
[ "Normalizes", "features", "such", "that", "each", "vector", "is", "between", "floor", "to", "1", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L20-L24
[ "def", "min_max_normalize", "(", "F", ",", "floor", "=", "0.001", ")", ":", "F", "+=", "-", "F", ".", "min", "(", ")", "+", "floor", "F", "=", "F", "/", "F", ".", "max", "(", "axis", "=", "0", ")", "return", "F" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
normalize
Normalizes the given matrix of features. Parameters ---------- X: np.array Each row represents a feature vector. norm_type: {"min_max", "log", np.inf, -np.inf, 0, float > 0, None} - `"min_max"`: Min/max scaling is performed - `"log"`: Logarithmic scaling is performed - `np.inf`: Maximum absolute value - `-np.inf`: Mininum absolute value - `0`: Number of non-zeros - float: Corresponding l_p norm. - None : No normalization is performed Returns ------- norm_X: np.array Normalized `X` according the the input parameters.
msaf/utils.py
def normalize(X, norm_type, floor=0.0, min_db=-80): """Normalizes the given matrix of features. Parameters ---------- X: np.array Each row represents a feature vector. norm_type: {"min_max", "log", np.inf, -np.inf, 0, float > 0, None} - `"min_max"`: Min/max scaling is performed - `"log"`: Logarithmic scaling is performed - `np.inf`: Maximum absolute value - `-np.inf`: Mininum absolute value - `0`: Number of non-zeros - float: Corresponding l_p norm. - None : No normalization is performed Returns ------- norm_X: np.array Normalized `X` according the the input parameters. """ if isinstance(norm_type, six.string_types): if norm_type == "min_max": return min_max_normalize(X, floor=floor) if norm_type == "log": return lognormalize(X, floor=floor, min_db=min_db) return librosa.util.normalize(X, norm=norm_type, axis=1)
def normalize(X, norm_type, floor=0.0, min_db=-80): """Normalizes the given matrix of features. Parameters ---------- X: np.array Each row represents a feature vector. norm_type: {"min_max", "log", np.inf, -np.inf, 0, float > 0, None} - `"min_max"`: Min/max scaling is performed - `"log"`: Logarithmic scaling is performed - `np.inf`: Maximum absolute value - `-np.inf`: Mininum absolute value - `0`: Number of non-zeros - float: Corresponding l_p norm. - None : No normalization is performed Returns ------- norm_X: np.array Normalized `X` according the the input parameters. """ if isinstance(norm_type, six.string_types): if norm_type == "min_max": return min_max_normalize(X, floor=floor) if norm_type == "log": return lognormalize(X, floor=floor, min_db=min_db) return librosa.util.normalize(X, norm=norm_type, axis=1)
[ "Normalizes", "the", "given", "matrix", "of", "features", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L27-L53
[ "def", "normalize", "(", "X", ",", "norm_type", ",", "floor", "=", "0.0", ",", "min_db", "=", "-", "80", ")", ":", "if", "isinstance", "(", "norm_type", ",", "six", ".", "string_types", ")", ":", "if", "norm_type", "==", "\"min_max\"", ":", "return", "min_max_normalize", "(", "X", ",", "floor", "=", "floor", ")", "if", "norm_type", "==", "\"log\"", ":", "return", "lognormalize", "(", "X", ",", "floor", "=", "floor", ",", "min_db", "=", "min_db", ")", "return", "librosa", ".", "util", ".", "normalize", "(", "X", ",", "norm", "=", "norm_type", ",", "axis", "=", "1", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_time_frames
Gets the time frames and puts them in a numpy array.
msaf/utils.py
def get_time_frames(dur, anal): """Gets the time frames and puts them in a numpy array.""" n_frames = get_num_frames(dur, anal) return np.linspace(0, dur, num=n_frames)
def get_time_frames(dur, anal): """Gets the time frames and puts them in a numpy array.""" n_frames = get_num_frames(dur, anal) return np.linspace(0, dur, num=n_frames)
[ "Gets", "the", "time", "frames", "and", "puts", "them", "in", "a", "numpy", "array", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L101-L104
[ "def", "get_time_frames", "(", "dur", ",", "anal", ")", ":", "n_frames", "=", "get_num_frames", "(", "dur", ",", "anal", ")", "return", "np", ".", "linspace", "(", "0", ",", "dur", ",", "num", "=", "n_frames", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
remove_empty_segments
Removes empty segments if needed.
msaf/utils.py
def remove_empty_segments(times, labels): """Removes empty segments if needed.""" assert len(times) - 1 == len(labels) inters = times_to_intervals(times) new_inters = [] new_labels = [] for inter, label in zip(inters, labels): if inter[0] < inter[1]: new_inters.append(inter) new_labels.append(label) return intervals_to_times(np.asarray(new_inters)), new_labels
def remove_empty_segments(times, labels): """Removes empty segments if needed.""" assert len(times) - 1 == len(labels) inters = times_to_intervals(times) new_inters = [] new_labels = [] for inter, label in zip(inters, labels): if inter[0] < inter[1]: new_inters.append(inter) new_labels.append(label) return intervals_to_times(np.asarray(new_inters)), new_labels
[ "Removes", "empty", "segments", "if", "needed", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L107-L117
[ "def", "remove_empty_segments", "(", "times", ",", "labels", ")", ":", "assert", "len", "(", "times", ")", "-", "1", "==", "len", "(", "labels", ")", "inters", "=", "times_to_intervals", "(", "times", ")", "new_inters", "=", "[", "]", "new_labels", "=", "[", "]", "for", "inter", ",", "label", "in", "zip", "(", "inters", ",", "labels", ")", ":", "if", "inter", "[", "0", "]", "<", "inter", "[", "1", "]", ":", "new_inters", ".", "append", "(", "inter", ")", "new_labels", ".", "append", "(", "label", ")", "return", "intervals_to_times", "(", "np", ".", "asarray", "(", "new_inters", ")", ")", ",", "new_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
sonify_clicks
Sonifies the estimated times into the output file. Parameters ---------- audio: np.array Audio samples of the input track. clicks: np.array Click positions in seconds. out_file: str Path to the output file. fs: int Sample rate. offset: float Offset of the clicks with respect to the audio.
msaf/utils.py
def sonify_clicks(audio, clicks, out_file, fs, offset=0): """Sonifies the estimated times into the output file. Parameters ---------- audio: np.array Audio samples of the input track. clicks: np.array Click positions in seconds. out_file: str Path to the output file. fs: int Sample rate. offset: float Offset of the clicks with respect to the audio. """ # Generate clicks (this should be done by mir_eval, but its # latest release is not compatible with latest numpy) times = clicks + offset # 1 kHz tone, 100ms click = np.sin(2 * np.pi * np.arange(fs * .1) * 1000 / (1. * fs)) # Exponential decay click *= np.exp(-np.arange(fs * .1) / (fs * .01)) length = int(times.max() * fs + click.shape[0] + 1) audio_clicks = mir_eval.sonify.clicks(times, fs, length=length) # Create array to store the audio plus the clicks out_audio = np.zeros(max(len(audio), len(audio_clicks))) # Assign the audio and the clicks out_audio[:len(audio)] = audio out_audio[:len(audio_clicks)] += audio_clicks # Write to file scipy.io.wavfile.write(out_file, fs, out_audio)
def sonify_clicks(audio, clicks, out_file, fs, offset=0): """Sonifies the estimated times into the output file. Parameters ---------- audio: np.array Audio samples of the input track. clicks: np.array Click positions in seconds. out_file: str Path to the output file. fs: int Sample rate. offset: float Offset of the clicks with respect to the audio. """ # Generate clicks (this should be done by mir_eval, but its # latest release is not compatible with latest numpy) times = clicks + offset # 1 kHz tone, 100ms click = np.sin(2 * np.pi * np.arange(fs * .1) * 1000 / (1. * fs)) # Exponential decay click *= np.exp(-np.arange(fs * .1) / (fs * .01)) length = int(times.max() * fs + click.shape[0] + 1) audio_clicks = mir_eval.sonify.clicks(times, fs, length=length) # Create array to store the audio plus the clicks out_audio = np.zeros(max(len(audio), len(audio_clicks))) # Assign the audio and the clicks out_audio[:len(audio)] = audio out_audio[:len(audio_clicks)] += audio_clicks # Write to file scipy.io.wavfile.write(out_file, fs, out_audio)
[ "Sonifies", "the", "estimated", "times", "into", "the", "output", "file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L120-L154
[ "def", "sonify_clicks", "(", "audio", ",", "clicks", ",", "out_file", ",", "fs", ",", "offset", "=", "0", ")", ":", "# Generate clicks (this should be done by mir_eval, but its", "# latest release is not compatible with latest numpy)", "times", "=", "clicks", "+", "offset", "# 1 kHz tone, 100ms", "click", "=", "np", ".", "sin", "(", "2", "*", "np", ".", "pi", "*", "np", ".", "arange", "(", "fs", "*", ".1", ")", "*", "1000", "/", "(", "1.", "*", "fs", ")", ")", "# Exponential decay", "click", "*=", "np", ".", "exp", "(", "-", "np", ".", "arange", "(", "fs", "*", ".1", ")", "/", "(", "fs", "*", ".01", ")", ")", "length", "=", "int", "(", "times", ".", "max", "(", ")", "*", "fs", "+", "click", ".", "shape", "[", "0", "]", "+", "1", ")", "audio_clicks", "=", "mir_eval", ".", "sonify", ".", "clicks", "(", "times", ",", "fs", ",", "length", "=", "length", ")", "# Create array to store the audio plus the clicks", "out_audio", "=", "np", ".", "zeros", "(", "max", "(", "len", "(", "audio", ")", ",", "len", "(", "audio_clicks", ")", ")", ")", "# Assign the audio and the clicks", "out_audio", "[", ":", "len", "(", "audio", ")", "]", "=", "audio", "out_audio", "[", ":", "len", "(", "audio_clicks", ")", "]", "+=", "audio_clicks", "# Write to file", "scipy", ".", "io", ".", "wavfile", ".", "write", "(", "out_file", ",", "fs", ",", "out_audio", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
synchronize_labels
Synchronizes the labels from the old_bound_idxs to the new_bound_idxs. Parameters ---------- new_bound_idxs: np.array New indeces to synchronize with. old_bound_idxs: np.array Old indeces, same shape as labels + 1. old_labels: np.array Labels associated to the old_bound_idxs. N: int Total number of frames. Returns ------- new_labels: np.array New labels, synchronized to the new boundary indeces.
msaf/utils.py
def synchronize_labels(new_bound_idxs, old_bound_idxs, old_labels, N): """Synchronizes the labels from the old_bound_idxs to the new_bound_idxs. Parameters ---------- new_bound_idxs: np.array New indeces to synchronize with. old_bound_idxs: np.array Old indeces, same shape as labels + 1. old_labels: np.array Labels associated to the old_bound_idxs. N: int Total number of frames. Returns ------- new_labels: np.array New labels, synchronized to the new boundary indeces. """ assert len(old_bound_idxs) - 1 == len(old_labels) # Construct unfolded labels array unfold_labels = np.zeros(N) for i, (bound_idx, label) in enumerate( zip(old_bound_idxs[:-1], old_labels)): unfold_labels[bound_idx:old_bound_idxs[i + 1]] = label # Constuct new labels new_labels = np.zeros(len(new_bound_idxs) - 1) for i, bound_idx in enumerate(new_bound_idxs[:-1]): new_labels[i] = np.median( unfold_labels[bound_idx:new_bound_idxs[i + 1]]) return new_labels
def synchronize_labels(new_bound_idxs, old_bound_idxs, old_labels, N): """Synchronizes the labels from the old_bound_idxs to the new_bound_idxs. Parameters ---------- new_bound_idxs: np.array New indeces to synchronize with. old_bound_idxs: np.array Old indeces, same shape as labels + 1. old_labels: np.array Labels associated to the old_bound_idxs. N: int Total number of frames. Returns ------- new_labels: np.array New labels, synchronized to the new boundary indeces. """ assert len(old_bound_idxs) - 1 == len(old_labels) # Construct unfolded labels array unfold_labels = np.zeros(N) for i, (bound_idx, label) in enumerate( zip(old_bound_idxs[:-1], old_labels)): unfold_labels[bound_idx:old_bound_idxs[i + 1]] = label # Constuct new labels new_labels = np.zeros(len(new_bound_idxs) - 1) for i, bound_idx in enumerate(new_bound_idxs[:-1]): new_labels[i] = np.median( unfold_labels[bound_idx:new_bound_idxs[i + 1]]) return new_labels
[ "Synchronizes", "the", "labels", "from", "the", "old_bound_idxs", "to", "the", "new_bound_idxs", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L157-L190
[ "def", "synchronize_labels", "(", "new_bound_idxs", ",", "old_bound_idxs", ",", "old_labels", ",", "N", ")", ":", "assert", "len", "(", "old_bound_idxs", ")", "-", "1", "==", "len", "(", "old_labels", ")", "# Construct unfolded labels array", "unfold_labels", "=", "np", ".", "zeros", "(", "N", ")", "for", "i", ",", "(", "bound_idx", ",", "label", ")", "in", "enumerate", "(", "zip", "(", "old_bound_idxs", "[", ":", "-", "1", "]", ",", "old_labels", ")", ")", ":", "unfold_labels", "[", "bound_idx", ":", "old_bound_idxs", "[", "i", "+", "1", "]", "]", "=", "label", "# Constuct new labels", "new_labels", "=", "np", ".", "zeros", "(", "len", "(", "new_bound_idxs", ")", "-", "1", ")", "for", "i", ",", "bound_idx", "in", "enumerate", "(", "new_bound_idxs", "[", ":", "-", "1", "]", ")", ":", "new_labels", "[", "i", "]", "=", "np", ".", "median", "(", "unfold_labels", "[", "bound_idx", ":", "new_bound_idxs", "[", "i", "+", "1", "]", "]", ")", "return", "new_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
process_segmentation_level
Processes a level of segmentation, and converts it into times. Parameters ---------- est_idxs: np.array Estimated boundaries in frame indeces. est_labels: np.array Estimated labels. N: int Number of frames in the whole track. frame_times: np.array Time stamp for each frame. dur: float Duration of the audio track. Returns ------- est_times: np.array Estimated segment boundaries in seconds. est_labels: np.array Estimated labels for each segment.
msaf/utils.py
def process_segmentation_level(est_idxs, est_labels, N, frame_times, dur): """Processes a level of segmentation, and converts it into times. Parameters ---------- est_idxs: np.array Estimated boundaries in frame indeces. est_labels: np.array Estimated labels. N: int Number of frames in the whole track. frame_times: np.array Time stamp for each frame. dur: float Duration of the audio track. Returns ------- est_times: np.array Estimated segment boundaries in seconds. est_labels: np.array Estimated labels for each segment. """ assert est_idxs[0] == 0 and est_idxs[-1] == N - 1 assert len(est_idxs) - 1 == len(est_labels) # Add silences, if needed est_times = np.concatenate(([0], frame_times[est_idxs], [dur])) silence_label = np.max(est_labels) + 1 est_labels = np.concatenate(([silence_label], est_labels, [silence_label])) # Remove empty segments if needed est_times, est_labels = remove_empty_segments(est_times, est_labels) # Make sure that the first and last times are 0 and duration, respectively assert np.allclose([est_times[0]], [0]) and \ np.allclose([est_times[-1]], [dur]) return est_times, est_labels
def process_segmentation_level(est_idxs, est_labels, N, frame_times, dur): """Processes a level of segmentation, and converts it into times. Parameters ---------- est_idxs: np.array Estimated boundaries in frame indeces. est_labels: np.array Estimated labels. N: int Number of frames in the whole track. frame_times: np.array Time stamp for each frame. dur: float Duration of the audio track. Returns ------- est_times: np.array Estimated segment boundaries in seconds. est_labels: np.array Estimated labels for each segment. """ assert est_idxs[0] == 0 and est_idxs[-1] == N - 1 assert len(est_idxs) - 1 == len(est_labels) # Add silences, if needed est_times = np.concatenate(([0], frame_times[est_idxs], [dur])) silence_label = np.max(est_labels) + 1 est_labels = np.concatenate(([silence_label], est_labels, [silence_label])) # Remove empty segments if needed est_times, est_labels = remove_empty_segments(est_times, est_labels) # Make sure that the first and last times are 0 and duration, respectively assert np.allclose([est_times[0]], [0]) and \ np.allclose([est_times[-1]], [dur]) return est_times, est_labels
[ "Processes", "a", "level", "of", "segmentation", "and", "converts", "it", "into", "times", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L193-L231
[ "def", "process_segmentation_level", "(", "est_idxs", ",", "est_labels", ",", "N", ",", "frame_times", ",", "dur", ")", ":", "assert", "est_idxs", "[", "0", "]", "==", "0", "and", "est_idxs", "[", "-", "1", "]", "==", "N", "-", "1", "assert", "len", "(", "est_idxs", ")", "-", "1", "==", "len", "(", "est_labels", ")", "# Add silences, if needed", "est_times", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "frame_times", "[", "est_idxs", "]", ",", "[", "dur", "]", ")", ")", "silence_label", "=", "np", ".", "max", "(", "est_labels", ")", "+", "1", "est_labels", "=", "np", ".", "concatenate", "(", "(", "[", "silence_label", "]", ",", "est_labels", ",", "[", "silence_label", "]", ")", ")", "# Remove empty segments if needed", "est_times", ",", "est_labels", "=", "remove_empty_segments", "(", "est_times", ",", "est_labels", ")", "# Make sure that the first and last times are 0 and duration, respectively", "assert", "np", ".", "allclose", "(", "[", "est_times", "[", "0", "]", "]", ",", "[", "0", "]", ")", "and", "np", ".", "allclose", "(", "[", "est_times", "[", "-", "1", "]", "]", ",", "[", "dur", "]", ")", "return", "est_times", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
align_end_hierarchies
Align the end of the hierarchies such that they end at the same exact second as long they have the same duration within a certain threshold. Parameters ---------- hier1: list List containing hierarchical segment boundaries. hier2: list List containing hierarchical segment boundaries. thres: float > 0 Threshold to decide whether two values are the same.
msaf/utils.py
def align_end_hierarchies(hier1, hier2, thres=0.5): """Align the end of the hierarchies such that they end at the same exact second as long they have the same duration within a certain threshold. Parameters ---------- hier1: list List containing hierarchical segment boundaries. hier2: list List containing hierarchical segment boundaries. thres: float > 0 Threshold to decide whether two values are the same. """ # Make sure we have correctly formatted hierarchies dur_h1 = hier1[0][-1] for hier in hier1: assert hier[-1] == dur_h1, "hier1 is not correctly " \ "formatted {} {}".format(hier[-1], dur_h1) dur_h2 = hier2[0][-1] for hier in hier2: assert hier[-1] == dur_h2, "hier2 is not correctly formatted" # If durations are different, do nothing if abs(dur_h1 - dur_h2) > thres: return # Align h1 with h2 for hier in hier1: hier[-1] = dur_h2
def align_end_hierarchies(hier1, hier2, thres=0.5): """Align the end of the hierarchies such that they end at the same exact second as long they have the same duration within a certain threshold. Parameters ---------- hier1: list List containing hierarchical segment boundaries. hier2: list List containing hierarchical segment boundaries. thres: float > 0 Threshold to decide whether two values are the same. """ # Make sure we have correctly formatted hierarchies dur_h1 = hier1[0][-1] for hier in hier1: assert hier[-1] == dur_h1, "hier1 is not correctly " \ "formatted {} {}".format(hier[-1], dur_h1) dur_h2 = hier2[0][-1] for hier in hier2: assert hier[-1] == dur_h2, "hier2 is not correctly formatted" # If durations are different, do nothing if abs(dur_h1 - dur_h2) > thres: return # Align h1 with h2 for hier in hier1: hier[-1] = dur_h2
[ "Align", "the", "end", "of", "the", "hierarchies", "such", "that", "they", "end", "at", "the", "same", "exact", "second", "as", "long", "they", "have", "the", "same", "duration", "within", "a", "certain", "threshold", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/utils.py#L234-L262
[ "def", "align_end_hierarchies", "(", "hier1", ",", "hier2", ",", "thres", "=", "0.5", ")", ":", "# Make sure we have correctly formatted hierarchies", "dur_h1", "=", "hier1", "[", "0", "]", "[", "-", "1", "]", "for", "hier", "in", "hier1", ":", "assert", "hier", "[", "-", "1", "]", "==", "dur_h1", ",", "\"hier1 is not correctly \"", "\"formatted {} {}\"", ".", "format", "(", "hier", "[", "-", "1", "]", ",", "dur_h1", ")", "dur_h2", "=", "hier2", "[", "0", "]", "[", "-", "1", "]", "for", "hier", "in", "hier2", ":", "assert", "hier", "[", "-", "1", "]", "==", "dur_h2", ",", "\"hier2 is not correctly formatted\"", "# If durations are different, do nothing", "if", "abs", "(", "dur_h1", "-", "dur_h2", ")", ">", "thres", ":", "return", "# Align h1 with h2", "for", "hier", "in", "hier1", ":", "hier", "[", "-", "1", "]", "=", "dur_h2" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
SIVM._distance
compute distances of a specific data point to all other samples
msaf/pymf/sivm.py
def _distance(self, idx): """ compute distances of a specific data point to all other samples""" if scipy.sparse.issparse(self.data): step = self.data.shape[1] else: step = 50000 d = np.zeros((self.data.shape[1])) if idx == -1: # set vec to origin if idx=-1 vec = np.zeros((self.data.shape[0], 1)) if scipy.sparse.issparse(self.data): vec = scipy.sparse.csc_matrix(vec) else: vec = self.data[:, idx:idx+1] self._logger.info('compute distance to node ' + str(idx)) # slice data into smaller chunks for idx_start in range(0, self.data.shape[1], step): if idx_start + step > self.data.shape[1]: idx_end = self.data.shape[1] else: idx_end = idx_start + step d[idx_start:idx_end] = self._distfunc( self.data[:,idx_start:idx_end], vec) self._logger.info('completed:' + str(idx_end/(self.data.shape[1]/100.0)) + "%") return d
def _distance(self, idx): """ compute distances of a specific data point to all other samples""" if scipy.sparse.issparse(self.data): step = self.data.shape[1] else: step = 50000 d = np.zeros((self.data.shape[1])) if idx == -1: # set vec to origin if idx=-1 vec = np.zeros((self.data.shape[0], 1)) if scipy.sparse.issparse(self.data): vec = scipy.sparse.csc_matrix(vec) else: vec = self.data[:, idx:idx+1] self._logger.info('compute distance to node ' + str(idx)) # slice data into smaller chunks for idx_start in range(0, self.data.shape[1], step): if idx_start + step > self.data.shape[1]: idx_end = self.data.shape[1] else: idx_end = idx_start + step d[idx_start:idx_end] = self._distfunc( self.data[:,idx_start:idx_end], vec) self._logger.info('completed:' + str(idx_end/(self.data.shape[1]/100.0)) + "%") return d
[ "compute", "distances", "of", "a", "specific", "data", "point", "to", "all", "other", "samples" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/sivm.py#L107-L137
[ "def", "_distance", "(", "self", ",", "idx", ")", ":", "if", "scipy", ".", "sparse", ".", "issparse", "(", "self", ".", "data", ")", ":", "step", "=", "self", ".", "data", ".", "shape", "[", "1", "]", "else", ":", "step", "=", "50000", "d", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "1", "]", ")", ")", "if", "idx", "==", "-", "1", ":", "# set vec to origin if idx=-1", "vec", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "if", "scipy", ".", "sparse", ".", "issparse", "(", "self", ".", "data", ")", ":", "vec", "=", "scipy", ".", "sparse", ".", "csc_matrix", "(", "vec", ")", "else", ":", "vec", "=", "self", ".", "data", "[", ":", ",", "idx", ":", "idx", "+", "1", "]", "self", ".", "_logger", ".", "info", "(", "'compute distance to node '", "+", "str", "(", "idx", ")", ")", "# slice data into smaller chunks", "for", "idx_start", "in", "range", "(", "0", ",", "self", ".", "data", ".", "shape", "[", "1", "]", ",", "step", ")", ":", "if", "idx_start", "+", "step", ">", "self", ".", "data", ".", "shape", "[", "1", "]", ":", "idx_end", "=", "self", ".", "data", ".", "shape", "[", "1", "]", "else", ":", "idx_end", "=", "idx_start", "+", "step", "d", "[", "idx_start", ":", "idx_end", "]", "=", "self", ".", "_distfunc", "(", "self", ".", "data", "[", ":", ",", "idx_start", ":", "idx_end", "]", ",", "vec", ")", "self", ".", "_logger", ".", "info", "(", "'completed:'", "+", "str", "(", "idx_end", "/", "(", "self", ".", "data", ".", "shape", "[", "1", "]", "/", "100.0", ")", ")", "+", "\"%\"", ")", "return", "d" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
SIVM.update_w
compute new W
msaf/pymf/sivm.py
def update_w(self): """ compute new W """ EPS = 10**-8 self.init_sivm() # initialize some of the recursively updated distance measures .... d_square = np.zeros((self.data.shape[1])) d_sum = np.zeros((self.data.shape[1])) d_i_times_d_j = np.zeros((self.data.shape[1])) distiter = np.zeros((self.data.shape[1])) a = np.log(self._maxd) a_inc = a.copy() for l in range(1, self._num_bases): d = self._distance(self.select[l-1]) # take the log of d (sually more stable that d) d = np.log(d + EPS) d_i_times_d_j += d * d_sum d_sum += d d_square += d**2 distiter = d_i_times_d_j + a*d_sum - (l/2.0) * d_square # detect the next best data point self.select.append(np.argmax(distiter)) self._logger.info('cur_nodes: ' + str(self.select)) # sort indices, otherwise h5py won't work self.W = self.data[:, np.sort(self.select)] # "unsort" it again to keep the correct order self.W = self.W[:, np.argsort(np.argsort(self.select))]
def update_w(self): """ compute new W """ EPS = 10**-8 self.init_sivm() # initialize some of the recursively updated distance measures .... d_square = np.zeros((self.data.shape[1])) d_sum = np.zeros((self.data.shape[1])) d_i_times_d_j = np.zeros((self.data.shape[1])) distiter = np.zeros((self.data.shape[1])) a = np.log(self._maxd) a_inc = a.copy() for l in range(1, self._num_bases): d = self._distance(self.select[l-1]) # take the log of d (sually more stable that d) d = np.log(d + EPS) d_i_times_d_j += d * d_sum d_sum += d d_square += d**2 distiter = d_i_times_d_j + a*d_sum - (l/2.0) * d_square # detect the next best data point self.select.append(np.argmax(distiter)) self._logger.info('cur_nodes: ' + str(self.select)) # sort indices, otherwise h5py won't work self.W = self.data[:, np.sort(self.select)] # "unsort" it again to keep the correct order self.W = self.W[:, np.argsort(np.argsort(self.select))]
[ "compute", "new", "W" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/sivm.py#L168-L201
[ "def", "update_w", "(", "self", ")", ":", "EPS", "=", "10", "**", "-", "8", "self", ".", "init_sivm", "(", ")", "# initialize some of the recursively updated distance measures ....", "d_square", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "1", "]", ")", ")", "d_sum", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "1", "]", ")", ")", "d_i_times_d_j", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "1", "]", ")", ")", "distiter", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "1", "]", ")", ")", "a", "=", "np", ".", "log", "(", "self", ".", "_maxd", ")", "a_inc", "=", "a", ".", "copy", "(", ")", "for", "l", "in", "range", "(", "1", ",", "self", ".", "_num_bases", ")", ":", "d", "=", "self", ".", "_distance", "(", "self", ".", "select", "[", "l", "-", "1", "]", ")", "# take the log of d (sually more stable that d)", "d", "=", "np", ".", "log", "(", "d", "+", "EPS", ")", "d_i_times_d_j", "+=", "d", "*", "d_sum", "d_sum", "+=", "d", "d_square", "+=", "d", "**", "2", "distiter", "=", "d_i_times_d_j", "+", "a", "*", "d_sum", "-", "(", "l", "/", "2.0", ")", "*", "d_square", "# detect the next best data point", "self", ".", "select", ".", "append", "(", "np", ".", "argmax", "(", "distiter", ")", ")", "self", ".", "_logger", ".", "info", "(", "'cur_nodes: '", "+", "str", "(", "self", ".", "select", ")", ")", "# sort indices, otherwise h5py won't work", "self", ".", "W", "=", "self", ".", "data", "[", ":", ",", "np", ".", "sort", "(", "self", ".", "select", ")", "]", "# \"unsort\" it again to keep the correct order", "self", ".", "W", "=", "self", ".", "W", "[", ":", ",", "np", ".", "argsort", "(", "np", ".", "argsort", "(", "self", ".", "select", ")", ")", "]" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
XMeans.estimate_K_xmeans
Estimates K running X-means algorithm (Pelleg & Moore, 2000).
msaf/algorithms/fmc2d/xmeans.py
def estimate_K_xmeans(self, th=0.2, maxK = 10): """Estimates K running X-means algorithm (Pelleg & Moore, 2000).""" # Run initial K-means means, labels = self.run_kmeans(self.X, self.init_K) # Run X-means algorithm stop = False curr_K = self.init_K while not stop: stop = True final_means = [] for k in range(curr_K): # Find the data that corresponds to the k-th cluster D = self.get_clustered_data(self.X, labels, k) if len(D) == 0 or D.shape[0] == 1: continue # Whiten and find whitened mean stdD = np.std(D, axis=0) #D = vq.whiten(D) D /= float(stdD) # Same as line above mean = D.mean(axis=0) # Cluster this subspace by half (K=2) half_means, half_labels = self.run_kmeans(D, K=2) # Compute BICs bic1 = self.compute_bic(D, [mean], K=1, labels=np.zeros(D.shape[0]), R=D.shape[0]) bic2 = self.compute_bic(D, half_means, K=2, labels=half_labels, R=D.shape[0]) # Split or not max_bic = np.max([np.abs(bic1), np.abs(bic2)]) norm_bic1 = bic1 / float(max_bic) norm_bic2 = bic2 / float(max_bic) diff_bic = np.abs(norm_bic1 - norm_bic2) # Split! #print "diff_bic", diff_bic if diff_bic > th: final_means.append(half_means[0] * stdD) final_means.append(half_means[1] * stdD) curr_K += 1 stop = False # Don't split else: final_means.append(mean * stdD) final_means = np.asarray(final_means) #print "Estimated K: ", curr_K if self.plot: plt.scatter(self.X[:, 0], self.X[:, 1]) plt.scatter(final_means[:, 0], final_means[:, 1], color="y") plt.show() if curr_K >= maxK or self.X.shape[-1] != final_means.shape[-1]: stop = True else: labels, dist = vq.vq(self.X, final_means) return curr_K
def estimate_K_xmeans(self, th=0.2, maxK = 10): """Estimates K running X-means algorithm (Pelleg & Moore, 2000).""" # Run initial K-means means, labels = self.run_kmeans(self.X, self.init_K) # Run X-means algorithm stop = False curr_K = self.init_K while not stop: stop = True final_means = [] for k in range(curr_K): # Find the data that corresponds to the k-th cluster D = self.get_clustered_data(self.X, labels, k) if len(D) == 0 or D.shape[0] == 1: continue # Whiten and find whitened mean stdD = np.std(D, axis=0) #D = vq.whiten(D) D /= float(stdD) # Same as line above mean = D.mean(axis=0) # Cluster this subspace by half (K=2) half_means, half_labels = self.run_kmeans(D, K=2) # Compute BICs bic1 = self.compute_bic(D, [mean], K=1, labels=np.zeros(D.shape[0]), R=D.shape[0]) bic2 = self.compute_bic(D, half_means, K=2, labels=half_labels, R=D.shape[0]) # Split or not max_bic = np.max([np.abs(bic1), np.abs(bic2)]) norm_bic1 = bic1 / float(max_bic) norm_bic2 = bic2 / float(max_bic) diff_bic = np.abs(norm_bic1 - norm_bic2) # Split! #print "diff_bic", diff_bic if diff_bic > th: final_means.append(half_means[0] * stdD) final_means.append(half_means[1] * stdD) curr_K += 1 stop = False # Don't split else: final_means.append(mean * stdD) final_means = np.asarray(final_means) #print "Estimated K: ", curr_K if self.plot: plt.scatter(self.X[:, 0], self.X[:, 1]) plt.scatter(final_means[:, 0], final_means[:, 1], color="y") plt.show() if curr_K >= maxK or self.X.shape[-1] != final_means.shape[-1]: stop = True else: labels, dist = vq.vq(self.X, final_means) return curr_K
[ "Estimates", "K", "running", "X", "-", "means", "algorithm", "(", "Pelleg", "&", "Moore", "2000", ")", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/xmeans.py#L18-L82
[ "def", "estimate_K_xmeans", "(", "self", ",", "th", "=", "0.2", ",", "maxK", "=", "10", ")", ":", "# Run initial K-means", "means", ",", "labels", "=", "self", ".", "run_kmeans", "(", "self", ".", "X", ",", "self", ".", "init_K", ")", "# Run X-means algorithm", "stop", "=", "False", "curr_K", "=", "self", ".", "init_K", "while", "not", "stop", ":", "stop", "=", "True", "final_means", "=", "[", "]", "for", "k", "in", "range", "(", "curr_K", ")", ":", "# Find the data that corresponds to the k-th cluster", "D", "=", "self", ".", "get_clustered_data", "(", "self", ".", "X", ",", "labels", ",", "k", ")", "if", "len", "(", "D", ")", "==", "0", "or", "D", ".", "shape", "[", "0", "]", "==", "1", ":", "continue", "# Whiten and find whitened mean", "stdD", "=", "np", ".", "std", "(", "D", ",", "axis", "=", "0", ")", "#D = vq.whiten(D)", "D", "/=", "float", "(", "stdD", ")", "# Same as line above", "mean", "=", "D", ".", "mean", "(", "axis", "=", "0", ")", "# Cluster this subspace by half (K=2)", "half_means", ",", "half_labels", "=", "self", ".", "run_kmeans", "(", "D", ",", "K", "=", "2", ")", "# Compute BICs", "bic1", "=", "self", ".", "compute_bic", "(", "D", ",", "[", "mean", "]", ",", "K", "=", "1", ",", "labels", "=", "np", ".", "zeros", "(", "D", ".", "shape", "[", "0", "]", ")", ",", "R", "=", "D", ".", "shape", "[", "0", "]", ")", "bic2", "=", "self", ".", "compute_bic", "(", "D", ",", "half_means", ",", "K", "=", "2", ",", "labels", "=", "half_labels", ",", "R", "=", "D", ".", "shape", "[", "0", "]", ")", "# Split or not", "max_bic", "=", "np", ".", "max", "(", "[", "np", ".", "abs", "(", "bic1", ")", ",", "np", ".", "abs", "(", "bic2", ")", "]", ")", "norm_bic1", "=", "bic1", "/", "float", "(", "max_bic", ")", "norm_bic2", "=", "bic2", "/", "float", "(", "max_bic", ")", "diff_bic", "=", "np", ".", "abs", "(", "norm_bic1", "-", "norm_bic2", ")", "# Split!", "#print \"diff_bic\", diff_bic", "if", "diff_bic", ">", "th", ":", "final_means", ".", "append", "(", "half_means", "[", "0", "]", "*", "stdD", ")", "final_means", ".", "append", "(", "half_means", "[", "1", "]", "*", "stdD", ")", "curr_K", "+=", "1", "stop", "=", "False", "# Don't split", "else", ":", "final_means", ".", "append", "(", "mean", "*", "stdD", ")", "final_means", "=", "np", ".", "asarray", "(", "final_means", ")", "#print \"Estimated K: \", curr_K", "if", "self", ".", "plot", ":", "plt", ".", "scatter", "(", "self", ".", "X", "[", ":", ",", "0", "]", ",", "self", ".", "X", "[", ":", ",", "1", "]", ")", "plt", ".", "scatter", "(", "final_means", "[", ":", ",", "0", "]", ",", "final_means", "[", ":", ",", "1", "]", ",", "color", "=", "\"y\"", ")", "plt", ".", "show", "(", ")", "if", "curr_K", ">=", "maxK", "or", "self", ".", "X", ".", "shape", "[", "-", "1", "]", "!=", "final_means", ".", "shape", "[", "-", "1", "]", ":", "stop", "=", "True", "else", ":", "labels", ",", "dist", "=", "vq", ".", "vq", "(", "self", ".", "X", ",", "final_means", ")", "return", "curr_K" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
XMeans.estimate_K_knee
Estimates the K using K-means and BIC, by sweeping various K and choosing the optimal BIC.
msaf/algorithms/fmc2d/xmeans.py
def estimate_K_knee(self, th=.015, maxK=12): """Estimates the K using K-means and BIC, by sweeping various K and choosing the optimal BIC.""" # Sweep K-means if self.X.shape[0] < maxK: maxK = self.X.shape[0] if maxK < 2: maxK = 2 K = np.arange(1, maxK) bics = [] for k in K: means, labels = self.run_kmeans(self.X, k) bic = self.compute_bic(self.X, means, labels, K=k, R=self.X.shape[0]) bics.append(bic) diff_bics = np.diff(bics) finalK = K[-1] if len(bics) == 1: finalK = 2 else: # Normalize bics = np.asarray(bics) bics -= bics.min() #bics /= bics.max() diff_bics -= diff_bics.min() #diff_bics /= diff_bics.max() #print bics, diff_bics # Find optimum K for i in range(len(K[:-1])): #if bics[i] > diff_bics[i]: if diff_bics[i] < th and K[i] != 1: finalK = K[i] break #print "Estimated K: ", finalK if self.plot: plt.subplot(2, 1, 1) plt.plot(K, bics, label="BIC") plt.plot(K[:-1], diff_bics, label="BIC diff") plt.legend(loc=2) plt.subplot(2, 1, 2) plt.scatter(self.X[:, 0], self.X[:, 1]) plt.show() return finalK
def estimate_K_knee(self, th=.015, maxK=12): """Estimates the K using K-means and BIC, by sweeping various K and choosing the optimal BIC.""" # Sweep K-means if self.X.shape[0] < maxK: maxK = self.X.shape[0] if maxK < 2: maxK = 2 K = np.arange(1, maxK) bics = [] for k in K: means, labels = self.run_kmeans(self.X, k) bic = self.compute_bic(self.X, means, labels, K=k, R=self.X.shape[0]) bics.append(bic) diff_bics = np.diff(bics) finalK = K[-1] if len(bics) == 1: finalK = 2 else: # Normalize bics = np.asarray(bics) bics -= bics.min() #bics /= bics.max() diff_bics -= diff_bics.min() #diff_bics /= diff_bics.max() #print bics, diff_bics # Find optimum K for i in range(len(K[:-1])): #if bics[i] > diff_bics[i]: if diff_bics[i] < th and K[i] != 1: finalK = K[i] break #print "Estimated K: ", finalK if self.plot: plt.subplot(2, 1, 1) plt.plot(K, bics, label="BIC") plt.plot(K[:-1], diff_bics, label="BIC diff") plt.legend(loc=2) plt.subplot(2, 1, 2) plt.scatter(self.X[:, 0], self.X[:, 1]) plt.show() return finalK
[ "Estimates", "the", "K", "using", "K", "-", "means", "and", "BIC", "by", "sweeping", "various", "K", "and", "choosing", "the", "optimal", "BIC", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/xmeans.py#L84-L131
[ "def", "estimate_K_knee", "(", "self", ",", "th", "=", ".015", ",", "maxK", "=", "12", ")", ":", "# Sweep K-means", "if", "self", ".", "X", ".", "shape", "[", "0", "]", "<", "maxK", ":", "maxK", "=", "self", ".", "X", ".", "shape", "[", "0", "]", "if", "maxK", "<", "2", ":", "maxK", "=", "2", "K", "=", "np", ".", "arange", "(", "1", ",", "maxK", ")", "bics", "=", "[", "]", "for", "k", "in", "K", ":", "means", ",", "labels", "=", "self", ".", "run_kmeans", "(", "self", ".", "X", ",", "k", ")", "bic", "=", "self", ".", "compute_bic", "(", "self", ".", "X", ",", "means", ",", "labels", ",", "K", "=", "k", ",", "R", "=", "self", ".", "X", ".", "shape", "[", "0", "]", ")", "bics", ".", "append", "(", "bic", ")", "diff_bics", "=", "np", ".", "diff", "(", "bics", ")", "finalK", "=", "K", "[", "-", "1", "]", "if", "len", "(", "bics", ")", "==", "1", ":", "finalK", "=", "2", "else", ":", "# Normalize", "bics", "=", "np", ".", "asarray", "(", "bics", ")", "bics", "-=", "bics", ".", "min", "(", ")", "#bics /= bics.max()", "diff_bics", "-=", "diff_bics", ".", "min", "(", ")", "#diff_bics /= diff_bics.max()", "#print bics, diff_bics", "# Find optimum K", "for", "i", "in", "range", "(", "len", "(", "K", "[", ":", "-", "1", "]", ")", ")", ":", "#if bics[i] > diff_bics[i]:", "if", "diff_bics", "[", "i", "]", "<", "th", "and", "K", "[", "i", "]", "!=", "1", ":", "finalK", "=", "K", "[", "i", "]", "break", "#print \"Estimated K: \", finalK", "if", "self", ".", "plot", ":", "plt", ".", "subplot", "(", "2", ",", "1", ",", "1", ")", "plt", ".", "plot", "(", "K", ",", "bics", ",", "label", "=", "\"BIC\"", ")", "plt", ".", "plot", "(", "K", "[", ":", "-", "1", "]", ",", "diff_bics", ",", "label", "=", "\"BIC diff\"", ")", "plt", ".", "legend", "(", "loc", "=", "2", ")", "plt", ".", "subplot", "(", "2", ",", "1", ",", "2", ")", "plt", ".", "scatter", "(", "self", ".", "X", "[", ":", ",", "0", "]", ",", "self", ".", "X", "[", ":", ",", "1", "]", ")", "plt", ".", "show", "(", ")", "return", "finalK" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
XMeans.get_clustered_data
Returns the data with a specific label_index, using the previously learned labels.
msaf/algorithms/fmc2d/xmeans.py
def get_clustered_data(self, X, labels, label_index): """Returns the data with a specific label_index, using the previously learned labels.""" D = X[np.argwhere(labels == label_index)] return D.reshape((D.shape[0], D.shape[-1]))
def get_clustered_data(self, X, labels, label_index): """Returns the data with a specific label_index, using the previously learned labels.""" D = X[np.argwhere(labels == label_index)] return D.reshape((D.shape[0], D.shape[-1]))
[ "Returns", "the", "data", "with", "a", "specific", "label_index", "using", "the", "previously", "learned", "labels", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/xmeans.py#L133-L137
[ "def", "get_clustered_data", "(", "self", ",", "X", ",", "labels", ",", "label_index", ")", ":", "D", "=", "X", "[", "np", ".", "argwhere", "(", "labels", "==", "label_index", ")", "]", "return", "D", ".", "reshape", "(", "(", "D", ".", "shape", "[", "0", "]", ",", "D", ".", "shape", "[", "-", "1", "]", ")", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
XMeans.run_kmeans
Runs k-means and returns the labels assigned to the data.
msaf/algorithms/fmc2d/xmeans.py
def run_kmeans(self, X, K): """Runs k-means and returns the labels assigned to the data.""" wX = vq.whiten(X) means, dist = vq.kmeans(wX, K, iter=100) labels, dist = vq.vq(wX, means) return means, labels
def run_kmeans(self, X, K): """Runs k-means and returns the labels assigned to the data.""" wX = vq.whiten(X) means, dist = vq.kmeans(wX, K, iter=100) labels, dist = vq.vq(wX, means) return means, labels
[ "Runs", "k", "-", "means", "and", "returns", "the", "labels", "assigned", "to", "the", "data", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/xmeans.py#L139-L144
[ "def", "run_kmeans", "(", "self", ",", "X", ",", "K", ")", ":", "wX", "=", "vq", ".", "whiten", "(", "X", ")", "means", ",", "dist", "=", "vq", ".", "kmeans", "(", "wX", ",", "K", ",", "iter", "=", "100", ")", "labels", ",", "dist", "=", "vq", ".", "vq", "(", "wX", ",", "means", ")", "return", "means", ",", "labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
XMeans.compute_bic
Computes the Bayesian Information Criterion.
msaf/algorithms/fmc2d/xmeans.py
def compute_bic(self, D, means, labels, K, R): """Computes the Bayesian Information Criterion.""" D = vq.whiten(D) Rn = D.shape[0] M = D.shape[1] if R == K: return 1 # Maximum likelihood estimate (MLE) mle_var = 0 for k in range(len(means)): X = D[np.argwhere(labels == k)] X = X.reshape((X.shape[0], X.shape[-1])) for x in X: mle_var += distance.euclidean(x, means[k]) #print x, means[k], mle_var mle_var /= float(R - K) # Log-likelihood of the data l_D = - Rn/2. * np.log(2*np.pi) - (Rn * M)/2. * np.log(mle_var) - \ (Rn - K) / 2. + Rn * np.log(Rn) - Rn * np.log(R) # Params of BIC p = (K-1) + M * K + mle_var #print "BIC:", l_D, p, R, K # Return the bic return l_D - p / 2. * np.log(R)
def compute_bic(self, D, means, labels, K, R): """Computes the Bayesian Information Criterion.""" D = vq.whiten(D) Rn = D.shape[0] M = D.shape[1] if R == K: return 1 # Maximum likelihood estimate (MLE) mle_var = 0 for k in range(len(means)): X = D[np.argwhere(labels == k)] X = X.reshape((X.shape[0], X.shape[-1])) for x in X: mle_var += distance.euclidean(x, means[k]) #print x, means[k], mle_var mle_var /= float(R - K) # Log-likelihood of the data l_D = - Rn/2. * np.log(2*np.pi) - (Rn * M)/2. * np.log(mle_var) - \ (Rn - K) / 2. + Rn * np.log(Rn) - Rn * np.log(R) # Params of BIC p = (K-1) + M * K + mle_var #print "BIC:", l_D, p, R, K # Return the bic return l_D - p / 2. * np.log(R)
[ "Computes", "the", "Bayesian", "Information", "Criterion", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/xmeans.py#L146-L175
[ "def", "compute_bic", "(", "self", ",", "D", ",", "means", ",", "labels", ",", "K", ",", "R", ")", ":", "D", "=", "vq", ".", "whiten", "(", "D", ")", "Rn", "=", "D", ".", "shape", "[", "0", "]", "M", "=", "D", ".", "shape", "[", "1", "]", "if", "R", "==", "K", ":", "return", "1", "# Maximum likelihood estimate (MLE)", "mle_var", "=", "0", "for", "k", "in", "range", "(", "len", "(", "means", ")", ")", ":", "X", "=", "D", "[", "np", ".", "argwhere", "(", "labels", "==", "k", ")", "]", "X", "=", "X", ".", "reshape", "(", "(", "X", ".", "shape", "[", "0", "]", ",", "X", ".", "shape", "[", "-", "1", "]", ")", ")", "for", "x", "in", "X", ":", "mle_var", "+=", "distance", ".", "euclidean", "(", "x", ",", "means", "[", "k", "]", ")", "#print x, means[k], mle_var", "mle_var", "/=", "float", "(", "R", "-", "K", ")", "# Log-likelihood of the data", "l_D", "=", "-", "Rn", "/", "2.", "*", "np", ".", "log", "(", "2", "*", "np", ".", "pi", ")", "-", "(", "Rn", "*", "M", ")", "/", "2.", "*", "np", ".", "log", "(", "mle_var", ")", "-", "(", "Rn", "-", "K", ")", "/", "2.", "+", "Rn", "*", "np", ".", "log", "(", "Rn", ")", "-", "Rn", "*", "np", ".", "log", "(", "R", ")", "# Params of BIC", "p", "=", "(", "K", "-", "1", ")", "+", "M", "*", "K", "+", "mle_var", "#print \"BIC:\", l_D, p, R, K", "# Return the bic", "return", "l_D", "-", "p", "/", "2.", "*", "np", ".", "log", "(", "R", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
XMeans.generate_2d_data
Generates N*K 2D data points with K means and N data points for each mean.
msaf/algorithms/fmc2d/xmeans.py
def generate_2d_data(self, N=100, K=5): """Generates N*K 2D data points with K means and N data points for each mean.""" # Seed the random np.random.seed(seed=int(time.time())) # Amount of spread of the centroids spread = 30 # Generate random data X = np.empty((0, 2)) for i in range(K): mean = np.array([np.random.random()*spread, np.random.random()*spread]) x = np.random.normal(0.0, scale=1.0, size=(N, 2)) + mean X = np.append(X, x, axis=0) return X
def generate_2d_data(self, N=100, K=5): """Generates N*K 2D data points with K means and N data points for each mean.""" # Seed the random np.random.seed(seed=int(time.time())) # Amount of spread of the centroids spread = 30 # Generate random data X = np.empty((0, 2)) for i in range(K): mean = np.array([np.random.random()*spread, np.random.random()*spread]) x = np.random.normal(0.0, scale=1.0, size=(N, 2)) + mean X = np.append(X, x, axis=0) return X
[ "Generates", "N", "*", "K", "2D", "data", "points", "with", "K", "means", "and", "N", "data", "points", "for", "each", "mean", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/xmeans.py#L178-L195
[ "def", "generate_2d_data", "(", "self", ",", "N", "=", "100", ",", "K", "=", "5", ")", ":", "# Seed the random", "np", ".", "random", ".", "seed", "(", "seed", "=", "int", "(", "time", ".", "time", "(", ")", ")", ")", "# Amount of spread of the centroids", "spread", "=", "30", "# Generate random data", "X", "=", "np", ".", "empty", "(", "(", "0", ",", "2", ")", ")", "for", "i", "in", "range", "(", "K", ")", ":", "mean", "=", "np", ".", "array", "(", "[", "np", ".", "random", ".", "random", "(", ")", "*", "spread", ",", "np", ".", "random", ".", "random", "(", ")", "*", "spread", "]", ")", "x", "=", "np", ".", "random", ".", "normal", "(", "0.0", ",", "scale", "=", "1.0", ",", "size", "=", "(", "N", ",", "2", ")", ")", "+", "mean", "X", "=", "np", ".", "append", "(", "X", ",", "x", ",", "axis", "=", "0", ")", "return", "X" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
SUB.factorize
Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1
msaf/pymf/sub.py
def factorize(self): """Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1 """ # compute new coefficients for reconstructing data points self.update_w() # for CHNMF it is sometimes useful to only compute # the basis vectors if self._compute_h: self.update_h() self.W = self.mdl.W self.H = self.mdl.H self.ferr = np.zeros(1) self.ferr[0] = self.mdl.frobenius_norm() self._print_cur_status(' Fro:' + str(self.ferr[0]))
def factorize(self): """Do factorization s.t. data = dot(dot(data,beta),H), under the convexity constraint beta >=0, sum(beta)=1, H >=0, sum(H)=1 """ # compute new coefficients for reconstructing data points self.update_w() # for CHNMF it is sometimes useful to only compute # the basis vectors if self._compute_h: self.update_h() self.W = self.mdl.W self.H = self.mdl.H self.ferr = np.zeros(1) self.ferr[0] = self.mdl.frobenius_norm() self._print_cur_status(' Fro:' + str(self.ferr[0]))
[ "Do", "factorization", "s", ".", "t", ".", "data", "=", "dot", "(", "dot", "(", "data", "beta", ")", "H", ")", "under", "the", "convexity", "constraint", "beta", ">", "=", "0", "sum", "(", "beta", ")", "=", "1", "H", ">", "=", "0", "sum", "(", "H", ")", "=", "1" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/sub.py#L206-L223
[ "def", "factorize", "(", "self", ")", ":", "# compute new coefficients for reconstructing data points", "self", ".", "update_w", "(", ")", "# for CHNMF it is sometimes useful to only compute", "# the basis vectors", "if", "self", ".", "_compute_h", ":", "self", ".", "update_h", "(", ")", "self", ".", "W", "=", "self", ".", "mdl", ".", "W", "self", ".", "H", "=", "self", ".", "mdl", ".", "H", "self", ".", "ferr", "=", "np", ".", "zeros", "(", "1", ")", "self", ".", "ferr", "[", "0", "]", "=", "self", ".", "mdl", ".", "frobenius_norm", "(", ")", "self", ".", "_print_cur_status", "(", "' Fro:'", "+", "str", "(", "self", ".", "ferr", "[", "0", "]", ")", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
resample_mx
Y = resample_mx(X, incolpos, outcolpos) X is taken as a set of columns, each starting at 'time' colpos, and continuing until the start of the next column. Y is a similar matrix, with time boundaries defined by outcolpos. Each column of Y is a duration-weighted average of the overlapping columns of X. 2010-04-14 Dan Ellis dpwe@ee.columbia.edu based on samplemx/beatavg -> python: TBM, 2011-11-05, TESTED
msaf/algorithms/fmc2d/utils_2dfmc.py
def resample_mx(X, incolpos, outcolpos): """ Y = resample_mx(X, incolpos, outcolpos) X is taken as a set of columns, each starting at 'time' colpos, and continuing until the start of the next column. Y is a similar matrix, with time boundaries defined by outcolpos. Each column of Y is a duration-weighted average of the overlapping columns of X. 2010-04-14 Dan Ellis dpwe@ee.columbia.edu based on samplemx/beatavg -> python: TBM, 2011-11-05, TESTED """ noutcols = len(outcolpos) Y = np.zeros((X.shape[0], noutcols)) # assign 'end times' to final columns if outcolpos.max() > incolpos.max(): incolpos = np.concatenate([incolpos,[outcolpos.max()]]) X = np.concatenate([X, X[:,-1].reshape(X.shape[0],1)], axis=1) outcolpos = np.concatenate([outcolpos, [outcolpos[-1]]]) # durations (default weights) of input columns) incoldurs = np.concatenate([np.diff(incolpos), [1]]) for c in range(noutcols): firstincol = np.where(incolpos <= outcolpos[c])[0][-1] firstincolnext = np.where(incolpos < outcolpos[c+1])[0][-1] lastincol = max(firstincol,firstincolnext) # default weights wts = copy.deepcopy(incoldurs[firstincol:lastincol+1]) # now fix up by partial overlap at ends if len(wts) > 1: wts[0] = wts[0] - (outcolpos[c] - incolpos[firstincol]) wts[-1] = wts[-1] - (incolpos[lastincol+1] - outcolpos[c+1]) wts = wts * 1. / float(sum(wts)) Y[:,c] = np.dot(X[:,firstincol:lastincol+1], wts) # done return Y
def resample_mx(X, incolpos, outcolpos): """ Y = resample_mx(X, incolpos, outcolpos) X is taken as a set of columns, each starting at 'time' colpos, and continuing until the start of the next column. Y is a similar matrix, with time boundaries defined by outcolpos. Each column of Y is a duration-weighted average of the overlapping columns of X. 2010-04-14 Dan Ellis dpwe@ee.columbia.edu based on samplemx/beatavg -> python: TBM, 2011-11-05, TESTED """ noutcols = len(outcolpos) Y = np.zeros((X.shape[0], noutcols)) # assign 'end times' to final columns if outcolpos.max() > incolpos.max(): incolpos = np.concatenate([incolpos,[outcolpos.max()]]) X = np.concatenate([X, X[:,-1].reshape(X.shape[0],1)], axis=1) outcolpos = np.concatenate([outcolpos, [outcolpos[-1]]]) # durations (default weights) of input columns) incoldurs = np.concatenate([np.diff(incolpos), [1]]) for c in range(noutcols): firstincol = np.where(incolpos <= outcolpos[c])[0][-1] firstincolnext = np.where(incolpos < outcolpos[c+1])[0][-1] lastincol = max(firstincol,firstincolnext) # default weights wts = copy.deepcopy(incoldurs[firstincol:lastincol+1]) # now fix up by partial overlap at ends if len(wts) > 1: wts[0] = wts[0] - (outcolpos[c] - incolpos[firstincol]) wts[-1] = wts[-1] - (incolpos[lastincol+1] - outcolpos[c+1]) wts = wts * 1. / float(sum(wts)) Y[:,c] = np.dot(X[:,firstincol:lastincol+1], wts) # done return Y
[ "Y", "=", "resample_mx", "(", "X", "incolpos", "outcolpos", ")", "X", "is", "taken", "as", "a", "set", "of", "columns", "each", "starting", "at", "time", "colpos", "and", "continuing", "until", "the", "start", "of", "the", "next", "column", ".", "Y", "is", "a", "similar", "matrix", "with", "time", "boundaries", "defined", "by", "outcolpos", ".", "Each", "column", "of", "Y", "is", "a", "duration", "-", "weighted", "average", "of", "the", "overlapping", "columns", "of", "X", ".", "2010", "-", "04", "-", "14", "Dan", "Ellis", "dpwe" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/utils_2dfmc.py#L11-L45
[ "def", "resample_mx", "(", "X", ",", "incolpos", ",", "outcolpos", ")", ":", "noutcols", "=", "len", "(", "outcolpos", ")", "Y", "=", "np", ".", "zeros", "(", "(", "X", ".", "shape", "[", "0", "]", ",", "noutcols", ")", ")", "# assign 'end times' to final columns", "if", "outcolpos", ".", "max", "(", ")", ">", "incolpos", ".", "max", "(", ")", ":", "incolpos", "=", "np", ".", "concatenate", "(", "[", "incolpos", ",", "[", "outcolpos", ".", "max", "(", ")", "]", "]", ")", "X", "=", "np", ".", "concatenate", "(", "[", "X", ",", "X", "[", ":", ",", "-", "1", "]", ".", "reshape", "(", "X", ".", "shape", "[", "0", "]", ",", "1", ")", "]", ",", "axis", "=", "1", ")", "outcolpos", "=", "np", ".", "concatenate", "(", "[", "outcolpos", ",", "[", "outcolpos", "[", "-", "1", "]", "]", "]", ")", "# durations (default weights) of input columns)", "incoldurs", "=", "np", ".", "concatenate", "(", "[", "np", ".", "diff", "(", "incolpos", ")", ",", "[", "1", "]", "]", ")", "for", "c", "in", "range", "(", "noutcols", ")", ":", "firstincol", "=", "np", ".", "where", "(", "incolpos", "<=", "outcolpos", "[", "c", "]", ")", "[", "0", "]", "[", "-", "1", "]", "firstincolnext", "=", "np", ".", "where", "(", "incolpos", "<", "outcolpos", "[", "c", "+", "1", "]", ")", "[", "0", "]", "[", "-", "1", "]", "lastincol", "=", "max", "(", "firstincol", ",", "firstincolnext", ")", "# default weights", "wts", "=", "copy", ".", "deepcopy", "(", "incoldurs", "[", "firstincol", ":", "lastincol", "+", "1", "]", ")", "# now fix up by partial overlap at ends", "if", "len", "(", "wts", ")", ">", "1", ":", "wts", "[", "0", "]", "=", "wts", "[", "0", "]", "-", "(", "outcolpos", "[", "c", "]", "-", "incolpos", "[", "firstincol", "]", ")", "wts", "[", "-", "1", "]", "=", "wts", "[", "-", "1", "]", "-", "(", "incolpos", "[", "lastincol", "+", "1", "]", "-", "outcolpos", "[", "c", "+", "1", "]", ")", "wts", "=", "wts", "*", "1.", "/", "float", "(", "sum", "(", "wts", ")", ")", "Y", "[", ":", ",", "c", "]", "=", "np", ".", "dot", "(", "X", "[", ":", ",", "firstincol", ":", "lastincol", "+", "1", "]", ",", "wts", ")", "# done", "return", "Y" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
magnitude
Magnitude of a complex matrix.
msaf/algorithms/fmc2d/utils_2dfmc.py
def magnitude(X): """Magnitude of a complex matrix.""" r = np.real(X) i = np.imag(X) return np.sqrt(r * r + i * i);
def magnitude(X): """Magnitude of a complex matrix.""" r = np.real(X) i = np.imag(X) return np.sqrt(r * r + i * i);
[ "Magnitude", "of", "a", "complex", "matrix", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/utils_2dfmc.py#L47-L51
[ "def", "magnitude", "(", "X", ")", ":", "r", "=", "np", ".", "real", "(", "X", ")", "i", "=", "np", ".", "imag", "(", "X", ")", "return", "np", ".", "sqrt", "(", "r", "*", "r", "+", "i", "*", "i", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
json_to_bounds
Extracts the boundaries from a json file and puts them into an np array.
msaf/algorithms/fmc2d/utils_2dfmc.py
def json_to_bounds(segments_json): """Extracts the boundaries from a json file and puts them into an np array.""" f = open(segments_json) segments = json.load(f)["segments"] bounds = [] for segment in segments: bounds.append(segment["start"]) bounds.append(bounds[-1] + segments[-1]["duration"]) # Add last boundary f.close() return np.asarray(bounds)
def json_to_bounds(segments_json): """Extracts the boundaries from a json file and puts them into an np array.""" f = open(segments_json) segments = json.load(f)["segments"] bounds = [] for segment in segments: bounds.append(segment["start"]) bounds.append(bounds[-1] + segments[-1]["duration"]) # Add last boundary f.close() return np.asarray(bounds)
[ "Extracts", "the", "boundaries", "from", "a", "json", "file", "and", "puts", "them", "into", "an", "np", "array", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/utils_2dfmc.py#L53-L63
[ "def", "json_to_bounds", "(", "segments_json", ")", ":", "f", "=", "open", "(", "segments_json", ")", "segments", "=", "json", ".", "load", "(", "f", ")", "[", "\"segments\"", "]", "bounds", "=", "[", "]", "for", "segment", "in", "segments", ":", "bounds", ".", "append", "(", "segment", "[", "\"start\"", "]", ")", "bounds", ".", "append", "(", "bounds", "[", "-", "1", "]", "+", "segments", "[", "-", "1", "]", "[", "\"duration\"", "]", ")", "# Add last boundary", "f", ".", "close", "(", ")", "return", "np", ".", "asarray", "(", "bounds", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
json_bounds_to_bounds
Extracts the boundaries from a bounds json file and puts them into an np array.
msaf/algorithms/fmc2d/utils_2dfmc.py
def json_bounds_to_bounds(bounds_json): """Extracts the boundaries from a bounds json file and puts them into an np array.""" f = open(bounds_json) segments = json.load(f)["bounds"] bounds = [] for segment in segments: bounds.append(segment["start"]) f.close() return np.asarray(bounds)
def json_bounds_to_bounds(bounds_json): """Extracts the boundaries from a bounds json file and puts them into an np array.""" f = open(bounds_json) segments = json.load(f)["bounds"] bounds = [] for segment in segments: bounds.append(segment["start"]) f.close() return np.asarray(bounds)
[ "Extracts", "the", "boundaries", "from", "a", "bounds", "json", "file", "and", "puts", "them", "into", "an", "np", "array", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/utils_2dfmc.py#L65-L74
[ "def", "json_bounds_to_bounds", "(", "bounds_json", ")", ":", "f", "=", "open", "(", "bounds_json", ")", "segments", "=", "json", ".", "load", "(", "f", ")", "[", "\"bounds\"", "]", "bounds", "=", "[", "]", "for", "segment", "in", "segments", ":", "bounds", ".", "append", "(", "segment", "[", "\"start\"", "]", ")", "f", ".", "close", "(", ")", "return", "np", ".", "asarray", "(", "bounds", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
json_to_labels
Extracts the labels from a json file and puts them into an np array.
msaf/algorithms/fmc2d/utils_2dfmc.py
def json_to_labels(segments_json): """Extracts the labels from a json file and puts them into an np array.""" f = open(segments_json) segments = json.load(f)["segments"] labels = [] str_labels = [] for segment in segments: if not segment["label"] in str_labels: str_labels.append(segment["label"]) labels.append(len(str_labels)-1) else: label_idx = np.where(np.asarray(str_labels) == segment["label"])[0][0] labels.append(label_idx) f.close() return np.asarray(labels)
def json_to_labels(segments_json): """Extracts the labels from a json file and puts them into an np array.""" f = open(segments_json) segments = json.load(f)["segments"] labels = [] str_labels = [] for segment in segments: if not segment["label"] in str_labels: str_labels.append(segment["label"]) labels.append(len(str_labels)-1) else: label_idx = np.where(np.asarray(str_labels) == segment["label"])[0][0] labels.append(label_idx) f.close() return np.asarray(labels)
[ "Extracts", "the", "labels", "from", "a", "json", "file", "and", "puts", "them", "into", "an", "np", "array", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/utils_2dfmc.py#L76-L91
[ "def", "json_to_labels", "(", "segments_json", ")", ":", "f", "=", "open", "(", "segments_json", ")", "segments", "=", "json", ".", "load", "(", "f", ")", "[", "\"segments\"", "]", "labels", "=", "[", "]", "str_labels", "=", "[", "]", "for", "segment", "in", "segments", ":", "if", "not", "segment", "[", "\"label\"", "]", "in", "str_labels", ":", "str_labels", ".", "append", "(", "segment", "[", "\"label\"", "]", ")", "labels", ".", "append", "(", "len", "(", "str_labels", ")", "-", "1", ")", "else", ":", "label_idx", "=", "np", ".", "where", "(", "np", ".", "asarray", "(", "str_labels", ")", "==", "segment", "[", "\"label\"", "]", ")", "[", "0", "]", "[", "0", "]", "labels", ".", "append", "(", "label_idx", ")", "f", ".", "close", "(", ")", "return", "np", ".", "asarray", "(", "labels", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
json_to_beats
Extracts the beats from the beats_json_file and puts them into an np array.
msaf/algorithms/fmc2d/utils_2dfmc.py
def json_to_beats(beats_json_file): """Extracts the beats from the beats_json_file and puts them into an np array.""" f = open(beats_json_file, "r") beats_json = json.load(f) beats = [] for beat in beats_json["beats"]: beats.append(beat["start"]) f.close() return np.asarray(beats)
def json_to_beats(beats_json_file): """Extracts the beats from the beats_json_file and puts them into an np array.""" f = open(beats_json_file, "r") beats_json = json.load(f) beats = [] for beat in beats_json["beats"]: beats.append(beat["start"]) f.close() return np.asarray(beats)
[ "Extracts", "the", "beats", "from", "the", "beats_json_file", "and", "puts", "them", "into", "an", "np", "array", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/utils_2dfmc.py#L93-L102
[ "def", "json_to_beats", "(", "beats_json_file", ")", ":", "f", "=", "open", "(", "beats_json_file", ",", "\"r\"", ")", "beats_json", "=", "json", ".", "load", "(", "f", ")", "beats", "=", "[", "]", "for", "beat", "in", "beats_json", "[", "\"beats\"", "]", ":", "beats", ".", "append", "(", "beat", "[", "\"start\"", "]", ")", "f", ".", "close", "(", ")", "return", "np", ".", "asarray", "(", "beats", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
compute_ffmc2d
Computes the 2D-Fourier Magnitude Coefficients.
msaf/algorithms/fmc2d/utils_2dfmc.py
def compute_ffmc2d(X): """Computes the 2D-Fourier Magnitude Coefficients.""" # 2d-fft fft2 = scipy.fftpack.fft2(X) # Magnitude fft2m = magnitude(fft2) # FFTshift and flatten fftshift = scipy.fftpack.fftshift(fft2m).flatten() #cmap = plt.cm.get_cmap('hot') #plt.imshow(np.log1p(scipy.fftpack.fftshift(fft2m)).T, interpolation="nearest", # aspect="auto", cmap=cmap) #plt.show() # Take out redundant components return fftshift[:fftshift.shape[0] // 2 + 1]
def compute_ffmc2d(X): """Computes the 2D-Fourier Magnitude Coefficients.""" # 2d-fft fft2 = scipy.fftpack.fft2(X) # Magnitude fft2m = magnitude(fft2) # FFTshift and flatten fftshift = scipy.fftpack.fftshift(fft2m).flatten() #cmap = plt.cm.get_cmap('hot') #plt.imshow(np.log1p(scipy.fftpack.fftshift(fft2m)).T, interpolation="nearest", # aspect="auto", cmap=cmap) #plt.show() # Take out redundant components return fftshift[:fftshift.shape[0] // 2 + 1]
[ "Computes", "the", "2D", "-", "Fourier", "Magnitude", "Coefficients", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/fmc2d/utils_2dfmc.py#L113-L130
[ "def", "compute_ffmc2d", "(", "X", ")", ":", "# 2d-fft", "fft2", "=", "scipy", ".", "fftpack", ".", "fft2", "(", "X", ")", "# Magnitude", "fft2m", "=", "magnitude", "(", "fft2", ")", "# FFTshift and flatten", "fftshift", "=", "scipy", ".", "fftpack", ".", "fftshift", "(", "fft2m", ")", ".", "flatten", "(", ")", "#cmap = plt.cm.get_cmap('hot')", "#plt.imshow(np.log1p(scipy.fftpack.fftshift(fft2m)).T, interpolation=\"nearest\",", "# aspect=\"auto\", cmap=cmap)", "#plt.show()", "# Take out redundant components", "return", "fftshift", "[", ":", "fftshift", ".", "shape", "[", "0", "]", "//", "2", "+", "1", "]" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
SVD.frobenius_norm
Frobenius norm (||data - USV||) for a data matrix and a low rank approximation given by SVH using rank k for U and V Returns: frobenius norm: F = ||data - USV||
msaf/pymf/svd.py
def frobenius_norm(self): """ Frobenius norm (||data - USV||) for a data matrix and a low rank approximation given by SVH using rank k for U and V Returns: frobenius norm: F = ||data - USV|| """ if scipy.sparse.issparse(self.data): err = self.data - self.U*self.S*self.V err = err.multiply(err) err = np.sqrt(err.sum()) else: err = self.data[:,:] - np.dot(np.dot(self.U, self.S), self.V) err = np.sqrt(np.sum(err**2)) return err
def frobenius_norm(self): """ Frobenius norm (||data - USV||) for a data matrix and a low rank approximation given by SVH using rank k for U and V Returns: frobenius norm: F = ||data - USV|| """ if scipy.sparse.issparse(self.data): err = self.data - self.U*self.S*self.V err = err.multiply(err) err = np.sqrt(err.sum()) else: err = self.data[:,:] - np.dot(np.dot(self.U, self.S), self.V) err = np.sqrt(np.sum(err**2)) return err
[ "Frobenius", "norm", "(", "||data", "-", "USV||", ")", "for", "a", "data", "matrix", "and", "a", "low", "rank", "approximation", "given", "by", "SVH", "using", "rank", "k", "for", "U", "and", "V", "Returns", ":", "frobenius", "norm", ":", "F", "=", "||data", "-", "USV||" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/svd.py#L92-L107
[ "def", "frobenius_norm", "(", "self", ")", ":", "if", "scipy", ".", "sparse", ".", "issparse", "(", "self", ".", "data", ")", ":", "err", "=", "self", ".", "data", "-", "self", ".", "U", "*", "self", ".", "S", "*", "self", ".", "V", "err", "=", "err", ".", "multiply", "(", "err", ")", "err", "=", "np", ".", "sqrt", "(", "err", ".", "sum", "(", ")", ")", "else", ":", "err", "=", "self", ".", "data", "[", ":", ",", ":", "]", "-", "np", ".", "dot", "(", "np", ".", "dot", "(", "self", ".", "U", ",", "self", ".", "S", ")", ",", "self", ".", "V", ")", "err", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "err", "**", "2", ")", ")", "return", "err" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
CNMF.factorize
Factorize s.t. WH = data Parameters ---------- niter : int number of iterations. show_progress : bool print some extra information to stdout. compute_h : bool iteratively update values for H. compute_w : bool iteratively update values for W. compute_err : bool compute Frobenius norm |data-WH| after each update and store it to .ferr[k]. Updated Values -------------- .W : updated values for W. .H : updated values for H. .ferr : Frobenius norm |data-WH| for each iteration.
msaf/pymf/cnmf.py
def factorize(self, niter=10, compute_w=True, compute_h=True, compute_err=True, show_progress=False): """ Factorize s.t. WH = data Parameters ---------- niter : int number of iterations. show_progress : bool print some extra information to stdout. compute_h : bool iteratively update values for H. compute_w : bool iteratively update values for W. compute_err : bool compute Frobenius norm |data-WH| after each update and store it to .ferr[k]. Updated Values -------------- .W : updated values for W. .H : updated values for H. .ferr : Frobenius norm |data-WH| for each iteration. """ if not hasattr(self,'W'): self.init_w() if not hasattr(self,'H'): self.init_h() def separate_positive(m): return (np.abs(m) + m)/2.0 def separate_negative(m): return (np.abs(m) - m)/2.0 if show_progress: self._logger.setLevel(logging.INFO) else: self._logger.setLevel(logging.ERROR) XtX = np.dot(self.data[:,:].T, self.data[:,:]) XtX_pos = separate_positive(XtX) XtX_neg = separate_negative(XtX) self.ferr = np.zeros(niter) # iterate over W and H for i in range(niter): # update H XtX_neg_x_W = np.dot(XtX_neg, self.G) XtX_pos_x_W = np.dot(XtX_pos, self.G) if compute_h: H_x_WT = np.dot(self.H.T, self.G.T) ha = XtX_pos_x_W + np.dot(H_x_WT, XtX_neg_x_W) hb = XtX_neg_x_W + np.dot(H_x_WT, XtX_pos_x_W) + 10**-9 self.H = (self.H.T*np.sqrt(ha/hb)).T # update W if compute_w: HT_x_H = np.dot(self.H, self.H.T) wa = np.dot(XtX_pos, self.H.T) + np.dot(XtX_neg_x_W, HT_x_H) wb = np.dot(XtX_neg, self.H.T) + np.dot(XtX_pos_x_W, HT_x_H) + 10**-9 self.G *= np.sqrt(wa/wb) self.W = np.dot(self.data[:,:], self.G) if compute_err: self.ferr[i] = self.frobenius_norm() self._logger.info('Iteration ' + str(i+1) + '/' + str(niter) + ' FN:' + str(self.ferr[i])) else: self._logger.info('Iteration ' + str(i+1) + '/' + str(niter)) if i > 1 and compute_err: if self.converged(i): self.ferr = self.ferr[:i] break
def factorize(self, niter=10, compute_w=True, compute_h=True, compute_err=True, show_progress=False): """ Factorize s.t. WH = data Parameters ---------- niter : int number of iterations. show_progress : bool print some extra information to stdout. compute_h : bool iteratively update values for H. compute_w : bool iteratively update values for W. compute_err : bool compute Frobenius norm |data-WH| after each update and store it to .ferr[k]. Updated Values -------------- .W : updated values for W. .H : updated values for H. .ferr : Frobenius norm |data-WH| for each iteration. """ if not hasattr(self,'W'): self.init_w() if not hasattr(self,'H'): self.init_h() def separate_positive(m): return (np.abs(m) + m)/2.0 def separate_negative(m): return (np.abs(m) - m)/2.0 if show_progress: self._logger.setLevel(logging.INFO) else: self._logger.setLevel(logging.ERROR) XtX = np.dot(self.data[:,:].T, self.data[:,:]) XtX_pos = separate_positive(XtX) XtX_neg = separate_negative(XtX) self.ferr = np.zeros(niter) # iterate over W and H for i in range(niter): # update H XtX_neg_x_W = np.dot(XtX_neg, self.G) XtX_pos_x_W = np.dot(XtX_pos, self.G) if compute_h: H_x_WT = np.dot(self.H.T, self.G.T) ha = XtX_pos_x_W + np.dot(H_x_WT, XtX_neg_x_W) hb = XtX_neg_x_W + np.dot(H_x_WT, XtX_pos_x_W) + 10**-9 self.H = (self.H.T*np.sqrt(ha/hb)).T # update W if compute_w: HT_x_H = np.dot(self.H, self.H.T) wa = np.dot(XtX_pos, self.H.T) + np.dot(XtX_neg_x_W, HT_x_H) wb = np.dot(XtX_neg, self.H.T) + np.dot(XtX_pos_x_W, HT_x_H) + 10**-9 self.G *= np.sqrt(wa/wb) self.W = np.dot(self.data[:,:], self.G) if compute_err: self.ferr[i] = self.frobenius_norm() self._logger.info('Iteration ' + str(i+1) + '/' + str(niter) + ' FN:' + str(self.ferr[i])) else: self._logger.info('Iteration ' + str(i+1) + '/' + str(niter)) if i > 1 and compute_err: if self.converged(i): self.ferr = self.ferr[:i] break
[ "Factorize", "s", ".", "t", ".", "WH", "=", "data" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/cnmf.py#L108-L187
[ "def", "factorize", "(", "self", ",", "niter", "=", "10", ",", "compute_w", "=", "True", ",", "compute_h", "=", "True", ",", "compute_err", "=", "True", ",", "show_progress", "=", "False", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'W'", ")", ":", "self", ".", "init_w", "(", ")", "if", "not", "hasattr", "(", "self", ",", "'H'", ")", ":", "self", ".", "init_h", "(", ")", "def", "separate_positive", "(", "m", ")", ":", "return", "(", "np", ".", "abs", "(", "m", ")", "+", "m", ")", "/", "2.0", "def", "separate_negative", "(", "m", ")", ":", "return", "(", "np", ".", "abs", "(", "m", ")", "-", "m", ")", "/", "2.0", "if", "show_progress", ":", "self", ".", "_logger", ".", "setLevel", "(", "logging", ".", "INFO", ")", "else", ":", "self", ".", "_logger", ".", "setLevel", "(", "logging", ".", "ERROR", ")", "XtX", "=", "np", ".", "dot", "(", "self", ".", "data", "[", ":", ",", ":", "]", ".", "T", ",", "self", ".", "data", "[", ":", ",", ":", "]", ")", "XtX_pos", "=", "separate_positive", "(", "XtX", ")", "XtX_neg", "=", "separate_negative", "(", "XtX", ")", "self", ".", "ferr", "=", "np", ".", "zeros", "(", "niter", ")", "# iterate over W and H", "for", "i", "in", "range", "(", "niter", ")", ":", "# update H", "XtX_neg_x_W", "=", "np", ".", "dot", "(", "XtX_neg", ",", "self", ".", "G", ")", "XtX_pos_x_W", "=", "np", ".", "dot", "(", "XtX_pos", ",", "self", ".", "G", ")", "if", "compute_h", ":", "H_x_WT", "=", "np", ".", "dot", "(", "self", ".", "H", ".", "T", ",", "self", ".", "G", ".", "T", ")", "ha", "=", "XtX_pos_x_W", "+", "np", ".", "dot", "(", "H_x_WT", ",", "XtX_neg_x_W", ")", "hb", "=", "XtX_neg_x_W", "+", "np", ".", "dot", "(", "H_x_WT", ",", "XtX_pos_x_W", ")", "+", "10", "**", "-", "9", "self", ".", "H", "=", "(", "self", ".", "H", ".", "T", "*", "np", ".", "sqrt", "(", "ha", "/", "hb", ")", ")", ".", "T", "# update W", "if", "compute_w", ":", "HT_x_H", "=", "np", ".", "dot", "(", "self", ".", "H", ",", "self", ".", "H", ".", "T", ")", "wa", "=", "np", ".", "dot", "(", "XtX_pos", ",", "self", ".", "H", ".", "T", ")", "+", "np", ".", "dot", "(", "XtX_neg_x_W", ",", "HT_x_H", ")", "wb", "=", "np", ".", "dot", "(", "XtX_neg", ",", "self", ".", "H", ".", "T", ")", "+", "np", ".", "dot", "(", "XtX_pos_x_W", ",", "HT_x_H", ")", "+", "10", "**", "-", "9", "self", ".", "G", "*=", "np", ".", "sqrt", "(", "wa", "/", "wb", ")", "self", ".", "W", "=", "np", ".", "dot", "(", "self", ".", "data", "[", ":", ",", ":", "]", ",", "self", ".", "G", ")", "if", "compute_err", ":", "self", ".", "ferr", "[", "i", "]", "=", "self", ".", "frobenius_norm", "(", ")", "self", ".", "_logger", ".", "info", "(", "'Iteration '", "+", "str", "(", "i", "+", "1", ")", "+", "'/'", "+", "str", "(", "niter", ")", "+", "' FN:'", "+", "str", "(", "self", ".", "ferr", "[", "i", "]", ")", ")", "else", ":", "self", ".", "_logger", ".", "info", "(", "'Iteration '", "+", "str", "(", "i", "+", "1", ")", "+", "'/'", "+", "str", "(", "niter", ")", ")", "if", "i", ">", "1", "and", "compute_err", ":", "if", "self", ".", "converged", "(", "i", ")", ":", "self", ".", "ferr", "=", "self", ".", "ferr", "[", ":", "i", "]", "break" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
cnmf
(Convex) Non-Negative Matrix Factorization. Parameters ---------- S: np.array(p, N) Features matrix. p row features and N column observations. rank: int Rank of decomposition niter: int Number of iterations to be used Returns ------- F: np.array Cluster matrix (decomposed matrix) G: np.array Activation matrix (decomposed matrix) (s.t. S ~= F * G)
msaf/algorithms/cnmf/segmenter.py
def cnmf(S, rank, niter=500, hull=False): """(Convex) Non-Negative Matrix Factorization. Parameters ---------- S: np.array(p, N) Features matrix. p row features and N column observations. rank: int Rank of decomposition niter: int Number of iterations to be used Returns ------- F: np.array Cluster matrix (decomposed matrix) G: np.array Activation matrix (decomposed matrix) (s.t. S ~= F * G) """ if hull: nmf_mdl = pymf.CHNMF(S, num_bases=rank) else: nmf_mdl = pymf.CNMF(S, num_bases=rank) nmf_mdl.factorize(niter=niter) F = np.asarray(nmf_mdl.W) G = np.asarray(nmf_mdl.H) return F, G
def cnmf(S, rank, niter=500, hull=False): """(Convex) Non-Negative Matrix Factorization. Parameters ---------- S: np.array(p, N) Features matrix. p row features and N column observations. rank: int Rank of decomposition niter: int Number of iterations to be used Returns ------- F: np.array Cluster matrix (decomposed matrix) G: np.array Activation matrix (decomposed matrix) (s.t. S ~= F * G) """ if hull: nmf_mdl = pymf.CHNMF(S, num_bases=rank) else: nmf_mdl = pymf.CNMF(S, num_bases=rank) nmf_mdl.factorize(niter=niter) F = np.asarray(nmf_mdl.W) G = np.asarray(nmf_mdl.H) return F, G
[ "(", "Convex", ")", "Non", "-", "Negative", "Matrix", "Factorization", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/cnmf/segmenter.py#L17-L44
[ "def", "cnmf", "(", "S", ",", "rank", ",", "niter", "=", "500", ",", "hull", "=", "False", ")", ":", "if", "hull", ":", "nmf_mdl", "=", "pymf", ".", "CHNMF", "(", "S", ",", "num_bases", "=", "rank", ")", "else", ":", "nmf_mdl", "=", "pymf", ".", "CNMF", "(", "S", ",", "num_bases", "=", "rank", ")", "nmf_mdl", ".", "factorize", "(", "niter", "=", "niter", ")", "F", "=", "np", ".", "asarray", "(", "nmf_mdl", ".", "W", ")", "G", "=", "np", ".", "asarray", "(", "nmf_mdl", ".", "H", ")", "return", "F", ",", "G" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
compute_labels
Computes the labels using the bounds.
msaf/algorithms/cnmf/segmenter.py
def compute_labels(X, rank, R, bound_idxs, niter=300): """Computes the labels using the bounds.""" try: F, G = cnmf(X, rank, niter=niter, hull=False) except: return [1] label_frames = filter_activation_matrix(G.T, R) label_frames = np.asarray(label_frames, dtype=int) #labels = [label_frames[0]] labels = [] bound_inters = zip(bound_idxs[:-1], bound_idxs[1:]) for bound_inter in bound_inters: if bound_inter[1] - bound_inter[0] <= 0: labels.append(np.max(label_frames) + 1) else: labels.append(most_frequent( label_frames[bound_inter[0]: bound_inter[1]])) #print bound_inter, labels[-1] #labels.append(label_frames[-1]) return labels
def compute_labels(X, rank, R, bound_idxs, niter=300): """Computes the labels using the bounds.""" try: F, G = cnmf(X, rank, niter=niter, hull=False) except: return [1] label_frames = filter_activation_matrix(G.T, R) label_frames = np.asarray(label_frames, dtype=int) #labels = [label_frames[0]] labels = [] bound_inters = zip(bound_idxs[:-1], bound_idxs[1:]) for bound_inter in bound_inters: if bound_inter[1] - bound_inter[0] <= 0: labels.append(np.max(label_frames) + 1) else: labels.append(most_frequent( label_frames[bound_inter[0]: bound_inter[1]])) #print bound_inter, labels[-1] #labels.append(label_frames[-1]) return labels
[ "Computes", "the", "labels", "using", "the", "bounds", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/cnmf/segmenter.py#L52-L75
[ "def", "compute_labels", "(", "X", ",", "rank", ",", "R", ",", "bound_idxs", ",", "niter", "=", "300", ")", ":", "try", ":", "F", ",", "G", "=", "cnmf", "(", "X", ",", "rank", ",", "niter", "=", "niter", ",", "hull", "=", "False", ")", "except", ":", "return", "[", "1", "]", "label_frames", "=", "filter_activation_matrix", "(", "G", ".", "T", ",", "R", ")", "label_frames", "=", "np", ".", "asarray", "(", "label_frames", ",", "dtype", "=", "int", ")", "#labels = [label_frames[0]]", "labels", "=", "[", "]", "bound_inters", "=", "zip", "(", "bound_idxs", "[", ":", "-", "1", "]", ",", "bound_idxs", "[", "1", ":", "]", ")", "for", "bound_inter", "in", "bound_inters", ":", "if", "bound_inter", "[", "1", "]", "-", "bound_inter", "[", "0", "]", "<=", "0", ":", "labels", ".", "append", "(", "np", ".", "max", "(", "label_frames", ")", "+", "1", ")", "else", ":", "labels", ".", "append", "(", "most_frequent", "(", "label_frames", "[", "bound_inter", "[", "0", "]", ":", "bound_inter", "[", "1", "]", "]", ")", ")", "#print bound_inter, labels[-1]", "#labels.append(label_frames[-1])", "return", "labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
filter_activation_matrix
Filters the activation matrix G, and returns a flattened copy.
msaf/algorithms/cnmf/segmenter.py
def filter_activation_matrix(G, R): """Filters the activation matrix G, and returns a flattened copy.""" #import pylab as plt #plt.imshow(G, interpolation="nearest", aspect="auto") #plt.show() idx = np.argmax(G, axis=1) max_idx = np.arange(G.shape[0]) max_idx = (max_idx, idx.flatten()) G[:, :] = 0 G[max_idx] = idx + 1 # TODO: Order matters? G = np.sum(G, axis=1) G = median_filter(G[:, np.newaxis], R) return G.flatten()
def filter_activation_matrix(G, R): """Filters the activation matrix G, and returns a flattened copy.""" #import pylab as plt #plt.imshow(G, interpolation="nearest", aspect="auto") #plt.show() idx = np.argmax(G, axis=1) max_idx = np.arange(G.shape[0]) max_idx = (max_idx, idx.flatten()) G[:, :] = 0 G[max_idx] = idx + 1 # TODO: Order matters? G = np.sum(G, axis=1) G = median_filter(G[:, np.newaxis], R) return G.flatten()
[ "Filters", "the", "activation", "matrix", "G", "and", "returns", "a", "flattened", "copy", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/cnmf/segmenter.py#L78-L95
[ "def", "filter_activation_matrix", "(", "G", ",", "R", ")", ":", "#import pylab as plt", "#plt.imshow(G, interpolation=\"nearest\", aspect=\"auto\")", "#plt.show()", "idx", "=", "np", ".", "argmax", "(", "G", ",", "axis", "=", "1", ")", "max_idx", "=", "np", ".", "arange", "(", "G", ".", "shape", "[", "0", "]", ")", "max_idx", "=", "(", "max_idx", ",", "idx", ".", "flatten", "(", ")", ")", "G", "[", ":", ",", ":", "]", "=", "0", "G", "[", "max_idx", "]", "=", "idx", "+", "1", "# TODO: Order matters?", "G", "=", "np", ".", "sum", "(", "G", ",", "axis", "=", "1", ")", "G", "=", "median_filter", "(", "G", "[", ":", ",", "np", ".", "newaxis", "]", ",", "R", ")", "return", "G", ".", "flatten", "(", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_segmentation
Gets the segmentation (boundaries and labels) from the factorization matrices. Parameters ---------- X: np.array() Features matrix (e.g. chromagram) rank: int Rank of decomposition R: int Size of the median filter for activation matrix niter: int Number of iterations for k-means bound_idxs : list Use previously found boundaries (None to detect them) in_labels : np.array() List of input labels (None to compute them) Returns ------- bounds_idx: np.array Bound indeces found labels: np.array Indeces of the labels representing the similarity between segments.
msaf/algorithms/cnmf/segmenter.py
def get_segmentation(X, rank, R, rank_labels, R_labels, niter=300, bound_idxs=None, in_labels=None): """ Gets the segmentation (boundaries and labels) from the factorization matrices. Parameters ---------- X: np.array() Features matrix (e.g. chromagram) rank: int Rank of decomposition R: int Size of the median filter for activation matrix niter: int Number of iterations for k-means bound_idxs : list Use previously found boundaries (None to detect them) in_labels : np.array() List of input labels (None to compute them) Returns ------- bounds_idx: np.array Bound indeces found labels: np.array Indeces of the labels representing the similarity between segments. """ #import pylab as plt #plt.imshow(X, interpolation="nearest", aspect="auto") #plt.show() # Find non filtered boundaries compute_bounds = True if bound_idxs is None else False while True: if bound_idxs is None: try: F, G = cnmf(X, rank, niter=niter, hull=False) except: return np.empty(0), [1] # Filter G G = filter_activation_matrix(G.T, R) if bound_idxs is None: bound_idxs = np.where(np.diff(G) != 0)[0] + 1 # Increase rank if we found too few boundaries if compute_bounds and len(np.unique(bound_idxs)) <= 2: rank += 1 bound_idxs = None else: break # Add first and last boundary bound_idxs = np.concatenate(([0], bound_idxs, [X.shape[1] - 1])) bound_idxs = np.asarray(bound_idxs, dtype=int) if in_labels is None: labels = compute_labels(X, rank_labels, R_labels, bound_idxs, niter=niter) else: labels = np.ones(len(bound_idxs) - 1) #plt.imshow(G[:, np.newaxis], interpolation="nearest", aspect="auto") #for b in bound_idxs: #plt.axvline(b, linewidth=2.0, color="k") #plt.show() return bound_idxs, labels
def get_segmentation(X, rank, R, rank_labels, R_labels, niter=300, bound_idxs=None, in_labels=None): """ Gets the segmentation (boundaries and labels) from the factorization matrices. Parameters ---------- X: np.array() Features matrix (e.g. chromagram) rank: int Rank of decomposition R: int Size of the median filter for activation matrix niter: int Number of iterations for k-means bound_idxs : list Use previously found boundaries (None to detect them) in_labels : np.array() List of input labels (None to compute them) Returns ------- bounds_idx: np.array Bound indeces found labels: np.array Indeces of the labels representing the similarity between segments. """ #import pylab as plt #plt.imshow(X, interpolation="nearest", aspect="auto") #plt.show() # Find non filtered boundaries compute_bounds = True if bound_idxs is None else False while True: if bound_idxs is None: try: F, G = cnmf(X, rank, niter=niter, hull=False) except: return np.empty(0), [1] # Filter G G = filter_activation_matrix(G.T, R) if bound_idxs is None: bound_idxs = np.where(np.diff(G) != 0)[0] + 1 # Increase rank if we found too few boundaries if compute_bounds and len(np.unique(bound_idxs)) <= 2: rank += 1 bound_idxs = None else: break # Add first and last boundary bound_idxs = np.concatenate(([0], bound_idxs, [X.shape[1] - 1])) bound_idxs = np.asarray(bound_idxs, dtype=int) if in_labels is None: labels = compute_labels(X, rank_labels, R_labels, bound_idxs, niter=niter) else: labels = np.ones(len(bound_idxs) - 1) #plt.imshow(G[:, np.newaxis], interpolation="nearest", aspect="auto") #for b in bound_idxs: #plt.axvline(b, linewidth=2.0, color="k") #plt.show() return bound_idxs, labels
[ "Gets", "the", "segmentation", "(", "boundaries", "and", "labels", ")", "from", "the", "factorization", "matrices", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/cnmf/segmenter.py#L98-L166
[ "def", "get_segmentation", "(", "X", ",", "rank", ",", "R", ",", "rank_labels", ",", "R_labels", ",", "niter", "=", "300", ",", "bound_idxs", "=", "None", ",", "in_labels", "=", "None", ")", ":", "#import pylab as plt", "#plt.imshow(X, interpolation=\"nearest\", aspect=\"auto\")", "#plt.show()", "# Find non filtered boundaries", "compute_bounds", "=", "True", "if", "bound_idxs", "is", "None", "else", "False", "while", "True", ":", "if", "bound_idxs", "is", "None", ":", "try", ":", "F", ",", "G", "=", "cnmf", "(", "X", ",", "rank", ",", "niter", "=", "niter", ",", "hull", "=", "False", ")", "except", ":", "return", "np", ".", "empty", "(", "0", ")", ",", "[", "1", "]", "# Filter G", "G", "=", "filter_activation_matrix", "(", "G", ".", "T", ",", "R", ")", "if", "bound_idxs", "is", "None", ":", "bound_idxs", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "G", ")", "!=", "0", ")", "[", "0", "]", "+", "1", "# Increase rank if we found too few boundaries", "if", "compute_bounds", "and", "len", "(", "np", ".", "unique", "(", "bound_idxs", ")", ")", "<=", "2", ":", "rank", "+=", "1", "bound_idxs", "=", "None", "else", ":", "break", "# Add first and last boundary", "bound_idxs", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "bound_idxs", ",", "[", "X", ".", "shape", "[", "1", "]", "-", "1", "]", ")", ")", "bound_idxs", "=", "np", ".", "asarray", "(", "bound_idxs", ",", "dtype", "=", "int", ")", "if", "in_labels", "is", "None", ":", "labels", "=", "compute_labels", "(", "X", ",", "rank_labels", ",", "R_labels", ",", "bound_idxs", ",", "niter", "=", "niter", ")", "else", ":", "labels", "=", "np", ".", "ones", "(", "len", "(", "bound_idxs", ")", "-", "1", ")", "#plt.imshow(G[:, np.newaxis], interpolation=\"nearest\", aspect=\"auto\")", "#for b in bound_idxs:", "#plt.axvline(b, linewidth=2.0, color=\"k\")", "#plt.show()", "return", "bound_idxs", ",", "labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
Segmenter.processFlat
Main process. Returns ------- est_idxs : np.array(N) Estimated indeces for the segment boundaries in frames. est_labels : np.array(N-1) Estimated labels for the segments.
msaf/algorithms/cnmf/segmenter.py
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated indeces for the segment boundaries in frames. est_labels : np.array(N-1) Estimated labels for the segments. """ # C-NMF params niter = self.config["niters"] # Iterations for the MF and clustering # Preprocess to obtain features, times, and input boundary indeces F = self._preprocess() # Normalize F = U.normalize(F, norm_type=self.config["norm_feats"]) if F.shape[0] >= self.config["h"]: # Median filter F = median_filter(F, M=self.config["h"]) #plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show() # Find the boundary indices and labels using matrix factorization est_idxs, est_labels = get_segmentation( F.T, self.config["rank"], self.config["R"], self.config["rank_labels"], self.config["R_labels"], niter=niter, bound_idxs=self.in_bound_idxs, in_labels=None) # Remove empty segments if needed est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels) else: # The track is too short. We will only output the first and last # time stamps if self.in_bound_idxs is None: est_idxs = np.array([0, F.shape[0] - 1]) est_labels = [1] else: est_idxs = self.in_bound_idxs est_labels = [1] * (len(est_idxs) + 1) # Make sure that the first and last boundaries are included assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1 # Post process estimations est_idxs, est_labels = self._postprocess(est_idxs, est_labels) return est_idxs, est_labels
def processFlat(self): """Main process. Returns ------- est_idxs : np.array(N) Estimated indeces for the segment boundaries in frames. est_labels : np.array(N-1) Estimated labels for the segments. """ # C-NMF params niter = self.config["niters"] # Iterations for the MF and clustering # Preprocess to obtain features, times, and input boundary indeces F = self._preprocess() # Normalize F = U.normalize(F, norm_type=self.config["norm_feats"]) if F.shape[0] >= self.config["h"]: # Median filter F = median_filter(F, M=self.config["h"]) #plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show() # Find the boundary indices and labels using matrix factorization est_idxs, est_labels = get_segmentation( F.T, self.config["rank"], self.config["R"], self.config["rank_labels"], self.config["R_labels"], niter=niter, bound_idxs=self.in_bound_idxs, in_labels=None) # Remove empty segments if needed est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels) else: # The track is too short. We will only output the first and last # time stamps if self.in_bound_idxs is None: est_idxs = np.array([0, F.shape[0] - 1]) est_labels = [1] else: est_idxs = self.in_bound_idxs est_labels = [1] * (len(est_idxs) + 1) # Make sure that the first and last boundaries are included assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1 # Post process estimations est_idxs, est_labels = self._postprocess(est_idxs, est_labels) return est_idxs, est_labels
[ "Main", "process", ".", "Returns", "-------", "est_idxs", ":", "np", ".", "array", "(", "N", ")", "Estimated", "indeces", "for", "the", "segment", "boundaries", "in", "frames", ".", "est_labels", ":", "np", ".", "array", "(", "N", "-", "1", ")", "Estimated", "labels", "for", "the", "segments", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/cnmf/segmenter.py#L184-L231
[ "def", "processFlat", "(", "self", ")", ":", "# C-NMF params", "niter", "=", "self", ".", "config", "[", "\"niters\"", "]", "# Iterations for the MF and clustering", "# Preprocess to obtain features, times, and input boundary indeces", "F", "=", "self", ".", "_preprocess", "(", ")", "# Normalize", "F", "=", "U", ".", "normalize", "(", "F", ",", "norm_type", "=", "self", ".", "config", "[", "\"norm_feats\"", "]", ")", "if", "F", ".", "shape", "[", "0", "]", ">=", "self", ".", "config", "[", "\"h\"", "]", ":", "# Median filter", "F", "=", "median_filter", "(", "F", ",", "M", "=", "self", ".", "config", "[", "\"h\"", "]", ")", "#plt.imshow(F.T, interpolation=\"nearest\", aspect=\"auto\"); plt.show()", "# Find the boundary indices and labels using matrix factorization", "est_idxs", ",", "est_labels", "=", "get_segmentation", "(", "F", ".", "T", ",", "self", ".", "config", "[", "\"rank\"", "]", ",", "self", ".", "config", "[", "\"R\"", "]", ",", "self", ".", "config", "[", "\"rank_labels\"", "]", ",", "self", ".", "config", "[", "\"R_labels\"", "]", ",", "niter", "=", "niter", ",", "bound_idxs", "=", "self", ".", "in_bound_idxs", ",", "in_labels", "=", "None", ")", "# Remove empty segments if needed", "est_idxs", ",", "est_labels", "=", "U", ".", "remove_empty_segments", "(", "est_idxs", ",", "est_labels", ")", "else", ":", "# The track is too short. We will only output the first and last", "# time stamps", "if", "self", ".", "in_bound_idxs", "is", "None", ":", "est_idxs", "=", "np", ".", "array", "(", "[", "0", ",", "F", ".", "shape", "[", "0", "]", "-", "1", "]", ")", "est_labels", "=", "[", "1", "]", "else", ":", "est_idxs", "=", "self", ".", "in_bound_idxs", "est_labels", "=", "[", "1", "]", "*", "(", "len", "(", "est_idxs", ")", "+", "1", ")", "# Make sure that the first and last boundaries are included", "assert", "est_idxs", "[", "0", "]", "==", "0", "and", "est_idxs", "[", "-", "1", "]", "==", "F", ".", "shape", "[", "0", "]", "-", "1", "# Post process estimations", "est_idxs", ",", "est_labels", "=", "self", ".", "_postprocess", "(", "est_idxs", ",", "est_labels", ")", "return", "est_idxs", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_boundaries_module
Obtains the boundaries module given a boundary algorithm identificator. Parameters ---------- boundaries_id: str Boundary algorithm identificator (e.g., foote, sf). Returns ------- module: object Object containing the selected boundary module. None for "ground truth".
msaf/run.py
def get_boundaries_module(boundaries_id): """Obtains the boundaries module given a boundary algorithm identificator. Parameters ---------- boundaries_id: str Boundary algorithm identificator (e.g., foote, sf). Returns ------- module: object Object containing the selected boundary module. None for "ground truth". """ if boundaries_id == "gt": return None try: module = eval(algorithms.__name__ + "." + boundaries_id) except AttributeError: raise RuntimeError("Algorithm %s can not be found in msaf!" % boundaries_id) if not module.is_boundary_type: raise RuntimeError("Algorithm %s can not identify boundaries!" % boundaries_id) return module
def get_boundaries_module(boundaries_id): """Obtains the boundaries module given a boundary algorithm identificator. Parameters ---------- boundaries_id: str Boundary algorithm identificator (e.g., foote, sf). Returns ------- module: object Object containing the selected boundary module. None for "ground truth". """ if boundaries_id == "gt": return None try: module = eval(algorithms.__name__ + "." + boundaries_id) except AttributeError: raise RuntimeError("Algorithm %s can not be found in msaf!" % boundaries_id) if not module.is_boundary_type: raise RuntimeError("Algorithm %s can not identify boundaries!" % boundaries_id) return module
[ "Obtains", "the", "boundaries", "module", "given", "a", "boundary", "algorithm", "identificator", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/run.py#L20-L44
[ "def", "get_boundaries_module", "(", "boundaries_id", ")", ":", "if", "boundaries_id", "==", "\"gt\"", ":", "return", "None", "try", ":", "module", "=", "eval", "(", "algorithms", ".", "__name__", "+", "\".\"", "+", "boundaries_id", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"Algorithm %s can not be found in msaf!\"", "%", "boundaries_id", ")", "if", "not", "module", ".", "is_boundary_type", ":", "raise", "RuntimeError", "(", "\"Algorithm %s can not identify boundaries!\"", "%", "boundaries_id", ")", "return", "module" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
get_labels_module
Obtains the label module given a label algorithm identificator. Parameters ---------- labels_id: str Label algorithm identificator (e.g., fmc2d, cnmf). Returns ------- module: object Object containing the selected label module. None for not computing the labeling part of music segmentation.
msaf/run.py
def get_labels_module(labels_id): """Obtains the label module given a label algorithm identificator. Parameters ---------- labels_id: str Label algorithm identificator (e.g., fmc2d, cnmf). Returns ------- module: object Object containing the selected label module. None for not computing the labeling part of music segmentation. """ if labels_id is None: return None try: module = eval(algorithms.__name__ + "." + labels_id) except AttributeError: raise RuntimeError("Algorithm %s can not be found in msaf!" % labels_id) if not module.is_label_type: raise RuntimeError("Algorithm %s can not label segments!" % labels_id) return module
def get_labels_module(labels_id): """Obtains the label module given a label algorithm identificator. Parameters ---------- labels_id: str Label algorithm identificator (e.g., fmc2d, cnmf). Returns ------- module: object Object containing the selected label module. None for not computing the labeling part of music segmentation. """ if labels_id is None: return None try: module = eval(algorithms.__name__ + "." + labels_id) except AttributeError: raise RuntimeError("Algorithm %s can not be found in msaf!" % labels_id) if not module.is_label_type: raise RuntimeError("Algorithm %s can not label segments!" % labels_id) return module
[ "Obtains", "the", "label", "module", "given", "a", "label", "algorithm", "identificator", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/run.py#L47-L71
[ "def", "get_labels_module", "(", "labels_id", ")", ":", "if", "labels_id", "is", "None", ":", "return", "None", "try", ":", "module", "=", "eval", "(", "algorithms", ".", "__name__", "+", "\".\"", "+", "labels_id", ")", "except", "AttributeError", ":", "raise", "RuntimeError", "(", "\"Algorithm %s can not be found in msaf!\"", "%", "labels_id", ")", "if", "not", "module", ".", "is_label_type", ":", "raise", "RuntimeError", "(", "\"Algorithm %s can not label segments!\"", "%", "labels_id", ")", "return", "module" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
run_hierarchical
Runs hierarchical algorithms with the specified identifiers on the audio_file. See run_algorithm for more information.
msaf/run.py
def run_hierarchical(audio_file, bounds_module, labels_module, frame_times, config, annotator_id=0): """Runs hierarchical algorithms with the specified identifiers on the audio_file. See run_algorithm for more information. """ # Sanity check if bounds_module is None: raise NoHierBoundaryError("A boundary algorithm is needed when using " "hierarchical segmentation.") # Get features to make code nicer features = config["features"].features # Compute boundaries S = bounds_module.Segmenter(audio_file, **config) est_idxs, est_labels = S.processHierarchical() # Compute labels if needed if labels_module is not None and \ bounds_module.__name__ != labels_module.__name__: # Compute labels for each level in the hierarchy flat_config = deepcopy(config) flat_config["hier"] = False for i, level_idxs in enumerate(est_idxs): S = labels_module.Segmenter(audio_file, in_bound_idxs=level_idxs, **flat_config) est_labels[i] = S.processFlat()[1] # Make sure the first and last boundaries are included for each # level in the hierarchy est_times = [] cleaned_est_labels = [] for level in range(len(est_idxs)): est_level_times, est_level_labels = \ utils.process_segmentation_level( est_idxs[level], est_labels[level], features.shape[0], frame_times, config["features"].dur) est_times.append(est_level_times) cleaned_est_labels.append(est_level_labels) est_labels = cleaned_est_labels return est_times, est_labels
def run_hierarchical(audio_file, bounds_module, labels_module, frame_times, config, annotator_id=0): """Runs hierarchical algorithms with the specified identifiers on the audio_file. See run_algorithm for more information. """ # Sanity check if bounds_module is None: raise NoHierBoundaryError("A boundary algorithm is needed when using " "hierarchical segmentation.") # Get features to make code nicer features = config["features"].features # Compute boundaries S = bounds_module.Segmenter(audio_file, **config) est_idxs, est_labels = S.processHierarchical() # Compute labels if needed if labels_module is not None and \ bounds_module.__name__ != labels_module.__name__: # Compute labels for each level in the hierarchy flat_config = deepcopy(config) flat_config["hier"] = False for i, level_idxs in enumerate(est_idxs): S = labels_module.Segmenter(audio_file, in_bound_idxs=level_idxs, **flat_config) est_labels[i] = S.processFlat()[1] # Make sure the first and last boundaries are included for each # level in the hierarchy est_times = [] cleaned_est_labels = [] for level in range(len(est_idxs)): est_level_times, est_level_labels = \ utils.process_segmentation_level( est_idxs[level], est_labels[level], features.shape[0], frame_times, config["features"].dur) est_times.append(est_level_times) cleaned_est_labels.append(est_level_labels) est_labels = cleaned_est_labels return est_times, est_labels
[ "Runs", "hierarchical", "algorithms", "with", "the", "specified", "identifiers", "on", "the", "audio_file", ".", "See", "run_algorithm", "for", "more", "information", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/run.py#L74-L116
[ "def", "run_hierarchical", "(", "audio_file", ",", "bounds_module", ",", "labels_module", ",", "frame_times", ",", "config", ",", "annotator_id", "=", "0", ")", ":", "# Sanity check", "if", "bounds_module", "is", "None", ":", "raise", "NoHierBoundaryError", "(", "\"A boundary algorithm is needed when using \"", "\"hierarchical segmentation.\"", ")", "# Get features to make code nicer", "features", "=", "config", "[", "\"features\"", "]", ".", "features", "# Compute boundaries", "S", "=", "bounds_module", ".", "Segmenter", "(", "audio_file", ",", "*", "*", "config", ")", "est_idxs", ",", "est_labels", "=", "S", ".", "processHierarchical", "(", ")", "# Compute labels if needed", "if", "labels_module", "is", "not", "None", "and", "bounds_module", ".", "__name__", "!=", "labels_module", ".", "__name__", ":", "# Compute labels for each level in the hierarchy", "flat_config", "=", "deepcopy", "(", "config", ")", "flat_config", "[", "\"hier\"", "]", "=", "False", "for", "i", ",", "level_idxs", "in", "enumerate", "(", "est_idxs", ")", ":", "S", "=", "labels_module", ".", "Segmenter", "(", "audio_file", ",", "in_bound_idxs", "=", "level_idxs", ",", "*", "*", "flat_config", ")", "est_labels", "[", "i", "]", "=", "S", ".", "processFlat", "(", ")", "[", "1", "]", "# Make sure the first and last boundaries are included for each", "# level in the hierarchy", "est_times", "=", "[", "]", "cleaned_est_labels", "=", "[", "]", "for", "level", "in", "range", "(", "len", "(", "est_idxs", ")", ")", ":", "est_level_times", ",", "est_level_labels", "=", "utils", ".", "process_segmentation_level", "(", "est_idxs", "[", "level", "]", ",", "est_labels", "[", "level", "]", ",", "features", ".", "shape", "[", "0", "]", ",", "frame_times", ",", "config", "[", "\"features\"", "]", ".", "dur", ")", "est_times", ".", "append", "(", "est_level_times", ")", "cleaned_est_labels", ".", "append", "(", "est_level_labels", ")", "est_labels", "=", "cleaned_est_labels", "return", "est_times", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
run_flat
Runs the flat algorithms with the specified identifiers on the audio_file. See run_algorithm for more information.
msaf/run.py
def run_flat(file_struct, bounds_module, labels_module, frame_times, config, annotator_id): """Runs the flat algorithms with the specified identifiers on the audio_file. See run_algorithm for more information. """ # Get features to make code nicer features = config["features"].features # Segment using the specified boundaries and labels # Case when boundaries and labels algorithms are the same if bounds_module is not None and labels_module is not None and \ bounds_module.__name__ == labels_module.__name__: S = bounds_module.Segmenter(file_struct, **config) est_idxs, est_labels = S.processFlat() # Different boundary and label algorithms else: # Identify segment boundaries if bounds_module is not None: S = bounds_module.Segmenter(file_struct, in_labels=[], **config) est_idxs, est_labels = S.processFlat() else: try: # Ground-truth boundaries est_times, est_labels = io.read_references( file_struct.audio_file, annotator_id=annotator_id) est_idxs = io.align_times(est_times, frame_times) if est_idxs[0] != 0: est_idxs = np.concatenate(([0], est_idxs)) except IOError: logging.warning("No references found for file: %s" % file_struct.audio_file) return [], [] # Label segments if labels_module is not None: if len(est_idxs) == 2: est_labels = np.array([0]) else: S = labels_module.Segmenter(file_struct, in_bound_idxs=est_idxs, **config) est_labels = S.processFlat()[1] # Make sure the first and last boundaries are included est_times, est_labels = utils.process_segmentation_level( est_idxs, est_labels, features.shape[0], frame_times, config["features"].dur) return est_times, est_labels
def run_flat(file_struct, bounds_module, labels_module, frame_times, config, annotator_id): """Runs the flat algorithms with the specified identifiers on the audio_file. See run_algorithm for more information. """ # Get features to make code nicer features = config["features"].features # Segment using the specified boundaries and labels # Case when boundaries and labels algorithms are the same if bounds_module is not None and labels_module is not None and \ bounds_module.__name__ == labels_module.__name__: S = bounds_module.Segmenter(file_struct, **config) est_idxs, est_labels = S.processFlat() # Different boundary and label algorithms else: # Identify segment boundaries if bounds_module is not None: S = bounds_module.Segmenter(file_struct, in_labels=[], **config) est_idxs, est_labels = S.processFlat() else: try: # Ground-truth boundaries est_times, est_labels = io.read_references( file_struct.audio_file, annotator_id=annotator_id) est_idxs = io.align_times(est_times, frame_times) if est_idxs[0] != 0: est_idxs = np.concatenate(([0], est_idxs)) except IOError: logging.warning("No references found for file: %s" % file_struct.audio_file) return [], [] # Label segments if labels_module is not None: if len(est_idxs) == 2: est_labels = np.array([0]) else: S = labels_module.Segmenter(file_struct, in_bound_idxs=est_idxs, **config) est_labels = S.processFlat()[1] # Make sure the first and last boundaries are included est_times, est_labels = utils.process_segmentation_level( est_idxs, est_labels, features.shape[0], frame_times, config["features"].dur) return est_times, est_labels
[ "Runs", "the", "flat", "algorithms", "with", "the", "specified", "identifiers", "on", "the", "audio_file", ".", "See", "run_algorithm", "for", "more", "information", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/run.py#L119-L167
[ "def", "run_flat", "(", "file_struct", ",", "bounds_module", ",", "labels_module", ",", "frame_times", ",", "config", ",", "annotator_id", ")", ":", "# Get features to make code nicer", "features", "=", "config", "[", "\"features\"", "]", ".", "features", "# Segment using the specified boundaries and labels", "# Case when boundaries and labels algorithms are the same", "if", "bounds_module", "is", "not", "None", "and", "labels_module", "is", "not", "None", "and", "bounds_module", ".", "__name__", "==", "labels_module", ".", "__name__", ":", "S", "=", "bounds_module", ".", "Segmenter", "(", "file_struct", ",", "*", "*", "config", ")", "est_idxs", ",", "est_labels", "=", "S", ".", "processFlat", "(", ")", "# Different boundary and label algorithms", "else", ":", "# Identify segment boundaries", "if", "bounds_module", "is", "not", "None", ":", "S", "=", "bounds_module", ".", "Segmenter", "(", "file_struct", ",", "in_labels", "=", "[", "]", ",", "*", "*", "config", ")", "est_idxs", ",", "est_labels", "=", "S", ".", "processFlat", "(", ")", "else", ":", "try", ":", "# Ground-truth boundaries", "est_times", ",", "est_labels", "=", "io", ".", "read_references", "(", "file_struct", ".", "audio_file", ",", "annotator_id", "=", "annotator_id", ")", "est_idxs", "=", "io", ".", "align_times", "(", "est_times", ",", "frame_times", ")", "if", "est_idxs", "[", "0", "]", "!=", "0", ":", "est_idxs", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "est_idxs", ")", ")", "except", "IOError", ":", "logging", ".", "warning", "(", "\"No references found for file: %s\"", "%", "file_struct", ".", "audio_file", ")", "return", "[", "]", ",", "[", "]", "# Label segments", "if", "labels_module", "is", "not", "None", ":", "if", "len", "(", "est_idxs", ")", "==", "2", ":", "est_labels", "=", "np", ".", "array", "(", "[", "0", "]", ")", "else", ":", "S", "=", "labels_module", ".", "Segmenter", "(", "file_struct", ",", "in_bound_idxs", "=", "est_idxs", ",", "*", "*", "config", ")", "est_labels", "=", "S", ".", "processFlat", "(", ")", "[", "1", "]", "# Make sure the first and last boundaries are included", "est_times", ",", "est_labels", "=", "utils", ".", "process_segmentation_level", "(", "est_idxs", ",", "est_labels", ",", "features", ".", "shape", "[", "0", "]", ",", "frame_times", ",", "config", "[", "\"features\"", "]", ".", "dur", ")", "return", "est_times", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
run_algorithms
Runs the algorithms with the specified identifiers on the audio_file. Parameters ---------- file_struct: `msaf.io.FileStruct` Object with the file paths. boundaries_id: str Identifier of the boundaries algorithm to use ("gt" for ground truth). labels_id: str Identifier of the labels algorithm to use (None for not labeling). config: dict Dictionary containing the custom parameters of the algorithms to use. annotator_id: int Annotator identificator in the ground truth. Returns ------- est_times: np.array or list List of estimated times for the segment boundaries. If `list`, it will be a list of np.arrays, sorted by segmentation layer. est_labels: np.array or list List of all the labels associated segments. If `list`, it will be a list of np.arrays, sorted by segmentation layer.
msaf/run.py
def run_algorithms(file_struct, boundaries_id, labels_id, config, annotator_id=0): """Runs the algorithms with the specified identifiers on the audio_file. Parameters ---------- file_struct: `msaf.io.FileStruct` Object with the file paths. boundaries_id: str Identifier of the boundaries algorithm to use ("gt" for ground truth). labels_id: str Identifier of the labels algorithm to use (None for not labeling). config: dict Dictionary containing the custom parameters of the algorithms to use. annotator_id: int Annotator identificator in the ground truth. Returns ------- est_times: np.array or list List of estimated times for the segment boundaries. If `list`, it will be a list of np.arrays, sorted by segmentation layer. est_labels: np.array or list List of all the labels associated segments. If `list`, it will be a list of np.arrays, sorted by segmentation layer. """ # Check that there are enough audio frames if config["features"].features.shape[0] <= msaf.config.minimum_frames: logging.warning("Audio file too short, or too many few beats " "estimated. Returning empty estimations.") return np.asarray([0, config["features"].dur]), \ np.asarray([0], dtype=int) # Get the corresponding modules bounds_module = get_boundaries_module(boundaries_id) labels_module = get_labels_module(labels_id) # Get the correct frame times frame_times = config["features"].frame_times # Segment audio based on type of segmentation run_fun = run_hierarchical if config["hier"] else run_flat est_times, est_labels = run_fun(file_struct, bounds_module, labels_module, frame_times, config, annotator_id) return est_times, est_labels
def run_algorithms(file_struct, boundaries_id, labels_id, config, annotator_id=0): """Runs the algorithms with the specified identifiers on the audio_file. Parameters ---------- file_struct: `msaf.io.FileStruct` Object with the file paths. boundaries_id: str Identifier of the boundaries algorithm to use ("gt" for ground truth). labels_id: str Identifier of the labels algorithm to use (None for not labeling). config: dict Dictionary containing the custom parameters of the algorithms to use. annotator_id: int Annotator identificator in the ground truth. Returns ------- est_times: np.array or list List of estimated times for the segment boundaries. If `list`, it will be a list of np.arrays, sorted by segmentation layer. est_labels: np.array or list List of all the labels associated segments. If `list`, it will be a list of np.arrays, sorted by segmentation layer. """ # Check that there are enough audio frames if config["features"].features.shape[0] <= msaf.config.minimum_frames: logging.warning("Audio file too short, or too many few beats " "estimated. Returning empty estimations.") return np.asarray([0, config["features"].dur]), \ np.asarray([0], dtype=int) # Get the corresponding modules bounds_module = get_boundaries_module(boundaries_id) labels_module = get_labels_module(labels_id) # Get the correct frame times frame_times = config["features"].frame_times # Segment audio based on type of segmentation run_fun = run_hierarchical if config["hier"] else run_flat est_times, est_labels = run_fun(file_struct, bounds_module, labels_module, frame_times, config, annotator_id) return est_times, est_labels
[ "Runs", "the", "algorithms", "with", "the", "specified", "identifiers", "on", "the", "audio_file", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/run.py#L170-L217
[ "def", "run_algorithms", "(", "file_struct", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "0", ")", ":", "# Check that there are enough audio frames", "if", "config", "[", "\"features\"", "]", ".", "features", ".", "shape", "[", "0", "]", "<=", "msaf", ".", "config", ".", "minimum_frames", ":", "logging", ".", "warning", "(", "\"Audio file too short, or too many few beats \"", "\"estimated. Returning empty estimations.\"", ")", "return", "np", ".", "asarray", "(", "[", "0", ",", "config", "[", "\"features\"", "]", ".", "dur", "]", ")", ",", "np", ".", "asarray", "(", "[", "0", "]", ",", "dtype", "=", "int", ")", "# Get the corresponding modules", "bounds_module", "=", "get_boundaries_module", "(", "boundaries_id", ")", "labels_module", "=", "get_labels_module", "(", "labels_id", ")", "# Get the correct frame times", "frame_times", "=", "config", "[", "\"features\"", "]", ".", "frame_times", "# Segment audio based on type of segmentation", "run_fun", "=", "run_hierarchical", "if", "config", "[", "\"hier\"", "]", "else", "run_flat", "est_times", ",", "est_labels", "=", "run_fun", "(", "file_struct", ",", "bounds_module", ",", "labels_module", ",", "frame_times", ",", "config", ",", "annotator_id", ")", "return", "est_times", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
process_track
Prepares the parameters, runs the algorithms, and saves results. Parameters ---------- file_struct: `msaf.io.FileStruct` FileStruct containing the paths of the input files (audio file, features file, reference file, output estimation file). boundaries_id: str Identifier of the boundaries algorithm to use ("gt" for ground truth). labels_id: str Identifier of the labels algorithm to use (None for not labeling). config: dict Dictionary containing the custom parameters of the algorithms to use. annotator_id: int Annotator identificator in the ground truth. Returns ------- est_times: np.array List of estimated times for the segment boundaries. est_labels: np.array List of all the labels associated segments.
msaf/run.py
def process_track(file_struct, boundaries_id, labels_id, config, annotator_id=0): """Prepares the parameters, runs the algorithms, and saves results. Parameters ---------- file_struct: `msaf.io.FileStruct` FileStruct containing the paths of the input files (audio file, features file, reference file, output estimation file). boundaries_id: str Identifier of the boundaries algorithm to use ("gt" for ground truth). labels_id: str Identifier of the labels algorithm to use (None for not labeling). config: dict Dictionary containing the custom parameters of the algorithms to use. annotator_id: int Annotator identificator in the ground truth. Returns ------- est_times: np.array List of estimated times for the segment boundaries. est_labels: np.array List of all the labels associated segments. """ logging.info("Segmenting %s" % file_struct.audio_file) # Get features config["features"] = Features.select_features( config["feature"], file_struct, config["annot_beats"], config["framesync"]) # Get estimations est_times, est_labels = run_algorithms(file_struct, boundaries_id, labels_id, config, annotator_id=annotator_id) # Save logging.info("Writing results in: %s" % file_struct.est_file) io.save_estimations(file_struct, est_times, est_labels, boundaries_id, labels_id, **config) return est_times, est_labels
def process_track(file_struct, boundaries_id, labels_id, config, annotator_id=0): """Prepares the parameters, runs the algorithms, and saves results. Parameters ---------- file_struct: `msaf.io.FileStruct` FileStruct containing the paths of the input files (audio file, features file, reference file, output estimation file). boundaries_id: str Identifier of the boundaries algorithm to use ("gt" for ground truth). labels_id: str Identifier of the labels algorithm to use (None for not labeling). config: dict Dictionary containing the custom parameters of the algorithms to use. annotator_id: int Annotator identificator in the ground truth. Returns ------- est_times: np.array List of estimated times for the segment boundaries. est_labels: np.array List of all the labels associated segments. """ logging.info("Segmenting %s" % file_struct.audio_file) # Get features config["features"] = Features.select_features( config["feature"], file_struct, config["annot_beats"], config["framesync"]) # Get estimations est_times, est_labels = run_algorithms(file_struct, boundaries_id, labels_id, config, annotator_id=annotator_id) # Save logging.info("Writing results in: %s" % file_struct.est_file) io.save_estimations(file_struct, est_times, est_labels, boundaries_id, labels_id, **config) return est_times, est_labels
[ "Prepares", "the", "parameters", "runs", "the", "algorithms", "and", "saves", "results", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/run.py#L220-L262
[ "def", "process_track", "(", "file_struct", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "0", ")", ":", "logging", ".", "info", "(", "\"Segmenting %s\"", "%", "file_struct", ".", "audio_file", ")", "# Get features", "config", "[", "\"features\"", "]", "=", "Features", ".", "select_features", "(", "config", "[", "\"feature\"", "]", ",", "file_struct", ",", "config", "[", "\"annot_beats\"", "]", ",", "config", "[", "\"framesync\"", "]", ")", "# Get estimations", "est_times", ",", "est_labels", "=", "run_algorithms", "(", "file_struct", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "annotator_id", ")", "# Save", "logging", ".", "info", "(", "\"Writing results in: %s\"", "%", "file_struct", ".", "est_file", ")", "io", ".", "save_estimations", "(", "file_struct", ",", "est_times", ",", "est_labels", ",", "boundaries_id", ",", "labels_id", ",", "*", "*", "config", ")", "return", "est_times", ",", "est_labels" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
process
Main process to segment a file or a collection of files. Parameters ---------- in_path: str Input path. If a directory, MSAF will function in collection mode. If audio file, MSAF will be in single file mode. annot_beats: bool Whether to use annotated beats or not. feature: str String representing the feature to be used (e.g. pcp, mfcc, tonnetz) framesync: str Whether to use framesync features or not (default: False -> beatsync) boundaries_id: str Identifier of the boundaries algorithm (use "gt" for groundtruth) labels_id: str Identifier of the labels algorithm (use None to not compute labels) hier : bool Whether to compute a hierarchical or flat segmentation. sonify_bounds: bool Whether to write an output audio file with the annotated boundaries or not (only available in Single File Mode). plot: bool Whether to plot the boundaries and labels against the ground truth. n_jobs: int Number of processes to run in parallel. Only available in collection mode. annotator_id: int Annotator identificator in the ground truth. config: dict Dictionary containing custom configuration parameters for the algorithms. If None, the default parameters are used. out_bounds: str Path to the output for the sonified boundaries (only in single file mode, when sonify_bounds is True. out_sr : int Sampling rate for the sonified bounds. Returns ------- results : list List containing tuples of (est_times, est_labels) of estimated boundary times and estimated labels. If labels_id is None, est_labels will be a list of -1.
msaf/run.py
def process(in_path, annot_beats=False, feature="pcp", framesync=False, boundaries_id=msaf.config.default_bound_id, labels_id=msaf.config.default_label_id, hier=False, sonify_bounds=False, plot=False, n_jobs=4, annotator_id=0, config=None, out_bounds="out_bounds.wav", out_sr=22050): """Main process to segment a file or a collection of files. Parameters ---------- in_path: str Input path. If a directory, MSAF will function in collection mode. If audio file, MSAF will be in single file mode. annot_beats: bool Whether to use annotated beats or not. feature: str String representing the feature to be used (e.g. pcp, mfcc, tonnetz) framesync: str Whether to use framesync features or not (default: False -> beatsync) boundaries_id: str Identifier of the boundaries algorithm (use "gt" for groundtruth) labels_id: str Identifier of the labels algorithm (use None to not compute labels) hier : bool Whether to compute a hierarchical or flat segmentation. sonify_bounds: bool Whether to write an output audio file with the annotated boundaries or not (only available in Single File Mode). plot: bool Whether to plot the boundaries and labels against the ground truth. n_jobs: int Number of processes to run in parallel. Only available in collection mode. annotator_id: int Annotator identificator in the ground truth. config: dict Dictionary containing custom configuration parameters for the algorithms. If None, the default parameters are used. out_bounds: str Path to the output for the sonified boundaries (only in single file mode, when sonify_bounds is True. out_sr : int Sampling rate for the sonified bounds. Returns ------- results : list List containing tuples of (est_times, est_labels) of estimated boundary times and estimated labels. If labels_id is None, est_labels will be a list of -1. """ # Seed random to reproduce results np.random.seed(123) # Set up configuration based on algorithms parameters if config is None: config = io.get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id) config["features"] = None # Save multi-segment (hierarchical) configuration config["hier"] = hier if not os.path.exists(in_path): raise NoAudioFileError("File or directory does not exists, %s" % in_path) if os.path.isfile(in_path): # Single file mode # Get (if they exitst) or compute features file_struct = msaf.io.FileStruct(in_path) # Use temporary file in single mode file_struct.features_file = msaf.config.features_tmp_file # Get features config["features"] = Features.select_features( feature, file_struct, annot_beats, framesync) # And run the algorithms est_times, est_labels = run_algorithms(file_struct, boundaries_id, labels_id, config, annotator_id=annotator_id) if sonify_bounds: logging.info("Sonifying boundaries in %s..." % out_bounds) audio_hq, sr = librosa.load(in_path, sr=out_sr) utils.sonify_clicks(audio_hq, est_times, out_bounds, out_sr) if plot: plotting.plot_one_track(file_struct, est_times, est_labels, boundaries_id, labels_id) # TODO: Only save if needed # Save estimations msaf.utils.ensure_dir(os.path.dirname(file_struct.est_file)) io.save_estimations(file_struct, est_times, est_labels, boundaries_id, labels_id, **config) return est_times, est_labels else: # Collection mode file_structs = io.get_dataset_files(in_path) return Parallel(n_jobs=n_jobs)(delayed(process_track)( file_struct, boundaries_id, labels_id, config, annotator_id=annotator_id) for file_struct in file_structs[:])
def process(in_path, annot_beats=False, feature="pcp", framesync=False, boundaries_id=msaf.config.default_bound_id, labels_id=msaf.config.default_label_id, hier=False, sonify_bounds=False, plot=False, n_jobs=4, annotator_id=0, config=None, out_bounds="out_bounds.wav", out_sr=22050): """Main process to segment a file or a collection of files. Parameters ---------- in_path: str Input path. If a directory, MSAF will function in collection mode. If audio file, MSAF will be in single file mode. annot_beats: bool Whether to use annotated beats or not. feature: str String representing the feature to be used (e.g. pcp, mfcc, tonnetz) framesync: str Whether to use framesync features or not (default: False -> beatsync) boundaries_id: str Identifier of the boundaries algorithm (use "gt" for groundtruth) labels_id: str Identifier of the labels algorithm (use None to not compute labels) hier : bool Whether to compute a hierarchical or flat segmentation. sonify_bounds: bool Whether to write an output audio file with the annotated boundaries or not (only available in Single File Mode). plot: bool Whether to plot the boundaries and labels against the ground truth. n_jobs: int Number of processes to run in parallel. Only available in collection mode. annotator_id: int Annotator identificator in the ground truth. config: dict Dictionary containing custom configuration parameters for the algorithms. If None, the default parameters are used. out_bounds: str Path to the output for the sonified boundaries (only in single file mode, when sonify_bounds is True. out_sr : int Sampling rate for the sonified bounds. Returns ------- results : list List containing tuples of (est_times, est_labels) of estimated boundary times and estimated labels. If labels_id is None, est_labels will be a list of -1. """ # Seed random to reproduce results np.random.seed(123) # Set up configuration based on algorithms parameters if config is None: config = io.get_configuration(feature, annot_beats, framesync, boundaries_id, labels_id) config["features"] = None # Save multi-segment (hierarchical) configuration config["hier"] = hier if not os.path.exists(in_path): raise NoAudioFileError("File or directory does not exists, %s" % in_path) if os.path.isfile(in_path): # Single file mode # Get (if they exitst) or compute features file_struct = msaf.io.FileStruct(in_path) # Use temporary file in single mode file_struct.features_file = msaf.config.features_tmp_file # Get features config["features"] = Features.select_features( feature, file_struct, annot_beats, framesync) # And run the algorithms est_times, est_labels = run_algorithms(file_struct, boundaries_id, labels_id, config, annotator_id=annotator_id) if sonify_bounds: logging.info("Sonifying boundaries in %s..." % out_bounds) audio_hq, sr = librosa.load(in_path, sr=out_sr) utils.sonify_clicks(audio_hq, est_times, out_bounds, out_sr) if plot: plotting.plot_one_track(file_struct, est_times, est_labels, boundaries_id, labels_id) # TODO: Only save if needed # Save estimations msaf.utils.ensure_dir(os.path.dirname(file_struct.est_file)) io.save_estimations(file_struct, est_times, est_labels, boundaries_id, labels_id, **config) return est_times, est_labels else: # Collection mode file_structs = io.get_dataset_files(in_path) return Parallel(n_jobs=n_jobs)(delayed(process_track)( file_struct, boundaries_id, labels_id, config, annotator_id=annotator_id) for file_struct in file_structs[:])
[ "Main", "process", "to", "segment", "a", "file", "or", "a", "collection", "of", "files", "." ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/run.py#L265-L368
[ "def", "process", "(", "in_path", ",", "annot_beats", "=", "False", ",", "feature", "=", "\"pcp\"", ",", "framesync", "=", "False", ",", "boundaries_id", "=", "msaf", ".", "config", ".", "default_bound_id", ",", "labels_id", "=", "msaf", ".", "config", ".", "default_label_id", ",", "hier", "=", "False", ",", "sonify_bounds", "=", "False", ",", "plot", "=", "False", ",", "n_jobs", "=", "4", ",", "annotator_id", "=", "0", ",", "config", "=", "None", ",", "out_bounds", "=", "\"out_bounds.wav\"", ",", "out_sr", "=", "22050", ")", ":", "# Seed random to reproduce results", "np", ".", "random", ".", "seed", "(", "123", ")", "# Set up configuration based on algorithms parameters", "if", "config", "is", "None", ":", "config", "=", "io", ".", "get_configuration", "(", "feature", ",", "annot_beats", ",", "framesync", ",", "boundaries_id", ",", "labels_id", ")", "config", "[", "\"features\"", "]", "=", "None", "# Save multi-segment (hierarchical) configuration", "config", "[", "\"hier\"", "]", "=", "hier", "if", "not", "os", ".", "path", ".", "exists", "(", "in_path", ")", ":", "raise", "NoAudioFileError", "(", "\"File or directory does not exists, %s\"", "%", "in_path", ")", "if", "os", ".", "path", ".", "isfile", "(", "in_path", ")", ":", "# Single file mode", "# Get (if they exitst) or compute features", "file_struct", "=", "msaf", ".", "io", ".", "FileStruct", "(", "in_path", ")", "# Use temporary file in single mode", "file_struct", ".", "features_file", "=", "msaf", ".", "config", ".", "features_tmp_file", "# Get features", "config", "[", "\"features\"", "]", "=", "Features", ".", "select_features", "(", "feature", ",", "file_struct", ",", "annot_beats", ",", "framesync", ")", "# And run the algorithms", "est_times", ",", "est_labels", "=", "run_algorithms", "(", "file_struct", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "annotator_id", ")", "if", "sonify_bounds", ":", "logging", ".", "info", "(", "\"Sonifying boundaries in %s...\"", "%", "out_bounds", ")", "audio_hq", ",", "sr", "=", "librosa", ".", "load", "(", "in_path", ",", "sr", "=", "out_sr", ")", "utils", ".", "sonify_clicks", "(", "audio_hq", ",", "est_times", ",", "out_bounds", ",", "out_sr", ")", "if", "plot", ":", "plotting", ".", "plot_one_track", "(", "file_struct", ",", "est_times", ",", "est_labels", ",", "boundaries_id", ",", "labels_id", ")", "# TODO: Only save if needed", "# Save estimations", "msaf", ".", "utils", ".", "ensure_dir", "(", "os", ".", "path", ".", "dirname", "(", "file_struct", ".", "est_file", ")", ")", "io", ".", "save_estimations", "(", "file_struct", ",", "est_times", ",", "est_labels", ",", "boundaries_id", ",", "labels_id", ",", "*", "*", "config", ")", "return", "est_times", ",", "est_labels", "else", ":", "# Collection mode", "file_structs", "=", "io", ".", "get_dataset_files", "(", "in_path", ")", "return", "Parallel", "(", "n_jobs", "=", "n_jobs", ")", "(", "delayed", "(", "process_track", ")", "(", "file_struct", ",", "boundaries_id", ",", "labels_id", ",", "config", ",", "annotator_id", "=", "annotator_id", ")", "for", "file_struct", "in", "file_structs", "[", ":", "]", ")" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
AA.update_w
alternating least squares step, update W under the convexity constraint
msaf/pymf/aa.py
def update_w(self): """ alternating least squares step, update W under the convexity constraint """ def update_single_w(i): """ compute single W[:,i] """ # optimize beta using qp solver from cvxopt FB = base.matrix(np.float64(np.dot(-self.data.T, W_hat[:,i]))) be = solvers.qp(HB, FB, INQa, INQb, EQa, EQb) self.beta[i,:] = np.array(be['x']).reshape((1, self._num_samples)) # float64 required for cvxopt HB = base.matrix(np.float64(np.dot(self.data[:,:].T, self.data[:,:]))) EQb = base.matrix(1.0, (1, 1)) W_hat = np.dot(self.data, pinv(self.H)) INQa = base.matrix(-np.eye(self._num_samples)) INQb = base.matrix(0.0, (self._num_samples, 1)) EQa = base.matrix(1.0, (1, self._num_samples)) for i in range(self._num_bases): update_single_w(i) self.W = np.dot(self.beta, self.data.T).T
def update_w(self): """ alternating least squares step, update W under the convexity constraint """ def update_single_w(i): """ compute single W[:,i] """ # optimize beta using qp solver from cvxopt FB = base.matrix(np.float64(np.dot(-self.data.T, W_hat[:,i]))) be = solvers.qp(HB, FB, INQa, INQb, EQa, EQb) self.beta[i,:] = np.array(be['x']).reshape((1, self._num_samples)) # float64 required for cvxopt HB = base.matrix(np.float64(np.dot(self.data[:,:].T, self.data[:,:]))) EQb = base.matrix(1.0, (1, 1)) W_hat = np.dot(self.data, pinv(self.H)) INQa = base.matrix(-np.eye(self._num_samples)) INQb = base.matrix(0.0, (self._num_samples, 1)) EQa = base.matrix(1.0, (1, self._num_samples)) for i in range(self._num_bases): update_single_w(i) self.W = np.dot(self.beta, self.data.T).T
[ "alternating", "least", "squares", "step", "update", "W", "under", "the", "convexity", "constraint" ]
urinieto/msaf
python
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/pymf/aa.py#L113-L134
[ "def", "update_w", "(", "self", ")", ":", "def", "update_single_w", "(", "i", ")", ":", "\"\"\" compute single W[:,i] \"\"\"", "# optimize beta using qp solver from cvxopt", "FB", "=", "base", ".", "matrix", "(", "np", ".", "float64", "(", "np", ".", "dot", "(", "-", "self", ".", "data", ".", "T", ",", "W_hat", "[", ":", ",", "i", "]", ")", ")", ")", "be", "=", "solvers", ".", "qp", "(", "HB", ",", "FB", ",", "INQa", ",", "INQb", ",", "EQa", ",", "EQb", ")", "self", ".", "beta", "[", "i", ",", ":", "]", "=", "np", ".", "array", "(", "be", "[", "'x'", "]", ")", ".", "reshape", "(", "(", "1", ",", "self", ".", "_num_samples", ")", ")", "# float64 required for cvxopt", "HB", "=", "base", ".", "matrix", "(", "np", ".", "float64", "(", "np", ".", "dot", "(", "self", ".", "data", "[", ":", ",", ":", "]", ".", "T", ",", "self", ".", "data", "[", ":", ",", ":", "]", ")", ")", ")", "EQb", "=", "base", ".", "matrix", "(", "1.0", ",", "(", "1", ",", "1", ")", ")", "W_hat", "=", "np", ".", "dot", "(", "self", ".", "data", ",", "pinv", "(", "self", ".", "H", ")", ")", "INQa", "=", "base", ".", "matrix", "(", "-", "np", ".", "eye", "(", "self", ".", "_num_samples", ")", ")", "INQb", "=", "base", ".", "matrix", "(", "0.0", ",", "(", "self", ".", "_num_samples", ",", "1", ")", ")", "EQa", "=", "base", ".", "matrix", "(", "1.0", ",", "(", "1", ",", "self", ".", "_num_samples", ")", ")", "for", "i", "in", "range", "(", "self", ".", "_num_bases", ")", ":", "update_single_w", "(", "i", ")", "self", ".", "W", "=", "np", ".", "dot", "(", "self", ".", "beta", ",", "self", ".", "data", ".", "T", ")", ".", "T" ]
9dbb57d77a1310465a65cc40f1641d083ca74385
test
main
Main Entry point for translator and argument parser
translate/__main__.py
def main(): ''' Main Entry point for translator and argument parser ''' args = command_line() translate = partial(translator, args.source, args.dest, version=' '.join([__version__, __build__])) return source(spool(set_task(translate, translit=args.translit)), args.text)
def main(): ''' Main Entry point for translator and argument parser ''' args = command_line() translate = partial(translator, args.source, args.dest, version=' '.join([__version__, __build__])) return source(spool(set_task(translate, translit=args.translit)), args.text)
[ "Main", "Entry", "point", "for", "translator", "and", "argument", "parser" ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/__main__.py#L107-L115
[ "def", "main", "(", ")", ":", "args", "=", "command_line", "(", ")", "translate", "=", "partial", "(", "translator", ",", "args", ".", "source", ",", "args", ".", "dest", ",", "version", "=", "' '", ".", "join", "(", "[", "__version__", ",", "__build__", "]", ")", ")", "return", "source", "(", "spool", "(", "set_task", "(", "translate", ",", "translit", "=", "args", ".", "translit", ")", ")", ",", "args", ".", "text", ")" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
coroutine
Initializes coroutine essentially priming it to the yield statement. Used as a decorator over functions that generate coroutines. .. code-block:: python # Basic coroutine producer/consumer pattern from translate import coroutine @coroutine def coroutine_foo(bar): try: while True: baz = (yield) bar.send(baz) except GeneratorExit: bar.close() :param func: Unprimed Generator :type func: Function :return: Initialized Coroutine :rtype: Function
translate/coroutines.py
def coroutine(func): """ Initializes coroutine essentially priming it to the yield statement. Used as a decorator over functions that generate coroutines. .. code-block:: python # Basic coroutine producer/consumer pattern from translate import coroutine @coroutine def coroutine_foo(bar): try: while True: baz = (yield) bar.send(baz) except GeneratorExit: bar.close() :param func: Unprimed Generator :type func: Function :return: Initialized Coroutine :rtype: Function """ @wraps(func) def initialization(*args, **kwargs): start = func(*args, **kwargs) next(start) return start return initialization
def coroutine(func): """ Initializes coroutine essentially priming it to the yield statement. Used as a decorator over functions that generate coroutines. .. code-block:: python # Basic coroutine producer/consumer pattern from translate import coroutine @coroutine def coroutine_foo(bar): try: while True: baz = (yield) bar.send(baz) except GeneratorExit: bar.close() :param func: Unprimed Generator :type func: Function :return: Initialized Coroutine :rtype: Function """ @wraps(func) def initialization(*args, **kwargs): start = func(*args, **kwargs) next(start) return start return initialization
[ "Initializes", "coroutine", "essentially", "priming", "it", "to", "the", "yield", "statement", ".", "Used", "as", "a", "decorator", "over", "functions", "that", "generate", "coroutines", "." ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/coroutines.py#L24-L59
[ "def", "coroutine", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "initialization", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "start", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "next", "(", "start", ")", "return", "start", "return", "initialization" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
accumulator
Generic accumulator function. .. code-block:: python # Simplest Form >>> a = 'this' + ' ' >>> b = 'that' >>> c = functools.reduce(accumulator, a, b) >>> c 'this that' # The type of the initial value determines output type. >>> a = 5 >>> b = Hello >>> c = functools.reduce(accumulator, a, b) >>> c 10 :param init: Initial Value :param update: Value to accumulate :return: Combined Values
translate/coroutines.py
def accumulator(init, update): """ Generic accumulator function. .. code-block:: python # Simplest Form >>> a = 'this' + ' ' >>> b = 'that' >>> c = functools.reduce(accumulator, a, b) >>> c 'this that' # The type of the initial value determines output type. >>> a = 5 >>> b = Hello >>> c = functools.reduce(accumulator, a, b) >>> c 10 :param init: Initial Value :param update: Value to accumulate :return: Combined Values """ return ( init + len(update) if isinstance(init, int) else init + update )
def accumulator(init, update): """ Generic accumulator function. .. code-block:: python # Simplest Form >>> a = 'this' + ' ' >>> b = 'that' >>> c = functools.reduce(accumulator, a, b) >>> c 'this that' # The type of the initial value determines output type. >>> a = 5 >>> b = Hello >>> c = functools.reduce(accumulator, a, b) >>> c 10 :param init: Initial Value :param update: Value to accumulate :return: Combined Values """ return ( init + len(update) if isinstance(init, int) else init + update )
[ "Generic", "accumulator", "function", "." ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/coroutines.py#L62-L91
[ "def", "accumulator", "(", "init", ",", "update", ")", ":", "return", "(", "init", "+", "len", "(", "update", ")", "if", "isinstance", "(", "init", ",", "int", ")", "else", "init", "+", "update", ")" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
write_stream
:param script: Translated Text :type script: Iterable :param output: Output Type (either 'trans' or 'translit') :type output: String
translate/coroutines.py
def write_stream(script, output='trans'): """ :param script: Translated Text :type script: Iterable :param output: Output Type (either 'trans' or 'translit') :type output: String """ first = operator.itemgetter(0) sentence, _ = script printer = partial(print, file=sys.stdout, end='') for line in sentence: if isinstance(first(line), str): printer(first(line)) else: printer(first(line).encode('UTF-8')) printer('\n') return sys.stdout.flush()
def write_stream(script, output='trans'): """ :param script: Translated Text :type script: Iterable :param output: Output Type (either 'trans' or 'translit') :type output: String """ first = operator.itemgetter(0) sentence, _ = script printer = partial(print, file=sys.stdout, end='') for line in sentence: if isinstance(first(line), str): printer(first(line)) else: printer(first(line).encode('UTF-8')) printer('\n') return sys.stdout.flush()
[ ":", "param", "script", ":", "Translated", "Text", ":", "type", "script", ":", "Iterable" ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/coroutines.py#L94-L114
[ "def", "write_stream", "(", "script", ",", "output", "=", "'trans'", ")", ":", "first", "=", "operator", ".", "itemgetter", "(", "0", ")", "sentence", ",", "_", "=", "script", "printer", "=", "partial", "(", "print", ",", "file", "=", "sys", ".", "stdout", ",", "end", "=", "''", ")", "for", "line", "in", "sentence", ":", "if", "isinstance", "(", "first", "(", "line", ")", ",", "str", ")", ":", "printer", "(", "first", "(", "line", ")", ")", "else", ":", "printer", "(", "first", "(", "line", ")", ".", "encode", "(", "'UTF-8'", ")", ")", "printer", "(", "'\\n'", ")", "return", "sys", ".", "stdout", ".", "flush", "(", ")" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
set_task
Task Setter Coroutine End point destination coroutine of a purely consumer type. Delegates Text IO to the `write_stream` function. :param translation_function: Translator :type translation_function: Function :param translit: Transliteration Switch :type translit: Boolean
translate/coroutines.py
def set_task(translator, translit=False): """ Task Setter Coroutine End point destination coroutine of a purely consumer type. Delegates Text IO to the `write_stream` function. :param translation_function: Translator :type translation_function: Function :param translit: Transliteration Switch :type translit: Boolean """ # Initialize Task Queue task = str() queue = list() # Function Partial output = ('translit' if translit else 'trans') stream = partial(write_stream, output=output) workers = ThreadPoolExecutor(max_workers=8) try: while True: task = yield queue.append(task) except GeneratorExit: list(map(stream, workers.map(translator, queue)))
def set_task(translator, translit=False): """ Task Setter Coroutine End point destination coroutine of a purely consumer type. Delegates Text IO to the `write_stream` function. :param translation_function: Translator :type translation_function: Function :param translit: Transliteration Switch :type translit: Boolean """ # Initialize Task Queue task = str() queue = list() # Function Partial output = ('translit' if translit else 'trans') stream = partial(write_stream, output=output) workers = ThreadPoolExecutor(max_workers=8) try: while True: task = yield queue.append(task) except GeneratorExit: list(map(stream, workers.map(translator, queue)))
[ "Task", "Setter", "Coroutine" ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/coroutines.py#L120-L149
[ "def", "set_task", "(", "translator", ",", "translit", "=", "False", ")", ":", "# Initialize Task Queue", "task", "=", "str", "(", ")", "queue", "=", "list", "(", ")", "# Function Partial", "output", "=", "(", "'translit'", "if", "translit", "else", "'trans'", ")", "stream", "=", "partial", "(", "write_stream", ",", "output", "=", "output", ")", "workers", "=", "ThreadPoolExecutor", "(", "max_workers", "=", "8", ")", "try", ":", "while", "True", ":", "task", "=", "yield", "queue", ".", "append", "(", "task", ")", "except", "GeneratorExit", ":", "list", "(", "map", "(", "stream", ",", "workers", ".", "map", "(", "translator", ",", "queue", ")", ")", ")" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
spool
Consumes text streams and spools them together for more io efficient processes. :param iterable: Sends text stream for further processing :type iterable: Coroutine :param maxlen: Maximum query string size :type maxlen: Integer
translate/coroutines.py
def spool(iterable, maxlen=1250): """ Consumes text streams and spools them together for more io efficient processes. :param iterable: Sends text stream for further processing :type iterable: Coroutine :param maxlen: Maximum query string size :type maxlen: Integer """ words = int() text = str() try: while True: while words < maxlen: stream = yield text = reduce(accumulator, stream, text) words = reduce(accumulator, stream, words) iterable.send(text) words = int() text = str() except GeneratorExit: iterable.send(text) iterable.close()
def spool(iterable, maxlen=1250): """ Consumes text streams and spools them together for more io efficient processes. :param iterable: Sends text stream for further processing :type iterable: Coroutine :param maxlen: Maximum query string size :type maxlen: Integer """ words = int() text = str() try: while True: while words < maxlen: stream = yield text = reduce(accumulator, stream, text) words = reduce(accumulator, stream, words) iterable.send(text) words = int() text = str() except GeneratorExit: iterable.send(text) iterable.close()
[ "Consumes", "text", "streams", "and", "spools", "them", "together", "for", "more", "io", "efficient", "processes", "." ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/coroutines.py#L152-L180
[ "def", "spool", "(", "iterable", ",", "maxlen", "=", "1250", ")", ":", "words", "=", "int", "(", ")", "text", "=", "str", "(", ")", "try", ":", "while", "True", ":", "while", "words", "<", "maxlen", ":", "stream", "=", "yield", "text", "=", "reduce", "(", "accumulator", ",", "stream", ",", "text", ")", "words", "=", "reduce", "(", "accumulator", ",", "stream", ",", "words", ")", "iterable", ".", "send", "(", "text", ")", "words", "=", "int", "(", ")", "text", "=", "str", "(", ")", "except", "GeneratorExit", ":", "iterable", ".", "send", "(", "text", ")", "iterable", ".", "close", "(", ")" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
source
Coroutine starting point. Produces text stream and forwards to consumers :param target: Target coroutine consumer :type target: Coroutine :param inputstream: Input Source :type inputstream: BufferedTextIO Object
translate/coroutines.py
def source(target, inputstream=sys.stdin): """ Coroutine starting point. Produces text stream and forwards to consumers :param target: Target coroutine consumer :type target: Coroutine :param inputstream: Input Source :type inputstream: BufferedTextIO Object """ for line in inputstream: while len(line) > 600: init, sep, line = line.partition(' ') assert len(init) <= 600 target.send(''.join([init, sep])) target.send(line) inputstream.close() return target.close()
def source(target, inputstream=sys.stdin): """ Coroutine starting point. Produces text stream and forwards to consumers :param target: Target coroutine consumer :type target: Coroutine :param inputstream: Input Source :type inputstream: BufferedTextIO Object """ for line in inputstream: while len(line) > 600: init, sep, line = line.partition(' ') assert len(init) <= 600 target.send(''.join([init, sep])) target.send(line) inputstream.close() return target.close()
[ "Coroutine", "starting", "point", ".", "Produces", "text", "stream", "and", "forwards", "to", "consumers" ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/coroutines.py#L183-L204
[ "def", "source", "(", "target", ",", "inputstream", "=", "sys", ".", "stdin", ")", ":", "for", "line", "in", "inputstream", ":", "while", "len", "(", "line", ")", ">", "600", ":", "init", ",", "sep", ",", "line", "=", "line", ".", "partition", "(", "' '", ")", "assert", "len", "(", "init", ")", "<=", "600", "target", ".", "send", "(", "''", ".", "join", "(", "[", "init", ",", "sep", "]", ")", ")", "target", ".", "send", "(", "line", ")", "inputstream", ".", "close", "(", ")", "return", "target", ".", "close", "(", ")" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
push_url
Decorates a function returning the url of translation API. Creates and maintains HTTP connection state Returns a dict response object from the server containing the translated text and metadata of the request body :param interface: Callable Request Interface :type interface: Function
translate/translator.py
def push_url(interface): ''' Decorates a function returning the url of translation API. Creates and maintains HTTP connection state Returns a dict response object from the server containing the translated text and metadata of the request body :param interface: Callable Request Interface :type interface: Function ''' @functools.wraps(interface) def connection(*args, **kwargs): """ Extends and wraps a HTTP interface. :return: Response Content :rtype: Dictionary """ session = Session() session.mount('http://', HTTPAdapter(max_retries=2)) session.mount('https://', HTTPAdapter(max_retries=2)) request = Request(**interface(*args, **kwargs)) prepare = session.prepare_request(request) response = session.send(prepare, verify=True) if response.status_code != requests.codes.ok: response.raise_for_status() cleanup = re.subn(r',(?=,)', '', response.content.decode('utf-8'))[0] return json.loads(cleanup.replace(r'\xA0', r' ').replace('[,', '[1,'), encoding='UTF-8') return connection
def push_url(interface): ''' Decorates a function returning the url of translation API. Creates and maintains HTTP connection state Returns a dict response object from the server containing the translated text and metadata of the request body :param interface: Callable Request Interface :type interface: Function ''' @functools.wraps(interface) def connection(*args, **kwargs): """ Extends and wraps a HTTP interface. :return: Response Content :rtype: Dictionary """ session = Session() session.mount('http://', HTTPAdapter(max_retries=2)) session.mount('https://', HTTPAdapter(max_retries=2)) request = Request(**interface(*args, **kwargs)) prepare = session.prepare_request(request) response = session.send(prepare, verify=True) if response.status_code != requests.codes.ok: response.raise_for_status() cleanup = re.subn(r',(?=,)', '', response.content.decode('utf-8'))[0] return json.loads(cleanup.replace(r'\xA0', r' ').replace('[,', '[1,'), encoding='UTF-8') return connection
[ "Decorates", "a", "function", "returning", "the", "url", "of", "translation", "API", ".", "Creates", "and", "maintains", "HTTP", "connection", "state" ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/translator.py#L22-L57
[ "def", "push_url", "(", "interface", ")", ":", "@", "functools", ".", "wraps", "(", "interface", ")", "def", "connection", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Extends and wraps a HTTP interface.\n\n :return: Response Content\n :rtype: Dictionary\n \"\"\"", "session", "=", "Session", "(", ")", "session", ".", "mount", "(", "'http://'", ",", "HTTPAdapter", "(", "max_retries", "=", "2", ")", ")", "session", ".", "mount", "(", "'https://'", ",", "HTTPAdapter", "(", "max_retries", "=", "2", ")", ")", "request", "=", "Request", "(", "*", "*", "interface", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "prepare", "=", "session", ".", "prepare_request", "(", "request", ")", "response", "=", "session", ".", "send", "(", "prepare", ",", "verify", "=", "True", ")", "if", "response", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "response", ".", "raise_for_status", "(", ")", "cleanup", "=", "re", ".", "subn", "(", "r',(?=,)'", ",", "''", ",", "response", ".", "content", ".", "decode", "(", "'utf-8'", ")", ")", "[", "0", "]", "return", "json", ".", "loads", "(", "cleanup", ".", "replace", "(", "r'\\xA0'", ",", "r' '", ")", ".", "replace", "(", "'[,'", ",", "'[1,'", ")", ",", "encoding", "=", "'UTF-8'", ")", "return", "connection" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
translator
Returns the url encoded string that will be pushed to the translation server for parsing. List of acceptable language codes for source and target languages can be found as a JSON file in the etc directory. Some source languages are limited in scope of the possible target languages that are available. .. code-block:: python >>> from translate import translator >>> translator('en', 'zh-TW', 'Hello World!') '你好世界!' :param source: Language code for translation source :type source: String :param target: Language code that source will be translate into :type target: String :param phrase: Text body string that will be url encoded and translated :type phrase: String :return: Request Interface :rtype: Dictionary
translate/translator.py
def translator(source, target, phrase, version='0.0 test', charset='utf-8'): """ Returns the url encoded string that will be pushed to the translation server for parsing. List of acceptable language codes for source and target languages can be found as a JSON file in the etc directory. Some source languages are limited in scope of the possible target languages that are available. .. code-block:: python >>> from translate import translator >>> translator('en', 'zh-TW', 'Hello World!') '你好世界!' :param source: Language code for translation source :type source: String :param target: Language code that source will be translate into :type target: String :param phrase: Text body string that will be url encoded and translated :type phrase: String :return: Request Interface :rtype: Dictionary """ url = 'https://translate.google.com/translate_a/single' agent = 'User-Agent', 'py-translate v{}'.format(version) content = 'Content-Type', 'application/json; charset={}'.format(charset) params = {'client': 'a', 'ie': charset, 'oe': charset, 'dt': 't', 'sl': source, 'tl': target, 'q': phrase} request = {'method': 'GET', 'url': url, 'params': params, 'headers': dict([agent, content])} return request
def translator(source, target, phrase, version='0.0 test', charset='utf-8'): """ Returns the url encoded string that will be pushed to the translation server for parsing. List of acceptable language codes for source and target languages can be found as a JSON file in the etc directory. Some source languages are limited in scope of the possible target languages that are available. .. code-block:: python >>> from translate import translator >>> translator('en', 'zh-TW', 'Hello World!') '你好世界!' :param source: Language code for translation source :type source: String :param target: Language code that source will be translate into :type target: String :param phrase: Text body string that will be url encoded and translated :type phrase: String :return: Request Interface :rtype: Dictionary """ url = 'https://translate.google.com/translate_a/single' agent = 'User-Agent', 'py-translate v{}'.format(version) content = 'Content-Type', 'application/json; charset={}'.format(charset) params = {'client': 'a', 'ie': charset, 'oe': charset, 'dt': 't', 'sl': source, 'tl': target, 'q': phrase} request = {'method': 'GET', 'url': url, 'params': params, 'headers': dict([agent, content])} return request
[ "Returns", "the", "url", "encoded", "string", "that", "will", "be", "pushed", "to", "the", "translation", "server", "for", "parsing", "." ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/translator.py#L60-L102
[ "def", "translator", "(", "source", ",", "target", ",", "phrase", ",", "version", "=", "'0.0 test'", ",", "charset", "=", "'utf-8'", ")", ":", "url", "=", "'https://translate.google.com/translate_a/single'", "agent", "=", "'User-Agent'", ",", "'py-translate v{}'", ".", "format", "(", "version", ")", "content", "=", "'Content-Type'", ",", "'application/json; charset={}'", ".", "format", "(", "charset", ")", "params", "=", "{", "'client'", ":", "'a'", ",", "'ie'", ":", "charset", ",", "'oe'", ":", "charset", ",", "'dt'", ":", "'t'", ",", "'sl'", ":", "source", ",", "'tl'", ":", "target", ",", "'q'", ":", "phrase", "}", "request", "=", "{", "'method'", ":", "'GET'", ",", "'url'", ":", "url", ",", "'params'", ":", "params", ",", "'headers'", ":", "dict", "(", "[", "agent", ",", "content", "]", ")", "}", "return", "request" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
translation_table
Opens up file located under the etc directory containing language codes and prints them out. :param file: Path to location of json file :type file: str :return: language codes :rtype: dict
translate/languages.py
def translation_table(language, filepath='supported_translations.json'): ''' Opens up file located under the etc directory containing language codes and prints them out. :param file: Path to location of json file :type file: str :return: language codes :rtype: dict ''' fullpath = abspath(join(dirname(__file__), 'etc', filepath)) if not isfile(fullpath): raise IOError('File does not exist at {0}'.format(fullpath)) with open(fullpath, 'rt') as fp: raw_data = json.load(fp).get(language, None) assert(raw_data is not None) return dict((code['language'], code['name']) for code in raw_data)
def translation_table(language, filepath='supported_translations.json'): ''' Opens up file located under the etc directory containing language codes and prints them out. :param file: Path to location of json file :type file: str :return: language codes :rtype: dict ''' fullpath = abspath(join(dirname(__file__), 'etc', filepath)) if not isfile(fullpath): raise IOError('File does not exist at {0}'.format(fullpath)) with open(fullpath, 'rt') as fp: raw_data = json.load(fp).get(language, None) assert(raw_data is not None) return dict((code['language'], code['name']) for code in raw_data)
[ "Opens", "up", "file", "located", "under", "the", "etc", "directory", "containing", "language", "codes", "and", "prints", "them", "out", "." ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/languages.py#L12-L32
[ "def", "translation_table", "(", "language", ",", "filepath", "=", "'supported_translations.json'", ")", ":", "fullpath", "=", "abspath", "(", "join", "(", "dirname", "(", "__file__", ")", ",", "'etc'", ",", "filepath", ")", ")", "if", "not", "isfile", "(", "fullpath", ")", ":", "raise", "IOError", "(", "'File does not exist at {0}'", ".", "format", "(", "fullpath", ")", ")", "with", "open", "(", "fullpath", ",", "'rt'", ")", "as", "fp", ":", "raw_data", "=", "json", ".", "load", "(", "fp", ")", ".", "get", "(", "language", ",", "None", ")", "assert", "(", "raw_data", "is", "not", "None", ")", "return", "dict", "(", "(", "code", "[", "'language'", "]", ",", "code", "[", "'name'", "]", ")", "for", "code", "in", "raw_data", ")" ]
fe6279b2ee353f42ce73333ffae104e646311956
test
print_table
Generates a formatted table of language codes
translate/languages.py
def print_table(language): ''' Generates a formatted table of language codes ''' table = translation_table(language) for code, name in sorted(table.items(), key=operator.itemgetter(0)): print(u'{language:<8} {name:\u3000<20}'.format( name=name, language=code )) return None
def print_table(language): ''' Generates a formatted table of language codes ''' table = translation_table(language) for code, name in sorted(table.items(), key=operator.itemgetter(0)): print(u'{language:<8} {name:\u3000<20}'.format( name=name, language=code )) return None
[ "Generates", "a", "formatted", "table", "of", "language", "codes" ]
jjangsangy/py-translate
python
https://github.com/jjangsangy/py-translate/blob/fe6279b2ee353f42ce73333ffae104e646311956/translate/languages.py#L35-L46
[ "def", "print_table", "(", "language", ")", ":", "table", "=", "translation_table", "(", "language", ")", "for", "code", ",", "name", "in", "sorted", "(", "table", ".", "items", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "0", ")", ")", ":", "print", "(", "u'{language:<8} {name:\\u3000<20}'", ".", "format", "(", "name", "=", "name", ",", "language", "=", "code", ")", ")", "return", "None" ]
fe6279b2ee353f42ce73333ffae104e646311956