| INSTRUCTION | RESPONSE |
|---|---|
Automatically adds a dependency of a process.
|
def _add_dependency(self, p, template, inlane, outlane, pid):
"""Automatically Adds a dependency of a process.
This method adds a template to the process list attribute as a
dependency. It will adapt the input lane, output lane and process
id of the process that depends on it.
Parameters
----------
p : Process
Process class that contains the dependency.
template : str
Template name of the dependency.
inlane : int
Input lane.
outlane : int
Output lane.
pid : int
Process ID.
"""
dependency_proc = self.process_map[template](template=template)
if dependency_proc.input_type != p.input_type:
logger.error("Cannot automatically add dependency with different"
" input type. Input type of process '{}' is '{}."
" Input type of dependency '{}' is '{}'".format(
p.template, p.input_type, template,
dependency_proc.input_type))
input_suf = "{}_{}_dep".format(inlane, pid)
output_suf = "{}_{}_dep".format(outlane, pid)
dependency_proc.set_main_channel_names(input_suf, output_suf, outlane)
# To insert the dependency process before the current process, we'll
# need to move the input channel name of the later to the former, and
# set a new connection between the dependency and the process.
dependency_proc.input_channel = p.input_channel
p.input_channel = dependency_proc.output_channel
# If the current process was the first in the pipeline, change the
# lanes so that the dependency becomes the first process
if not p.parent_lane:
p.parent_lane = outlane
dependency_proc.parent_lane = None
else:
dependency_proc.parent_lane = inlane
p.parent_lane = outlane
self.processes.append(dependency_proc)
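The channel rewiring described in the comment above can be illustrated with a minimal, self-contained sketch. The ``_Proc`` class and channel names below are hypothetical stand-ins, not the real flowcraft Process class:

class _Proc:
    """Simplified stand-in for a pipeline process."""
    def __init__(self, name, input_channel, output_channel, parent_lane):
        self.template = name
        self.input_channel = input_channel
        self.output_channel = output_channel
        self.parent_lane = parent_lane


def insert_dependency(dep, proc, inlane, outlane):
    """Wire `dep` so that it sits immediately upstream of `proc`."""
    # The dependency takes over the channel that previously fed `proc`...
    dep.input_channel = proc.input_channel
    # ...and `proc` now consumes the dependency's output instead.
    proc.input_channel = dep.output_channel
    # If `proc` was the first process of its lane, the dependency becomes
    # the new head of that lane.
    if not proc.parent_lane:
        proc.parent_lane = outlane
        dep.parent_lane = None
    else:
        dep.parent_lane = inlane
        proc.parent_lane = outlane


proc = _Proc("assembly", "raw_IN_1", "assembly_OUT_2", parent_lane=None)
dep = _Proc("integrity_coverage", "dep_IN_1", "dep_OUT_1", parent_lane=None)
insert_dependency(dep, proc, inlane=1, outlane=1)
print(proc.input_channel)  # dep_OUT_1
print(dep.input_channel)   # raw_IN_1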
|
Searches the process tree backwards for a provided process
|
def _search_tree_backwards(self, template, parent_lanes):
"""Searches the process tree backwards in search of a provided process
The search takes into consideration the provided parent lanes and
searches only those
Parameters
----------
template : str
Name of the process template attribute being searched
parent_lanes : list
List of integers with the parent lanes to be searched
Returns
-------
bool
Returns True when the template is found. Otherwise returns False.
"""
for p in self.processes[::-1]:
# Ignore processes in different lanes
if p.lane not in parent_lanes:
continue
# template found
if p.template == template:
return True
return False
|
Adds the header template to the master template string
|
def _build_header(self):
"""Adds the header template to the master template string
"""
logger.debug("===============")
logger.debug("Building header")
logger.debug("===============")
self.template += hs.header
|
Adds the footer template to the master template string
|
def _build_footer(self):
"""Adds the footer template to the master template string"""
logger.debug("===============")
logger.debug("Building header")
logger.debug("===============")
self.template += fs.footer
|
Given a process, this method updates the :attr:`~Process.main_raw_inputs` attribute with the corresponding raw input channel of that process. The input channel and input type can be overridden if the input_channel and input_type arguments are provided.
|
def _update_raw_input(self, p, sink_channel=None, input_type=None):
"""Given a process, this method updates the
:attr:`~Process.main_raw_inputs` attribute with the corresponding
raw input channel of that process. The input channel and input type
can be overridden if the `input_channel` and `input_type` arguments
are provided.
Parameters
----------
p : flowcraft.Process.Process
Process instance whose raw input will be modified
sink_channel: str
Sets the channel where the raw input will fork into. It overrides
the process's `input_channel` attribute.
input_type: str
Sets the type of the raw input. It overrides the process's
`input_type` attribute.
"""
process_input = input_type if input_type else p.input_type
process_channel = sink_channel if sink_channel else p.input_channel
logger.debug("[{}] Setting raw input channel "
"with input type '{}'".format(p.template, process_input))
# Get the dictionary with the raw forking information for the
# provided input
raw_in = p.get_user_channel(process_channel, process_input)
logger.debug("[{}] Fetched process raw user: {}".format(p.template,
raw_in))
if process_input in self.main_raw_inputs:
self.main_raw_inputs[process_input]["raw_forks"].append(
raw_in["input_channel"])
else:
self.main_raw_inputs[process_input] = {
"channel": raw_in["channel"],
"channel_str": "{}\n{} = {}".format(
raw_in["checks"].format(raw_in["params"]),
raw_in["channel"],
raw_in["channel_str"].format(raw_in["params"])),
"raw_forks": [raw_in["input_channel"]]
}
logger.debug("[{}] Updated main raw inputs: {}".format(
p.template, self.main_raw_inputs))
|
Given a process, this method updates the :attr:`~Process.extra_inputs` attribute with the corresponding extra inputs of that process.
|
def _update_extra_inputs(self, p):
"""Given a process, this method updates the
:attr:`~Process.extra_inputs` attribute with the corresponding extra
inputs of that process
Parameters
----------
p : flowcraft.Process.Process
"""
if p.extra_input:
logger.debug("[{}] Found extra input: {}".format(
p.template, p.extra_input))
if p.extra_input == "default":
# Check if the default type is now present in the main raw
# inputs. If so, issue an error. The default param can only
# be used when not present in the main raw inputs
if p.input_type in self.main_raw_inputs:
logger.error(colored_print(
"\nThe default input param '{}' of the process '{}'"
" is already specified as a main input parameter of"
" the pipeline. Please choose a different extra_input"
" name.".format(p.input_type, p.template), "red_bold"))
sys.exit(1)
param = p.input_type
else:
param = p.extra_input
dest_channel = "EXTRA_{}_{}".format(p.template, p.pid)
if param not in self.extra_inputs:
self.extra_inputs[param] = {
"input_type": p.input_type,
"channels": [dest_channel]
}
else:
if self.extra_inputs[param]["input_type"] != p.input_type:
logger.error(colored_print(
"\nThe extra_input parameter '{}' for process"
" '{}' was already defined with a different "
"input type '{}'. Please choose a different "
"extra_input name.".format(
p.input_type, p.template,
self.extra_inputs[param]["input_type"]),
"red_bold"))
sys.exit(1)
self.extra_inputs[param]["channels"].append(dest_channel)
logger.debug("[{}] Added extra channel '{}' linked to param: '{}' "
"".format(p.template, param,
self.extra_inputs[param]))
p.update_main_input(
"{}.mix({})".format(p.input_channel, dest_channel)
)
|
Given a process, this method updates the :attr:`~Process.secondary_channels` attribute with the corresponding secondary inputs of that channel.
|
def _update_secondary_channels(self, p):
"""Given a process, this method updates the
:attr:`~Process.secondary_channels` attribute with the corresponding
secondary inputs of that channel.
The rationale of the secondary channels is the following:
- Start storing any secondary emitting channels, by checking the
`link_start` list attribute of each process. If there are
channel names in the link start, it adds to the secondary
channels dictionary.
- Check for secondary receiving channels, by checking the
`link_end` list attribute. If the link name starts with a
`__` signature, it will create an implicit link with the last
process with an output type after the signature. Otherwise,
it will check if a corresponding link start already exists in
at least one process upstream of the pipeline and, if so,
it will update the ``secondary_channels`` attribute with the
new link.
Parameters
----------
p : flowcraft.Process.Process
"""
# Check if the current process has a start of a secondary
# side channel
if p.link_start:
logger.debug("[{}] Found secondary link start: {}".format(
p.template, p.link_start))
for l in p.link_start:
# If there are multiple link starts in the same lane, the
# last one is the only one saved.
if l in self.secondary_channels:
self.secondary_channels[l][p.lane] = {"p": p, "end": []}
else:
self.secondary_channels[l] = {p.lane: {"p": p, "end": []}}
# check if the current process receives a secondary side channel.
# If so, add to the links list of that side channel
if p.link_end:
logger.debug("[{}] Found secondary link end: {}".format(
p.template, p.link_end))
for l in p.link_end:
# Get list of lanes from the parent forks.
parent_forks = self._get_fork_tree(p.lane)
# Parse special case where the secondary channel links with
# the main output of the specified type
if l["link"].startswith("__"):
self._set_implicit_link(p, l)
continue
# Skip if there is no match for the current link in the
# secondary channels
if l["link"] not in self.secondary_channels:
continue
for lane in parent_forks:
if lane in self.secondary_channels[l["link"]]:
self.secondary_channels[
l["link"]][lane]["end"].append("{}".format(
"{}_{}".format(l["alias"], p.pid)))
logger.debug("[{}] Secondary links updated: {}".format(
p.template, self.secondary_channels))
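A minimal standalone sketch of how the ``secondary_channels`` dictionary described above is populated; the link names are hypothetical and a plain string stands in for the process object:

secondary_channels = {}


def register_link_start(link_name, lane, process):
    # One start per (link, lane); a later start in the same lane overwrites it.
    secondary_channels.setdefault(link_name, {})[lane] = {"p": process, "end": []}


def register_link_end(link_name, alias, pid, parent_lanes):
    # Only lanes upstream of the receiving process are considered.
    if link_name not in secondary_channels:
        return
    for lane in parent_lanes:
        if lane in secondary_channels[link_name]:
            secondary_channels[link_name][lane]["end"].append(
                "{}_{}".format(alias, pid))


register_link_start("SIDE_max_len", lane=1, process="integrity_coverage")
register_link_end("SIDE_max_len", alias="SIDE_max_len", pid=3, parent_lanes=[1])
print(secondary_channels)
# {'SIDE_max_len': {1: {'p': 'integrity_coverage', 'end': ['SIDE_max_len_3']}}}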
|
Sets the main channels for the pipeline
|
def _set_channels(self):
"""Sets the main channels for the pipeline
This method will parse the :attr:`~Process.processes` attribute
and perform the following tasks for each process:
- Sets the input/output channels and main input forks and adds
them to the process's
:attr:`flowcraft.process.Process._context`
attribute (See
:func:`~NextflowGenerator.set_channels`).
- Automatically updates the main input channel of the first
process of each lane so that they fork from the user provided
parameters (See
:func:`~NextflowGenerator._update_raw_input`).
- Checks for the presence of secondary channels and adds them to the
:attr:`~NextflowGenerator.secondary_channels` attribute.
Notes
-----
**On the secondary channel setup**: With this approach, there can only
be one secondary link start for each type of secondary link. For
instance, if there are two processes that start a secondary channel
for the ``SIDE_max_len`` channel, only the last one will be recorded,
and all receiving processes will get the channel from the latest
process. Secondary channels can only link if the source process is
downstream of the sink process in its "forking" path.
"""
logger.debug("=====================")
logger.debug("Setting main channels")
logger.debug("=====================")
for i, p in enumerate(self.processes):
# Set main channels for the process
logger.debug("[{}] Setting main channels with pid: {}".format(
p.template, i))
p.set_channels(pid=i)
# If there is no parent lane, set the raw input channel from user
logger.debug("{} {} {}".format(p.parent_lane, p.input_type, p.template))
if not p.parent_lane and p.input_type:
self._update_raw_input(p)
self._update_extra_inputs(p)
self._update_secondary_channels(p)
logger.info(colored_print(
"\tChannels set for {} \u2713".format(p.template)))
|
Sets the main raw inputs and secondary inputs on the init process
|
def _set_init_process(self):
"""Sets the main raw inputs and secondary inputs on the init process
This method will fetch the :class:`flowcraft.process.Init` process
instance and sets the raw input (
:func:`flowcraft.process.Init.set_raw_inputs`) for
that process. This will handle the connection of the user parameters
with channels that are then consumed in the pipeline.
"""
logger.debug("========================")
logger.debug("Setting secondary inputs")
logger.debug("========================")
# Get init process
init_process = self.processes[0]
logger.debug("Setting main raw inputs: "
"{}".format(self.main_raw_inputs))
init_process.set_raw_inputs(self.main_raw_inputs)
logger.debug("Setting extra inputs: {}".format(self.extra_inputs))
init_process.set_extra_inputs(self.extra_inputs)
|
Sets the secondary channels for the pipeline
|
def _set_secondary_channels(self):
"""Sets the secondary channels for the pipeline
This will iterate over the
:py:attr:`NextflowGenerator.secondary_channels` dictionary that is
populated when executing
:func:`~NextflowGenerator._update_secondary_channels` method.
"""
logger.debug("==========================")
logger.debug("Setting secondary channels")
logger.debug("==========================")
logger.debug("Setting secondary channels: {}".format(
self.secondary_channels))
for source, lanes in self.secondary_channels.items():
for vals in lanes.values():
if not vals["end"]:
logger.debug("[{}] No secondary links to setup".format(
vals["p"].template))
continue
logger.debug("[{}] Setting secondary links for "
"source {}: {}".format(vals["p"].template,
source,
vals["end"]))
vals["p"].set_secondary_channel(source, vals["end"])
|
Adds compiler channels to the :attr:`processes` attribute.
|
def _set_general_compilers(self):
"""Adds compiler channels to the :attr:`processes` attribute.
This method will iterate over the pipeline's processes and check
if any process is feeding channels to a compiler process. If so, that
compiler process is added to the pipeline and those channels are
linked to the compiler via some operator.
"""
for c, c_info in self.compilers.items():
# Instantiate compiler class object and set empty channel list
compiler_cls = c_info["cls"](template=c_info["template"])
c_info["channels"] = []
for p in self.processes:
if not any([isinstance(p, x) for x in self.skip_class]):
# Check if process has channels to feed to a compiler
if c in p.compiler:
# Correct channel names according to the pid of the
# process
channels = ["{}_{}".format(i, p.pid) for i in
p.compiler[c]]
c_info["channels"].extend(channels)
# If one or more channels were detected, establish connections
# and append compiler to the process list.
if c_info["channels"]:
compiler_cls.set_compiler_channels(c_info["channels"],
operator="join")
self.processes.append(compiler_cls)
|
Compiles all status channels for the status compiler process
|
def _set_status_channels(self):
"""Compiles all status channels for the status compiler process
"""
status_inst = pc.StatusCompiler(template="status_compiler")
report_inst = pc.ReportCompiler(template="report_compiler")
# Compile status channels from pipeline process
status_channels = []
for p in self.processes:
if not any([isinstance(p, x) for x in self.skip_class]):
status_channels.extend(p.status_strs)
if not status_channels:
logger.debug("No status channels found. Skipping status compiler"
"process")
return
logger.debug("Setting status channels: {}".format(status_channels))
# Check for duplicate channels. Raise exception if found.
if len(status_channels) != len(set(status_channels)):
raise eh.ProcessError(
"Duplicate status channels detected. Please ensure that "
"the 'status_channels' attributes of each process are "
"unique. Here are the status channels:\n\n{}".format(
", ".join(status_channels)
))
status_inst.set_compiler_channels(status_channels)
# Convert the STATUS_ prefix of each channel into a REPORT_ prefix
report_channels = [x.replace("STATUS_", "REPORT_", 1) for x in
status_channels]
report_inst.set_compiler_channels(report_channels)
self.processes.extend([status_inst, report_inst])
|
Returns the nextflow resources string from a dictionary object
|
def _get_resources_string(res_dict, pid):
""" Returns the nextflow resources string from a dictionary object
If the dictionary has at least one of the resource directives, these
will be compiled for each process in the dictionary and returned
as a string ready for injection in the nextflow config file template.
This dictionary should be::
dict = {"processA": {"cpus": 1, "memory": "4GB"},
"processB": {"cpus": 2}}
Parameters
----------
res_dict : dict
Dictionary with the resources for processes.
pid : int
Unique identifier of the process
Returns
-------
str
nextflow config string
"""
config_str = ""
ignore_directives = ["container", "version"]
for p, directives in res_dict.items():
for d, val in directives.items():
if d in ignore_directives:
continue
config_str += '\n\t${}_{}.{} = {}'.format(p, pid, d, val)
return config_str
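As a quick standalone illustration (a re-implementation of the same loop, not the method itself), the example dictionary from the docstring would be rendered into the following config string:

res_dict = {"processA": {"cpus": 1, "memory": "4GB"},
            "processB": {"cpus": 2}}
pid = 3
config_str = ""
for proc, directives in res_dict.items():
    for d, val in directives.items():
        if d in ("container", "version"):   # ignored directives
            continue
        config_str += '\n\t${}_{}.{} = {}'.format(proc, pid, d, val)
print(config_str)
# 	$processA_3.cpus = 1
# 	$processA_3.memory = 4GB
# 	$processB_3.cpus = 2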
|
Returns the nextflow containers string from a dictionary object
|
def _get_container_string(cont_dict, pid):
""" Returns the nextflow containers string from a dictionary object
If the dictionary has at least one of the container directives, these
will be compiled for each process in the dictionary and returned
as a string ready for injection in the nextflow config file template.
This dictionary should be::
dict = {"processA": {"container": "asd", "version": "1.0.0"},
"processB": {"container": "dsd"}}
Parameters
----------
cont_dict : dict
Dictionary with the containers for processes.
pid : int
Unique identifier of the process
Returns
-------
str
nextflow config string
"""
config_str = ""
for p, directives in cont_dict.items():
container = ""
if "container" in directives:
container += directives["container"]
if "version" in directives:
container += ":{}".format(directives["version"])
else:
container += ":latest"
if container:
config_str += '\n\t${}_{}.container = "{}"'.format(p, pid, container)
return config_str
|
Returns the nextflow params string from a dictionary object.
|
def _get_params_string(self):
"""Returns the nextflow params string from a dictionary object.
The params dict should be a set of key:value pairs with the
parameter name, and the default parameter value::
self.params = {
"genomeSize": 2.1,
"minCoverage": 15
}
The values are then added to the string as they are. For instance,
a ``2.1`` float will appear as ``param = 2.1`` and a
``"'teste'" string will appear as ``param = 'teste'`` (Note the
string).
Returns
-------
str
Nextflow params configuration string
"""
params_str = ""
for p in self.processes:
logger.debug("[{}] Adding parameters: {}\n".format(
p.template, p.params)
)
# Add a header with the template name to structure the params
# configuration
if p.params and p.template != "init":
p.set_param_id("_{}".format(p.pid))
params_str += "\n\t/*"
params_str += "\n\tComponent '{}_{}'\n".format(p.template,
p.pid)
params_str += "\t{}\n".format("-" * (len(p.template) + len(p.pid) + 12))
params_str += "\t*/\n"
for param, val in p.params.items():
if p.template == "init":
param_id = param
else:
param_id = "{}_{}".format(param, p.pid)
params_str += "\t{} = {}\n".format(param_id, val["default"])
return params_str
|
Returns the merged nextflow params string from a dictionary object.
|
def _get_merged_params_string(self):
"""Returns the merged nextflow params string from a dictionary object.
The params dict should be a set of key:value pairs with the
parameter name, and the default parameter value::
self.params = {
"genomeSize": 2.1,
"minCoverage": 15
}
The values are then added to the string as they are. For instance,
a ``2.1`` float will appear as ``param = 2.1`` and a
``"'teste'" string will appear as ``param = 'teste'`` (Note the
string).
Identical parameters in multiple processes will be merged into the same
param.
Returns
-------
str
Nextflow params configuration string
"""
params_temp = {}
for p in self.processes:
logger.debug("[{}] Adding parameters: {}".format(p.template,
p.params))
for param, val in p.params.items():
params_temp[param] = val["default"]
config_str = "\n\t" + "\n\t".join([
"{} = {}".format(param, val) for param, val in params_temp.items()
])
return config_str
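A small standalone sketch of the merging behaviour, with hypothetical parameter values: identical parameter names across processes collapse into a single entry, and the last default encountered wins.

processes_params = [
    {"genomeSize": {"default": 2.1}, "minCoverage": {"default": 15}},
    {"minCoverage": {"default": 15}, "adapters": {"default": "'None'"}},
]
merged = {}
for params in processes_params:
    for name, val in params.items():
        merged[name] = val["default"]
config_str = "\n\t" + "\n\t".join(
    "{} = {}".format(name, val) for name, val in merged.items())
print(config_str)
# 	genomeSize = 2.1
# 	minCoverage = 15
# 	adapters = 'None'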
|
Returns the nextflow manifest config string to include in the config file from the information on the pipeline.
|
def _get_manifest_string(self):
"""Returns the nextflow manifest config string to include in the
config file from the information on the pipeline.
Returns
-------
str
Nextflow manifest configuration string
"""
config_str = ""
config_str += '\n\tname = "{}"'.format(self.pipeline_name)
config_str += '\n\tmainScript = "{}"'.format(self.nf_file)
return config_str
|
This method will iterate over all processes in the pipeline and populate the nextflow configuration files with the directives of each process in the pipeline.
|
def _set_configurations(self):
"""This method will iterate over all process in the pipeline and
populate the nextflow configuration files with the directives
of each process in the pipeline.
"""
logger.debug("======================")
logger.debug("Setting configurations")
logger.debug("======================")
resources = ""
containers = ""
params = ""
manifest = ""
if self.merge_params:
params += self._get_merged_params_string()
help_list = self._get_merged_params_help()
else:
params += self._get_params_string()
help_list = self._get_params_help()
for p in self.processes:
# Skip processes without the directives attribute populated
if not p.directives:
continue
logger.debug("[{}] Adding directives: {}".format(
p.template, p.directives))
resources += self._get_resources_string(p.directives, p.pid)
containers += self._get_container_string(p.directives, p.pid)
manifest = self._get_manifest_string()
self.resources = self._render_config("resources.config", {
"process_info": resources
})
self.containers = self._render_config("containers.config", {
"container_info": containers
})
self.params = self._render_config("params.config", {
"params_info": params
})
self.manifest = self._render_config("manifest.config", {
"manifest_info": manifest
})
self.help = self._render_config("Helper.groovy", {
"nf_file": basename(self.nf_file),
"help_list": help_list,
"version": __version__,
"pipeline_name": " ".join([x.upper() for x in self.pipeline_name])
})
self.user_config = self._render_config("user.config", {})
|
Writes dag to output file
|
def dag_to_file(self, dict_viz, output_file=".treeDag.json"):
"""Writes dag to output file
Parameters
----------
dict_viz: dict
Tree-like dictionary used to export the process tree data to the html
file and, here, to the dotfile .treeDag.json
"""
outfile_dag = open(os.path.join(dirname(self.nf_file), output_file)
, "w")
outfile_dag.write(json.dumps(dict_viz))
outfile_dag.close()
|
Write pipeline attributes to json
|
def render_pipeline(self):
"""Write pipeline attributes to json
This function writes the pipeline and its attributes to a json file,
that is intended to be read by resources/pipeline_graph.html to render
a graphical output showing the DAG.
"""
dict_viz = {
"name": "root",
"children": []
}
last_of_us = {}
f_tree = self._fork_tree if self._fork_tree else {1: [1]}
for x, (k, v) in enumerate(f_tree.items()):
for p in self.processes[1:]:
if x == 0 and p.lane not in [k] + v:
continue
if x > 0 and p.lane not in v:
continue
if not p.parent_lane:
lst = dict_viz["children"]
else:
lst = last_of_us[p.parent_lane]
tooltip = {
"name": "{}_{}".format(p.template, p.pid),
"process": {
"pid": p.pid,
"input": p.input_type,
"output": p.output_type if p.output_type else "None",
"lane": p.lane,
},
"children": []
}
dir_var = ""
for k2, v2 in p.directives.items():
dir_var += k2
for d in v2:
try:
# Remove quotes from string directives
directive = v2[d].replace("'", "").replace('"', '') \
if isinstance(v2[d], str) else v2[d]
dir_var += "{}: {}".format(d, directive)
except KeyError:
pass
if dir_var:
tooltip["process"]["directives"] = dir_var
else:
tooltip["process"]["directives"] = "N/A"
lst.append(tooltip)
last_of_us[p.lane] = lst[-1]["children"]
# write to file dict_viz
self.dag_to_file(dict_viz)
# Write tree forking information for dotfile
with open(os.path.join(dirname(self.nf_file),
".forkTree.json"), "w") as fh:
fh.write(json.dumps(self._fork_tree))
# send with jinja to html resource
return self._render_config("pipeline_graph.html", {"data": dict_viz})
|
Wrapper method that writes all configuration files to the pipeline directory
|
def write_configs(self, project_root):
"""Wrapper method that writes all configuration files to the pipeline
directory
"""
# Write resources config
with open(join(project_root, "resources.config"), "w") as fh:
fh.write(self.resources)
# Write containers config
with open(join(project_root, "containers.config"), "w") as fh:
fh.write(self.containers)
# Write params config
with open(join(project_root, "params.config"), "w") as fh:
fh.write(self.params)
# Write manifest config
with open(join(project_root, "manifest.config"), "w") as fh:
fh.write(self.manifest)
# Write user config if not present in the project directory
if not exists(join(project_root, "user.config")):
with open(join(project_root, "user.config"), "w") as fh:
fh.write(self.user_config)
lib_dir = join(project_root, "lib")
if not exists(lib_dir):
os.makedirs(lib_dir)
with open(join(lib_dir, "Helper.groovy"), "w") as fh:
fh.write(self.help)
# Generate the pipeline DAG
pipeline_to_json = self.render_pipeline()
with open(splitext(self.nf_file)[0] + ".html", "w") as fh:
fh.write(pipeline_to_json)
|
Export pipeline params as a JSON to stdout
|
def export_params(self):
"""Export pipeline params as a JSON to stdout
This run mode iterates over the pipeline processes and exports the
params dictionary of each component as a JSON to stdout.
"""
params_json = {}
# Skip first init process
for p in self.processes[1:]:
params_json[p.template] = p.params
# Flush params json to stdout
sys.stdout.write(json.dumps(params_json))
|
Export pipeline directives as a JSON to stdout
|
def export_directives(self):
"""Export pipeline directives as a JSON to stdout
"""
directives_json = {}
# Skip first init process
for p in self.processes[1:]:
directives_json[p.template] = p.directives
# Flush directives json to stdout
sys.stdout.write(json.dumps(directives_json))
|
Export all dockerhub tags associated with each component given by the -t flag.
|
def fetch_docker_tags(self):
"""
Export all dockerhub tags associated with each component given by
the -t flag.
"""
# dict to store the already parsed components (useful when forks are
# given to the pipeline string via the -t flag)
dict_of_parsed = {}
# fetches terminal width and subtracts 3 because we always add a
# new line character and we want a space at the beginning and at the end
# of each line
terminal_width = shutil.get_terminal_size().columns - 3
# first header
center_string = " Selected container tags "
# starts a list with the headers
tags_list = [
[
"=" * int(terminal_width / 4),
"{0}{1}{0}".format(
"=" * int(((terminal_width/2 - len(center_string)) / 2)),
center_string)
,
"{}\n".format("=" * int(terminal_width / 4))
],
["component", "container", "tags"],
[
"=" * int(terminal_width / 4),
"=" * int(terminal_width / 2),
"=" * int(terminal_width / 4)
]
]
# Skip first init process and iterate through the others
for p in self.processes[1:]:
template = p.template
# if component has already been printed then skip and don't print
# again
if template in dict_of_parsed:
continue
# starts a list of containers for the current process in
# dict_of_parsed, in which each container will be added to this
# list once it gets parsed
dict_of_parsed[template] = {
"container": []
}
# fetch repo name from directives of each component.
for directives in p.directives.values():
try:
repo = directives["container"]
default_version = directives["version"]
except KeyError:
# adds the default container if container key isn't present
# this happens for instance in integrity_coverage
repo = "flowcraft/flowcraft_base"
default_version = "1.0.0-1"
# checks if repo_version already exists in list of the
# containers for the current component being queried
repo_version = repo + default_version
if repo_version not in dict_of_parsed[template]["container"]:
# make the request to docker hub
r = requests.get(
"https://hub.docker.com/v2/repositories/{}/tags/"
.format(repo)
)
# checks the status code of the request, if it is 200 then
# parses docker hub entry, otherwise retrieve no tags but
# alerts the user
if r.status_code != 404:
# parse response content to dict and fetch results key
r_content = json.loads(r.content)["results"]
for version in r_content:
printed_version = (version["name"] + "*") \
if version["name"] == default_version \
else version["name"]
tags_list.append([template, repo, printed_version])
else:
tags_list.append([template, repo, "No DockerHub tags"])
dict_of_parsed[template]["container"].append(repo_version)
# iterate through each entry in tags_list and print the list of tags
# for each component. Each entry (excluding the headers) contains
# 3 elements (component name, container and tag version)
for x, entry in enumerate(tags_list):
# adds a different color to the header rows and then alternates
# between two colors for odd and even rows (different
# background)
color = "blue_bold" if x < 3 else \
("white" if x % 2 != 0 else "0;37;40m")
# generates a small list with the terminal width for each column,
# this will be given to string formatting as the 3, 4 and 5 element
final_width = [
int(terminal_width/4),
int(terminal_width/2),
int(terminal_width/4)
]
# writes the string to the stdout
sys.stdout.write(
colored_print("\n {0: <{3}} {1: ^{4}} {2: >{5}}".format(
*entry, *final_width), color)
)
# assures that the entire line gets the same color
sys.stdout.write("\n{0: >{1}}\n".format("(* = default)",
terminal_width + 3))
|
Main pipeline builder
|
def build(self):
"""Main pipeline builder
This method is responsible for building the
:py:attr:`NextflowGenerator.template` attribute that will contain
the nextflow code of the pipeline.
First it builds the header, then sets the main channels, the
secondary inputs, secondary channels and finally the
status channels. When the pipeline is built, it writes the code
to a nextflow file.
"""
logger.info(colored_print(
"\tSuccessfully connected {} process(es) with {} "
"fork(s) across {} lane(s) \u2713".format(
len(self.processes[1:]), len(self._fork_tree), self.lanes)))
# Generate regular nextflow header that sets up the shebang, imports
# and all possible initial channels
self._build_header()
self._set_channels()
self._set_init_process()
self._set_secondary_channels()
logger.info(colored_print(
"\tSuccessfully set {} secondary channel(s) \u2713".format(
len(self.secondary_channels))))
self._set_compiler_channels()
self._set_configurations()
logger.info(colored_print(
"\tFinished configurations \u2713"))
for p in self.processes:
self.template += "\n{}".format(p.template_str)
self._build_footer()
project_root = dirname(self.nf_file)
# Write configs
self.write_configs(project_root)
# Write pipeline file
with open(self.nf_file, "w") as fh:
fh.write(self.template)
logger.info(colored_print(
"\tPipeline written into {} \u2713".format(self.nf_file)))
|
Returns a kmer list based on the provided kmer option and max read len.
|
def set_kmers(kmer_opt, max_read_len):
"""Returns a kmer list based on the provided kmer option and max read len.
Parameters
----------
kmer_opt : str
The k-mer option. Can be either ``'auto'``, ``'default'`` or a
sequence of space separated integers, ``'23, 45, 67'``.
max_read_len : int
The maximum read length of the current sample.
Returns
-------
kmers : list
List of k-mer values that will be provided to SPAdes.
"""
logger.debug("Kmer option set to: {}".format(kmer_opt))
# Check if kmer option is set to auto
if kmer_opt == "auto":
if max_read_len >= 175:
kmers = [55, 77, 99, 113, 127]
else:
kmers = [21, 33, 55, 67, 77]
logger.debug("Kmer range automatically selected based on max read"
"length of {}: {}".format(max_read_len, kmers))
# Check if manual kmers were specified
elif len(kmer_opt.split()) > 1:
kmers = kmer_opt.split()
logger.debug("Kmer range manually set to: {}".format(kmers))
else:
kmers = []
logger.debug("Kmer range set to empty (will be automatically "
"determined by SPAdes")
return kmers
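Example calls for the different kmer options (a standalone sketch; the template module is assumed to already define the module-level ``logger`` used inside the function):

import logging

logger = logging.getLogger(__name__)  # stand-in for the template's logger

print(set_kmers("auto", 250))      # [55, 77, 99, 113, 127]
print(set_kmers("auto", 150))      # [21, 33, 55, 67, 77]
print(set_kmers("23 45 67", 150))  # ['23', '45', '67'] (note: strings)
print(set_kmers("default", 150))   # [] -> SPAdes picks the k-mers itself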
|
Main executor of the spades template.
|
def main(sample_id, fastq_pair, max_len, kmer, clear):
"""Main executor of the spades template.
Parameters
----------
sample_id : str
Sample Identification string.
fastq_pair : list
Two element list containing the paired FastQ files.
max_len : int
Maximum read length. This value is determined in
:py:class:`templates.integrity_coverage`
kmer : str
Can be either ``'auto'``, ``'default'`` or a
sequence of space separated integers, ``'23, 45, 67'``.
clear : str
If set to ``"true"``, the input FastQ files are removed after a
successful assembly.
"""
logger.info("Starting spades")
logger.info("Setting SPAdes kmers")
kmers = set_kmers(kmer, max_len)
logger.info("SPAdes kmers set to: {}".format(kmers))
cli = [
"metaspades.py",
"--only-assembler",
"--threads",
"$task.cpus",
"-o",
"."
]
# Add kmers, if any were specified
if kmers:
cli += ["-k {}".format(",".join([str(x) for x in kmers]))]
# Add FastQ files
cli += [
"-1",
fastq_pair[0],
"-2",
fastq_pair[1]
]
logger.debug("Running metaSPAdes subprocess with command: {}".format(cli))
p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
# Attempt to decode STDERR output from bytes. If unsuccessful, coerce to
# string
try:
stderr = stderr.decode("utf8")
stdout = stdout.decode("utf8")
except (UnicodeDecodeError, AttributeError):
stderr = str(stderr)
stdout = str(stdout)
logger.info("Finished metaSPAdes subprocess with STDOUT:\\n"
"======================================\\n{}".format(stdout))
logger.info("Fished metaSPAdes subprocesswith STDERR:\\n"
"======================================\\n{}".format(stderr))
logger.info("Finished metaSPAdes with return code: {}".format(
p.returncode))
with open(".status", "w") as fh:
if p.returncode != 0:
fh.write("error")
return
else:
fh.write("pass")
# Change the default contigs.fasta assembly name to a more informative one
if "_trim." in fastq_pair[0]:
sample_id += "_trim"
assembly_file = "{}_metaspades.fasta".format(
sample_id)
os.rename("contigs.fasta", assembly_file)
logger.info("Setting main assembly file to: {}".format(assembly_file))
# Remove input fastq files when clear option is specified.
# Only remove temporary input when the expected output exists.
if clear == "true" and os.path.exists(assembly_file):
clean_up(fastq_pair)
|
Returns a hash of the reports JSON file
|
def _get_report_id(self):
"""Returns a hash of the reports JSON file
"""
if self.watch:
# Searches for the first occurrence of the nextflow pipeline
# file name in the .nextflow.log file
pipeline_path = get_nextflow_filepath(self.log_file)
# Get hash from the entire pipeline file
pipeline_hash = hashlib.md5()
with open(pipeline_path, "rb") as fh:
for chunk in iter(lambda: fh.read(4096), b""):
pipeline_hash.update(chunk)
# Get hash from the current working dir and hostname
workdir = os.getcwd().encode("utf8")
hostname = socket.gethostname().encode("utf8")
hardware_addr = str(uuid.getnode()).encode("utf8")
dir_hash = hashlib.md5(workdir + hostname + hardware_addr)
return pipeline_hash.hexdigest() + dir_hash.hexdigest()
else:
with open(self.report_file) as fh:
report_json = json.loads(fh.read())
metadata = report_json["data"]["results"][0]["nfMetadata"]
try:
report_id = metadata["scriptId"] + metadata["sessionId"]
except KeyError:
raise eh.ReportError("Incomplete or corrupt report JSON file "
"missing the 'scriptId' and/or 'sessionId' "
"metadata information")
return report_id
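In watch mode the returned ID is the concatenation of two MD5 digests; a standalone sketch of that computation, with a hypothetical pipeline file path:

import hashlib
import os
import socket
import uuid


def watch_report_id(pipeline_path):
    """Return the MD5 of the pipeline file plus the MD5 of the run context."""
    pipeline_hash = hashlib.md5()
    with open(pipeline_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(4096), b""):
            pipeline_hash.update(chunk)
    dir_hash = hashlib.md5(os.getcwd().encode("utf8") +
                           socket.gethostname().encode("utf8") +
                           str(uuid.getnode()).encode("utf8"))
    return pipeline_hash.hexdigest() + dir_hash.hexdigest()


# watch_report_id("pipeline.nf") -> a 64-character hexadecimal string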
|
Parses the .nextflow.log file for signatures of pipeline status and sets the :attr:`status_info` attribute.
|
def _update_pipeline_status(self):
"""
Parses the .nextflow.log file for signatures of pipeline status and sets
the :attr:`status_info` attribute.
"""
prev_status = self.status_info
with open(self.log_file) as fh:
for line in fh:
if "Session aborted" in line:
self.status_info = "aborted"
self.send = True if prev_status != self.status_info \
else self.send
return
if "Execution complete -- Goodbye" in line:
self.status_info = "complete"
self.send = True if prev_status != self.status_info \
else self.send
return
self.status_info = "running"
self.send = True if prev_status != self.status_info \
else self.send
|
Parses the nextflow trace file and retrieves the path of report JSON files that have not been sent to the service yet.
|
def update_trace_watch(self):
"""Parses the nextflow trace file and retrieves the path of report JSON
files that have not been sent to the service yet.
"""
# Check the size stamp of the tracefile. Only proceed with the parsing
# if it changed from the previous size.
size_stamp = os.path.getsize(self.trace_file)
self.trace_retry = 0
if size_stamp and size_stamp == self.trace_sizestamp:
return
else:
logger.debug("Updating trace size stamp to: {}".format(size_stamp))
self.trace_sizestamp = size_stamp
with open(self.trace_file) as fh:
# Skip potential empty lines at the start of file
header = next(fh).strip()
while not header:
header = next(fh).strip()
# Get header mappings before parsing the file
hm = self._header_mapping(header)
for line in fh:
# Skip empty lines
if line.strip() == "":
continue
fields = line.strip().split("\t")
# Skip if task ID was already processed
if fields[hm["task_id"]] in self.stored_ids:
continue
if fields[hm["process"]] == "report":
self.report_queue.append(
self._expand_path(fields[hm["hash"]])
)
self.send = True
# Add the processed trace line to the stored ids. It will be
# skipped in future parsers
self.stored_ids.append(fields[hm["task_id"]])
|
Parses nextflow log file and updates the run status
|
def update_log_watch(self):
"""Parses nextflow log file and updates the run status
"""
# Check the size stamp of the log file. Only proceed with the parsing
# if it changed from the previous size.
size_stamp = os.path.getsize(self.log_file)
self.trace_retry = 0
if size_stamp and size_stamp == self.log_sizestamp:
return
else:
logger.debug("Updating log size stamp to: {}".format(size_stamp))
self.log_sizestamp = size_stamp
self._update_pipeline_status()
|
Sends a PUT request with the report JSON files currently in the report_queue attribute.
|
def _send_live_report(self, report_id):
"""Sends a PUT request with the report JSON files currently in the
report_queue attribute.
Parameters
----------
report_id : str
Hash of the report JSON as retrieved from :func:`~_get_report_hash`
"""
# Determines the maximum number of reports sent at the same time in
# the same payload
buffer_size = 100
logger.debug("Report buffer size set to: {}".format(buffer_size))
for i in range(0, len(self.report_queue), buffer_size):
# Reset the report compilation batch
reports_compilation = []
# Iterate over report JSON batches determined by buffer_size
for report in self.report_queue[i: i + buffer_size]:
try:
report_file = [x for x in os.listdir(report)
if x.endswith(".json")][0]
except IndexError:
continue
with open(join(report, report_file)) as fh:
reports_compilation.append(json.loads(fh.read()))
logger.debug("Payload sent with size: {}".format(
asizeof(json.dumps(reports_compilation))
))
logger.debug("status: {}".format(self.status_info))
try:
requests.put(
self.broadcast_address,
json={"run_id": report_id,
"report_json": reports_compilation,
"status": self.status_info}
)
except requests.exceptions.ConnectionError:
logger.error(colored_print(
"ERROR: Could not establish connection with server. The server"
" may be down or there is a problem with your internet "
"connection.", "red_bold"))
sys.exit(1)
# When there is no change in the report queue, but there is a change
# in the run status of the pipeline
if not self.report_queue:
logger.debug("status: {}".format(self.status_info))
try:
requests.put(
self.broadcast_address,
json={"run_id": report_id,
"report_json": [],
"status": self.status_info}
)
except requests.exceptions.ConnectionError:
logger.error(colored_print(
"ERROR: Could not establish connection with server. The"
" server may be down or there is a problem with your "
"internet connection.", "red_bold"))
sys.exit(1)
# Reset the report queue after sending the request
self.report_queue = []
|
Sends a POST request to initialize the live reports
|
def _init_live_reports(self, report_id):
"""Sends a POST request to initialize the live reports
Parameters
----------
report_id : str
Hash of the report JSON as retrieved from :func:`~_get_report_hash`
"""
logger.debug("Sending initial POST request to {} to start report live"
" update".format(self.broadcast_address))
try:
with open(".metadata.json") as fh:
metadata = [json.load(fh)]
except (IOError, json.JSONDecodeError):  # missing or malformed metadata file
metadata = []
start_json = {
"data": {"results": metadata}
}
try:
requests.post(
self.broadcast_address,
json={"run_id": report_id, "report_json": start_json,
"status": self.status_info}
)
except requests.exceptions.ConnectionError:
logger.error(colored_print(
"ERROR: Could not establish connection with server. The server"
" may be down or there is a problem with your internet "
"connection.", "red_bold"))
sys.exit(1)
|
Sends a delete request for the report JSON hash
|
def _close_connection(self, report_id):
"""Sends a delete request for the report JSON hash
Parameters
----------
report_id : str
Hash of the report JSON as retrieved from :func:`~_get_report_hash`
"""
logger.debug(
"Closing connection and sending DELETE request to {}".format(
self.broadcast_address))
try:
r = requests.delete(self.broadcast_address,
json={"run_id": report_id})
if r.status_code != 202:
logger.error(colored_print(
"ERROR: There was a problem sending data to the server"
"with reason: {}".format(r.reason)))
except requests.exceptions.ConnectionError:
logger.error(colored_print(
"ERROR: Could not establish connection with server. The server"
" may be down or there is a problem with your internet "
"connection.", "red_bold"))
sys.exit(1)
|
Generates an adapter file for FastQC from a fasta file.
|
def convert_adatpers(adapter_fasta):
"""Generates an adapter file for FastQC from a fasta file.
The provided adapters file is assumed to be a simple fasta file with the
adapter's name as header and the corresponding sequence::
>TruSeq_Universal_Adapter
AATGATACGGCGACCACCGAGATCTACACTCTTTCCCTACACGACGCTCTTCCGATCT
>TruSeq_Adapter_Index 1
GATCGGAAGAGCACACGTCTGAACTCCAGTCACATCACGATCTCGTATGCCGTCTTCTGCTTG
Parameters
----------
adapter_fasta : str
Path to Fasta file with adapter sequences.
Returns
-------
adapter_out : str or None
The path to the reformatted adapter file. Returns ``None`` if the
adapters file does not exist or the path is incorrect.
"""
adapter_out = "fastqc_adapters.tab"
logger.debug("Setting output adapters file to: {}".format(adapter_out))
try:
with open(adapter_fasta) as fh, \
open(adapter_out, "w") as adap_fh:
for line in fh:
if line.startswith(">"):
head = line[1:].strip()
# Get the next line with the sequence string
sequence = next(fh).strip()
adap_fh.write("{}\\t{}\\n".format(head, sequence))
logger.info("Converted adapters file")
return adapter_out
# If an invalid adapters file is provided, return None.
except FileNotFoundError:
logger.warning("Could not find the provided adapters file: {}".format(
adapter_fasta))
return
|
Main executor of the fastqc template.
|
def main(fastq_pair, adapter_file, cpus):
""" Main executor of the fastq template.
Parameters
----------
fastq_pair : list
Two element list containing the paired FastQ files.
adapter_file : str
Path to adapters file.
cpus : int or str
Number of CPUs that will be used by FastQC.
"""
logger.info("Starting fastqc")
# If an adapter file was provided, convert it to FastQC format
if os.path.exists(adapter_file):
logger.info("Adapters file provided: {}".format(adapter_file))
adapters = convert_adatpers(adapter_file)
else:
logger.info("Adapters file '{}' not provided or does not "
"exist".format(adapter_file))
adapters = None
# Setting command line for FastQC
cli = [
"fastqc",
"--extract",
"--nogroup",
"--format",
"fastq",
"--threads",
str(cpus)
]
# Add adapters file to command line, if it exists
if adapters:
cli += ["--adapters", "{}".format(adapters)]
# Add FastQ files at the end of command line
cli += fastq_pair
logger.debug("Running fastqc subprocess with command: {}".format(cli))
p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE, shell=False)
stdout, stderr = p.communicate()
# Attempt to decode STDERR and STDOUT output from bytes. If unsuccessful,
# coerce to string
try:
stderr = stderr.decode("utf8")
stdout = stdout.decode("utf8")
except (UnicodeDecodeError, AttributeError):
stderr = str(stderr)
stdout = str(stdout)
logger.info("Finished fastqc subprocess with STDOUT:\\n"
"======================================\\n{}".format(stdout))
logger.info("Fished fastqc subprocesswith STDERR:\\n"
"======================================\\n{}".format(stderr))
logger.info("Finished fastqc with return code: {}".format(
p.returncode))
logger.info("Checking if FastQC output was correctly generated")
# Check if the FastQC output was correctly generated.
with open(".status", "w") as status_fh:
for fastq in fastq_pair:
fpath = join(fastq.rsplit(".", 2)[0] + "_fastqc",
"fastqc_data.txt")
logger.debug("Checking path: {}".format(fpath))
# If the FastQC output does not exist, pass the STDERR to
# the output status channel and exit
if not exists(fpath):
logger.warning("Path does not exist: {}".format(fpath))
status_fh.write("fail")
return
logger.debug("Found path: {}".format(fpath))
# If the output directories exist, write 'pass' to the output status
# channel
status_fh.write("pass")
logger.info("Retrieving relevant FastQC output files")
# Both FastQC have been correctly executed. Get the relevant FastQC
# output files for the output channel
for i, fastq in enumerate(fastq_pair):
# Get results for each pair
fastqc_dir = fastq.rsplit(".", 2)[0] + "_fastqc"
summary_file = join(fastqc_dir, "summary.txt")
logger.debug("Retrieving summary file: {}".format(summary_file))
fastqc_data_file = join(fastqc_dir, "fastqc_data.txt")
logger.debug("Retrieving data file: {}".format(fastqc_data_file))
# Rename output files to a file name that is easier to handle in the
# output channel
os.rename(fastqc_data_file, "pair_{}_data".format(i + 1))
os.rename(summary_file, "pair_{}_summary".format(i + 1))
|
Send dictionary to output json file. This function sends the master_dict dictionary to a json file if master_dict is populated with entries, otherwise it won't create the file.
|
def send_to_output(master_dict, mash_output, sample_id, assembly_file):
"""Send dictionary to output json file
This function sends master_dict dictionary to a json file if master_dict is
populated with entries, otherwise it won't create the file
Parameters
----------
master_dict: dict
dictionary that stores all entries for a specific query sequence
in multi-fasta given to mash dist as input against patlas database
mash_output: str
the name/path of input file to main function, i.e., the name/path of
the mash dist output txt file.
sample_id: str
The name of the sample being parsed to the .report.json file
assembly_file: str
The name/path of the assembly file used as the mash dist query, which
is passed through to the report JSON.
Returns
-------
"""
plot_dict = {}
# create a new file only if master_dict is populated
if master_dict:
out_file = open("{}.json".format(
"".join(mash_output.split(".")[0])), "w")
out_file.write(json.dumps(master_dict))
out_file.close()
# iterate through master_dict in order to make contigs the keys
for k,v in master_dict.items():
if not v[2] in plot_dict:
plot_dict[v[2]] = [k]
else:
plot_dict[v[2]].append(k)
number_hits = len(master_dict)
else:
number_hits = 0
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [{
"header": "Mash Dist",
"table": "plasmids",
"patlas_mashdist": master_dict,
"value": number_hits
}]
}],
"plotData": [{
"sample": sample_id,
"data": {
"patlasMashDistXrange": plot_dict
},
"assemblyFile": assembly_file
}]
}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":")))
|
Main function that dumps a mash dist txt file to a json file
|
def main(mash_output, hash_cutoff, sample_id, assembly_file):
"""
Main function that dumps a mash dist txt file to a json file
Parameters
----------
mash_output: str
A string with the input file.
hash_cutoff: str
The minimum fraction of shared hashes between the query and a plasmid
in the database that is required for the plasmid to be reported in
the results outputs.
sample_id: str
The name of the sample.
assembly_file: str
The name/path of the assembly file used as the mash dist query.
"""
input_f = open(mash_output, "r")
master_dict = {}
for line in input_f:
tab_split = line.split("\t")
current_seq = tab_split[1].strip()
ref_accession = "_".join(tab_split[0].strip().split("_")[0:3])
mash_dist = tab_split[2].strip()
hashes_list = tab_split[-1].strip().split("/")
# creates a percentage of the shared hashes between the sample and the
# reference
perc_hashes = float(hashes_list[0]) / float(hashes_list[1])
# if ref_accession already in dict, i.e., if the same accession number
# matches more than one contig.
if ref_accession in master_dict.keys():
current_seq += ", {}".format(master_dict[ref_accession][-1])
# assures that only the hashes with a given shared percentage are
# reported to json file
if perc_hashes > float(hash_cutoff):
master_dict[ref_accession] = [
round(1 - float(mash_dist), 2),
round(perc_hashes, 2),
current_seq
]
input_f.close()
# After parsing all lines, write the results to the output files
send_to_output(master_dict, mash_output, sample_id, assembly_file)
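A minimal worked example of how a single (hypothetical) mash dist line is decomposed by the loop above:

line = "ACC_NC_019152_1\tcontig_12\t0.0291323\t0\t620/1000\n"
tab_split = line.split("\t")
ref_accession = "_".join(tab_split[0].strip().split("_")[0:3])  # 'ACC_NC_019152'
mash_dist = tab_split[2].strip()                                # '0.0291323'
hashes_list = tab_split[-1].strip().split("/")                  # ['620', '1000']
perc_hashes = float(hashes_list[0]) / float(hashes_list[1])     # 0.62
print(round(1 - float(mash_dist), 2), round(perc_hashes, 2))    # 0.97 0.62
# With hash_cutoff = "0.5" this entry would be stored as:
# {'ACC_NC_019152': [0.97, 0.62, 'contig_12']}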
|
Writes versions JSON for a template file
|
def build_versions(self):
"""Writes versions JSON for a template file
This method creates the JSON file ``.versions`` based on the metadata
and specific functions that are present in a given template script.
It starts by fetching the template metadata, which can be specified
via the ``__version__``, ``__template__`` and ``__build__``
attributes. If all of these attributes exist, it starts to populate
a JSON/dict array (Note that the absence of any one of them will
prevent the version from being written).
Then, it will search the
template scope for functions that start with the substring
``__get_version`` (for example, ``def __get_version_fastqc()``).
These functions should gather the version of
an arbitrary program and return a JSON/dict object with the following
information::
{
"program": <program_name>,
"version": <version>,
"build": <build>
}
This JSON/dict object is then written in the ``.versions`` file.
"""
version_storage = []
template_version = self.context.get("__version__", None)
template_program = self.context.get("__template__", None)
template_build = self.context.get("__build__", None)
if template_version and template_program and template_build:
if self.logger:
self.logger.debug("Adding template version: {}; {}; "
"{}".format(template_program,
template_version,
template_build))
version_storage.append({
"program": template_program,
"version": template_version,
"build": template_build
})
for var, obj in self.context.items():
if var.startswith("__get_version"):
ver = obj()
version_storage.append(ver)
if self.logger:
self.logger.debug("Found additional software version"
"{}".format(ver))
with open(".versions", "w") as fh:
fh.write(json.dumps(version_storage, separators=(",", ":")))
|
Converts top results from mash screen txt output to json format
|
def main(mash_output, sample_id):
'''
Converts top results from mash screen txt output to json format
Parameters
----------
mash_output: str
A string that stores the path to the mash screen output file.
sample_id: str
sample name
'''
logger.info("Reading file : {}".format(mash_output))
read_mash_output = open(mash_output)
dic = {}
median_list = []
filtered_dic = {}
logger.info("Generating dictionary and list to pre-process the final json")
for line in read_mash_output:
tab_split = line.split("\t")
identity = tab_split[0]
# shared_hashes = tab_split[1]
median_multiplicity = tab_split[2]
# p_value = tab_split[3]
query_id = tab_split[4]
# query-comment should not exist here and it is irrelevant
# here identity is what in fact interests to report to json but
# median_multiplicity also is important since it gives an rough
# estimation of the coverage depth for each plasmid.
# Plasmids should have higher coverage depth due to their increased
# copy number in relation to the chromosome.
dic[query_id] = [identity, median_multiplicity]
median_list.append(float(median_multiplicity))
output_json = open(" ".join(mash_output.split(".")[:-1]) + ".json", "w")
# The median cutoff is the median of all median_multiplicity values
# reported by mash screen. In the case of plasmids, since the database
# has around 9k entries and reads shouldn't match that many sequences,
# this seems a reasonable threshold.
if len(median_list) > 0:
# this statement assures that median_list has indeed any entries
median_cutoff = median(median_list)
logger.info("Generating final json to dump to a file")
for k, v in dic.items():
# estimated copy number
copy_number = int(float(v[1]) / median_cutoff)
# assure that the plasmid has a coverage depth above the median cutoff
if float(v[1]) > median_cutoff:
filtered_dic["_".join(k.split("_")[0:3])] = [
round(float(v[0]),2),
copy_number
]
logger.info(
"Exported dictionary has {} entries".format(len(filtered_dic)))
else:
# if no entries were found raise an error
logger.error("No matches were found using mash screen for the queried reads")
output_json.write(json.dumps(filtered_dic))
output_json.close()
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [{
"header": "Mash Screen",
"table": "plasmids",
"patlas_mashscreen": filtered_dic,
"value": len(filtered_dic)
}]
}],
}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":")))
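A small worked example of the median cutoff filtering and copy number estimate above, with hypothetical accession names and values:

from statistics import median

# identity and median_multiplicity, as parsed from mash screen output
dic = {"ACC1_x_y_extra": ["0.999", "40"],
       "ACC2_x_y_extra": ["0.950", "4"]}
median_cutoff = median([40.0, 4.0])  # 22.0
filtered = {}
for k, v in dic.items():
    copy_number = int(float(v[1]) / median_cutoff)
    if float(v[1]) > median_cutoff:  # only well-covered entries pass
        filtered["_".join(k.split("_")[0:3])] = [round(float(v[0]), 2),
                                                 copy_number]
print(filtered)  # {'ACC1_x_y': [1.0, 1]}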
|
This function enables users to add a color to a message string before it is printed.
|
def colored_print(msg, color_label="white_bold"):
"""
This function enables users to add a color to a message string before
it is printed.
Parameters
----------
msg: str
The actual text to be printed
color_label: str
The label of the color to use. It is first looked up in the COLORS
dictionary; if it is not found there, it is used directly as the ANSI
color code, which enables color change as well as background color
change.
"""
if sys.stdout.encoding != "UTF-8":
msg = "".join([i if ord(i) < 128 else "" for i in msg])
# try except first looks for the color in COLORS dictionary, otherwise use
# color_label as the color.
try:
col = COLORS[color_label]
except KeyError:
col = color_label
return "\x1b[{}{}\x1b[0m".format(col, msg)
|
This function handles the dictionary of attributes of each Process class to print to stdout lists of all the components or the components which the user specifies in the -t flag.
|
def procs_dict_parser(procs_dict):
"""
This function handles the dictionary of attributes of each Process class
to print to stdout lists of all the components or the components which the
user specifies in the -t flag.
Parameters
----------
procs_dict: dict
A dictionary with the class attributes for all the components (or
components that are used by the -t flag), that allow to create
both the short_list and detailed_list. Dictionary example:
{"abyss": {'input_type': 'fastq', 'output_type': 'fasta',
'dependencies': [], 'directives': {'abyss': {'cpus': 4,
'memory': '{ 5.GB * task.attempt }', 'container': 'flowcraft/abyss',
'version': '2.1.1', 'scratch': 'true'}}}
"""
logger.info(colored_print(
"\n===== L I S T O F P R O C E S S E S =====\n", "green_bold"))
# Sort to print an alphabetically ordered list of processes to ease reading
procs_dict_ordered = {k: procs_dict[k] for k in sorted(procs_dict)}
for template, dict_proc_info in procs_dict_ordered.items():
template_str = "=> {}".format(template)
logger.info(colored_print(template_str, "blue_bold"))
for info in dict_proc_info:
info_str = "{}:".format(info)
if isinstance(dict_proc_info[info], list):
if not dict_proc_info[info]:
arg_msg = "None"
else:
arg_msg = ", ".join(dict_proc_info[info])
elif info == "directives":
# this is used for the "directives", which is a dict
if not dict_proc_info[info]:
# if dict is empty then add None to the message
arg_msg = "None"
else:
# otherwise fetch all template names within a component
# and all the directives for each template to a list
list_msg = ["\n {}: {}".format(
templt,
" , ".join(["{}: {}".format(dr, val)
for dr, val in drs.items()]))
for templt, drs in dict_proc_info[info].items()
]
# write list to a str
arg_msg = "".join(list_msg)
else:
arg_msg = dict_proc_info[info]
logger.info(" {} {}".format(
colored_print(info_str, "white_underline"), arg_msg
))
|
Function that collects all processes available and stores a dictionary of the required arguments of each process class to be passed to procs_dict_parser
|
def proc_collector(process_map, args, pipeline_string):
"""
Function that collects all processes available and stores a dictionary of
the required arguments of each process class to be passed to
procs_dict_parser
Parameters
----------
process_map: dict
The dictionary with the Processes currently available in flowcraft
and their corresponding classes as values
args: argparse.Namespace
The arguments passed through argparse that will be accessed to check
the type of list to be printed
pipeline_string: str
the pipeline string
"""
arguments_list = []
# prints a detailed list of the process class arguments
if args.detailed_list:
# list of attributes to be passed to proc_collector
arguments_list += [
"input_type",
"output_type",
"description",
"dependencies",
"conflicts",
"directives"
]
# prints a short list with each process and the corresponding description
if args.short_list:
arguments_list += [
"description"
]
if arguments_list:
# dict to store only the required entries
procs_dict = {}
# loops between all process_map Processes
for name, cls in process_map.items():
# instantiates each Process class
cls_inst = cls(template=name)
# checks if recipe is provided
if pipeline_string:
if name not in pipeline_string:
continue
d = {arg_key: vars(cls_inst)[arg_key] for arg_key in
vars(cls_inst) if arg_key in arguments_list}
procs_dict[name] = d
procs_dict_parser(procs_dict)
sys.exit(0)
|
Guesses the compression of an input file.
|
def guess_file_compression(file_path, magic_dict=None):
"""Guesses the compression of an input file.
This function guesses the compression of a given file by checking for
a binary signature at the beginning of the file. These signatures are
stored in the :py:data:`MAGIC_DICT` dictionary. The supported compression
formats are gzip, bzip2 and zip. If none of the signatures in this
dictionary are found at the beginning of the file, it returns ``None``.
Parameters
----------
file_path : str
Path to input file.
magic_dict : dict, optional
Dictionary containing the signatures of the compression types. The
key should be the binary signature and the value should be the
compression format. If left ``None``, it falls back to
:py:data:`MAGIC_DICT`.
Returns
-------
file_type : str or None
If a compression type is detected, returns a string with the format.
If not, returns ``None``.
"""
if not magic_dict:
magic_dict = MAGIC_DICT
max_len = max(len(x) for x in magic_dict)
with open(file_path, "rb") as f:
file_start = f.read(max_len)
logger.debug("Binary signature start: {}".format(file_start))
for magic, file_type in magic_dict.items():
if file_start.startswith(magic):
return file_type
return None
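
A minimal sketch of the signature check. The dictionary below is an assumed example that mirrors the structure expected for MAGIC_DICT (prefix bytes mapped to a format name), and the snippet assumes it runs in the same module as the function above, where the logger is already defined.

import gzip

# Assumed example signatures, following the MAGIC_DICT structure
magic = {
    b"\x1f\x8b\x08": "gz",       # gzip
    b"\x42\x5a\x68": "bz2",      # bzip2
    b"\x50\x4b\x03\x04": "zip",  # zip
}

with gzip.open("sample_1.fq.gz", "wt") as fh:
    fh.write("@read_1\nACGT\n+\nIIII\n")

print(guess_file_compression("sample_1.fq.gz", magic_dict=magic))  # -> "gz"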
|
Get the Unicode code range for a given string of characters.
|
def get_qual_range(qual_str):
""" Get range of the Unicode encode range for a given string of characters.
The encoding is determined from the result of the :py:func:`ord` built-in.
Parameters
----------
qual_str : str
Arbitrary string.
Returns
-------
x : tuple
(Minimum Unicode code, Maximum Unicode code).
"""
vals = [ord(c) for c in qual_str]
return min(vals), max(vals)
|
Returns the valid encodings for a given encoding range.
|
def get_encodings_in_range(rmin, rmax):
""" Returns the valid encodings for a given encoding range.
The encoding ranges are stored in the :py:data:`RANGES` dictionary, with
the encoding name as a string and a list as a value containing the
phred score and a tuple with the encoding range. For a given encoding
range provided via the two first arguments, this function will return
all possible encodings and phred scores.
Parameters
----------
rmin : int
Minimum Unicode code in range.
rmax : int
Maximum Unicode code in range.
Returns
-------
valid_encodings : list
List of all possible encodings for the provided range.
valid_phred : list
List of all possible phred scores.
"""
valid_encodings = []
valid_phred = []
for encoding, (phred, (emin, emax)) in RANGES.items():
if rmin >= emin and rmax <= emax:
valid_encodings.append(encoding)
valid_phred.append(phred)
return valid_encodings, valid_phred
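
A small sketch of how the two helpers combine into the encoding guess. The RANGES mapping below is an assumed example of the expected structure ({name: [phred_offset, (min_code, max_code)]}); the real dictionary covers more encodings, so the printed result holds for the assumed RANGES only.

# Assumed example of the RANGES structure
RANGES = {
    "Sanger": [33, (33, 73)],
    "Illumina-1.8": [33, (33, 74)],
    "Illumina-1.5": [64, (67, 104)],
}

# Quality string from a hypothetical FastQ record
rmin, rmax = get_qual_range("##IIIIHHHHFFFF")
print(rmin, rmax)                          # 35 73
print(get_encodings_in_range(rmin, rmax))  # (['Sanger', 'Illumina-1.8'], [33, 33])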
|
Main executor of the integrity_coverage template.
|
def main(sample_id, fastq_pair, gsize, minimum_coverage, opts):
""" Main executor of the integrity_coverage template.
Parameters
----------
sample_id : str
Sample Identification string.
fastq_pair : list
Two element list containing the paired FastQ files.
gsize : float or int
Estimate of genome size in Mb.
minimum_coverage : float or int
Minimum coverage required for a sample to pass the coverage check
opts : list
List of arbitrary options. See `Expected input`_.
"""
logger.info("Starting integrity coverage main")
# Check for runtime options
if "-e" in opts:
skip_encoding = True
else:
skip_encoding = False
# Information for encoding guess
gmin, gmax = 99, 0
encoding = []
phred = None
# Information for coverage estimation
chars = 0
nreads = 0
# Information on maximum read length
max_read_length = 0
# Get compression of each FastQ pair file
file_objects = []
for fastq in fastq_pair:
logger.info("Processing file {}".format(fastq))
logger.info("[{}] Guessing file compression".format(fastq))
ftype = guess_file_compression(fastq)
# This can guess the compression of gz, bz2 and zip. If it cannot
# find the compression type, it tries to open a regular file
if ftype:
logger.info("[{}] Found file compression: {}".format(
fastq, ftype))
file_objects.append(COPEN[ftype](fastq, "rt"))
else:
logger.info("[{}] File compression not found. Assuming an "
"uncompressed file".format(fastq))
file_objects.append(open(fastq))
logger.info("Starting FastQ file parsing")
# The '*_encoding' file stores a string with the encoding ('Sanger')
# If no encoding is guessed, 'None' should be stored
# The '*_phred' file stores a string with the phred score ('33')
# If no phred is guessed, 'None' should be stored
# The '*_coverage' file stores the estimated coverage ('88')
# The '*_report' file stores a csv report of the file
    # The '*_max_len' file stores a string with the maximum read length ('155')
with open("{}_encoding".format(sample_id), "w") as enc_fh, \
open("{}_phred".format(sample_id), "w") as phred_fh, \
open("{}_coverage".format(sample_id), "w") as cov_fh, \
open("{}_report".format(sample_id), "w") as cov_rep, \
open("{}_max_len".format(sample_id), "w") as len_fh, \
open(".report.json", "w") as json_report, \
open(".status", "w") as status_fh, \
open(".fail", "w") as fail_fh:
try:
# Iterate over both pair files sequentially using itertools.chain
for i, line in enumerate(chain(*file_objects)):
# Parse only every 4th line of the file for the encoding
# e.g.: AAAA/EEEEEEEEEEE<EEEEEEEEEEEEEEEEEEEEEEEEE (...)
if (i + 1) % 4 == 0 and not skip_encoding:
# It is important to strip() the line so that any newline
# character is removed and not accounted for in the
# encoding guess
lmin, lmax = get_qual_range(line.strip())
                    # Guess a new encoding if the range extends beyond the
                    # previously set boundaries of gmin and gmax
if lmin < gmin or lmax > gmax:
gmin, gmax = min(lmin, gmin), max(lmax, gmax)
encoding, phred = get_encodings_in_range(gmin, gmax)
logger.debug(
"Updating estimates at line {} with range {} to"
" '{}' (encoding) and '{}' (phred)".format(
i, [lmin, lmax], encoding, phred))
# Parse only every 2nd line of the file for the coverage
# e.g.: GGATAATCTACCTTGACGATTTGTACTGGCGTTGGTTTCTTA (...)
if (i + 3) % 4 == 0:
read_len = len(line.strip())
chars += read_len
nreads += 1
# Evaluate maximum read length for sample
if read_len > max_read_length:
logger.debug("Updating maximum read length at line "
"{} to {}".format(i, read_len))
max_read_length = read_len
# End of FastQ parsing
logger.info("Finished FastQ file parsing")
            # The estimated coverage of the sample, later compared with
            # minimum_coverage
exp_coverage = round(chars / (gsize * 1e6), 2)
# Set json report
if "-e" not in opts:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Raw BP",
"value": chars,
"table": "qc",
"columnBar": True},
{"header": "Reads",
"value": nreads,
"table": "qc",
"columnBar": True},
{"header": "Coverage",
"value": exp_coverage,
"table": "qc",
"columnBar": True,
"failThreshold": minimum_coverage
}
]
}],
"plotData": [{
"sample": sample_id,
"data": {
"sparkline": chars
}
}],
}
else:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Coverage",
"value": exp_coverage,
"table": "qc",
"columnBar": True,
"failThreshold": minimum_coverage
}
],
}],
}
# Get encoding
if len(encoding) > 0:
encoding = set(encoding)
phred = set(phred)
# Get encoding and phred as strings
# e.g. enc: Sanger, Illumina-1.8
# e.g. phred: 64
enc = "{}".format(",".join([x for x in encoding]))
phred = "{}".format(",".join(str(x) for x in phred))
logger.info("Encoding set to {}".format(enc))
logger.info("Phred set to {}".format(enc))
enc_fh.write(enc)
phred_fh.write(phred)
# Encoding not found
else:
if not skip_encoding:
encoding_msg = "Could not guess encoding and phred from " \
"FastQ"
logger.warning(encoding_msg)
json_dic["warnings"] = [{
"sample": sample_id,
"table": "qc",
"value": [encoding_msg]
}]
enc_fh.write("None")
phred_fh.write("None")
# Estimate coverage
logger.info("Estimating coverage based on a genome size of "
"{}".format(gsize))
logger.info("Expected coverage is {}".format(exp_coverage))
if exp_coverage >= minimum_coverage:
cov_rep.write("{},{},{}\\n".format(
sample_id, str(exp_coverage), "PASS"))
cov_fh.write(str(exp_coverage))
status_fh.write("pass")
# Estimated coverage does not pass minimum threshold
else:
fail_msg = "Sample with low coverage ({}), below the {} " \
"threshold".format(exp_coverage, minimum_coverage)
logger.error(fail_msg)
fail_fh.write(fail_msg)
cov_fh.write("fail")
status_fh.write("fail")
cov_rep.write("{},{},{}\\n".format(
sample_id, str(exp_coverage), "FAIL"))
json_dic["fail"] = [{
"sample": sample_id,
"table": "qc",
"value": [fail_msg]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
# Maximum read length
len_fh.write("{}".format(max_read_length))
# This exception is raised when the input FastQ files are corrupted
except EOFError:
logger.error("The FastQ files could not be correctly "
"parsed. They may be corrupt")
for fh in [enc_fh, phred_fh, cov_fh, cov_rep, len_fh]:
fh.write("corrupt")
status_fh.write("fail")
fail_fh.write("Could not read/parse FastQ. "
"Possibly corrupt file")
|
Parses a file with coverage information into objects.
|
def parse_coverage_table(coverage_file):
"""Parses a file with coverage information into objects.
This function parses a TSV file containing coverage results for
all contigs in a given assembly and will build an ``OrderedDict``
with the information about their coverage and length. The length
information is actually gathered from the contig header using a
regular expression that assumes the usual header produced by Spades::
contig_len = int(re.search("length_(.+?)_", line).group(1))
Parameters
----------
coverage_file : str
Path to TSV file containing the coverage results.
Returns
-------
coverage_dict : OrderedDict
Contains the coverage and length information for each contig.
total_cov : int
Sum of coverage values across all contigs.
"""
# Stores the correspondence between a contig and the corresponding coverage
# e.g.: {"contig_1": {"cov": 424} }
coverage_dict = OrderedDict()
# Stores the total coverage
total_cov = 0
with open(coverage_file) as fh:
for line in fh:
# Get contig and coverage
contig, cov = line.strip().split()
coverage_dict[contig] = {"cov": int(cov)}
# Add total coverage
total_cov += int(cov)
logger.debug("Processing contig '{}' with coverage '{}'"
"".format(contig, cov))
return coverage_dict, total_cov
|
Generates a filtered assembly file.
|
def filter_assembly(assembly_file, minimum_coverage, coverage_info,
output_file):
"""Generates a filtered assembly file.
This function generates a filtered assembly file based on an original
assembly and a minimum coverage threshold.
Parameters
----------
assembly_file : str
Path to original assembly file.
minimum_coverage : int or float
Minimum coverage required for a contig to pass the filter.
coverage_info : OrderedDict or dict
Dictionary containing the coverage information for each contig.
output_file : str
Path where the filtered assembly file will be generated.
"""
# This flag will determine whether sequence data should be written or
# ignored because the current contig did not pass the minimum
# coverage threshold
write_flag = False
with open(assembly_file) as fh, open(output_file, "w") as out_fh:
for line in fh:
if line.startswith(">"):
# Reset write_flag
write_flag = False
# Get header of contig
header = line.strip()[1:]
# Check coverage for current contig
contig_cov = coverage_info[header]["cov"]
# If the contig coverage is above the threshold, write to
# output filtered assembly
if contig_cov >= minimum_coverage:
write_flag = True
out_fh.write(line)
elif write_flag:
out_fh.write(line)
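
A minimal sketch of the contig filter on a hypothetical two-contig assembly with a coverage threshold of 10; only the contig above the threshold is kept.

# Hypothetical per-contig coverage and a toy assembly file
coverage_info = {"contig_1": {"cov": 30}, "contig_2": {"cov": 3}}

with open("assembly.fasta", "w") as fh:
    fh.write(">contig_1\nACGTACGT\n>contig_2\nTTTT\n")

filter_assembly("assembly.fasta", 10, coverage_info, "assembly_filt.fasta")

with open("assembly_filt.fasta") as fh:
    print(fh.read())  # only contig_1 and its sequence remain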
|
Uses Samtools to filter a BAM file according to minimum coverage
|
def filter_bam(coverage_info, bam_file, min_coverage, output_bam):
"""Uses Samtools to filter a BAM file according to minimum coverage
Provided with a minimum coverage value, this function will use Samtools
to filter a BAM file. This is performed to apply the same filter to
the BAM file as the one applied to the assembly file in
:py:func:`filter_assembly`.
Parameters
----------
coverage_info : OrderedDict or dict
Dictionary containing the coverage information for each contig.
bam_file : str
Path to the BAM file.
min_coverage : int
Minimum coverage required for a contig to pass the filter.
output_bam : str
Path to the generated filtered BAM file.
"""
# Get list of contigs that will be kept
contig_list = [x for x, vals in coverage_info.items()
if vals["cov"] >= min_coverage]
cli = [
"samtools",
"view",
"-bh",
"-F",
"4",
"-o",
output_bam,
"-@",
"1",
bam_file,
]
cli += contig_list
logger.debug("Runnig samtools view subprocess with command: {}".format(
cli))
p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
# Attempt to decode STDERR output from bytes. If unsuccessful, coerce to
# string
try:
stderr = stderr.decode("utf8")
stdout = stdout.decode("utf8")
except (UnicodeDecodeError, AttributeError):
stderr = str(stderr)
stdout = str(stdout)
logger.info("Finished samtools view subprocess with STDOUT:\\n"
"======================================\\n{}".format(stdout))
logger.info("Fished samtools view subprocesswith STDERR:\\n"
"======================================\\n{}".format(stderr))
logger.info("Finished samtools view with return code: {}".format(
p.returncode))
if not p.returncode:
# Create index
cli = [
"samtools",
"index",
output_bam
]
logger.debug("Runnig samtools index subprocess with command: "
"{}".format(cli))
p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
try:
stderr = stderr.decode("utf8")
stdout = stdout.decode("utf8")
except (UnicodeDecodeError, AttributeError):
stderr = str(stderr)
stdout = str(stdout)
logger.info("Finished samtools index subprocess with STDOUT:\\n"
"======================================\\n{}".format(
stdout))
logger.info("Fished samtools index subprocesswith STDERR:\\n"
"======================================\\n{}".format(
stderr))
logger.info("Finished samtools index with return code: {}".format(
p.returncode))
|
Checks whether a filtered assembly passes a size threshold
|
def check_filtered_assembly(coverage_info, coverage_bp, minimum_coverage,
genome_size, contig_size, max_contigs,
sample_id):
"""Checks whether a filtered assembly passes a size threshold
Given a minimum coverage threshold, this function evaluates whether an
assembly will pass the minimum threshold of ``genome_size * 1e6 * 0.8``,
which means 80% of the expected genome size or the maximum threshold
of ``genome_size * 1e6 * 1.5``, which means 150% of the expected genome
size. It will issue a warning if any of these thresholds is crossed.
    If the assembly size falls below 80% of the expected genome size, it returns False.
Parameters
----------
coverage_info : OrderedDict or dict
Dictionary containing the coverage information for each contig.
coverage_bp : dict
Dictionary containing the per base coverage information for each
contig. Used to determine the total number of base pairs in the
final assembly.
minimum_coverage : int
Minimum coverage required for a contig to pass the filter.
genome_size : int
Expected genome size.
contig_size : dict
        Dictionary with the length of each contig. Contig headers as keys and
        the corresponding length as values.
max_contigs : int
Maximum threshold for contig number. A warning is issued if this
threshold is crossed.
sample_id : str
Id or name of the current sample
Returns
-------
x : bool
True if the filtered assembly size is higher than 80% of the
expected genome size.
"""
# Get size of assembly after filtering contigs below minimum_coverage
assembly_len = sum([v for k, v in contig_size.items()
if coverage_info[k]["cov"] >= minimum_coverage])
logger.debug("Assembly length after filtering for minimum coverage of"
" {}: {}".format(minimum_coverage, assembly_len))
# Get number of contigs after filtering
ncontigs = len([x for x in coverage_info.values()
if x["cov"] >= minimum_coverage])
logger.debug("Number of contigs: {}".format(ncontigs))
# Get number of bp after filtering
filtered_contigs = [k for k, v in coverage_info.items()
if v["cov"] >= minimum_coverage]
logger.debug("Filtered contigs for minimum coverage of "
"{}: {}".format(minimum_coverage, filtered_contigs))
total_assembled_bp = sum([sum(coverage_bp[x]) for x in filtered_contigs
if x in coverage_bp])
logger.debug("Total number of assembled base pairs:"
"{}".format(total_assembled_bp))
warnings = []
fails = []
health = True
with open(".warnings", "w") as warn_fh, \
open(".report.json", "w") as json_report:
logger.debug("Checking assembly size after filtering : {}".format(
assembly_len))
# If the filtered assembly size is above the 150% genome size
# threshold, issue a warning
if assembly_len > genome_size * 1e6 * 1.5:
warn_msg = "Assembly size ({}) smaller than the maximum" \
" threshold of 150% of expected genome size.".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails.append("Large_genome_size_({})".format(assembly_len))
# If the number of contigs in the filtered assembly size crosses the
# max_contigs threshold, issue a warning
logger.debug("Checking number of contigs: {}".format(
len(coverage_info)))
contig_threshold = max_contigs * genome_size / 1.5
if ncontigs > contig_threshold:
warn_msg = "The number of contigs ({}) exceeds the threshold of " \
"100 contigs per 1.5Mb ({})".format(
ncontigs, round(contig_threshold, 1))
logger.warning(warn_msg)
warn_fh.write(warn_msg)
warnings.append(warn_msg)
# If the filtered assembly size falls below the 80% genome size
# threshold, fail this check and return False
if assembly_len < genome_size * 1e6 * 0.8:
warn_msg = "Assembly size smaller than the minimum" \
" threshold of 80% of expected genome size: {}".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails.append("Small_genome_size_({})".format(assembly_len))
assembly_len = sum([v for v in contig_size.values()])
total_assembled_bp = sum(
[sum(coverage_bp[x]) for x in coverage_info if x in
coverage_bp])
logger.debug("Assembly length without coverage filtering: "
"{}".format(assembly_len))
logger.debug("Total number of assembled base pairs without"
" filtering: {}".format(total_assembled_bp))
health = False
json_dic = {
"plotData": [{
"sample": sample_id,
"data": {
"sparkline": total_assembled_bp
}
}]
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "assembly",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "assembly",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
return health
|
Evaluates the minimum coverage threshold from the value provided in the coverage_opt.
|
def evaluate_min_coverage(coverage_opt, assembly_coverage, assembly_size):
""" Evaluates the minimum coverage threshold from the value provided in
the coverage_opt.
Parameters
----------
coverage_opt : str or int or float
If set to "auto" it will try to automatically determine the coverage
to 1/3 of the assembly size, to a minimum value of 10. If it set
to a int or float, the specified value will be used.
assembly_coverage : int or float
The average assembly coverage for a genome assembly. This value
is retrieved by the `:py:func:parse_coverage_table` function.
assembly_size : int
The size of the genome assembly. This value is retrieved by the
`py:func:get_assembly_size` function.
Returns
-------
x: int
Minimum coverage threshold.
"""
if coverage_opt == "auto":
        # Get roughly 1/3 (0.3) of the mean assembly coverage
min_coverage = (assembly_coverage / assembly_size) * .3
logger.info("Minimum assembly coverage automatically set to: "
"{}".format(min_coverage))
# If the 1/3 coverage is lower than 10, change it to the minimum of
# 10
if min_coverage < 10:
logger.info("Minimum assembly coverage cannot be set to lower"
" that 10. Setting to 10")
min_coverage = 10
else:
min_coverage = int(coverage_opt)
logger.info("Minimum assembly coverage manually set to: {}".format(
min_coverage))
return min_coverage
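
A minimal sketch of the threshold logic (assumes it runs in the same module, where the logger is already defined): with "auto" the threshold is 0.3 * (assembly_coverage / assembly_size), floored at 10, otherwise the provided value is used directly.

print(evaluate_min_coverage("auto", 500000, 5000))  # 0.3 * 100 -> 30.0
print(evaluate_min_coverage("auto", 100000, 5000))  # 0.3 * 20 = 6 -> floored to 10
print(evaluate_min_coverage(15, 500000, 5000))      # -> 15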
|
Returns the number of nucleotides and the size per contig for the provided assembly file path
|
def get_assembly_size(assembly_file):
"""Returns the number of nucleotides and the size per contig for the
provided assembly file path
Parameters
----------
assembly_file : str
Path to assembly file.
Returns
-------
assembly_size : int
Size of the assembly in nucleotides
contig_size : dict
Length of each contig (contig name as key and length as value)
"""
assembly_size = 0
contig_size = {}
header = ""
with open(assembly_file) as fh:
for line in fh:
# Skip empty lines
if line.strip() == "":
continue
if line.startswith(">"):
header = line.strip()[1:]
contig_size[header] = 0
else:
line_len = len(line.strip())
assembly_size += line_len
contig_size[header] += line_len
return assembly_size, contig_size
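
A minimal sketch with a hypothetical two-contig FASTA file, one sequence split over two lines.

with open("toy_assembly.fasta", "w") as fh:
    fh.write(">contig_1\nACGTACGT\nACGT\n>contig_2\nTTTTT\n")

size, contig_size = get_assembly_size("toy_assembly.fasta")
print(size)         # 17
print(contig_size)  # {'contig_1': 12, 'contig_2': 5}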
|
Main executor of the process_assembly_mapping template.
|
def main(sample_id, assembly_file, coverage_file, coverage_bp_file, bam_file,
opts, gsize):
"""Main executor of the process_assembly_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to assembly file in Fasta format.
coverage_file : str
Path to TSV file with coverage information for each contig.
coverage_bp_file : str
Path to TSV file with coverage information for each base.
bam_file : str
Path to BAM file.
opts : list
List of options for processing assembly mapping.
gsize : int
Expected genome size
"""
min_assembly_coverage, max_contigs = opts
logger.info("Starting assembly mapping processing")
# Get coverage info, total size and total coverage from the assembly
logger.info("Parsing coverage table")
coverage_info, a_cov = parse_coverage_table(coverage_file)
a_size, contig_size = get_assembly_size(assembly_file)
logger.info("Assembly processed with a total size of '{}' and coverage"
" of '{}'".format(a_size, a_cov))
# Get number of assembled bp after filters
logger.info("Parsing coverage per bp table")
coverage_bp_data = get_coverage_from_file(coverage_bp_file)
# Assess the minimum assembly coverage
min_coverage = evaluate_min_coverage(min_assembly_coverage, a_cov, a_size)
# Check if filtering the assembly using the provided min_coverage will
# reduce the final bp number to less than 80% of the estimated genome
# size.
# If the check below passes with True, then the filtered assembly
# is above the 80% genome size threshold.
filtered_assembly = "{}_filt.fasta".format(
os.path.splitext(assembly_file)[0])
filtered_bam = "filtered.bam"
logger.info("Checking filtered assembly")
if check_filtered_assembly(coverage_info, coverage_bp_data, min_coverage,
gsize, contig_size, int(max_contigs),
sample_id):
# Filter assembly contigs based on the minimum coverage.
logger.info("Filtered assembly passed minimum size threshold")
logger.info("Writting filtered assembly")
filter_assembly(assembly_file, min_coverage, coverage_info,
filtered_assembly)
logger.info("Filtering BAM file according to saved contigs")
filter_bam(coverage_info, bam_file, min_coverage, filtered_bam)
# Could not filter the assembly as it would drop below acceptable
# length levels. Copy the original assembly to the output assembly file
# for compliance with the output channel
else:
shutil.copy(assembly_file, filtered_assembly)
shutil.copy(bam_file, filtered_bam)
shutil.copy(bam_file + ".bai", filtered_bam + ".bai")
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
Main executor of the process_spades template.
|
def main(sample_id, assembly_file, gsize, opts, assembler):
"""Main executor of the process_spades template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to the assembly file generated by Spades.
gsize : int
Estimate of genome size.
opts : list
List of options for processing spades assembly.
assembler : str
Name of the assembler, for logging purposes
"""
logger.info("Starting assembly file processing")
warnings = []
fails = ""
min_contig_len, min_kmer_cov, max_contigs = [int(x) for x in opts]
logger.debug("Setting minimum conting length to: {}".format(
min_contig_len))
logger.debug("Setting minimum kmer coverage: {}".format(min_kmer_cov))
# Parse the spades assembly file and perform the first filtering.
logger.info("Starting assembly parsing")
assembly_obj = Assembly(assembly_file, min_contig_len, min_kmer_cov,
sample_id)
with open(".warnings", "w") as warn_fh:
t_80 = gsize * 1000000 * 0.8
t_150 = gsize * 1000000 * 1.5
# Check if assembly size of the first assembly is lower than 80% of the
# estimated genome size. If True, redo the filtering without the
# k-mer coverage filter
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking assembly length: {}".format(assembly_len))
if assembly_len < t_80:
logger.warning("Assembly size ({}) smaller than the minimum "
"threshold of 80% of expected genome size. "
"Applying contig filters without the k-mer "
"coverage filter".format(assembly_len))
assembly_obj.filter_contigs(*[
["length", ">=", min_contig_len]
])
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking updated assembly length: "
"{}".format(assembly_len))
if assembly_len < t_80:
warn_msg = "Assembly size smaller than the minimum" \
" threshold of 80% of expected genome size: {}".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len > t_150:
warn_msg = "Assembly size ({}) larger than the maximum" \
" threshold of 150% of expected genome size.".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
logger.debug("Checking number of contigs: {}".format(
len(assembly_obj.contigs)))
contig_threshold = (max_contigs * gsize) / 1.5
if len(assembly_obj.contigs) > contig_threshold:
warn_msg = "The number of contigs ({}) exceeds the threshold of " \
"{} contigs per 1.5Mb ({})".format(
len(assembly_obj.contigs),
max_contigs,
round(contig_threshold, 1))
logger.warning(warn_msg)
warn_fh.write(warn_msg)
warnings.append(warn_msg)
# Write filtered assembly
logger.debug("Renaming old assembly file to: {}".format(
"{}.old".format(assembly_file)))
assembly_obj.write_assembly("{}_proc.fasta".format(
os.path.splitext(assembly_file)[0]))
# Write report
output_report = "{}.report.csv".format(sample_id)
assembly_obj.write_report(output_report)
# Write json report
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs ({})".format(assembler),
"value": len(assembly_obj.contigs),
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP ({})".format(assembler),
"value": assembly_len,
"table": "assembly",
"columnBar": True}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "assembly",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "assembly",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
Converts a CamelCase string into a snake_case one
|
def convert_camel_case(name):
"""Convers a CamelCase string into a snake_case one
Parameters
----------
name : str
An arbitrary string that may be CamelCase
Returns
-------
str
The input string converted into snake_case
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
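
A short sketch of the conversion with hypothetical class names; this is how component class names are mapped to the template names used in pipeline strings.

print(convert_camel_case("IntegrityCoverage"))  # integrity_coverage
print(convert_camel_case("FastQC"))             # fast_qc
print(convert_camel_case("MyNewComponent"))     # my_new_component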
|
Collects Process classes and returns a dict mapping templates to classes
|
def collect_process_map():
"""Collects Process classes and return dict mapping templates to classes
This function crawls through the components module and retrieves all
classes that inherit from the Process class. Then, it converts the name
of the classes (which should be CamelCase) to snake_case, which is used
as the template name.
Returns
-------
dict
Dictionary mapping the template name (snake_case) to the corresponding
process class.
"""
process_map = {}
prefix = "{}.".format(components.__name__)
for importer, modname, _ in pkgutil.iter_modules(components.__path__,
prefix):
_module = importer.find_module(modname).load_module(modname)
_component_classes = [
cls for cls in _module.__dict__.values() if
isinstance(cls, type) and cls.__name__ != "Process"
]
for cls in _component_classes:
process_map[convert_camel_case(cls.__name__)] = cls
return process_map
|
Main executor of the process_newick template.
|
def main(newick):
"""Main executor of the process_newick template.
Parameters
----------
newick : str
path to the newick file.
"""
logger.info("Starting newick file processing")
print(newick)
tree = dendropy.Tree.get(file=open(newick, 'r'), schema="newick")
tree.reroot_at_midpoint()
    to_write = tree.as_string(schema="newick").strip().\
        replace("[&R] ", "").replace(" ", "_").replace("'", "")
with open(".report.json", "w") as json_report:
json_dic = {
"treeData": [{
"trees": [
to_write
]
}],
}
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
Factorize s.t. CUR = data
|
def factorize(self):
""" Factorize s.t. CUR = data
Updated Values
--------------
.C : updated values for C.
.U : updated values for U.
.R : updated values for R.
"""
[prow, pcol] = self.sample_probability()
self._rid = self.sample(self._rrank, prow)
self._cid = self.sample(self._crank, pcol)
self._cmdinit()
self.computeUCR()
|
Factorize s.t. CUR = data
|
def factorize(self):
""" Factorize s.t. CUR = data
Updated Values
--------------
.C : updated values for C.
.U : updated values for U.
.R : updated values for R.
"""
[prow, pcol] = self.sample_probability()
self._rid = self.sample(self._rrank, prow)
self._cid = self.sample(self._crank, pcol)
self._rcnt = np.ones(len(self._rid))
self._ccnt = np.ones(len(self._cid))
self.computeUCR()
|
Find data points on the convex hull of a supplied data set
|
def quickhull(sample):
""" Find data points on the convex hull of a supplied data set
Args:
        sample: data points as an n x d matrix (one point per row)
n - number samples
d - data dimension (should be two)
Returns:
        a k x d matrix containing the convex hull data points
"""
link = lambda a, b: np.concatenate((a, b[1:]))
edge = lambda a, b: np.concatenate(([a], [b]))
def dome(sample, base):
h, t = base
dists = np.dot(sample - h, np.dot(((0, -1), (1, 0)), (t - h)))
outer = np.repeat(sample, dists > 0, axis=0)
if len(outer):
pivot = sample[np.argmax(dists)]
return link(dome(outer, edge(h, pivot)),
dome(outer, edge(pivot, t)))
else:
return base
if len(sample) > 2:
axis = sample[:, 0]
base = np.take(sample, [np.argmin(axis), np.argmax(axis)], axis=0)
return link(dome(sample, base),
dome(sample, base[::-1]))
else:
return sample
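
A small self-contained sketch: the hull of five 2-D points (one point per row) keeps the three corner points and drops the two interior ones; the first vertex appears again at the end because the hull is returned as a closed polygon.

import numpy as np

pts = np.array([[0.0, 0.0],
                [4.0, 0.0],
                [2.0, 3.0],
                [2.0, 1.0],    # interior point
                [1.5, 0.5]])   # interior point
print(quickhull(pts))
# [[0. 0.]
#  [2. 3.]
#  [4. 0.]
#  [0. 0.]]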
|
Return data points that are most similar to basis vectors W
|
def _map_w_to_data(self):
""" Return data points that are most similar to basis vectors W
"""
# assign W to the next best data sample
self._Wmapped_index = vq(self.data, self.W)
self.Wmapped = np.zeros(self.W.shape)
# do not directly assign, i.e. Wdist = self.data[:,sel]
# as self might be unsorted (in non ascending order)
# -> sorting sel would screw the matching to W if
# self.data is stored as a hdf5 table (see h5py)
for i, s in enumerate(self._Wmapped_index):
self.Wmapped[:,i] = self.data[:,s]
|
compute new W
|
def update_w(self):
""" compute new W """
def select_hull_points(data, n=3):
""" select data points for pairwise projections of the first n
dimensions """
# iterate over all projections and select data points
idx = np.array([])
# iterate over some pairwise combinations of dimensions
for i in combinations(range(n), 2):
# sample convex hull points in 2D projection
convex_hull_d = quickhull(data[i, :].T)
# get indices for convex hull data points
idx = np.append(idx, vq(data[i, :], convex_hull_d.T))
idx = np.unique(idx)
return np.int32(idx)
# determine convex hull data points using either PCA or random
# projections
method = 'randomprojection'
if method == 'pca':
pcamodel = PCA(self.data)
pcamodel.factorize(show_progress=False)
proj = pcamodel.H
else:
R = np.random.randn(self._base_sel, self._data_dimension)
proj = np.dot(R, self.data)
self._hull_idx = select_hull_points(proj, n=self._base_sel)
aa_mdl = AA(self.data[:, self._hull_idx], num_bases=self._num_bases)
# determine W
aa_mdl.factorize(niter=50, compute_h=True, compute_w=True,
compute_err=True, show_progress=False)
self.W = aa_mdl.W
self._map_w_to_data()
|
Factorize s.t. WH = data
|
def factorize(self, show_progress=False, compute_w=True, compute_h=True,
compute_err=True, niter=1):
""" Factorize s.t. WH = data
Parameters
----------
show_progress : bool
print some extra information to stdout.
compute_h : bool
iteratively update values for H.
compute_w : bool
iteratively update values for W.
compute_err : bool
compute Frobenius norm |data-WH| after each update and store
it to .ferr[k].
Updated Values
--------------
.W : updated values for W.
.H : updated values for H.
.ferr : Frobenius norm |data-WH|.
"""
AA.factorize(self, niter=1, show_progress=show_progress,
compute_w=compute_w, compute_h=compute_h,
compute_err=compute_err)
|
compute new W
|
def update_w(self):
""" compute new W """
def select_next(iterval):
""" select the next best data sample using robust map
or simply the max iterval ... """
if self._robust_map:
k = np.argsort(iterval)[::-1]
d_sub = self.data[:,k[:self._robust_nselect]]
self.sub.extend(k[:self._robust_nselect])
# cluster d_sub
kmeans_mdl = Kmeans(d_sub, num_bases=self._robust_cluster)
kmeans_mdl.factorize(niter=10)
# get largest cluster
h = np.histogram(kmeans_mdl.assigned, range(self._robust_cluster+1))[0]
largest_cluster = np.argmax(h)
sel = pdist(kmeans_mdl.W[:, largest_cluster:largest_cluster+1], d_sub)
sel = k[np.argmin(sel)]
else:
sel = np.argmax(iterval)
return sel
EPS = 10**-8
if scipy.sparse.issparse(self.data):
norm_data = np.sqrt(self.data.multiply(self.data).sum(axis=0))
norm_data = np.array(norm_data).reshape((-1))
else:
norm_data = np.sqrt(np.sum(self.data**2, axis=0))
self.select = []
if self._method == 'pca' or self._method == 'aa':
iterval = norm_data.copy()
if self._method == 'nmf':
iterval = np.sum(self.data, axis=0)/(np.sqrt(self.data.shape[0])*norm_data)
iterval = 1.0 - iterval
self.select.append(select_next(iterval))
for l in range(1, self._num_bases):
if scipy.sparse.issparse(self.data):
c = self.data[:, self.select[-1]:self.select[-1]+1].T * self.data
c = np.array(c.todense())
else:
c = np.dot(self.data[:,self.select[-1]], self.data)
c = c/(norm_data * norm_data[self.select[-1]])
if self._method == 'pca':
c = 1.0 - np.abs(c)
c = c * norm_data
elif self._method == 'aa':
c = (c*-1.0 + 1.0)/2.0
c = c * norm_data
elif self._method == 'nmf':
c = 1.0 - np.abs(c)
### update the estimated volume
iterval = c * iterval
# detect the next best data point
self.select.append(select_next(iterval))
self._logger.info('cur_nodes: ' + str(self.select))
# sort indices, otherwise h5py won't work
self.W = self.data[:, np.sort(self.select)]
# "unsort" it again to keep the correct order
self.W = self.W[:, np.argsort(np.argsort(self.select))]
|
Factorize s.t. WH = data
|
def factorize(self, show_progress=False, compute_w=True, compute_h=True,
compute_err=True, robust_cluster=3, niter=1, robust_nselect=-1):
""" Factorize s.t. WH = data
Parameters
----------
show_progress : bool
print some extra information to stdout.
False, default
compute_h : bool
iteratively update values for H.
True, default
compute_w : bool
iteratively update values for W.
default, True
compute_err : bool
compute Frobenius norm |data-WH| after each update and store
it to .ferr[k].
robust_cluster : int, optional
set the number of clusters for robust map selection.
3, default
robust_nselect : int, optional
set the number of samples to consider for robust map
selection.
-1, default (automatically determine suitable number)
Updated Values
--------------
.W : updated values for W.
.H : updated values for H.
.ferr : Frobenius norm |data-WH|.
"""
self._robust_cluster = robust_cluster
self._robust_nselect = robust_nselect
if self._robust_nselect == -1:
self._robust_nselect = np.round(np.log(self.data.shape[1])*2)
AA.factorize(self, niter=1, show_progress=show_progress,
compute_w=compute_w, compute_h=compute_h,
compute_err=compute_err)
|
Main process. Returns the estimated segment boundary positions in frame indices (est_idxs, np.array(N) or list) and the segment labels (est_labels, np.array(N-1) or list); lists are returned for hierarchical segmentation.
|
def process(self):
"""Main process.
Returns
-------
est_idxs : np.array(N) or list
Estimated times for the segment boundaries in frame indeces.
List if hierarchical segmentation.
est_labels : np.array(N-1) or list
Estimated labels for the segments.
List if hierarchical segmentation.
"""
# This algorithm only accepts one specific kind of features:
# Combination of PCP + MFCC. Let's get them:
pcp_obj = Features.select_features(
"pcp", self.file_struct, self.annot_beats, self.framesync)
mfcc_obj = Features.select_features(
"mfcc", self.file_struct, self.annot_beats, self.framesync)
# Get frame times and make sure they're the same in both features
frame_times = pcp_obj.frame_times
assert np.array_equal(frame_times, mfcc_obj.frame_times)
# Brian wants PCP and MFCC
        # (transposed, because he's that kind of person)
# TODO: self.in_bound_idxs
est_idxs, est_labels, F = main2.do_segmentation(
pcp_obj.features.T, mfcc_obj.features.T, self.config,
self.in_bound_idxs)
return est_idxs, est_labels, F
|
Main process for flat segmentation. Returns the estimated segment boundary positions in frame indices (est_idxs, np.array(N)) and the segment labels (est_labels, np.array(N-1)).
|
def processFlat(self):
"""Main process.for flat segmentation.
Returns
-------
est_idxs : np.array(N)
Estimated times for the segment boundaries in frame indeces.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
self.config["hier"] = False
est_idxs, est_labels, F = self.process()
assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[1] - 1
return self._postprocess(est_idxs, est_labels)
|
Main process for hierarchical segmentation. Returns lists of np.arrays with the estimated boundary indices (est_idxs) and labels (est_labels), one entry per layer of the hierarchy.
|
def processHierarchical(self):
"""Main process.for hierarchial segmentation.
Returns
-------
est_idxs : list
List with np.arrays for each layer of segmentation containing
the estimated indeces for the segment boundaries.
est_labels : list
List with np.arrays containing the labels for each layer of the
hierarchical segmentation.
"""
self.config["hier"] = True
est_idxs, est_labels, F = self.process()
for layer in range(len(est_idxs)):
assert est_idxs[layer][0] == 0 and \
est_idxs[layer][-1] == F.shape[1] - 1
est_idxs[layer], est_labels[layer] = \
self._postprocess(est_idxs[layer], est_labels[layer])
return est_idxs, est_labels
|
Median filter along the first axis of the feature matrix X.
|
def median_filter(X, M=8):
"""Median filter along the first axis of the feature matrix X."""
for i in range(X.shape[1]):
X[:, i] = filters.median_filter(X[:, i], size=M)
return X
|
Creates a gaussian kernel following Foote's paper.
|
def compute_gaussian_krnl(M):
"""Creates a gaussian kernel following Foote's paper."""
g = signal.gaussian(M, M // 3., sym=True)
G = np.dot(g.reshape(-1, 1), g.reshape(1, -1))
G[M // 2:, :M // 2] = -G[M // 2:, :M // 2]
G[:M // 2, M // 2:] = -G[:M // 2, M // 2:]
return G
|
Computes the self-similarity matrix of X.
|
def compute_ssm(X, metric="seuclidean"):
"""Computes the self-similarity matrix of X."""
D = distance.pdist(X, metric=metric)
D = distance.squareform(D)
D /= D.max()
return 1 - D
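
A minimal sketch: two homogeneous blocks of feature frames produce a clearly blocked SSM (values near 1 within a block, near 0 across blocks). The euclidean metric is used here because the default seuclidean metric requires non-zero variance in every dimension.

import numpy as np

X = np.vstack([np.tile([1.0, 0.0, 0.0], (4, 1)),
               np.tile([0.0, 1.0, 0.0], (4, 1))])
S = compute_ssm(X, metric="euclidean")
print(S.round(2))  # 8x8 matrix with two 4x4 blocks of ones on the diagonal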
|
Computes the novelty curve from the self-similarity matrix X and the gaussian kernel G.
|
def compute_nc(X, G):
"""Computes the novelty curve from the self-similarity matrix X and
the gaussian kernel G."""
N = X.shape[0]
M = G.shape[0]
nc = np.zeros(N)
for i in range(M // 2, N - M // 2 + 1):
nc[i] = np.sum(X[i - M // 2:i + M // 2, i - M // 2:i + M // 2] * G)
# Normalize
nc += nc.min()
nc /= nc.max()
return nc
|
Obtain peaks from a novelty curve using an adaptive threshold.
|
def pick_peaks(nc, L=16):
"""Obtain peaks from a novelty curve using an adaptive threshold."""
offset = nc.mean() / 20.
nc = filters.gaussian_filter1d(nc, sigma=4) # Smooth out nc
th = filters.median_filter(nc, size=L) + offset
#th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset
peaks = []
for i in range(1, nc.shape[0] - 1):
# is it a peak?
if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
# is it above the threshold?
if nc[i] > th[i]:
peaks.append(i)
#plt.plot(nc)
#plt.plot(th)
#for peak in peaks:
#plt.axvline(peak)
#plt.show()
return peaks
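
A minimal end-to-end sketch of the novelty pipeline above on synthetic features with a single section change at frame 40; it assumes the functions above are in scope, and the reported peak may shift by a frame or two after smoothing.

import numpy as np

rng = np.random.RandomState(0)
F = np.vstack([rng.normal(0.0, 0.1, size=(40, 12)),   # section A
               rng.normal(1.0, 0.1, size=(40, 12))])  # section B
S = compute_ssm(F, metric="euclidean")
G = compute_gaussian_krnl(16)  # kernel size must be even
nc = compute_nc(S, G)
print(pick_peaks(nc, L=16))    # expected to contain a peak near frame 40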
|
Main process. Returns the estimated segment boundary indices in frames (est_idxs, np.array(N)) and the segment labels (est_labels, np.array(N-1)).
|
def processFlat(self):
"""Main process.
Returns
-------
est_idxs : np.array(N)
Estimated indeces the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
# Preprocess to obtain features
F = self._preprocess()
# Normalize
F = msaf.utils.normalize(F, norm_type=self.config["bound_norm_feats"])
# Make sure that the M_gaussian is even
if self.config["M_gaussian"] % 2 == 1:
self.config["M_gaussian"] += 1
# Median filter
F = median_filter(F, M=self.config["m_median"])
#plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show()
# Self similarity matrix
S = compute_ssm(F)
# Compute gaussian kernel
G = compute_gaussian_krnl(self.config["M_gaussian"])
#plt.imshow(S, interpolation="nearest", aspect="auto"); plt.show()
# Compute the novelty curve
nc = compute_nc(S, G)
# Find peaks in the novelty curve
est_idxs = pick_peaks(nc, L=self.config["L_peaks"])
# Add first and last frames
est_idxs = np.concatenate(([0], est_idxs, [F.shape[0] - 1]))
# Empty labels
est_labels = np.ones(len(est_idxs) - 1) * -1
# Post process estimations
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
return est_idxs, est_labels
|
Factorize s.t. WH = data
|
def factorize(self, show_progress=False, compute_w=True, compute_h=True,
compute_err=True, niter=1):
""" Factorize s.t. WH = data
Parameters
----------
show_progress : bool
print some extra information to stdout.
niter : int
number of iterations.
compute_h : bool
iteratively update values for H.
compute_w : bool
iteratively update values for W.
compute_err : bool
compute Frobenius norm |data-WH| after each update and store
it to .ferr[k].
Updated Values
--------------
.W : updated values for W.
.H : updated values for H.
.ferr : Frobenius norm |data-WH|.
"""
if show_progress:
self._logger.setLevel(logging.INFO)
else:
self._logger.setLevel(logging.ERROR)
# create W and H if they don't already exist
# -> any custom initialization to W,H should be done before
if not hasattr(self,'W'):
self.init_w()
if not hasattr(self,'H'):
self.init_h()
if compute_err:
self.ferr = np.zeros(niter)
for i in range(niter):
if compute_w:
self.update_w()
if compute_h:
self.update_h()
if compute_err:
self.ferr[i] = self.frobenius_norm()
self._logger.info('Iteration ' + str(i+1) + '/' + str(niter) +
' FN:' + str(self.ferr[i]))
else:
self._logger.info('Iteration ' + str(i+1) + '/' + str(niter))
|
Gaussian filter along the first axis of the feature matrix X.
|
def gaussian_filter(X, M=8, axis=0):
"""Gaussian filter along the first axis of the feature matrix X."""
for i in range(X.shape[axis]):
if axis == 1:
X[:, i] = filters.gaussian_filter(X[:, i], sigma=M / 2.)
elif axis == 0:
X[i, :] = filters.gaussian_filter(X[i, :], sigma=M / 2.)
return X
|
Computes the novelty curve from the structural features.
|
def compute_nc(X):
"""Computes the novelty curve from the structural features."""
N = X.shape[0]
# nc = np.sum(np.diff(X, axis=0), axis=1) # Difference between SF's
nc = np.zeros(N)
for i in range(N - 1):
nc[i] = distance.euclidean(X[i, :], X[i + 1, :])
# Normalize
nc += np.abs(nc.min())
nc /= float(nc.max())
return nc
|
Obtain peaks from a novelty curve using an adaptive threshold.
|
def pick_peaks(nc, L=16, offset_denom=0.1):
"""Obtain peaks from a novelty curve using an adaptive threshold."""
offset = nc.mean() * float(offset_denom)
th = filters.median_filter(nc, size=L) + offset
#th = filters.gaussian_filter(nc, sigma=L/2., mode="nearest") + offset
#import pylab as plt
#plt.plot(nc)
#plt.plot(th)
#plt.show()
# th = np.ones(nc.shape[0]) * nc.mean() - 0.08
peaks = []
for i in range(1, nc.shape[0] - 1):
# is it a peak?
if nc[i - 1] < nc[i] and nc[i] > nc[i + 1]:
# is it above the threshold?
if nc[i] > th[i]:
peaks.append(i)
return peaks
|
Circularly shifts the square matrix X in order to get a time-lag matrix.
|
def circular_shift(X):
"""Shifts circularly the X squre matrix in order to get a
time-lag matrix."""
N = X.shape[0]
L = np.zeros(X.shape)
for i in range(N):
L[i, :] = np.asarray([X[(i + j) % N, j] for j in range(N)])
return L
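
A small sketch: in the time-lag view the main diagonal of a recurrence matrix is mapped onto the first row (lag 0).

import numpy as np

R = np.eye(4)
print(circular_shift(R))
# [[1. 1. 1. 1.]
#  [0. 0. 0. 0.]
#  [0. 0. 0. 0.]
#  [0. 0. 0. 0.]]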
|
Time-delay embedding with m dimensions and tau delays.
|
def embedded_space(X, m, tau=1):
"""Time-delay embedding with m dimensions and tau delays."""
N = X.shape[0] - int(np.ceil(m))
Y = np.zeros((N, int(np.ceil(X.shape[1] * m))))
for i in range(N):
# print X[i:i+m,:].flatten().shape, w, X.shape
# print Y[i,:].shape
        rem = int((m % 1) * X.shape[1])  # Remainder for float m
Y[i, :] = np.concatenate((X[i:i + int(m), :].flatten(),
X[i + int(m), :rem]))
return Y
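
A small sketch: with m=2 every embedded row stacks two consecutive feature frames, so a 6x2 feature matrix becomes a 4x4 embedded matrix.

import numpy as np

X = np.arange(12, dtype=float).reshape(6, 2)
Y = embedded_space(X, m=2)
print(Y.shape)  # (4, 4)
print(Y[0])     # [0. 1. 2. 3.] -> frames 0 and 1 stacked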
|
Main process. Returns the estimated segment boundary positions in frame indices (est_idxs, np.array(N)) and the segment labels (est_labels, np.array(N-1)).
|
def processFlat(self):
"""Main process.
Returns
-------
est_idxs : np.array(N)
Estimated times for the segment boundaries in frame indeces.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
# Structural Features params
Mp = self.config["Mp_adaptive"] # Size of the adaptive threshold for
# peak picking
od = self.config["offset_thres"] # Offset coefficient for adaptive
# thresholding
M = self.config["M_gaussian"] # Size of gaussian kernel in beats
m = self.config["m_embedded"] # Number of embedded dimensions
k = self.config["k_nearest"] # k*N-nearest neighbors for the
# recurrence plot
# Preprocess to obtain features, times, and input boundary indeces
F = self._preprocess()
# Normalize
F = U.normalize(F, norm_type=self.config["bound_norm_feats"])
# Check size in case the track is too short
if F.shape[0] > 20:
if self.framesync:
red = 0.1
F_copy = np.copy(F)
F = librosa.util.utils.sync(
F.T, np.linspace(0, F.shape[0], num=F.shape[0] * red),
pad=False).T
            # Embedding the feature space (i.e. shingle)
E = embedded_space(F, m)
# plt.imshow(E.T, interpolation="nearest", aspect="auto"); plt.show()
# Recurrence matrix
R = librosa.segment.recurrence_matrix(
E.T,
k=k * int(F.shape[0]),
width=1, # zeros from the diagonal
metric="euclidean",
sym=True).astype(np.float32)
# Circular shift
L = circular_shift(R)
#plt.imshow(L, interpolation="nearest", cmap=plt.get_cmap("binary"))
#plt.show()
# Obtain structural features by filtering the lag matrix
SF = gaussian_filter(L.T, M=M, axis=1)
            SF = gaussian_filter(SF, M=1, axis=0)
# plt.imshow(SF.T, interpolation="nearest", aspect="auto")
#plt.show()
# Compute the novelty curve
nc = compute_nc(SF)
# Find peaks in the novelty curve
est_bounds = pick_peaks(nc, L=Mp, offset_denom=od)
# Re-align embedded space
est_bounds = np.asarray(est_bounds) + int(np.ceil(m / 2.))
if self.framesync:
est_bounds /= red
F = F_copy
else:
est_bounds = []
# Add first and last frames
est_idxs = np.concatenate(([0], est_bounds, [F.shape[0] - 1]))
est_idxs = np.unique(est_idxs)
assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1
# Empty labels
est_labels = np.ones(len(est_idxs) - 1) * - 1
# Post process estimations
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
# plt.figure(1)
# plt.plot(nc);
# [plt.axvline(p, color="m", ymin=.6) for p in est_bounds]
# [plt.axvline(b, color="b", ymax=.6, ymin=.3) for b in brian_bounds]
# [plt.axvline(b, color="g", ymax=.3) for b in ann_bounds]
# plt.show()
return est_idxs, est_labels
|
Formats the plot with the correct axis labels, title, ticks, and so on.
|
def _plot_formatting(title, est_file, algo_ids, last_bound, N, output_file):
"""Formats the plot with the correct axis labels, title, ticks, and
so on."""
import matplotlib.pyplot as plt
if title is None:
title = os.path.basename(est_file).split(".")[0]
plt.title(title)
plt.yticks(np.arange(0, 1, 1 / float(N)) + 1 / (float(N) * 2))
plt.gcf().subplots_adjust(bottom=0.22)
plt.gca().set_yticklabels(algo_ids)
plt.xlabel("Time (seconds)")
plt.xlim((0, last_bound))
plt.tight_layout()
if output_file is not None:
plt.savefig(output_file)
plt.show()
|
Plots all the boundaries.
|
def plot_boundaries(all_boundaries, est_file, algo_ids=None, title=None,
output_file=None):
"""Plots all the boundaries.
Parameters
----------
all_boundaries: list
A list of np.arrays containing the times of the boundaries, one array
for each algorithm.
est_file: str
Path to the estimated file (JSON file)
algo_ids : list
        List of algorithm ids to read boundaries from.
If None, all algorithm ids are read.
title : str
Title of the plot. If None, the name of the file is printed instead.
"""
import matplotlib.pyplot as plt
N = len(all_boundaries) # Number of lists of boundaries
if algo_ids is None:
algo_ids = io.get_algo_ids(est_file)
# Translate ids
for i, algo_id in enumerate(algo_ids):
algo_ids[i] = translate_ids[algo_id]
algo_ids = ["GT"] + algo_ids
figsize = (6, 4)
plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
for i, boundaries in enumerate(all_boundaries):
color = "b"
if i == 0:
color = "g"
for b in boundaries:
plt.axvline(b, i / float(N), (i + 1) / float(N), color=color)
plt.axhline(i / float(N), color="k", linewidth=1)
# Format plot
_plot_formatting(title, est_file, algo_ids, all_boundaries[0][-1], N,
output_file)
|
Plots all the labels.
|
def plot_labels(all_labels, gt_times, est_file, algo_ids=None, title=None,
output_file=None):
"""Plots all the labels.
Parameters
----------
all_labels: list
A list of np.arrays containing the labels of the boundaries, one array
for each algorithm.
gt_times: np.array
Array with the ground truth boundaries.
est_file: str
Path to the estimated file (JSON file)
algo_ids : list
        List of algorithm ids to read boundaries from.
If None, all algorithm ids are read.
title : str
Title of the plot. If None, the name of the file is printed instead.
"""
import matplotlib.pyplot as plt
N = len(all_labels) # Number of lists of labels
if algo_ids is None:
algo_ids = io.get_algo_ids(est_file)
# Translate ids
for i, algo_id in enumerate(algo_ids):
algo_ids[i] = translate_ids[algo_id]
algo_ids = ["GT"] + algo_ids
# Index the labels to normalize them
for i, labels in enumerate(all_labels):
all_labels[i] = mir_eval.util.index_labels(labels)[0]
# Get color map
cm = plt.get_cmap('gist_rainbow')
max_label = max(max(labels) for labels in all_labels)
# To intervals
gt_inters = utils.times_to_intervals(gt_times)
# Plot labels
figsize = (6, 4)
plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
for i, labels in enumerate(all_labels):
for label, inter in zip(labels, gt_inters):
plt.axvspan(inter[0], inter[1], ymin=i / float(N),
ymax=(i + 1) / float(N), alpha=0.6,
color=cm(label / float(max_label)))
plt.axhline(i / float(N), color="k", linewidth=1)
# Draw the boundary lines
for bound in gt_times:
plt.axvline(bound, color="g")
# Format plot
_plot_formatting(title, est_file, algo_ids, gt_times[-1], N,
output_file)
|
Plots the results of one track with ground truth if it exists.
|
def plot_one_track(file_struct, est_times, est_labels, boundaries_id, labels_id,
title=None):
"""Plots the results of one track, with ground truth if it exists."""
import matplotlib.pyplot as plt
# Set up the boundaries id
bid_lid = boundaries_id
if labels_id is not None:
bid_lid += " + " + labels_id
try:
# Read file
jam = jams.load(file_struct.ref_file)
ann = jam.search(namespace='segment_.*')[0]
ref_inters, ref_labels = ann.to_interval_values()
# To times
ref_times = utils.intervals_to_times(ref_inters)
all_boundaries = [ref_times, est_times]
all_labels = [ref_labels, est_labels]
algo_ids = ["GT", bid_lid]
except:
logging.warning("No references found in %s. Not plotting groundtruth"
% file_struct.ref_file)
all_boundaries = [est_times]
all_labels = [est_labels]
algo_ids = [bid_lid]
N = len(all_boundaries)
# Index the labels to normalize them
for i, labels in enumerate(all_labels):
all_labels[i] = mir_eval.util.index_labels(labels)[0]
# Get color map
cm = plt.get_cmap('gist_rainbow')
max_label = max(max(labels) for labels in all_labels)
figsize = (8, 4)
plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
for i, boundaries in enumerate(all_boundaries):
color = "b"
if i == 0:
color = "g"
for b in boundaries:
plt.axvline(b, i / float(N), (i + 1) / float(N), color=color)
if labels_id is not None:
labels = all_labels[i]
inters = utils.times_to_intervals(boundaries)
for label, inter in zip(labels, inters):
plt.axvspan(inter[0], inter[1], ymin=i / float(N),
ymax=(i + 1) / float(N), alpha=0.6,
color=cm(label / float(max_label)))
plt.axhline(i / float(N), color="k", linewidth=1)
# Format plot
_plot_formatting(title, os.path.basename(file_struct.audio_file), algo_ids,
all_boundaries[0][-1], N, None)
|
Plots a given tree containing hierarchical segmentation.
|
def plot_tree(T, res=None, title=None, cmap_id="Pastel2"):
"""Plots a given tree, containing hierarchical segmentation.
Parameters
----------
T: mir_eval.segment.tree
A tree object containing the hierarchical segmentation.
res: float
Frame-rate resolution of the tree (None to use seconds).
title: str
Title for the plot. `None` for no title.
cmap_id: str
Color Map ID
"""
import matplotlib.pyplot as plt
def round_time(t, res=0.1):
v = int(t / float(res)) * res
return v
# Get color map
cmap = plt.get_cmap(cmap_id)
# Get segments by level
level_bounds = []
for level in T.levels:
if level == "root":
continue
segments = T.get_segments_in_level(level)
level_bounds.append(segments)
# Plot axvspans for each segment
B = float(len(level_bounds))
#plt.figure(figsize=figsize)
for i, segments in enumerate(level_bounds):
labels = utils.segment_labels_to_floats(segments)
for segment, label in zip(segments, labels):
#print i, label, cmap(label)
if res is None:
start = segment.start
end = segment.end
xlabel = "Time (seconds)"
else:
start = int(round_time(segment.start, res=res) / res)
end = int(round_time(segment.end, res=res) / res)
xlabel = "Time (frames)"
plt.axvspan(start, end,
ymax=(len(level_bounds) - i) / B,
ymin=(len(level_bounds) - i - 1) / B,
facecolor=cmap(label))
# Plot labels
L = float(len(T.levels) - 1)
plt.yticks(np.linspace(0, (L - 1) / L, num=L) + 1 / L / 2.,
T.levels[1:][::-1])
plt.xlabel(xlabel)
if title is not None:
plt.title(title)
plt.gca().set_xlim([0, end])
|
Returns a set of segments defined by the bound_idxs.
|
def get_feat_segments(F, bound_idxs):
"""Returns a set of segments defined by the bound_idxs.
Parameters
----------
F: np.ndarray
Matrix containing the features, one feature vector per row.
bound_idxs: np.ndarray
Array with boundary indeces.
Returns
-------
feat_segments: list
List of segments, one for each boundary interval.
"""
# Make sure bound_idxs are not empty
assert len(bound_idxs) > 0, "Boundaries can't be empty"
# Make sure that boundaries are sorted
bound_idxs = np.sort(bound_idxs)
# Make sure we're not out of bounds
assert bound_idxs[0] >= 0 and bound_idxs[-1] < F.shape[0], \
"Boundaries are not correct for the given feature dimensions."
# Obtain the segments
feat_segments = []
for i in range(len(bound_idxs) - 1):
feat_segments.append(F[bound_idxs[i]:bound_idxs[i + 1], :])
return feat_segments
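
A minimal sketch: boundary indices [0, 4, 9] split a 10-frame feature matrix into two segments of 4 and 5 frames (the last index must stay below F.shape[0]).

import numpy as np

F = np.arange(20, dtype=float).reshape(10, 2)
segments = get_feat_segments(F, np.array([0, 4, 9]))
print([seg.shape for seg in segments])  # [(4, 2), (5, 2)]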
|
From a list of feature segments, return a list of 2D-Fourier Magnitude Coefs using the maximum segment size as the main size and zero-padding the rest.
|
def feat_segments_to_2dfmc_max(feat_segments, offset=4):
"""From a list of feature segments, return a list of 2D-Fourier Magnitude
Coefs using the maximum segment size as main size and zero pad the rest.
Parameters
----------
feat_segments: list
List of segments, one for each boundary interval.
offset: int >= 0
Number of frames to ignore from beginning and end of each segment.
Returns
-------
fmcs: np.ndarray
Tensor containing the 2D-FMC matrices, one matrix per segment.
"""
if len(feat_segments) == 0:
return []
# Get maximum segment size
max_len = max([feat_segment.shape[0] for feat_segment in feat_segments])
fmcs = []
for feat_segment in feat_segments:
# Zero pad if needed
X = np.zeros((max_len, feat_segment.shape[1]))
        # Remove a set of frames at the beginning and end of the segment
if feat_segment.shape[0] <= offset or offset == 0:
X[:feat_segment.shape[0], :] = feat_segment
else:
X[:feat_segment.shape[0] - offset, :] = \
feat_segment[offset // 2:-offset // 2, :]
# Compute the 2D-FMC
try:
fmcs.append(utils2d.compute_ffmc2d(X))
        except Exception:
logging.warning("Couldn't compute the 2D Fourier Transform")
fmcs.append(np.zeros((X.shape[0] * X.shape[1]) // 2 + 1))
# Normalize
# fmcs[-1] = fmcs[-1] / float(fmcs[-1].max())
return np.asarray(fmcs)
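For intuition only, a 2D Fourier Magnitude Coefficient vector can be sketched with plain NumPy; this is an assumption-laden stand-in, not the actual utils2d.compute_ffmc2d implementation:

import numpy as np

def ffmc2d_sketch(X):
    # Magnitude of the 2D FFT of the (zero-padded) segment, flattened and
    # truncated to the non-redundant half, matching the size of the zero
    # vector used in the fallback above. The real MSAF routine may differ.
    mag = np.abs(np.fft.fft2(X)).flatten()
    return mag[:len(mag) // 2 + 1]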
|
Main function to compute the segment similarity, assigning a label to each boundary interval of the given features.
|
def compute_similarity(F, bound_idxs, dirichlet=False, xmeans=False, k=5,
offset=4):
"""Main function to compute the segment similarity of file file_struct.
Parameters
----------
F: np.ndarray
Matrix containing one feature vector per row.
bound_idxs: np.ndarray
        Array with the indices of the segment boundaries.
dirichlet: boolean
Whether to use the dirichlet estimator of the number of unique labels.
xmeans: boolean
Whether to use the xmeans estimator of the number of unique labels.
k: int > 0
        If the other two estimators are `False`, use this fixed number of labels.
offset: int >= 0
Number of frames to ignore from beginning and end of each segment.
Returns
-------
labels_est: np.ndarray
Estimated labels, containing integer identifiers.
"""
# Get the feature segments
feat_segments = get_feat_segments(F, bound_idxs)
# Get the 2D-FMCs segments
fmcs = feat_segments_to_2dfmc_max(feat_segments, offset)
if len(fmcs) == 0:
return np.arange(len(bound_idxs) - 1)
# Compute the labels using kmeans
if dirichlet:
k_init = np.min([fmcs.shape[0], k])
# Only compute the dirichlet method if the fmc shape is small enough
if fmcs.shape[1] > 500:
labels_est = compute_labels_kmeans(fmcs, k=k)
else:
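            # Note: mixture.DPGMM is only available in older scikit-learn
            # releases (deprecated in 0.18 and later removed in favor of
            # BayesianGaussianMixture), so this branch assumes a legacy
            # scikit-learn installation.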
dpgmm = mixture.DPGMM(n_components=k_init, covariance_type='full')
# dpgmm = mixture.VBGMM(n_components=k_init, covariance_type='full')
dpgmm.fit(fmcs)
k = len(dpgmm.means_)
labels_est = dpgmm.predict(fmcs)
# print("Estimated with Dirichlet Process:", k)
if xmeans:
xm = XMeans(fmcs, plot=False)
k = xm.estimate_K_knee(th=0.01, maxK=8)
labels_est = compute_labels_kmeans(fmcs, k=k)
# print("Estimated with Xmeans:", k)
else:
labels_est = compute_labels_kmeans(fmcs, k=k)
return labels_est
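A minimal call with random data, assuming the rest of this module (in particular compute_labels_kmeans and utils2d.compute_ffmc2d) is importable; the numbers below are purely illustrative:

import numpy as np

F = np.random.rand(200, 12)                    # 200 frames of 12-d features
bound_idxs = np.array([0, 40, 95, 160, 199])   # four boundary intervals
labels = compute_similarity(F, bound_idxs, dirichlet=False, xmeans=False, k=3)
print(labels)  # one integer label per interval (four labels here)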
|
Main process. Returns the estimated boundary indices (in frames) and the estimated segment labels.
|
def processFlat(self):
"""Main process.
Returns
-------
est_idx : np.array(N)
        Estimated indices for the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
    # Preprocess to obtain features, times, and input boundary indices
F = self._preprocess()
# Normalize
F = U.normalize(F, norm_type=self.config["label_norm_feats"],
floor=self.config["label_norm_floor"],
min_db=self.config["label_norm_min_db"])
# Find the labels using 2D-FMCs
est_labels = compute_similarity(F, self.in_bound_idxs,
dirichlet=self.config["dirichlet"],
xmeans=self.config["xmeans"],
k=self.config["k"],
offset=self.config["2dfmc_offset"])
# Post process estimations
self.in_bound_idxs, est_labels = self._postprocess(self.in_bound_idxs,
est_labels)
return self.in_bound_idxs, est_labels
|
Fit the OLDA model
|
def fit(self, X, Y):
'''Fit the OLDA model
Parameters
----------
X : array-like, shape [n_samples]
Training data: each example is an n_features-by-* data array
Y : array-like, shape [n_samples]
Training labels: each label is an array of change-points
(eg, a list of segment boundaries)
Returns
-------
self : object
'''
# Re-initialize the scatter matrices
self.scatter_ordinal_ = None
self.scatter_within_ = None
# Reduce to partial-fit
self.partial_fit(X, Y)
return self
|
Partial-fit the OLDA model
|
def partial_fit(self, X, Y):
'''Partial-fit the OLDA model
Parameters
----------
X : array-like, shape [n_samples]
Training data: each example is an n_features-by-* data array
Y : array-like, shape [n_samples]
Training labels: each label is an array of change-points
(eg, a list of segment boundaries)
Returns
-------
self : object
'''
    for (xi, yi) in zip(X, Y):
prev_mean = None
prev_length = None
if self.scatter_within_ is None:
# First round: initialize
d, n = xi.shape
if yi[0] > 0:
yi = np.concatenate([np.array([0]), yi])
if yi[-1] < n:
yi = np.concatenate([yi, np.array([n])])
self.scatter_within_ = self.sigma * np.eye(d)
self.scatter_ordinal_ = np.zeros(d)
# iterate over segments
for (seg_start, seg_end) in zip(yi[:-1], yi[1:]):
seg_length = seg_end - seg_start
if seg_length < 2:
continue
seg_mean = np.mean(xi[:, seg_start:seg_end], axis=1, keepdims=True)
seg_cov = np.cov(xi[:, seg_start:seg_end])
self.scatter_within_ = self.scatter_within_ + seg_length * seg_cov
            if prev_mean is not None:
                joint_mean = (prev_length * prev_mean + seg_length *
                              seg_mean) / (prev_length + seg_length)
                diff_ord = seg_mean - joint_mean
                self.scatter_ordinal_ = self.scatter_ordinal_ + \
                    seg_length * np.dot(diff_ord, diff_ord.T)
                diff_ord = prev_mean - joint_mean
                self.scatter_ordinal_ = self.scatter_ordinal_ + \
                    prev_length * np.dot(diff_ord, diff_ord.T)
prev_mean = seg_mean
prev_length = seg_length
e_vals, e_vecs = scipy.linalg.eig(self.scatter_ordinal_, self.scatter_within_)
self.e_vals_ = e_vals
self.e_vecs_ = e_vecs
self.components_ = e_vecs.T
return self
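Once fitted, the learned components can project new feature matrices onto the ordinal discriminant directions. A rough usage sketch, where the class name OLDA, its constructor signature, and the choice of how many components to keep are assumptions rather than documented API:

# olda = OLDA(sigma=1e-4)
# olda.fit(X_train, Y_train)   # X_train: list of (d, n_i) arrays,
#                              # Y_train: list of boundary-frame arrays
# m = 32                       # keep the top m discriminant directions
# xi_low = olda.components_[:m].dot(xi)   # (m, n_frames) projected features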
|
Actual implementation of the features.
|
def compute_features(self):
"""Actual implementation of the features.
Returns
-------
cqt: np.array(N, F)
        The features, each row representing a feature vector for a given
time frame/beat.
"""
linear_cqt = np.abs(librosa.cqt(
self._audio, sr=self.sr, hop_length=self.hop_length,
n_bins=self.n_bins, norm=self.norm, filter_scale=self.filter_scale)
) ** 2
cqt = librosa.amplitude_to_db(linear_cqt, ref=self.ref_power).T
return cqt
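The same computation can be reproduced outside the class with librosa alone; the parameter values below are placeholders, not the framework defaults, and librosa.ex downloads a small bundled example clip:

import numpy as np
import librosa

y, sr = librosa.load(librosa.ex('trumpet'))
linear_cqt = np.abs(librosa.cqt(y, sr=sr, hop_length=1024, n_bins=84)) ** 2
cqt_db = librosa.amplitude_to_db(linear_cqt, ref=np.max).T  # (n_frames, n_bins)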
|
Actual implementation of the features.
|
def compute_features(self):
"""Actual implementation of the features.
Returns
-------
mfcc: np.array(N, F)
        The features, each row representing a feature vector for a given
time frame/beat.
"""
S = librosa.feature.melspectrogram(self._audio,
sr=self.sr,
n_fft=self.n_fft,
hop_length=self.hop_length,
n_mels=self.n_mels)
log_S = librosa.amplitude_to_db(S, ref=self.ref_power)
mfcc = librosa.feature.mfcc(S=log_S, n_mfcc=self.n_mfcc).T
return mfcc
|
Actual implementation of the features.
|
def compute_features(self):
"""Actual implementation of the features.
Returns
-------
pcp: np.array(N, F)
        The features, each row representing a feature vector for a given
time frame/beat.
"""
audio_harmonic, _ = self.compute_HPSS()
pcp_cqt = np.abs(librosa.hybrid_cqt(audio_harmonic,
sr=self.sr,
hop_length=self.hop_length,
n_bins=self.n_bins,
norm=self.norm,
fmin=self.f_min)) ** 2
pcp = librosa.feature.chroma_cqt(C=pcp_cqt,
sr=self.sr,
hop_length=self.hop_length,
n_octaves=self.n_octaves,
fmin=self.f_min).T
return pcp
|
Actual implementation of the features.
|
def compute_features(self):
"""Actual implementation of the features.
Returns
-------
tonnetz: np.array(N, F)
        The features, each row representing a feature vector for a given
time frame/beat.
"""
pcp = PCP(self.file_struct, self.feat_type, self.sr, self.hop_length,
self.n_bins, self.norm, self.f_min, self.n_octaves).features
tonnetz = librosa.feature.tonnetz(chroma=pcp.T).T
return tonnetz
|
Actual implementation of the features.
|
def compute_features(self):
"""Actual implementation of the features.
Returns
-------
tempogram: np.array(N, F)
        The features, each row representing a feature vector for a given
time frame/beat.
"""
return librosa.feature.tempogram(self._audio, sr=self.sr,
hop_length=self.hop_length,
win_length=self.win_length).T
|
Reads the estimations (boundaries and/or labels) from a JAMS file containing the estimations of an algorithm.
|
def read_estimations(est_file, boundaries_id, labels_id=None, **params):
"""Reads the estimations (boundaries and/or labels) from a jams file
containing the estimations of an algorithm.
Parameters
----------
est_file : str
Path to the estimated file (JAMS file).
boundaries_id : str
Identifier of the algorithm used to compute the boundaries.
labels_id : str
Identifier of the algorithm used to compute the labels.
params : dict
Additional search parameters. E.g. {"feature" : "pcp"}.
Returns
-------
boundaries : np.array((N,2))
Array containing the estimated boundaries in intervals.
labels : np.array(N)
Array containing the estimated labels.
Empty array if labels_id is None.
"""
# Open file and read jams
jam = jams.load(est_file)
# Find correct estimation
est = find_estimation(jam, boundaries_id, labels_id, params)
if est is None:
raise NoEstimationsError("No estimations for file: %s" % est_file)
# Get data values
all_boundaries, all_labels = est.to_interval_values()
if params["hier"]:
hier_bounds = defaultdict(list)
hier_labels = defaultdict(list)
for bounds, labels in zip(all_boundaries, all_labels):
level = labels["level"]
hier_bounds[level].append(bounds)
hier_labels[level].append(labels["label"])
# Order
all_boundaries = []
all_labels = []
for key in sorted(list(hier_bounds.keys())):
all_boundaries.append(np.asarray(hier_bounds[key]))
all_labels.append(np.asarray(hier_labels[key]))
return all_boundaries, all_labels
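A typical call, assuming a JAMS estimation file produced by the framework exists at the given path; the path and the algorithm identifiers are illustrative only:

# est_file = "estimations/track.jams"
# bounds, labels = read_estimations(est_file, boundaries_id="sf",
#                                   labels_id="fmc2d", feature="pcp",
#                                   hier=False)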
|
Reads the boundary times and the labels.
|
def read_references(audio_path, annotator_id=0):
"""Reads the boundary times and the labels.
Parameters
----------
    audio_path : str
        Path to the audio file.
    annotator_id : int
        Index of the annotator whose segment annotations should be read
        (defaults to the first one).
Returns
-------
ref_times : list
List of boundary times
ref_labels : list
List of labels
Raises
------
IOError: if `audio_path` doesn't exist.
"""
# Dataset path
ds_path = os.path.dirname(os.path.dirname(audio_path))
# Read references
jam_path = os.path.join(ds_path, ds_config.references_dir,
os.path.basename(audio_path)[:-4] +
ds_config.references_ext)
jam = jams.load(jam_path, validate=False)
ann = jam.search(namespace='segment_.*')[annotator_id]
ref_inters, ref_labels = ann.to_interval_values()
# Intervals to times
ref_times = utils.intervals_to_times(ref_inters)
return ref_times, ref_labels
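For example (paths and configuration values are illustrative), assuming ds_config.references_dir == "references" and ds_config.references_ext == ".jams", a call for "/data/MyDataset/audio/song.mp3" reads "/data/MyDataset/references/song.jams":

# ref_times, ref_labels = read_references("/data/MyDataset/audio/song.mp3")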