Dataset columns (dtype and value range):

partition: stringclasses (3 values)
func_name: stringlengths (1 to 134)
docstring: stringlengths (1 to 46.9k)
path: stringlengths (4 to 223)
original_string: stringlengths (75 to 104k)
code: stringlengths (75 to 104k)
docstring_tokens: listlengths (1 to 1.97k)
repo: stringlengths (7 to 55)
language: stringclasses (1 value)
url: stringlengths (87 to 315)
code_tokens: listlengths (19 to 28.4k)
sha: stringlengths (40 to 40)
test
|
Process.set_channels
|
General purpose method that sets the main channels
This method will take a variable number of keyword arguments to
set the :py:attr:`Process._context` attribute with the information
on the main channels for the process. This is done by appending
the process ID (:py:attr:`Process.pid`) attribute to the input,
output and status channel prefix strings. In the output channel,
the process ID is incremented by 1 to allow the connection with the
channel in the next process.
The ``**kwargs`` system for setting the :py:attr:`Process._context`
attribute also provides additional flexibility. In this way,
individual processes can provide additional information not covered
in this method, without changing it.
Parameters
----------
kwargs : dict
Dictionary with the keyword arguments for setting up the template
context
|
flowcraft/generator/process.py
|
def set_channels(self, **kwargs):
""" General purpose method that sets the main channels
This method will take a variable number of keyword arguments to
set the :py:attr:`Process._context` attribute with the information
on the main channels for the process. This is done by appending
the process ID (:py:attr:`Process.pid`) attribute to the input,
output and status channel prefix strings. In the output channel,
the process ID is incremented by 1 to allow the connection with the
channel in the next process.
The ``**kwargs`` system for setting the :py:attr:`Process._context`
attribute also provides additional flexibility. In this way,
individual processes can provide additional information not covered
in this method, without changing it.
Parameters
----------
kwargs : dict
Dictionary with the keyword arguments for setting up the template
context
"""
if not self.pid:
self.pid = "{}_{}".format(self.lane, kwargs.get("pid"))
for i in self.status_channels:
if i.startswith("STATUS_"):
self.status_strs.append("{}_{}".format(i, self.pid))
else:
self.status_strs.append("STATUS_{}_{}".format(i, self.pid))
if self.main_forks:
logger.debug("Setting main fork channels: {}".format(
self.main_forks))
operator = "set" if len(self.main_forks) == 1 else "into"
self.forks = ["\n{}.{}{{ {} }}\n".format(
self.output_channel, operator, ";".join(self.main_forks))]
self._context = {**kwargs, **{"input_channel": self.input_channel,
"output_channel": self.output_channel,
"template": self.template,
"forks": "\n".join(self.forks),
"pid": self.pid}}
|
def set_channels(self, **kwargs):
""" General purpose method that sets the main channels
This method will take a variable number of keyword arguments to
set the :py:attr:`Process._context` attribute with the information
on the main channels for the process. This is done by appending
the process ID (:py:attr:`Process.pid`) attribute to the input,
output and status channel prefix strings. In the output channel,
the process ID is incremented by 1 to allow the connection with the
channel in the next process.
The ``**kwargs`` system for setting the :py:attr:`Process._context`
attribute also provides additional flexibility. In this way,
individual processes can provide additional information not covered
in this method, without changing it.
Parameters
----------
kwargs : dict
Dictionary with the keyword arguments for setting up the template
context
"""
if not self.pid:
self.pid = "{}_{}".format(self.lane, kwargs.get("pid"))
for i in self.status_channels:
if i.startswith("STATUS_"):
self.status_strs.append("{}_{}".format(i, self.pid))
else:
self.status_strs.append("STATUS_{}_{}".format(i, self.pid))
if self.main_forks:
logger.debug("Setting main fork channels: {}".format(
self.main_forks))
operator = "set" if len(self.main_forks) == 1 else "into"
self.forks = ["\n{}.{}{{ {} }}\n".format(
self.output_channel, operator, ";".join(self.main_forks))]
self._context = {**kwargs, **{"input_channel": self.input_channel,
"output_channel": self.output_channel,
"template": self.template,
"forks": "\n".join(self.forks),
"pid": self.pid}}
|
[
"General",
"purpose",
"method",
"that",
"sets",
"the",
"main",
"channels"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L444-L487
|
[
"def",
"set_channels",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"pid",
":",
"self",
".",
"pid",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"self",
".",
"lane",
",",
"kwargs",
".",
"get",
"(",
"\"pid\"",
")",
")",
"for",
"i",
"in",
"self",
".",
"status_channels",
":",
"if",
"i",
".",
"startswith",
"(",
"\"STATUS_\"",
")",
":",
"self",
".",
"status_strs",
".",
"append",
"(",
"\"{}_{}\"",
".",
"format",
"(",
"i",
",",
"self",
".",
"pid",
")",
")",
"else",
":",
"self",
".",
"status_strs",
".",
"append",
"(",
"\"STATUS_{}_{}\"",
".",
"format",
"(",
"i",
",",
"self",
".",
"pid",
")",
")",
"if",
"self",
".",
"main_forks",
":",
"logger",
".",
"debug",
"(",
"\"Setting main fork channels: {}\"",
".",
"format",
"(",
"self",
".",
"main_forks",
")",
")",
"operator",
"=",
"\"set\"",
"if",
"len",
"(",
"self",
".",
"main_forks",
")",
"==",
"1",
"else",
"\"into\"",
"self",
".",
"forks",
"=",
"[",
"\"\\n{}.{}{{ {} }}\\n\"",
".",
"format",
"(",
"self",
".",
"output_channel",
",",
"operator",
",",
"\";\"",
".",
"join",
"(",
"self",
".",
"main_forks",
")",
")",
"]",
"self",
".",
"_context",
"=",
"{",
"*",
"*",
"kwargs",
",",
"*",
"*",
"{",
"\"input_channel\"",
":",
"self",
".",
"input_channel",
",",
"\"output_channel\"",
":",
"self",
".",
"output_channel",
",",
"\"template\"",
":",
"self",
".",
"template",
",",
"\"forks\"",
":",
"\"\\n\"",
".",
"join",
"(",
"self",
".",
"forks",
")",
",",
"\"pid\"",
":",
"self",
".",
"pid",
"}",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
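As a reading aid for the ``Process.set_channels`` record above, the following minimal, self-contained sketch mirrors its string building with hypothetical values (lane, pid, channel and sink names are illustrative, not taken from the dataset):

# Reproduces the pid, status-channel and fork-string construction from
# Process.set_channels with plain Python values (no flowcraft import).
lane, pid_arg = 1, 2
pid = "{}_{}".format(lane, pid_arg)                       # "1_2"

status_channels = ["report", "STATUS_trimmomatic"]
status_strs = []
for i in status_channels:
    if i.startswith("STATUS_"):
        status_strs.append("{}_{}".format(i, pid))
    else:
        status_strs.append("STATUS_{}_{}".format(i, pid))
# status_strs == ["STATUS_report_1_2", "STATUS_trimmomatic_1_2"]

output_channel = "MAIN_1_2"
main_forks = ["SINK_A", "SINK_B"]
operator = "set" if len(main_forks) == 1 else "into"      # "into"
fork = "\n{}.{}{{ {} }}\n".format(
    output_channel, operator, ";".join(main_forks))
# fork == "\nMAIN_1_2.into{ SINK_A;SINK_B }\n", i.e. a Nextflow fork statement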
test
|
Process.update_main_forks
|
Updates the forks attribute with the sink channel destination
Parameters
----------
sink : str
Channel onto which the main input will be forked to
|
flowcraft/generator/process.py
|
def update_main_forks(self, sink):
"""Updates the forks attribute with the sink channel destination
Parameters
----------
sink : str
Channel onto which the main input will be forked to
"""
if not self.main_forks:
self.main_forks = [self.output_channel]
self.output_channel = "_{}".format(self.output_channel)
self.main_forks.append(sink)
# fork_lst = self.forks + self.main_forks
operator = "set" if len(self.main_forks) == 1 else "into"
self.forks = ["\n{}.{}{{ {} }}\n".format(
self.output_channel, operator, ";".join(self.main_forks))]
self._context = {**self._context,
**{"forks": "".join(self.forks),
"output_channel": self.output_channel}}
|
def update_main_forks(self, sink):
"""Updates the forks attribute with the sink channel destination
Parameters
----------
sink : str
Channel onto which the main input will be forked to
"""
if not self.main_forks:
self.main_forks = [self.output_channel]
self.output_channel = "_{}".format(self.output_channel)
self.main_forks.append(sink)
# fork_lst = self.forks + self.main_forks
operator = "set" if len(self.main_forks) == 1 else "into"
self.forks = ["\n{}.{}{{ {} }}\n".format(
self.output_channel, operator, ";".join(self.main_forks))]
self._context = {**self._context,
**{"forks": "".join(self.forks),
"output_channel": self.output_channel}}
|
[
"Updates",
"the",
"forks",
"attribute",
"with",
"the",
"sink",
"channel",
"destination"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L494-L516
|
[
"def",
"update_main_forks",
"(",
"self",
",",
"sink",
")",
":",
"if",
"not",
"self",
".",
"main_forks",
":",
"self",
".",
"main_forks",
"=",
"[",
"self",
".",
"output_channel",
"]",
"self",
".",
"output_channel",
"=",
"\"_{}\"",
".",
"format",
"(",
"self",
".",
"output_channel",
")",
"self",
".",
"main_forks",
".",
"append",
"(",
"sink",
")",
"# fork_lst = self.forks + self.main_forks",
"operator",
"=",
"\"set\"",
"if",
"len",
"(",
"self",
".",
"main_forks",
")",
"==",
"1",
"else",
"\"into\"",
"self",
".",
"forks",
"=",
"[",
"\"\\n{}.{}{{ {} }}\\n\"",
".",
"format",
"(",
"self",
".",
"output_channel",
",",
"operator",
",",
"\";\"",
".",
"join",
"(",
"self",
".",
"main_forks",
")",
")",
"]",
"self",
".",
"_context",
"=",
"{",
"*",
"*",
"self",
".",
"_context",
",",
"*",
"*",
"{",
"\"forks\"",
":",
"\"\"",
".",
"join",
"(",
"self",
".",
"forks",
")",
",",
"\"output_channel\"",
":",
"self",
".",
"output_channel",
"}",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Process.set_secondary_channel
|
General purpose method for setting a secondary channel
This method allows a given source channel to be forked into one or
more channels and sets those forks in the :py:attr:`Process.forks`
attribute. Both the source and the channels in the ``channel_list``
argument must be the final channel strings, which means that this
method should be called only after setting the main channels.
If the source is not a main channel, this will simply create a fork
or set for every channel in the ``channel_list`` argument list::
SOURCE_CHANNEL_1.into{SINK_1;SINK_2}
If the source is a main channel, this will apply some changes to
the output channel of the process, to avoid overlapping main output
channels. For instance, forking the main output channel for process
2 would create a ``MAIN_2.into{...}``. The issue here is that the
``MAIN_2`` channel is expected as the input of the next process, but
now is being used to create the fork. To solve this issue, the output
channel is modified into ``_MAIN_2``, and the fork is set to
the provided channels plus the ``MAIN_2`` channel::
_MAIN_2.into{MAIN_2;MAIN_5;...}
Parameters
----------
source : str
String with the name of the source channel
channel_list : list
List of channels that will receive a fork of the secondary
channel
|
flowcraft/generator/process.py
|
def set_secondary_channel(self, source, channel_list):
""" General purpose method for setting a secondary channel
This method allows a given source channel to be forked into one or
more channels and sets those forks in the :py:attr:`Process.forks`
attribute. Both the source and the channels in the ``channel_list``
argument must be the final channel strings, which means that this
method should be called only after setting the main channels.
If the source is not a main channel, this will simply create a fork
or set for every channel in the ``channel_list`` argument list::
SOURCE_CHANNEL_1.into{SINK_1;SINK_2}
If the source is a main channel, this will apply some changes to
the output channel of the process, to avoid overlapping main output
channels. For instance, forking the main output channel for process
2 would create a ``MAIN_2.into{...}``. The issue here is that the
``MAIN_2`` channel is expected as the input of the next process, but
now is being used to create the fork. To solve this issue, the output
channel is modified into ``_MAIN_2``, and the fork is set to
the channels provided channels plus the ``MAIN_2`` channel::
_MAIN_2.into{MAIN_2;MAIN_5;...}
Parameters
----------
source : str
String with the name of the source channel
channel_list : list
List of channels that will receive a fork of the secondary
channel
"""
logger.debug("Setting secondary channel for source '{}': {}".format(
source, channel_list))
source = "{}_{}".format(source, self.pid)
# Removes possible duplicate channels, when the fork is terminal
channel_list = sorted(list(set(channel_list)))
# When there is only one channel to fork into, use the 'set' operator
# instead of 'into'
op = "set" if len(channel_list) == 1 else "into"
self.forks.append("\n{}.{}{{ {} }}\n".format(
source, op, ";".join(channel_list)))
logger.debug("Setting forks attribute to: {}".format(self.forks))
self._context = {**self._context, **{"forks": "\n".join(self.forks)}}
|
def set_secondary_channel(self, source, channel_list):
""" General purpose method for setting a secondary channel
This method allows a given source channel to be forked into one or
more channels and sets those forks in the :py:attr:`Process.forks`
attribute. Both the source and the channels in the ``channel_list``
argument must be the final channel strings, which means that this
method should be called only after setting the main channels.
If the source is not a main channel, this will simply create a fork
or set for every channel in the ``channel_list`` argument list::
SOURCE_CHANNEL_1.into{SINK_1;SINK_2}
If the source is a main channel, this will apply some changes to
the output channel of the process, to avoid overlapping main output
channels. For instance, forking the main output channel for process
2 would create a ``MAIN_2.into{...}``. The issue here is that the
``MAIN_2`` channel is expected as the input of the next process, but
now is being used to create the fork. To solve this issue, the output
channel is modified into ``_MAIN_2``, and the fork is set to
the channels provided channels plus the ``MAIN_2`` channel::
_MAIN_2.into{MAIN_2;MAIN_5;...}
Parameters
----------
source : str
String with the name of the source channel
channel_list : list
List of channels that will receive a fork of the secondary
channel
"""
logger.debug("Setting secondary channel for source '{}': {}".format(
source, channel_list))
source = "{}_{}".format(source, self.pid)
# Removes possible duplicate channels, when the fork is terminal
channel_list = sorted(list(set(channel_list)))
# When there is only one channel to fork into, use the 'set' operator
# instead of 'into'
op = "set" if len(channel_list) == 1 else "into"
self.forks.append("\n{}.{}{{ {} }}\n".format(
source, op, ";".join(channel_list)))
logger.debug("Setting forks attribute to: {}".format(self.forks))
self._context = {**self._context, **{"forks": "\n".join(self.forks)}}
|
[
"General",
"purpose",
"method",
"for",
"setting",
"a",
"secondary",
"channel"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L518-L567
|
[
"def",
"set_secondary_channel",
"(",
"self",
",",
"source",
",",
"channel_list",
")",
":",
"logger",
".",
"debug",
"(",
"\"Setting secondary channel for source '{}': {}\"",
".",
"format",
"(",
"source",
",",
"channel_list",
")",
")",
"source",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"source",
",",
"self",
".",
"pid",
")",
"# Removes possible duplicate channels, when the fork is terminal",
"channel_list",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"channel_list",
")",
")",
")",
"# When there is only one channel to fork into, use the 'set' operator",
"# instead of 'into'",
"op",
"=",
"\"set\"",
"if",
"len",
"(",
"channel_list",
")",
"==",
"1",
"else",
"\"into\"",
"self",
".",
"forks",
".",
"append",
"(",
"\"\\n{}.{}{{ {} }}\\n\"",
".",
"format",
"(",
"source",
",",
"op",
",",
"\";\"",
".",
"join",
"(",
"channel_list",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Setting forks attribute to: {}\"",
".",
"format",
"(",
"self",
".",
"forks",
")",
")",
"self",
".",
"_context",
"=",
"{",
"*",
"*",
"self",
".",
"_context",
",",
"*",
"*",
"{",
"\"forks\"",
":",
"\"\\n\"",
".",
"join",
"(",
"self",
".",
"forks",
")",
"}",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Process.update_attributes
|
Updates the directives attribute from a dictionary object.
This will only update the directives for processes that have been
defined in the subclass.
Parameters
----------
attr_dict : dict
Dictionary containing the attributes that will be used to update
the process attributes and/or directives.
|
flowcraft/generator/process.py
|
def update_attributes(self, attr_dict):
"""Updates the directives attribute from a dictionary object.
This will only update the directives for processes that have been
defined in the subclass.
Parameters
----------
attr_dict : dict
Dictionary containing the attributes that will be used to update
the process attributes and/or directives.
"""
# Update directives
# Allowed attributes to write
valid_directives = ["pid", "ignore_type", "ignore_pid", "extra_input",
"group", "input_type"]
for attribute, val in attr_dict.items():
# If the attribute has a valid directive key, update that
# directive
if attribute in valid_directives and hasattr(self, attribute):
setattr(self, attribute, val)
# The params attribute is special, in the sense that it provides
# information for the self.params attribute.
elif attribute == "params":
for name, value in val.items():
if name in self.params:
self.params[name]["default"] = value
else:
raise eh.ProcessError(
"The parameter name '{}' does not exist for "
"component '{}'".format(name, self.template))
else:
for p in self.directives:
self.directives[p][attribute] = val
|
def update_attributes(self, attr_dict):
"""Updates the directives attribute from a dictionary object.
This will only update the directives for processes that have been
defined in the subclass.
Parameters
----------
attr_dict : dict
Dictionary containing the attributes that will be used to update
the process attributes and/or directives.
"""
# Update directives
# Allowed attributes to write
valid_directives = ["pid", "ignore_type", "ignore_pid", "extra_input",
"group", "input_type"]
for attribute, val in attr_dict.items():
# If the attribute has a valid directive key, update that
# directive
if attribute in valid_directives and hasattr(self, attribute):
setattr(self, attribute, val)
# The params attribute is special, in the sense that it provides
# information for the self.params attribute.
elif attribute == "params":
for name, value in val.items():
if name in self.params:
self.params[name]["default"] = value
else:
raise eh.ProcessError(
"The parameter name '{}' does not exist for "
"component '{}'".format(name, self.template))
else:
for p in self.directives:
self.directives[p][attribute] = val
|
[
"Updates",
"the",
"directives",
"attribute",
"from",
"a",
"dictionary",
"object",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L569-L608
|
[
"def",
"update_attributes",
"(",
"self",
",",
"attr_dict",
")",
":",
"# Update directives",
"# Allowed attributes to write",
"valid_directives",
"=",
"[",
"\"pid\"",
",",
"\"ignore_type\"",
",",
"\"ignore_pid\"",
",",
"\"extra_input\"",
",",
"\"group\"",
",",
"\"input_type\"",
"]",
"for",
"attribute",
",",
"val",
"in",
"attr_dict",
".",
"items",
"(",
")",
":",
"# If the attribute has a valid directive key, update that",
"# directive",
"if",
"attribute",
"in",
"valid_directives",
"and",
"hasattr",
"(",
"self",
",",
"attribute",
")",
":",
"setattr",
"(",
"self",
",",
"attribute",
",",
"val",
")",
"# The params attribute is special, in the sense that it provides",
"# information for the self.params attribute.",
"elif",
"attribute",
"==",
"\"params\"",
":",
"for",
"name",
",",
"value",
"in",
"val",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"self",
".",
"params",
":",
"self",
".",
"params",
"[",
"name",
"]",
"[",
"\"default\"",
"]",
"=",
"value",
"else",
":",
"raise",
"eh",
".",
"ProcessError",
"(",
"\"The parameter name '{}' does not exist for \"",
"\"component '{}'\"",
".",
"format",
"(",
"name",
",",
"self",
".",
"template",
")",
")",
"else",
":",
"for",
"p",
"in",
"self",
".",
"directives",
":",
"self",
".",
"directives",
"[",
"p",
"]",
"[",
"attribute",
"]",
"=",
"val"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
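The dispatch in ``Process.update_attributes`` above can be pictured with a simplified stand-in that operates on plain dictionaries; the attribute names and values below are hypothetical, and the ``hasattr`` check and the ``ProcessError`` branch are omitted for brevity:

# Hypothetical, simplified mirror of the three branches in update_attributes.
valid_directives = ["pid", "ignore_type", "ignore_pid", "extra_input",
                    "group", "input_type"]

state = {"pid": None,
         "params": {"adapters": {"default": "auto"}},
         "directives": {"fastqc": {}, "trimmomatic": {}}}

attr_dict = {"pid": 3,                          # valid directive: set directly
             "params": {"adapters": "custom"},  # updates a parameter default
             "memory": "4GB"}                   # anything else: every directive

for attribute, val in attr_dict.items():
    if attribute in valid_directives:
        state[attribute] = val
    elif attribute == "params":
        for name, value in val.items():
            if name in state["params"]:
                state["params"][name]["default"] = value
    else:
        for p in state["directives"]:
            state["directives"][p][attribute] = val

# state["pid"] == 3
# state["params"]["adapters"]["default"] == "custom"
# state["directives"] == {"fastqc": {"memory": "4GB"},
#                         "trimmomatic": {"memory": "4GB"}}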
test
|
Compiler.set_compiler_channels
|
General method for setting the input channels for the status process
Given a list of status channels that are gathered during the pipeline
construction, this method will automatically set the input channel
for the status process. This makes use of the ``mix`` channel operator
of nextflow for multiple channels::
STATUS_1.mix(STATUS_2,STATUS_3,...)
This will set the ``status_channels`` key for the ``_context``
attribute of the process.
Parameters
----------
channel_list : list
List of strings with the final name of the status channels
operator : str
Specifies the operator used to join the compiler channels.
Available options are 'mix' and 'join'.
|
flowcraft/generator/process.py
|
def set_compiler_channels(self, channel_list, operator="mix"):
"""General method for setting the input channels for the status process
Given a list of status channels that are gathered during the pipeline
construction, this method will automatically set the input channel
for the status process. This makes use of the ``mix`` channel operator
of nextflow for multiple channels::
STATUS_1.mix(STATUS_2,STATUS_3,...)
This will set the ``status_channels`` key for the ``_context``
attribute of the process.
Parameters
----------
channel_list : list
List of strings with the final name of the status channels
operator : str
Specifies the operator used to join the compiler channels.
Available options are 'mix'and 'join'.
"""
if not channel_list:
raise eh.ProcessError("At least one status channel must be "
"provided to include this process in the "
"pipeline")
if len(channel_list) == 1:
logger.debug("Setting only one status channel: {}".format(
channel_list[0]))
self._context = {"compile_channels": channel_list[0]}
else:
first_status = channel_list[0]
if operator == "mix":
lst = ",".join(channel_list[1:])
s = "{}.mix({})".format(first_status, lst)
elif operator == "join":
s = first_status
for ch in channel_list[1:]:
s += ".join({})".format(ch)
s += ".map{ ot -> [ ot[0], ot[1..-1] ] }"
logger.debug("Status channel string: {}".format(s))
self._context = {"compile_channels": s}
|
def set_compiler_channels(self, channel_list, operator="mix"):
"""General method for setting the input channels for the status process
Given a list of status channels that are gathered during the pipeline
construction, this method will automatically set the input channel
for the status process. This makes use of the ``mix`` channel operator
of nextflow for multiple channels::
STATUS_1.mix(STATUS_2,STATUS_3,...)
This will set the ``status_channels`` key for the ``_context``
attribute of the process.
Parameters
----------
channel_list : list
List of strings with the final name of the status channels
operator : str
Specifies the operator used to join the compiler channels.
Available options are 'mix'and 'join'.
"""
if not channel_list:
raise eh.ProcessError("At least one status channel must be "
"provided to include this process in the "
"pipeline")
if len(channel_list) == 1:
logger.debug("Setting only one status channel: {}".format(
channel_list[0]))
self._context = {"compile_channels": channel_list[0]}
else:
first_status = channel_list[0]
if operator == "mix":
lst = ",".join(channel_list[1:])
s = "{}.mix({})".format(first_status, lst)
elif operator == "join":
s = first_status
for ch in channel_list[1:]:
s += ".join({})".format(ch)
s += ".map{ ot -> [ ot[0], ot[1..-1] ] }"
logger.debug("Status channel string: {}".format(s))
self._context = {"compile_channels": s}
|
[
"General",
"method",
"for",
"setting",
"the",
"input",
"channels",
"for",
"the",
"status",
"process"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L622-L673
|
[
"def",
"set_compiler_channels",
"(",
"self",
",",
"channel_list",
",",
"operator",
"=",
"\"mix\"",
")",
":",
"if",
"not",
"channel_list",
":",
"raise",
"eh",
".",
"ProcessError",
"(",
"\"At least one status channel must be \"",
"\"provided to include this process in the \"",
"\"pipeline\"",
")",
"if",
"len",
"(",
"channel_list",
")",
"==",
"1",
":",
"logger",
".",
"debug",
"(",
"\"Setting only one status channel: {}\"",
".",
"format",
"(",
"channel_list",
"[",
"0",
"]",
")",
")",
"self",
".",
"_context",
"=",
"{",
"\"compile_channels\"",
":",
"channel_list",
"[",
"0",
"]",
"}",
"else",
":",
"first_status",
"=",
"channel_list",
"[",
"0",
"]",
"if",
"operator",
"==",
"\"mix\"",
":",
"lst",
"=",
"\",\"",
".",
"join",
"(",
"channel_list",
"[",
"1",
":",
"]",
")",
"s",
"=",
"\"{}.mix({})\"",
".",
"format",
"(",
"first_status",
",",
"lst",
")",
"elif",
"operator",
"==",
"\"join\"",
":",
"s",
"=",
"first_status",
"for",
"ch",
"in",
"channel_list",
"[",
"1",
":",
"]",
":",
"s",
"+=",
"\".join({})\"",
".",
"format",
"(",
"ch",
")",
"s",
"+=",
"\".map{ ot -> [ ot[0], ot[1..-1] ] }\"",
"logger",
".",
"debug",
"(",
"\"Status channel string: {}\"",
".",
"format",
"(",
"s",
")",
")",
"self",
".",
"_context",
"=",
"{",
"\"compile_channels\"",
":",
"s",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
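The two operator modes of ``Compiler.set_compiler_channels`` above produce different Nextflow strings; a short sketch with three hypothetical status channels:

# String building for operator="mix" and operator="join"
# (channel names are illustrative).
channel_list = ["STATUS_A_1_1", "STATUS_B_1_2", "STATUS_C_1_3"]

mix_str = "{}.mix({})".format(channel_list[0], ",".join(channel_list[1:]))
# "STATUS_A_1_1.mix(STATUS_B_1_2,STATUS_C_1_3)"

join_str = channel_list[0]
for ch in channel_list[1:]:
    join_str += ".join({})".format(ch)
join_str += ".map{ ot -> [ ot[0], ot[1..-1] ] }"
# "STATUS_A_1_1.join(STATUS_B_1_2).join(STATUS_C_1_3).map{ ot -> [ ot[0], ot[1..-1] ] }"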
test
|
Init.set_raw_inputs
|
Sets the main input channels of the pipeline and their forks.
The ``raw_input`` dictionary input should contain one entry for each
input type (fastq, fasta, etc). The corresponding value should be a
dictionary/json with the following key:values:
- ``channel``: Name of the raw input channel (e.g.: channel1)
- ``channel_str``: The nextflow definition of the channel and
eventual checks (e.g.: channel1 = Channel.fromPath(param))
- ``raw_forks``: A list of channels into which the channel will be forked.
Each new type of input parameter is automatically added to the
:attr:`params` attribute, so that they are automatically collected
for the pipeline description and help.
Parameters
----------
raw_input : dict
Contains an entry for each input type with the channel name,
channel string and forks.
|
flowcraft/generator/process.py
|
def set_raw_inputs(self, raw_input):
"""Sets the main input channels of the pipeline and their forks.
The ``raw_input`` dictionary input should contain one entry for each
input type (fastq, fasta, etc). The corresponding value should be a
dictionary/json with the following key:values:
- ``channel``: Name of the raw input channel (e.g.: channel1)
- ``channel_str``: The nextflow definition of the channel and
eventual checks (e.g.: channel1 = Channel.fromPath(param))
- ``raw_forks``: A list of channels to which the channel name will
for to.
Each new type of input parameter is automatically added to the
:attr:`params` attribute, so that they are automatically collected
for the pipeline description and help.
Parameters
----------
raw_input : dict
Contains an entry for each input type with the channel name,
channel string and forks.
"""
logger.debug("Setting raw inputs using raw input dict: {}".format(
raw_input))
primary_inputs = []
for input_type, el in raw_input.items():
primary_inputs.append(el["channel_str"])
# Update the process' parameters with the raw input
raw_channel = self.RAW_MAPPING[input_type]
self.params[input_type] = {
"default": raw_channel["default_value"],
"description": raw_channel["description"]
}
op = "set" if len(el["raw_forks"]) == 1 else "into"
self.forks.append("\n{}.{}{{ {} }}\n".format(
el["channel"], op, ";".join(el["raw_forks"])
))
logger.debug("Setting raw inputs: {}".format(primary_inputs))
logger.debug("Setting forks attribute to: {}".format(self.forks))
self._context = {**self._context,
**{"forks": "\n".join(self.forks),
"main_inputs": "\n".join(primary_inputs)}}
|
def set_raw_inputs(self, raw_input):
"""Sets the main input channels of the pipeline and their forks.
The ``raw_input`` dictionary input should contain one entry for each
input type (fastq, fasta, etc). The corresponding value should be a
dictionary/json with the following key:values:
- ``channel``: Name of the raw input channel (e.g.: channel1)
- ``channel_str``: The nextflow definition of the channel and
eventual checks (e.g.: channel1 = Channel.fromPath(param))
- ``raw_forks``: A list of channels to which the channel name will
for to.
Each new type of input parameter is automatically added to the
:attr:`params` attribute, so that they are automatically collected
for the pipeline description and help.
Parameters
----------
raw_input : dict
Contains an entry for each input type with the channel name,
channel string and forks.
"""
logger.debug("Setting raw inputs using raw input dict: {}".format(
raw_input))
primary_inputs = []
for input_type, el in raw_input.items():
primary_inputs.append(el["channel_str"])
# Update the process' parameters with the raw input
raw_channel = self.RAW_MAPPING[input_type]
self.params[input_type] = {
"default": raw_channel["default_value"],
"description": raw_channel["description"]
}
op = "set" if len(el["raw_forks"]) == 1 else "into"
self.forks.append("\n{}.{}{{ {} }}\n".format(
el["channel"], op, ";".join(el["raw_forks"])
))
logger.debug("Setting raw inputs: {}".format(primary_inputs))
logger.debug("Setting forks attribute to: {}".format(self.forks))
self._context = {**self._context,
**{"forks": "\n".join(self.forks),
"main_inputs": "\n".join(primary_inputs)}}
|
[
"Sets",
"the",
"main",
"input",
"channels",
"of",
"the",
"pipeline",
"and",
"their",
"forks",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L687-L737
|
[
"def",
"set_raw_inputs",
"(",
"self",
",",
"raw_input",
")",
":",
"logger",
".",
"debug",
"(",
"\"Setting raw inputs using raw input dict: {}\"",
".",
"format",
"(",
"raw_input",
")",
")",
"primary_inputs",
"=",
"[",
"]",
"for",
"input_type",
",",
"el",
"in",
"raw_input",
".",
"items",
"(",
")",
":",
"primary_inputs",
".",
"append",
"(",
"el",
"[",
"\"channel_str\"",
"]",
")",
"# Update the process' parameters with the raw input",
"raw_channel",
"=",
"self",
".",
"RAW_MAPPING",
"[",
"input_type",
"]",
"self",
".",
"params",
"[",
"input_type",
"]",
"=",
"{",
"\"default\"",
":",
"raw_channel",
"[",
"\"default_value\"",
"]",
",",
"\"description\"",
":",
"raw_channel",
"[",
"\"description\"",
"]",
"}",
"op",
"=",
"\"set\"",
"if",
"len",
"(",
"el",
"[",
"\"raw_forks\"",
"]",
")",
"==",
"1",
"else",
"\"into\"",
"self",
".",
"forks",
".",
"append",
"(",
"\"\\n{}.{}{{ {} }}\\n\"",
".",
"format",
"(",
"el",
"[",
"\"channel\"",
"]",
",",
"op",
",",
"\";\"",
".",
"join",
"(",
"el",
"[",
"\"raw_forks\"",
"]",
")",
")",
")",
"logger",
".",
"debug",
"(",
"\"Setting raw inputs: {}\"",
".",
"format",
"(",
"primary_inputs",
")",
")",
"logger",
".",
"debug",
"(",
"\"Setting forks attribute to: {}\"",
".",
"format",
"(",
"self",
".",
"forks",
")",
")",
"self",
".",
"_context",
"=",
"{",
"*",
"*",
"self",
".",
"_context",
",",
"*",
"*",
"{",
"\"forks\"",
":",
"\"\\n\"",
".",
"join",
"(",
"self",
".",
"forks",
")",
",",
"\"main_inputs\"",
":",
"\"\\n\"",
".",
"join",
"(",
"primary_inputs",
")",
"}",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Init.set_secondary_inputs
|
Adds secondary inputs to the start of the pipeline.
These channels are inserted into the pipeline file as they are
provided in the values of the argument.
Parameters
----------
channel_dict : dict
Each entry should be <parameter>: <channel string>.
|
flowcraft/generator/process.py
|
def set_secondary_inputs(self, channel_dict):
""" Adds secondary inputs to the start of the pipeline.
This channels are inserted into the pipeline file as they are
provided in the values of the argument.
Parameters
----------
channel_dict : dict
Each entry should be <parameter>: <channel string>.
"""
logger.debug("Setting secondary inputs: {}".format(channel_dict))
secondary_input_str = "\n".join(list(channel_dict.values()))
self._context = {**self._context,
**{"secondary_inputs": secondary_input_str}}
|
def set_secondary_inputs(self, channel_dict):
""" Adds secondary inputs to the start of the pipeline.
This channels are inserted into the pipeline file as they are
provided in the values of the argument.
Parameters
----------
channel_dict : dict
Each entry should be <parameter>: <channel string>.
"""
logger.debug("Setting secondary inputs: {}".format(channel_dict))
secondary_input_str = "\n".join(list(channel_dict.values()))
self._context = {**self._context,
**{"secondary_inputs": secondary_input_str}}
|
[
"Adds",
"secondary",
"inputs",
"to",
"the",
"start",
"of",
"the",
"pipeline",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L739-L755
|
[
"def",
"set_secondary_inputs",
"(",
"self",
",",
"channel_dict",
")",
":",
"logger",
".",
"debug",
"(",
"\"Setting secondary inputs: {}\"",
".",
"format",
"(",
"channel_dict",
")",
")",
"secondary_input_str",
"=",
"\"\\n\"",
".",
"join",
"(",
"list",
"(",
"channel_dict",
".",
"values",
"(",
")",
")",
")",
"self",
".",
"_context",
"=",
"{",
"*",
"*",
"self",
".",
"_context",
",",
"*",
"*",
"{",
"\"secondary_inputs\"",
":",
"secondary_input_str",
"}",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Init.set_extra_inputs
|
Sets the initial definition of the extra input channels.
The ``channel_dict`` argument should contain the input type and
destination channel of each parameter (which is the key)::
channel_dict = {
"param1": {
"input_type": "fasta"
"channels": ["abricate_2_3", "chewbbaca_3_4"]
}
}
Parameters
----------
channel_dict : dict
Dictionary with the extra_input parameter as key, and a dictionary
as a value with the input_type and destination channels
|
flowcraft/generator/process.py
|
def set_extra_inputs(self, channel_dict):
"""Sets the initial definition of the extra input channels.
The ``channel_dict`` argument should contain the input type and
destination channel of each parameter (which is the key)::
channel_dict = {
"param1": {
"input_type": "fasta"
"channels": ["abricate_2_3", "chewbbaca_3_4"]
}
}
Parameters
----------
channel_dict : dict
Dictionary with the extra_input parameter as key, and a dictionary
as a value with the input_type and destination channels
"""
extra_inputs = []
for param, info in channel_dict.items():
# Update the process' parameters with the raw input
raw_channel = self.RAW_MAPPING[info["input_type"]]
self.params[param] = {
"default": raw_channel["default_value"],
"description": raw_channel["description"]
}
channel_name = "IN_{}_extraInput".format(param)
channel_str = self.RAW_MAPPING[info["input_type"]]["channel_str"]
extra_inputs.append("{} = {}".format(channel_name,
channel_str.format(param)))
op = "set" if len(info["channels"]) == 1 else "into"
extra_inputs.append("{}.{}{{ {} }}".format(
channel_name, op, ";".join(info["channels"])))
self._context = {
**self._context,
**{"extra_inputs": "\n".join(extra_inputs)}
}
|
def set_extra_inputs(self, channel_dict):
"""Sets the initial definition of the extra input channels.
The ``channel_dict`` argument should contain the input type and
destination channel of each parameter (which is the key)::
channel_dict = {
"param1": {
"input_type": "fasta"
"channels": ["abricate_2_3", "chewbbaca_3_4"]
}
}
Parameters
----------
channel_dict : dict
Dictionary with the extra_input parameter as key, and a dictionary
as a value with the input_type and destination channels
"""
extra_inputs = []
for param, info in channel_dict.items():
# Update the process' parameters with the raw input
raw_channel = self.RAW_MAPPING[info["input_type"]]
self.params[param] = {
"default": raw_channel["default_value"],
"description": raw_channel["description"]
}
channel_name = "IN_{}_extraInput".format(param)
channel_str = self.RAW_MAPPING[info["input_type"]]["channel_str"]
extra_inputs.append("{} = {}".format(channel_name,
channel_str.format(param)))
op = "set" if len(info["channels"]) == 1 else "into"
extra_inputs.append("{}.{}{{ {} }}".format(
channel_name, op, ";".join(info["channels"])))
self._context = {
**self._context,
**{"extra_inputs": "\n".join(extra_inputs)}
}
|
[
"Sets",
"the",
"initial",
"definition",
"of",
"the",
"extra",
"input",
"channels",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/process.py#L757-L800
|
[
"def",
"set_extra_inputs",
"(",
"self",
",",
"channel_dict",
")",
":",
"extra_inputs",
"=",
"[",
"]",
"for",
"param",
",",
"info",
"in",
"channel_dict",
".",
"items",
"(",
")",
":",
"# Update the process' parameters with the raw input",
"raw_channel",
"=",
"self",
".",
"RAW_MAPPING",
"[",
"info",
"[",
"\"input_type\"",
"]",
"]",
"self",
".",
"params",
"[",
"param",
"]",
"=",
"{",
"\"default\"",
":",
"raw_channel",
"[",
"\"default_value\"",
"]",
",",
"\"description\"",
":",
"raw_channel",
"[",
"\"description\"",
"]",
"}",
"channel_name",
"=",
"\"IN_{}_extraInput\"",
".",
"format",
"(",
"param",
")",
"channel_str",
"=",
"self",
".",
"RAW_MAPPING",
"[",
"info",
"[",
"\"input_type\"",
"]",
"]",
"[",
"\"channel_str\"",
"]",
"extra_inputs",
".",
"append",
"(",
"\"{} = {}\"",
".",
"format",
"(",
"channel_name",
",",
"channel_str",
".",
"format",
"(",
"param",
")",
")",
")",
"op",
"=",
"\"set\"",
"if",
"len",
"(",
"info",
"[",
"\"channels\"",
"]",
")",
"==",
"1",
"else",
"\"into\"",
"extra_inputs",
".",
"append",
"(",
"\"{}.{}{{ {} }}\"",
".",
"format",
"(",
"channel_name",
",",
"op",
",",
"\";\"",
".",
"join",
"(",
"info",
"[",
"\"channels\"",
"]",
")",
")",
")",
"self",
".",
"_context",
"=",
"{",
"*",
"*",
"self",
".",
"_context",
",",
"*",
"*",
"{",
"\"extra_inputs\"",
":",
"\"\\n\"",
".",
"join",
"(",
"extra_inputs",
")",
"}",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
main
|
Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to the fasta file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF
|
flowcraft/templates/process_viral_assembly.py
|
def main(sample_id, assembly_file, minsize):
"""Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly: str
Path to the fatsa file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF
"""
logger.info("Starting assembly file processing")
warnings = []
fails = ""
# Parse the spades assembly file and perform the first filtering.
logger.info("Starting assembly parsing")
assembly_obj = Assembly(assembly_file, 0, 0,
sample_id, minsize)
if 'spades' in assembly_file:
assembler = "SPAdes"
else:
assembler = "MEGAHIT"
with open(".warnings", "w") as warn_fh:
t_80 = int(minsize) * 0.8
t_150 = int(minsize) * 1.5
# Check if assembly size of the first assembly is lower than 80% of the
# estimated genome size - DENV ORF has min 10k nt. If True, redo the filtering without the
# k-mer coverage filter
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking assembly length: {}".format(assembly_len))
if assembly_obj.nORFs < 1:
warn_msg = "No complete ORFs found."
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len < t_80:
logger.warning("Assembly size ({}) smaller than the minimum "
"threshold of 80% of expected genome size. "
"Applying contig filters without the k-mer "
"coverage filter".format(assembly_len))
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking updated assembly length: "
"{}".format(assembly_len))
if assembly_len < t_80:
warn_msg = "Assembly size smaller than the minimum" \
" threshold of 80% of expected genome size: {}".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len > t_150:
warn_msg = "Assembly size ({}) larger than the maximum" \
" threshold of 150% of expected genome size.".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
# Write json report
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs ({})".format(assembler),
"value": len(assembly_obj.contigs),
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP ({})".format(assembler),
"value": assembly_len,
"table": "assembly",
"columnBar": True},
{"header": "ORFs",
"value": assembly_obj.nORFs,
"table": "assembly",
"columnBar":False}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "assembly",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "assembly",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
def main(sample_id, assembly_file, minsize):
"""Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly: str
Path to the fatsa file generated by the assembler.
minsize: str
Min contig size to be considered a complete ORF
"""
logger.info("Starting assembly file processing")
warnings = []
fails = ""
# Parse the spades assembly file and perform the first filtering.
logger.info("Starting assembly parsing")
assembly_obj = Assembly(assembly_file, 0, 0,
sample_id, minsize)
if 'spades' in assembly_file:
assembler = "SPAdes"
else:
assembler = "MEGAHIT"
with open(".warnings", "w") as warn_fh:
t_80 = int(minsize) * 0.8
t_150 = int(minsize) * 1.5
# Check if assembly size of the first assembly is lower than 80% of the
# estimated genome size - DENV ORF has min 10k nt. If True, redo the filtering without the
# k-mer coverage filter
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking assembly length: {}".format(assembly_len))
if assembly_obj.nORFs < 1:
warn_msg = "No complete ORFs found."
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len < t_80:
logger.warning("Assembly size ({}) smaller than the minimum "
"threshold of 80% of expected genome size. "
"Applying contig filters without the k-mer "
"coverage filter".format(assembly_len))
assembly_len = assembly_obj.get_assembly_length()
logger.debug("Checking updated assembly length: "
"{}".format(assembly_len))
if assembly_len < t_80:
warn_msg = "Assembly size smaller than the minimum" \
" threshold of 80% of expected genome size: {}".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
if assembly_len > t_150:
warn_msg = "Assembly size ({}) larger than the maximum" \
" threshold of 150% of expected genome size.".format(
assembly_len)
logger.warning(warn_msg)
warn_fh.write(warn_msg)
fails = warn_msg
# Write json report
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs ({})".format(assembler),
"value": len(assembly_obj.contigs),
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP ({})".format(assembler),
"value": assembly_len,
"table": "assembly",
"columnBar": True},
{"header": "ORFs",
"value": assembly_obj.nORFs,
"table": "assembly",
"columnBar":False}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "assembly",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "assembly",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
[
"Main",
"executor",
"of",
"the",
"process_mapping",
"template",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L451-L562
|
[
"def",
"main",
"(",
"sample_id",
",",
"assembly_file",
",",
"minsize",
")",
":",
"logger",
".",
"info",
"(",
"\"Starting assembly file processing\"",
")",
"warnings",
"=",
"[",
"]",
"fails",
"=",
"\"\"",
"# Parse the spades assembly file and perform the first filtering.",
"logger",
".",
"info",
"(",
"\"Starting assembly parsing\"",
")",
"assembly_obj",
"=",
"Assembly",
"(",
"assembly_file",
",",
"0",
",",
"0",
",",
"sample_id",
",",
"minsize",
")",
"if",
"'spades'",
"in",
"assembly_file",
":",
"assembler",
"=",
"\"SPAdes\"",
"else",
":",
"assembler",
"=",
"\"MEGAHIT\"",
"with",
"open",
"(",
"\".warnings\"",
",",
"\"w\"",
")",
"as",
"warn_fh",
":",
"t_80",
"=",
"int",
"(",
"minsize",
")",
"*",
"0.8",
"t_150",
"=",
"int",
"(",
"minsize",
")",
"*",
"1.5",
"# Check if assembly size of the first assembly is lower than 80% of the",
"# estimated genome size - DENV ORF has min 10k nt. If True, redo the filtering without the",
"# k-mer coverage filter",
"assembly_len",
"=",
"assembly_obj",
".",
"get_assembly_length",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Checking assembly length: {}\"",
".",
"format",
"(",
"assembly_len",
")",
")",
"if",
"assembly_obj",
".",
"nORFs",
"<",
"1",
":",
"warn_msg",
"=",
"\"No complete ORFs found.\"",
"warn_fh",
".",
"write",
"(",
"warn_msg",
")",
"fails",
"=",
"warn_msg",
"if",
"assembly_len",
"<",
"t_80",
":",
"logger",
".",
"warning",
"(",
"\"Assembly size ({}) smaller than the minimum \"",
"\"threshold of 80% of expected genome size. \"",
"\"Applying contig filters without the k-mer \"",
"\"coverage filter\"",
".",
"format",
"(",
"assembly_len",
")",
")",
"assembly_len",
"=",
"assembly_obj",
".",
"get_assembly_length",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Checking updated assembly length: \"",
"\"{}\"",
".",
"format",
"(",
"assembly_len",
")",
")",
"if",
"assembly_len",
"<",
"t_80",
":",
"warn_msg",
"=",
"\"Assembly size smaller than the minimum\"",
"\" threshold of 80% of expected genome size: {}\"",
".",
"format",
"(",
"assembly_len",
")",
"logger",
".",
"warning",
"(",
"warn_msg",
")",
"warn_fh",
".",
"write",
"(",
"warn_msg",
")",
"fails",
"=",
"warn_msg",
"if",
"assembly_len",
">",
"t_150",
":",
"warn_msg",
"=",
"\"Assembly size ({}) larger than the maximum\"",
"\" threshold of 150% of expected genome size.\"",
".",
"format",
"(",
"assembly_len",
")",
"logger",
".",
"warning",
"(",
"warn_msg",
")",
"warn_fh",
".",
"write",
"(",
"warn_msg",
")",
"fails",
"=",
"warn_msg",
"# Write json report",
"with",
"open",
"(",
"\".report.json\"",
",",
"\"w\"",
")",
"as",
"json_report",
":",
"json_dic",
"=",
"{",
"\"tableRow\"",
":",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"data\"",
":",
"[",
"{",
"\"header\"",
":",
"\"Contigs ({})\"",
".",
"format",
"(",
"assembler",
")",
",",
"\"value\"",
":",
"len",
"(",
"assembly_obj",
".",
"contigs",
")",
",",
"\"table\"",
":",
"\"assembly\"",
",",
"\"columnBar\"",
":",
"True",
"}",
",",
"{",
"\"header\"",
":",
"\"Assembled BP ({})\"",
".",
"format",
"(",
"assembler",
")",
",",
"\"value\"",
":",
"assembly_len",
",",
"\"table\"",
":",
"\"assembly\"",
",",
"\"columnBar\"",
":",
"True",
"}",
",",
"{",
"\"header\"",
":",
"\"ORFs\"",
",",
"\"value\"",
":",
"assembly_obj",
".",
"nORFs",
",",
"\"table\"",
":",
"\"assembly\"",
",",
"\"columnBar\"",
":",
"False",
"}",
"]",
"}",
"]",
",",
"}",
"if",
"warnings",
":",
"json_dic",
"[",
"\"warnings\"",
"]",
"=",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"table\"",
":",
"\"assembly\"",
",",
"\"value\"",
":",
"warnings",
"}",
"]",
"if",
"fails",
":",
"json_dic",
"[",
"\"fail\"",
"]",
"=",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"table\"",
":",
"\"assembly\"",
",",
"\"value\"",
":",
"[",
"fails",
"]",
"}",
"]",
"json_report",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_dic",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")",
"with",
"open",
"(",
"\".status\"",
",",
"\"w\"",
")",
"as",
"status_fh",
":",
"status_fh",
".",
"write",
"(",
"\"pass\"",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
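The size checks in the ``main`` record above compare the assembly length against 80% and 150% of the expected size; a small worked example with an illustrative ``minsize`` of 10000 nt (the in-code comment mentions a roughly 10k nt DENV ORF):

minsize = "10000"
t_80 = int(minsize) * 0.8    # 8000.0  -> assemblies below this are flagged
t_150 = int(minsize) * 1.5   # 15000.0 -> assemblies above this are flagged
for assembly_len in (6500, 10700, 16000):
    print(assembly_len, assembly_len < t_80, assembly_len > t_150)
# 6500  True  False   (smaller than 80% of the expected size)
# 10700 False False   (within the expected range)
# 16000 False True    (larger than 150% of the expected size)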
test
|
Assembly._parse_coverage
|
Attempts to retrieve the coverage value from the header string.
It splits the header by "_" and then screens the list backwards in
search of the first float value. This will be interpreted as the
coverage value. If it cannot find a float value, it returns None.
This search methodology is based on the strings of assemblers
like spades and skesa that put the mean kmer coverage for each
contig in its corresponding fasta header.
Parameters
----------
header_str : str
String
Returns
-------
float or None
The coverage value for the contig. None if it cannot find the
value in the provided string.
|
flowcraft/templates/process_viral_assembly.py
|
def _parse_coverage(header_str):
"""Attempts to retrieve the coverage value from the header string.
It splits the header by "_" and then screens the list backwards in
search of the first float value. This will be interpreted as the
coverage value. If it cannot find a float value, it returns None.
This search methodology is based on the strings of assemblers
like spades and skesa that put the mean kmer coverage for each
contig in its corresponding fasta header.
Parameters
----------
header_str : str
String
Returns
-------
float or None
The coverage value for the contig. None if it cannot find the
value in the provide string.
"""
cov = None
for i in header_str.split("_")[::-1]:
try:
cov = float(i)
break
except ValueError:
continue
return cov
|
def _parse_coverage(header_str):
"""Attempts to retrieve the coverage value from the header string.
It splits the header by "_" and then screens the list backwards in
search of the first float value. This will be interpreted as the
coverage value. If it cannot find a float value, it returns None.
This search methodology is based on the strings of assemblers
like spades and skesa that put the mean kmer coverage for each
contig in its corresponding fasta header.
Parameters
----------
header_str : str
String
Returns
-------
float or None
The coverage value for the contig. None if it cannot find the
value in the provide string.
"""
cov = None
for i in header_str.split("_")[::-1]:
try:
cov = float(i)
break
except ValueError:
continue
return cov
|
[
"Attempts",
"to",
"retrieve",
"the",
"coverage",
"value",
"from",
"the",
"header",
"string",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L144-L174
|
[
"def",
"_parse_coverage",
"(",
"header_str",
")",
":",
"cov",
"=",
"None",
"for",
"i",
"in",
"header_str",
".",
"split",
"(",
"\"_\"",
")",
"[",
":",
":",
"-",
"1",
"]",
":",
"try",
":",
"cov",
"=",
"float",
"(",
"i",
")",
"break",
"except",
"ValueError",
":",
"continue",
"return",
"cov"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
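A usage sketch of the parsing logic in ``Assembly._parse_coverage`` above, copied into a standalone function and applied to an illustrative SPAdes-style header:

def parse_coverage(header_str):
    # Walk the underscore-separated fields from the end and return the first
    # one that parses as a float, or None if there is none.
    cov = None
    for i in header_str.split("_")[::-1]:
        try:
            cov = float(i)
            break
        except ValueError:
            continue
    return cov

print(parse_coverage("NODE_1_length_4923_cov_12.7"))  # 12.7
print(parse_coverage("contig_without_coverage"))      # None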
test
|
Assembly._parse_assembly
|
Parse an assembly fasta file.
This is a Fasta parsing method that populates the
:py:attr:`~Assembly.contigs` attribute with data for each contig in the
assembly.
The insertion of data on the self.contigs is done by the
:py:meth:`Assembly._populate_contigs` method, which also calculates
GC content and proportions.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
|
flowcraft/templates/process_viral_assembly.py
|
def _parse_assembly(self, assembly_file):
"""Parse an assembly fasta file.
This is a Fasta parsing method that populates the
:py:attr:`~Assembly.contigs` attribute with data for each contig in the
assembly.
The insertion of data on the self.contigs is done by the
:py:meth:`Assembly._populate_contigs` method, which also calculates
GC content and proportions.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
"""
# Temporary storage of sequence data
seq_temp = []
# Id counter for contig that will serve as key in self.contigs
contig_id = 0
# Initialize kmer coverage and header
cov, header = None, None
with open(assembly_file) as fh:
logger.debug("Starting iteration of assembly file: {}".format(
assembly_file))
for line in fh:
# Skip empty lines
if not line.strip():
continue
else:
# Remove whitespace surrounding line for further processing
line = line.strip()
if line.startswith(">"):
# If a sequence has already been populated, save the
# previous contig information
if seq_temp:
# Use join() to convert string list into the full
# contig string. This is generally much more efficient
# than successively concatenating strings.
seq = "".join(seq_temp)
logger.debug("Populating contig with contig_id '{}', "
"header '{}' and cov '{}'".format(
contig_id, header, cov))
self._populate_contigs(contig_id, header, cov, seq)
# Reset temporary sequence storage
seq_temp = []
contig_id += 1
header = line[1:]
cov = self._parse_coverage(line)
else:
seq_temp.append(line)
# Populate last contig entry
logger.debug("Populating contig with contig_id '{}', "
"header '{}' and cov '{}'".format(
contig_id, header, cov))
seq = "".join(seq_temp)
self._populate_contigs(contig_id, header, cov, seq)
|
def _parse_assembly(self, assembly_file):
"""Parse an assembly fasta file.
This is a Fasta parsing method that populates the
:py:attr:`~Assembly.contigs` attribute with data for each contig in the
assembly.
The insertion of data on the self.contigs is done by the
:py:meth:`Assembly._populate_contigs` method, which also calculates
GC content and proportions.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
"""
# Temporary storage of sequence data
seq_temp = []
# Id counter for contig that will serve as key in self.contigs
contig_id = 0
# Initialize kmer coverage and header
cov, header = None, None
with open(assembly_file) as fh:
logger.debug("Starting iteration of assembly file: {}".format(
assembly_file))
for line in fh:
# Skip empty lines
if not line.strip():
continue
else:
# Remove whitespace surrounding line for further processing
line = line.strip()
if line.startswith(">"):
# If a sequence has already been populated, save the
# previous contig information
if seq_temp:
# Use join() to convert string list into the full
# contig string. This is generally much more efficient
# than successively concatenating strings.
seq = "".join(seq_temp)
logger.debug("Populating contig with contig_id '{}', "
"header '{}' and cov '{}'".format(
contig_id, header, cov))
self._populate_contigs(contig_id, header, cov, seq)
# Reset temporary sequence storage
seq_temp = []
contig_id += 1
header = line[1:]
cov = self._parse_coverage(line)
else:
seq_temp.append(line)
# Populate last contig entry
logger.debug("Populating contig with contig_id '{}', "
"header '{}' and cov '{}'".format(
contig_id, header, cov))
seq = "".join(seq_temp)
self._populate_contigs(contig_id, header, cov, seq)
|
[
"Parse",
"an",
"assembly",
"fasta",
"file",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L176-L242
|
[
"def",
"_parse_assembly",
"(",
"self",
",",
"assembly_file",
")",
":",
"# Temporary storage of sequence data",
"seq_temp",
"=",
"[",
"]",
"# Id counter for contig that will serve as key in self.contigs",
"contig_id",
"=",
"0",
"# Initialize kmer coverage and header",
"cov",
",",
"header",
"=",
"None",
",",
"None",
"with",
"open",
"(",
"assembly_file",
")",
"as",
"fh",
":",
"logger",
".",
"debug",
"(",
"\"Starting iteration of assembly file: {}\"",
".",
"format",
"(",
"assembly_file",
")",
")",
"for",
"line",
"in",
"fh",
":",
"# Skip empty lines",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"else",
":",
"# Remove whitespace surrounding line for further processing",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"\">\"",
")",
":",
"# If a sequence has already been populated, save the",
"# previous contig information",
"if",
"seq_temp",
":",
"# Use join() to convert string list into the full",
"# contig string. This is generally much more efficient",
"# than successively concatenating strings.",
"seq",
"=",
"\"\"",
".",
"join",
"(",
"seq_temp",
")",
"logger",
".",
"debug",
"(",
"\"Populating contig with contig_id '{}', \"",
"\"header '{}' and cov '{}'\"",
".",
"format",
"(",
"contig_id",
",",
"header",
",",
"cov",
")",
")",
"self",
".",
"_populate_contigs",
"(",
"contig_id",
",",
"header",
",",
"cov",
",",
"seq",
")",
"# Reset temporary sequence storage",
"seq_temp",
"=",
"[",
"]",
"contig_id",
"+=",
"1",
"header",
"=",
"line",
"[",
"1",
":",
"]",
"cov",
"=",
"self",
".",
"_parse_coverage",
"(",
"line",
")",
"else",
":",
"seq_temp",
".",
"append",
"(",
"line",
")",
"# Populate last contig entry",
"logger",
".",
"debug",
"(",
"\"Populating contig with contig_id '{}', \"",
"\"header '{}' and cov '{}'\"",
".",
"format",
"(",
"contig_id",
",",
"header",
",",
"cov",
")",
")",
"seq",
"=",
"\"\"",
".",
"join",
"(",
"seq_temp",
")",
"self",
".",
"_populate_contigs",
"(",
"contig_id",
",",
"header",
",",
"cov",
",",
"seq",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
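The FASTA walk in ``Assembly._parse_assembly`` above follows a common accumulate-and-flush pattern; a minimal standalone sketch (contig data is printed instead of being handed to ``_populate_contigs``, and the example headers and sequences are made up):

import io

fasta = io.StringIO(">NODE_1_cov_12.7\nACGTACGT\nACGT\n>NODE_2_cov_3.0\nGGGG\n")

seq_temp, contig_id, header = [], 0, None
for line in fasta:
    line = line.strip()
    if not line:                 # skip empty lines
        continue
    if line.startswith(">"):
        if seq_temp:             # flush the previous contig
            print(contig_id, header, "".join(seq_temp))
            seq_temp = []
            contig_id += 1
        header = line[1:]
    else:
        seq_temp.append(line)
print(contig_id, header, "".join(seq_temp))   # flush the last contig
# 0 NODE_1_cov_12.7 ACGTACGTACGT
# 1 NODE_2_cov_3.0 GGGG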
test
|
Assembly._populate_contigs
|
Inserts data from a single contig into\
:py:attr:`~Assembly.contigs`.
By providing a contig id, the original header, the coverage that
is parsed from the header and the sequence, this method will
populate the :py:attr:`~Assembly.contigs` attribute.
Parameters
----------
contig_id : int
Arbitrary unique contig identifier.
header : str
Original header of the current contig.
cov : float
The contig coverage, parsed from the fasta header
sequence : str
The complete sequence of the contig.
|
flowcraft/templates/process_viral_assembly.py
|
def _populate_contigs(self, contig_id, header, cov, sequence):
""" Inserts data from a single contig into\
:py:attr:`~Assembly.contigs`.
By providing a contig id, the original header, the coverage that
is parsed from the header and the sequence, this method will
populate the :py:attr:`~Assembly.contigs` attribute.
Parameters
----------
contig_id : int
Arbitrary unique contig identifier.
header : str
Original header of the current contig.
cov : float
The contig coverage, parsed from the fasta header
sequence : str
The complete sequence of the contig.
"""
# Get AT/GC/N counts and proportions.
# Note that self._get_gc_content returns a dictionary with the
# information on the GC/AT/N counts and proportions. This makes it
# much easier to add to the contigs attribute using the ** notation.
gc_kwargs = self._get_gc_content(sequence, len(sequence))
logger.debug("Populate GC content with: {}".format(gc_kwargs))
self.contigs[contig_id] = {
"header": header,
"sequence": sequence,
"length": len(sequence),
"kmer_cov": cov,
**gc_kwargs
}
|
def _populate_contigs(self, contig_id, header, cov, sequence):
""" Inserts data from a single contig into\
:py:attr:`~Assembly.contigs`.
By providing a contig id, the original header, the coverage that
is parsed from the header and the sequence, this method will
populate the :py:attr:`~Assembly.contigs` attribute.
Parameters
----------
contig_id : int
Arbitrary unique contig identifier.
header : str
Original header of the current contig.
cov : float
The contig coverage, parsed from the fasta header
sequence : str
The complete sequence of the contig.
"""
# Get AT/GC/N counts and proportions.
# Note that self._get_gc_content returns a dictionary with the
# information on the GC/AT/N counts and proportions. This makes it
# much easier to add to the contigs attribute using the ** notation.
gc_kwargs = self._get_gc_content(sequence, len(sequence))
logger.debug("Populate GC content with: {}".format(gc_kwargs))
self.contigs[contig_id] = {
"header": header,
"sequence": sequence,
"length": len(sequence),
"kmer_cov": cov,
**gc_kwargs
}
|
[
"Inserts",
"data",
"from",
"a",
"single",
"contig",
"into",
"\\",
":",
"py",
":",
"attr",
":",
"~Assembly",
".",
"contigs",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L244-L278
|
[
"def",
"_populate_contigs",
"(",
"self",
",",
"contig_id",
",",
"header",
",",
"cov",
",",
"sequence",
")",
":",
"# Get AT/GC/N counts and proportions.",
"# Note that self._get_gc_content returns a dictionary with the",
"# information on the GC/AT/N counts and proportions. This makes it",
"# much easier to add to the contigs attribute using the ** notation.",
"gc_kwargs",
"=",
"self",
".",
"_get_gc_content",
"(",
"sequence",
",",
"len",
"(",
"sequence",
")",
")",
"logger",
".",
"debug",
"(",
"\"Populate GC content with: {}\"",
".",
"format",
"(",
"gc_kwargs",
")",
")",
"self",
".",
"contigs",
"[",
"contig_id",
"]",
"=",
"{",
"\"header\"",
":",
"header",
",",
"\"sequence\"",
":",
"sequence",
",",
"\"length\"",
":",
"len",
"(",
"sequence",
")",
",",
"\"kmer_cov\"",
":",
"cov",
",",
"*",
"*",
"gc_kwargs",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
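
A small standalone illustration of the **gc_kwargs merge performed by _populate_contigs above; the header and the numbers are invented for the example, and the gc_kwargs dictionary mirrors the shape returned by _get_gc_content in the next record.

gc_kwargs = {"at": 4, "gc": 4, "n": 2, "at_prop": 0.4, "gc_prop": 0.4, "n_prop": 0.2}
contigs = {}
# The ** unpacking folds the GC statistics into the per-contig dictionary
contigs[0] = {
    "header": "contig_1_cov_3.2",   # made-up header
    "sequence": "ATGCGCNNAT",
    "length": 10,
    "kmer_cov": 3.2,
    **gc_kwargs
}
print(sorted(contigs[0]))
# ['at', 'at_prop', 'gc', 'gc_prop', 'header', 'kmer_cov', 'length', 'n', 'n_prop', 'sequence']
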
test
|
Assembly._get_gc_content
|
Get GC content and proportions.
Parameters
----------
sequence : str
The complete sequence of the contig.
length : int
The length of the sequence contig.
Returns
-------
x : dict
Dictionary with the at/gc/n counts and proportions
|
flowcraft/templates/process_viral_assembly.py
|
def _get_gc_content(sequence, length):
"""Get GC content and proportions.
Parameters
----------
sequence : str
The complete sequence of the contig.
length : int
The length of the sequence contig.
Returns
-------
x : dict
Dictionary with the at/gc/n counts and proportions
"""
# Get AT/GC/N counts
at = sum(map(sequence.count, ["A", "T"]))
gc = sum(map(sequence.count, ["G", "C"]))
n = length - (at + gc)
# Get AT/GC/N proportions
at_prop = at / length
gc_prop = gc / length
n_prop = n / length
return {"at": at, "gc": gc, "n": n,
"at_prop": at_prop, "gc_prop": gc_prop, "n_prop": n_prop}
|
def _get_gc_content(sequence, length):
"""Get GC content and proportions.
Parameters
----------
sequence : str
The complete sequence of the contig.
length : int
The length of the sequence contig.
Returns
-------
x : dict
Dictionary with the at/gc/n counts and proportions
"""
# Get AT/GC/N counts
at = sum(map(sequence.count, ["A", "T"]))
gc = sum(map(sequence.count, ["G", "C"]))
n = length - (at + gc)
# Get AT/GC/N proportions
at_prop = at / length
gc_prop = gc / length
n_prop = n / length
return {"at": at, "gc": gc, "n": n,
"at_prop": at_prop, "gc_prop": gc_prop, "n_prop": n_prop}
|
[
"Get",
"GC",
"content",
"and",
"proportions",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L281-L309
|
[
"def",
"_get_gc_content",
"(",
"sequence",
",",
"length",
")",
":",
"# Get AT/GC/N counts",
"at",
"=",
"sum",
"(",
"map",
"(",
"sequence",
".",
"count",
",",
"[",
"\"A\"",
",",
"\"T\"",
"]",
")",
")",
"gc",
"=",
"sum",
"(",
"map",
"(",
"sequence",
".",
"count",
",",
"[",
"\"G\"",
",",
"\"C\"",
"]",
")",
")",
"n",
"=",
"length",
"-",
"(",
"at",
"+",
"gc",
")",
"# Get AT/GC/N proportions",
"at_prop",
"=",
"at",
"/",
"length",
"gc_prop",
"=",
"gc",
"/",
"length",
"n_prop",
"=",
"n",
"/",
"length",
"return",
"{",
"\"at\"",
":",
"at",
",",
"\"gc\"",
":",
"gc",
",",
"\"n\"",
":",
"n",
",",
"\"at_prop\"",
":",
"at_prop",
",",
"\"gc_prop\"",
":",
"gc_prop",
",",
"\"n_prop\"",
":",
"n_prop",
"}"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
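
The counting logic of _get_gc_content can be verified by hand on a short invented sequence. Note that str.count is case sensitive, so lowercase bases would end up in the N bucket with this approach.

seq = "ATGCGCNNAT"
length = len(seq)

at = sum(map(seq.count, ["A", "T"]))   # 4
gc = sum(map(seq.count, ["G", "C"]))   # 4
n = length - (at + gc)                 # 2 (everything that is neither A/T nor G/C)

print(at / length, gc / length, n / length)  # 0.4 0.4 0.2
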
test
|
Assembly.filter_contigs
|
Filters the contigs of the assembly according to user provided\
comparisons.
The comparisons must be a list of three elements with the
:py:attr:`~Assembly.contigs` key, operator and test value. For
example, to filter contigs with a minimum length of 250, a comparison
would be::
self.filter_contigs(["length", ">=", 250])
The filtered contig ids will be stored in the
:py:attr:`~Assembly.filtered_ids` list.
The result of the test for all contigs will be stored in the
:py:attr:`~Assembly.report` dictionary.
Parameters
----------
comparisons : list
List with contig key, operator and value to test.
|
flowcraft/templates/process_viral_assembly.py
|
def filter_contigs(self, *comparisons):
"""Filters the contigs of the assembly according to user provided\
comparisons.
The comparisons must be a list of three elements with the
:py:attr:`~Assembly.contigs` key, operator and test value. For
example, to filter contigs with a minimum length of 250, a comparison
would be::
self.filter_contigs(["length", ">=", 250])
The filtered contig ids will be stored in the
:py:attr:`~Assembly.filtered_ids` list.
The result of the test for all contigs will be stored in the
:py:attr:`~Assembly.report` dictionary.
Parameters
----------
comparisons : list
List with contig key, operator and value to test.
"""
# Reset list of filtered ids
self.filtered_ids = []
self.report = {}
gc_filters = [
["gc_prop", ">=", self.min_gc],
["gc_prop", "<=", 1 - self.min_gc]
]
self.filters = list(comparisons) + gc_filters
logger.debug("Filtering contigs using filters: {}".format(
self.filters))
for contig_id, contig in self.contigs.items():
for key, op, value in list(comparisons) + gc_filters:
if not self._test_truth(contig[key], op, value):
self.filtered_ids.append(contig_id)
self.report[contig_id] = "{}/{}/{}".format(key,
contig[key],
value)
break
else:
self.report[contig_id] = "pass"
|
def filter_contigs(self, *comparisons):
"""Filters the contigs of the assembly according to user provided\
comparisons.
The comparisons must be a list of three elements with the
:py:attr:`~Assembly.contigs` key, operator and test value. For
example, to filter contigs with a minimum length of 250, a comparison
would be::
self.filter_contigs(["length", ">=", 250])
The filtered contig ids will be stored in the
:py:attr:`~Assembly.filtered_ids` list.
The result of the test for all contigs will be stored in the
:py:attr:`~Assembly.report` dictionary.
Parameters
----------
comparisons : list
List with contig key, operator and value to test.
"""
# Reset list of filtered ids
self.filtered_ids = []
self.report = {}
gc_filters = [
["gc_prop", ">=", self.min_gc],
["gc_prop", "<=", 1 - self.min_gc]
]
self.filters = list(comparisons) + gc_filters
logger.debug("Filtering contigs using filters: {}".format(
self.filters))
for contig_id, contig in self.contigs.items():
for key, op, value in list(comparisons) + gc_filters:
if not self._test_truth(contig[key], op, value):
self.filtered_ids.append(contig_id)
self.report[contig_id] = "{}/{}/{}".format(key,
contig[key],
value)
break
else:
self.report[contig_id] = "pass"
|
[
"Filters",
"the",
"contigs",
"of",
"the",
"assembly",
"according",
"to",
"user",
"provided",
"\\",
"comparisons",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L344-L391
|
[
"def",
"filter_contigs",
"(",
"self",
",",
"*",
"comparisons",
")",
":",
"# Reset list of filtered ids",
"self",
".",
"filtered_ids",
"=",
"[",
"]",
"self",
".",
"report",
"=",
"{",
"}",
"gc_filters",
"=",
"[",
"[",
"\"gc_prop\"",
",",
"\">=\"",
",",
"self",
".",
"min_gc",
"]",
",",
"[",
"\"gc_prop\"",
",",
"\"<=\"",
",",
"1",
"-",
"self",
".",
"min_gc",
"]",
"]",
"self",
".",
"filters",
"=",
"list",
"(",
"comparisons",
")",
"+",
"gc_filters",
"logger",
".",
"debug",
"(",
"\"Filtering contigs using filters: {}\"",
".",
"format",
"(",
"self",
".",
"filters",
")",
")",
"for",
"contig_id",
",",
"contig",
"in",
"self",
".",
"contigs",
".",
"items",
"(",
")",
":",
"for",
"key",
",",
"op",
",",
"value",
"in",
"list",
"(",
"comparisons",
")",
"+",
"gc_filters",
":",
"if",
"not",
"self",
".",
"_test_truth",
"(",
"contig",
"[",
"key",
"]",
",",
"op",
",",
"value",
")",
":",
"self",
".",
"filtered_ids",
".",
"append",
"(",
"contig_id",
")",
"self",
".",
"report",
"[",
"contig_id",
"]",
"=",
"\"{}/{}/{}\"",
".",
"format",
"(",
"key",
",",
"contig",
"[",
"key",
"]",
",",
"value",
")",
"break",
"else",
":",
"self",
".",
"report",
"[",
"contig_id",
"]",
"=",
"\"pass\""
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
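
A self-contained sketch of the key/operator/value filtering pattern applied by filter_contigs above. The contig values and the ops table are invented, and _test_truth (not included in this dump) is assumed to behave like the simple operator lookup below.

import operator

ops = {">=": operator.ge, "<=": operator.le, ">": operator.gt, "<": operator.lt}

contigs = {0: {"length": 300, "gc_prop": 0.45},
           1: {"length": 120, "gc_prop": 0.51}}
filters = [["length", ">=", 250], ["gc_prop", ">=", 0.05], ["gc_prop", "<=", 0.95]]

report, filtered_ids = {}, []
for contig_id, contig in contigs.items():
    for key, op, value in filters:
        if not ops[op](contig[key], value):
            # The first failed test marks the contig as filtered and is recorded
            filtered_ids.append(contig_id)
            report[contig_id] = "{}/{}/{}".format(key, contig[key], value)
            break
    else:
        # for/else: only reached when no test failed
        report[contig_id] = "pass"

print(filtered_ids)  # [1]
print(report)        # {0: 'pass', 1: 'length/120/250'}
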
test
|
Assembly.get_assembly_length
|
Returns the length of the assembly, without the filtered contigs.
Returns
-------
x : int
Total length of the assembly.
|
flowcraft/templates/process_viral_assembly.py
|
def get_assembly_length(self):
"""Returns the length of the assembly, without the filtered contigs.
Returns
-------
x : int
Total length of the assembly.
"""
return sum(
[vals["length"] for contig_id, vals in self.contigs.items()
if contig_id not in self.filtered_ids])
|
def get_assembly_length(self):
"""Returns the length of the assembly, without the filtered contigs.
Returns
-------
x : int
Total length of the assembly.
"""
return sum(
[vals["length"] for contig_id, vals in self.contigs.items()
if contig_id not in self.filtered_ids])
|
[
"Returns",
"the",
"length",
"of",
"the",
"assembly",
"without",
"the",
"filtered",
"contigs",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L393-L405
|
[
"def",
"get_assembly_length",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"[",
"vals",
"[",
"\"length\"",
"]",
"for",
"contig_id",
",",
"vals",
"in",
"self",
".",
"contigs",
".",
"items",
"(",
")",
"if",
"contig_id",
"not",
"in",
"self",
".",
"filtered_ids",
"]",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
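
The assembly length is simply the sum over contigs that were not filtered; a short standalone check with made-up values.

contigs = {0: {"length": 300}, 1: {"length": 120}, 2: {"length": 500}}
filtered_ids = [1]
print(sum(v["length"] for cid, v in contigs.items() if cid not in filtered_ids))  # 800
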
test
|
Assembly.write_assembly
|
Writes the assembly to a new file.
The ``filtered`` option controls whether the new assembly will be
filtered or not.
Parameters
----------
output_file : str
Name of the output assembly file.
filtered : bool
If ``True``, does not include filtered ids.
|
flowcraft/templates/process_viral_assembly.py
|
def write_assembly(self, output_file, filtered=True):
"""Writes the assembly to a new file.
The ``filtered`` option controls whether the new assembly will be
filtered or not.
Parameters
----------
output_file : str
Name of the output assembly file.
filtered : bool
If ``True``, does not include filtered ids.
"""
logger.debug("Writing the filtered assembly into: {}".format(
output_file))
with open(output_file, "w") as fh:
for contig_id, contig in self.contigs.items():
if contig_id not in self.filtered_ids and filtered:
fh.write(">{}_{}\\n{}\\n".format(self.sample,
contig["header"],
contig["sequence"]))
|
def write_assembly(self, output_file, filtered=True):
"""Writes the assembly to a new file.
The ``filtered`` option controls whether the new assembly will be
filtered or not.
Parameters
----------
output_file : str
Name of the output assembly file.
filtered : bool
If ``True``, does not include filtered ids.
"""
logger.debug("Writing the filtered assembly into: {}".format(
output_file))
with open(output_file, "w") as fh:
for contig_id, contig in self.contigs.items():
if contig_id not in self.filtered_ids and filtered:
fh.write(">{}_{}\\n{}\\n".format(self.sample,
contig["header"],
contig["sequence"]))
|
[
"Writes",
"the",
"assembly",
"to",
"a",
"new",
"file",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L407-L429
|
[
"def",
"write_assembly",
"(",
"self",
",",
"output_file",
",",
"filtered",
"=",
"True",
")",
":",
"logger",
".",
"debug",
"(",
"\"Writing the filtered assembly into: {}\"",
".",
"format",
"(",
"output_file",
")",
")",
"with",
"open",
"(",
"output_file",
",",
"\"w\"",
")",
"as",
"fh",
":",
"for",
"contig_id",
",",
"contig",
"in",
"self",
".",
"contigs",
".",
"items",
"(",
")",
":",
"if",
"contig_id",
"not",
"in",
"self",
".",
"filtered_ids",
"and",
"filtered",
":",
"fh",
".",
"write",
"(",
"\">{}_{}\\\\n{}\\\\n\"",
".",
"format",
"(",
"self",
".",
"sample",
",",
"contig",
"[",
"\"header\"",
"]",
",",
"contig",
"[",
"\"sequence\"",
"]",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Assembly.write_report
|
Writes a report with the test results for the current assembly
Parameters
----------
output_file : str
Name of the output assembly file.
|
flowcraft/templates/process_viral_assembly.py
|
def write_report(self, output_file):
"""Writes a report with the test results for the current assembly
Parameters
----------
output_file : str
Name of the output assembly file.
"""
logger.debug("Writing the assembly report into: {}".format(
output_file))
with open(output_file, "w") as fh:
for contig_id, vals in self.report.items():
fh.write("{}, {}\\n".format(contig_id, vals))
|
def write_report(self, output_file):
"""Writes a report with the test results for the current assembly
Parameters
----------
output_file : str
Name of the output assembly file.
"""
logger.debug("Writing the assembly report into: {}".format(
output_file))
with open(output_file, "w") as fh:
for contig_id, vals in self.report.items():
fh.write("{}, {}\\n".format(contig_id, vals))
|
[
"Writes",
"a",
"report",
"with",
"the",
"test",
"results",
"for",
"the",
"current",
"assembly"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_viral_assembly.py#L431-L446
|
[
"def",
"write_report",
"(",
"self",
",",
"output_file",
")",
":",
"logger",
".",
"debug",
"(",
"\"Writing the assembly report into: {}\"",
".",
"format",
"(",
"output_file",
")",
")",
"with",
"open",
"(",
"output_file",
",",
"\"w\"",
")",
"as",
"fh",
":",
"for",
"contig_id",
",",
"vals",
"in",
"self",
".",
"report",
".",
"items",
"(",
")",
":",
"fh",
".",
"write",
"(",
"\"{}, {}\\\\n\"",
".",
"format",
"(",
"contig_id",
",",
"vals",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
guess_process
|
Function to guess processes based on strings that are not available in
process_map. If the string has typos and is somewhat similar (50%) to any
process available in flowcraft it will print info to the terminal,
suggesting the most similar processes available in flowcraft.
Parameters
----------
query_str: str
The string of the process with potential typos
process_map:
The dictionary that contains all the available processes
|
flowcraft/generator/pipeline_parser.py
|
def guess_process(query_str, process_map):
"""
Function to guess processes based on strings that are not available in
process_map. If the string has typos and is somewhat similar (50%) to any
process available in flowcraft it will print info to the terminal,
suggesting the most similar processes available in flowcraft.
Parameters
----------
query_str: str
The string of the process with potential typos
process_map:
The dictionary that contains all the available processes
"""
save_list = []
# loops between the processes available in process_map
for process in process_map:
similarity = SequenceMatcher(None, process, query_str)
# checks if similarity between the process and the query string is
# higher than 50%
if similarity.ratio() > 0.5:
save_list.append(process)
# checks if any process is stored in save_list
if save_list:
logger.info(colored_print(
"Maybe you meant:\n\t{}".format("\n\t".join(save_list)), "white"))
logger.info(colored_print("Hint: check the available processes by using "
"the '-l' or '-L' flag.", "white"))
|
def guess_process(query_str, process_map):
"""
Function to guess processes based on strings that are not available in
process_map. If the string has typos and is somewhat similar (50%) to any
process available in flowcraft it will print info to the terminal,
suggesting the most similar processes available in flowcraft.
Parameters
----------
query_str: str
The string of the process with potential typos
process_map:
The dictionary that contains all the available processes
"""
save_list = []
# loops between the processes available in process_map
for process in process_map:
similarity = SequenceMatcher(None, process, query_str)
# checks if similarity between the process and the query string is
# higher than 50%
if similarity.ratio() > 0.5:
save_list.append(process)
# checks if any process is stored in save_list
if save_list:
logger.info(colored_print(
"Maybe you meant:\n\t{}".format("\n\t".join(save_list)), "white"))
logger.info(colored_print("Hint: check the available processes by using "
"the '-l' or '-L' flag.", "white"))
|
[
"Function",
"to",
"guess",
"processes",
"based",
"on",
"strings",
"that",
"are",
"not",
"available",
"in",
"process_map",
".",
"If",
"the",
"string",
"has",
"typos",
"and",
"is",
"somewhat",
"similar",
"(",
"50%",
")",
"to",
"any",
"process",
"available",
"in",
"flowcraft",
"it",
"will",
"print",
"info",
"to",
"the",
"terminal",
"suggesting",
"the",
"most",
"similar",
"processes",
"available",
"in",
"flowcraft",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L24-L55
|
[
"def",
"guess_process",
"(",
"query_str",
",",
"process_map",
")",
":",
"save_list",
"=",
"[",
"]",
"# loops between the processes available in process_map",
"for",
"process",
"in",
"process_map",
":",
"similarity",
"=",
"SequenceMatcher",
"(",
"None",
",",
"process",
",",
"query_str",
")",
"# checks if similarity between the process and the query string is",
"# higher than 50%",
"if",
"similarity",
".",
"ratio",
"(",
")",
">",
"0.5",
":",
"save_list",
".",
"append",
"(",
"process",
")",
"# checks if any process is stored in save_list",
"if",
"save_list",
":",
"logger",
".",
"info",
"(",
"colored_print",
"(",
"\"Maybe you meant:\\n\\t{}\"",
".",
"format",
"(",
"\"\\n\\t\"",
".",
"join",
"(",
"save_list",
")",
")",
",",
"\"white\"",
")",
")",
"logger",
".",
"info",
"(",
"colored_print",
"(",
"\"Hint: check the available processes by using \"",
"\"the '-l' or '-L' flag.\"",
",",
"\"white\"",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
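
A standalone look at the 0.5 similarity cut-off used by guess_process above. SequenceMatcher comes from the standard library difflib; the process names below are invented and need not exist in flowcraft's process_map.

from difflib import SequenceMatcher

process_map = {"fastqc": None, "trimmomatic": None, "spades": None}
query_str = "fastq"   # a typo/partial name typed by the user

suggestions = [p for p in process_map
               if SequenceMatcher(None, p, query_str).ratio() > 0.5]
print(suggestions)  # ['fastqc']
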
test
|
remove_inner_forks
|
Recursively removes nested brackets
This function is used to remove nested brackets from fork strings using
regular expressions
Parameters
----------
text: str
The string that contains brackets with inner forks to be removed
Returns
-------
text: str
the string with only the processes that are not in inner forks, thus
the processes that belong to a given fork.
|
flowcraft/generator/pipeline_parser.py
|
def remove_inner_forks(text):
"""Recursively removes nested brackets
This function is used to remove nested brackets from fork strings using
regular expressions
Parameters
----------
text: str
The string that contains brackets with inner forks to be removed
Returns
-------
text: str
the string with only the processes that are not in inner forks, thus
the processes that belong to a given fork.
"""
n = 1 # run at least once for one level of fork
# Then this loop assures that all brackets will get removed in a nested
# structure
while n:
# this removes non-nested brackets
text, n = re.subn(r'\([^()]*\)', '', text)
return text
|
def remove_inner_forks(text):
"""Recursively removes nested brackets
This function is used to remove nested brackets from fork strings using
regular expressions
Parameters
----------
text: str
The string that contains brackets with inner forks to be removed
Returns
-------
text: str
the string with only the processes that are not in inner forks, thus
the processes that belong to a given fork.
"""
n = 1 # run at least once for one level of fork
# Then this loop assures that all brackets will get removed in a nested
# structure
while n:
# this removes non-nested brackets
text, n = re.subn(r'\([^()]*\)', '', text)
return text
|
[
"Recursively",
"removes",
"nested",
"brackets"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L58-L84
|
[
"def",
"remove_inner_forks",
"(",
"text",
")",
":",
"n",
"=",
"1",
"# run at least once for one level of fork",
"# Then this loop assures that all brackets will get removed in a nested",
"# structure",
"while",
"n",
":",
"# this removes non-nested brackets",
"text",
",",
"n",
"=",
"re",
".",
"subn",
"(",
"r'\\([^()]*\\)'",
",",
"''",
",",
"text",
")",
"return",
"text"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
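
A standalone run of the same re.subn loop used by remove_inner_forks, on an invented fork body with two inner forks; each pass strips one level of non-nested brackets until nothing matches.

import re

text = "C (D | E) | F (G | H)"   # a fork body with two inner forks
n = 1
while n:
    text, n = re.subn(r'\([^()]*\)', '', text)
print(repr(text))  # 'C  | F '  (only the processes of this fork level remain)
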
test
|
brackets_insanity_check
|
This function performs a check for different number of '(' and ')'
characters, which indicates that some forks are poorly constructed.
Parameters
----------
p_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
|
flowcraft/generator/pipeline_parser.py
|
def brackets_insanity_check(p_string):
"""
This function performs a check for different number of '(' and ')'
characters, which indicates that some forks are poorly constructed.
Parameters
----------
p_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
"""
if p_string.count(FORK_TOKEN) != p_string.count(CLOSE_TOKEN):
# get the number of each type of bracket and state the one that has a
# higher value
dict_values = {
FORK_TOKEN: p_string.count(FORK_TOKEN),
CLOSE_TOKEN: p_string.count(CLOSE_TOKEN)
}
max_bracket = max(dict_values, key=dict_values.get)
raise SanityError(
"A different number of '(' and ')' was specified. There are "
"{} extra '{}'. The number of '(' and ')'should be equal.".format(
str(abs(
p_string.count(FORK_TOKEN) - p_string.count(CLOSE_TOKEN))),
max_bracket))
|
def brackets_insanity_check(p_string):
"""
This function performs a check for different number of '(' and ')'
characters, which indicates that some forks are poorly constructed.
Parameters
----------
p_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
"""
if p_string.count(FORK_TOKEN) != p_string.count(CLOSE_TOKEN):
# get the number of each type of bracket and state the one that has a
# higher value
dict_values = {
FORK_TOKEN: p_string.count(FORK_TOKEN),
CLOSE_TOKEN: p_string.count(CLOSE_TOKEN)
}
max_bracket = max(dict_values, key=dict_values.get)
raise SanityError(
"A different number of '(' and ')' was specified. There are "
"{} extra '{}'. The number of '(' and ')'should be equal.".format(
str(abs(
p_string.count(FORK_TOKEN) - p_string.count(CLOSE_TOKEN))),
max_bracket))
|
[
"This",
"function",
"performs",
"a",
"check",
"for",
"different",
"number",
"of",
"(",
"and",
")",
"characters",
"which",
"indicates",
"that",
"some",
"forks",
"are",
"poorly",
"constructed",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L120-L147
|
[
"def",
"brackets_insanity_check",
"(",
"p_string",
")",
":",
"if",
"p_string",
".",
"count",
"(",
"FORK_TOKEN",
")",
"!=",
"p_string",
".",
"count",
"(",
"CLOSE_TOKEN",
")",
":",
"# get the number of each type of bracket and state the one that has a",
"# higher value",
"dict_values",
"=",
"{",
"FORK_TOKEN",
":",
"p_string",
".",
"count",
"(",
"FORK_TOKEN",
")",
",",
"CLOSE_TOKEN",
":",
"p_string",
".",
"count",
"(",
"CLOSE_TOKEN",
")",
"}",
"max_bracket",
"=",
"max",
"(",
"dict_values",
",",
"key",
"=",
"dict_values",
".",
"get",
")",
"raise",
"SanityError",
"(",
"\"A different number of '(' and ')' was specified. There are \"",
"\"{} extra '{}'. The number of '(' and ')'should be equal.\"",
".",
"format",
"(",
"str",
"(",
"abs",
"(",
"p_string",
".",
"count",
"(",
"FORK_TOKEN",
")",
"-",
"p_string",
".",
"count",
"(",
"CLOSE_TOKEN",
")",
")",
")",
",",
"max_bracket",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
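
The check in brackets_insanity_check reduces to comparing bracket counts and naming the bracket in excess. A standalone illustration, assuming FORK_TOKEN is '(' and CLOSE_TOKEN is ')' (their definitions are not part of this dump).

p_string = "A(B|C"   # one '(' was never closed

counts = {"(": p_string.count("("), ")": p_string.count(")")}
print(counts)                       # {'(': 1, ')': 0}
print(max(counts, key=counts.get))  # '(' -> there is one extra '('
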
test
|
fork_procs_insanity_check
|
This function checks if the pipeline string contains a process between
the fork start token or end token and the separator (lane) token. Checks for
the absence of processes in one of the branches of the fork ['|)' and '(|']
and for the existence of a process before starting a fork (in an inner fork)
['|('].
Parameters
----------
p_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
|
flowcraft/generator/pipeline_parser.py
|
def fork_procs_insanity_check(p_string):
"""
This function checks if the pipeline string contains a process between
the fork start token or end token and the separator (lane) token. Checks for
the absence of processes in one of the branches of the fork ['|)' and '(|']
and for the existence of a process before starting a fork (in an inner fork)
['|('].
Parameters
----------
p_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
"""
# Check for the absence of processes in one of the branches of the fork
# ['|)' and '(|'] and for the existence of a process before starting a fork
# (in an inner fork) ['|('].
if FORK_TOKEN + LANE_TOKEN in p_string or \
LANE_TOKEN + CLOSE_TOKEN in p_string or \
LANE_TOKEN + FORK_TOKEN in p_string:
raise SanityError("There must be a process between the fork "
"start character '(' or end ')' and the separator of "
"processes character '|'")
|
def fork_procs_insanity_check(p_string):
"""
This function checks if the pipeline string contains a process between
the fork start token or end token and the separator (lane) token. Checks for
the absence of processes in one of the branches of the fork ['|)' and '(|']
and for the existence of a process before starting a fork (in an inner fork)
['|('].
Parameters
----------
p_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
"""
# Check for the absence of processes in one of the branches of the fork
# ['|)' and '(|'] and for the existence of a process before starting a fork
# (in an inner fork) ['|('].
if FORK_TOKEN + LANE_TOKEN in p_string or \
LANE_TOKEN + CLOSE_TOKEN in p_string or \
LANE_TOKEN + FORK_TOKEN in p_string:
raise SanityError("There must be a process between the fork "
"start character '(' or end ')' and the separator of "
"processes character '|'")
|
[
"This",
"function",
"checks",
"if",
"the",
"pipeline",
"string",
"contains",
"a",
"process",
"between",
"the",
"fork",
"start",
"token",
"or",
"end",
"token",
"and",
"the",
"separator",
"(",
"lane",
")",
"token",
".",
"Checks",
"for",
"the",
"absence",
"of",
"processes",
"in",
"one",
"of",
"the",
"branches",
"of",
"the",
"fork",
"[",
"|",
")",
"and",
"(",
"|",
"]",
"and",
"for",
"the",
"existence",
"of",
"a",
"process",
"before",
"starting",
"a",
"fork",
"(",
"in",
"an",
"inner",
"fork",
")",
"[",
"|",
"(",
"]",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L186-L210
|
[
"def",
"fork_procs_insanity_check",
"(",
"p_string",
")",
":",
"# Check for the absence of processes in one of the branches of the fork",
"# ['|)' and '(|'] and for the existence of a process before starting a fork",
"# (in an inner fork) ['|('].",
"if",
"FORK_TOKEN",
"+",
"LANE_TOKEN",
"in",
"p_string",
"or",
"LANE_TOKEN",
"+",
"CLOSE_TOKEN",
"in",
"p_string",
"or",
"LANE_TOKEN",
"+",
"FORK_TOKEN",
"in",
"p_string",
":",
"raise",
"SanityError",
"(",
"\"There must be a process between the fork \"",
"\"start character '(' or end ')' and the separator of \"",
"\"processes character '|'\"",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
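
Assuming FORK_TOKEN, CLOSE_TOKEN and LANE_TOKEN are '(', ')' and '|' (their definitions are not included in this dump), these are the three substrings that fork_procs_insanity_check rejects; spaces are assumed to have been stripped already by insanity_checks.

bad_strings = ["A(|B)", "A(B|)", "A(B|(C|D))"]
offending = ["(|", "|)", "|("]

for p in bad_strings:
    hit = next(tok for tok in offending if tok in p)
    print(p, "->", repr(hit))
# A(|B) -> '(|'
# A(B|) -> '|)'
# A(B|(C|D)) -> '|('
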
test
|
inner_fork_insanity_checks
|
This function performs two sanity checks in the pipeline string. The first
check, assures that each fork contains a lane token '|', while the second
check looks for duplicated processes within the same fork.
Parameters
----------
pipeline_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
|
flowcraft/generator/pipeline_parser.py
|
def inner_fork_insanity_checks(pipeline_string):
"""
This function performs two sanity checks in the pipeline string. The first
check, assures that each fork contains a lane token '|', while the second
check looks for duplicated processes within the same fork.
Parameters
----------
pipeline_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
"""
# first lets get all forks to a list.
list_of_forks = [] # stores forks
left_indexes = [] # stores indexes of left brackets
# iterate through the string looking for '(' and ')'.
for pos, char in enumerate(pipeline_string):
if char == FORK_TOKEN:
# saves pos to left_indexes list
left_indexes.append(pos)
elif char == CLOSE_TOKEN and len(left_indexes) > 0:
# saves fork to list_of_forks
list_of_forks.append(pipeline_string[left_indexes[-1] + 1: pos])
# removes last bracket from left_indexes list
left_indexes = left_indexes[:-1]
# sort list in descending order of number of forks
list_of_forks.sort(key=lambda x: x.count(FORK_TOKEN), reverse=True)
# Now, we can iterate through list_of_forks and check for errors in each
# fork
for fork in list_of_forks:
# remove inner forks for these checks since each fork has its own entry
# in list_of_forks. Note that each fork is now sorted in descending
# order which enables to remove sequentially the string for the fork
# potentially with more inner forks
for subfork in list_of_forks:
# checks if subfork is contained in fork and if they are different,
# avoiding to remove itself
if subfork in list_of_forks and subfork != fork:
# removes inner forks. Note that string has no spaces
fork_simplified = fork.replace("({})".format(subfork), "")
else:
fork_simplified = fork
# Checks if there is no fork separator character '|' within each fork
if not len(fork_simplified.split(LANE_TOKEN)) > 1:
raise SanityError("One of the forks doesn't have '|' "
"separator between the processes to fork. This is"
" the prime suspect: '({})'".format(fork))
|
def inner_fork_insanity_checks(pipeline_string):
"""
This function performs two sanity checks in the pipeline string. The first
check, assures that each fork contains a lane token '|', while the second
check looks for duplicated processes within the same fork.
Parameters
----------
pipeline_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
"""
# first lets get all forks to a list.
list_of_forks = [] # stores forks
left_indexes = [] # stores indexes of left brackets
# iterate through the string looking for '(' and ')'.
for pos, char in enumerate(pipeline_string):
if char == FORK_TOKEN:
# saves pos to left_indexes list
left_indexes.append(pos)
elif char == CLOSE_TOKEN and len(left_indexes) > 0:
# saves fork to list_of_forks
list_of_forks.append(pipeline_string[left_indexes[-1] + 1: pos])
# removes last bracket from left_indexes list
left_indexes = left_indexes[:-1]
# sort list in descending order of number of forks
list_of_forks.sort(key=lambda x: x.count(FORK_TOKEN), reverse=True)
# Now, we can iterate through list_of_forks and check for errors in each
# fork
for fork in list_of_forks:
# remove inner forks for these checks since each fork has its own entry
# in list_of_forks. Note that each fork is now sorted in descending
# order which enables to remove sequentially the string for the fork
# potentially with more inner forks
for subfork in list_of_forks:
# checks if subfork is contained in fork and if they are different,
# avoiding to remove itself
if subfork in list_of_forks and subfork != fork:
# removes inner forks. Note that string has no spaces
fork_simplified = fork.replace("({})".format(subfork), "")
else:
fork_simplified = fork
# Checks if there is no fork separator character '|' within each fork
if not len(fork_simplified.split(LANE_TOKEN)) > 1:
raise SanityError("One of the forks doesn't have '|' "
"separator between the processes to fork. This is"
" the prime suspect: '({})'".format(fork))
|
[
"This",
"function",
"performs",
"two",
"sanity",
"checks",
"in",
"the",
"pipeline",
"string",
".",
"The",
"first",
"check",
"assures",
"that",
"each",
"fork",
"contains",
"a",
"lane",
"token",
"|",
"while",
"the",
"second",
"check",
"looks",
"for",
"duplicated",
"processes",
"within",
"the",
"same",
"fork",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L250-L302
|
[
"def",
"inner_fork_insanity_checks",
"(",
"pipeline_string",
")",
":",
"# first lets get all forks to a list.",
"list_of_forks",
"=",
"[",
"]",
"# stores forks",
"left_indexes",
"=",
"[",
"]",
"# stores indexes of left brackets",
"# iterate through the string looking for '(' and ')'.",
"for",
"pos",
",",
"char",
"in",
"enumerate",
"(",
"pipeline_string",
")",
":",
"if",
"char",
"==",
"FORK_TOKEN",
":",
"# saves pos to left_indexes list",
"left_indexes",
".",
"append",
"(",
"pos",
")",
"elif",
"char",
"==",
"CLOSE_TOKEN",
"and",
"len",
"(",
"left_indexes",
")",
">",
"0",
":",
"# saves fork to list_of_forks",
"list_of_forks",
".",
"append",
"(",
"pipeline_string",
"[",
"left_indexes",
"[",
"-",
"1",
"]",
"+",
"1",
":",
"pos",
"]",
")",
"# removes last bracket from left_indexes list",
"left_indexes",
"=",
"left_indexes",
"[",
":",
"-",
"1",
"]",
"# sort list in descending order of number of forks",
"list_of_forks",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"count",
"(",
"FORK_TOKEN",
")",
",",
"reverse",
"=",
"True",
")",
"# Now, we can iterate through list_of_forks and check for errors in each",
"# fork",
"for",
"fork",
"in",
"list_of_forks",
":",
"# remove inner forks for these checks since each fork has its own entry",
"# in list_of_forks. Note that each fork is now sorted in descending",
"# order which enables to remove sequentially the string for the fork",
"# potentially with more inner forks",
"for",
"subfork",
"in",
"list_of_forks",
":",
"# checks if subfork is contained in fork and if they are different,",
"# avoiding to remove itself",
"if",
"subfork",
"in",
"list_of_forks",
"and",
"subfork",
"!=",
"fork",
":",
"# removes inner forks. Note that string has no spaces",
"fork_simplified",
"=",
"fork",
".",
"replace",
"(",
"\"({})\"",
".",
"format",
"(",
"subfork",
")",
",",
"\"\"",
")",
"else",
":",
"fork_simplified",
"=",
"fork",
"# Checks if there is no fork separator character '|' within each fork",
"if",
"not",
"len",
"(",
"fork_simplified",
".",
"split",
"(",
"LANE_TOKEN",
")",
")",
">",
"1",
":",
"raise",
"SanityError",
"(",
"\"One of the forks doesn't have '|' \"",
"\"separator between the processes to fork. This is\"",
"\" the prime suspect: '({})'\"",
".",
"format",
"(",
"fork",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
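
A standalone trace of the bracket scan that builds list_of_forks in inner_fork_insanity_checks, run on an invented pipeline string with '(' and ')' standing in for the fork tokens. The innermost fork is captured first; the later sort by '(' count then moves the outermost fork to the front.

pipeline = "A (B C (D | E) | F)"

list_of_forks, left_indexes = [], []
for pos, char in enumerate(pipeline):
    if char == "(":
        left_indexes.append(pos)
    elif char == ")" and left_indexes:
        # The last unmatched '(' closes here: slice out the fork body
        list_of_forks.append(pipeline[left_indexes[-1] + 1: pos])
        left_indexes = left_indexes[:-1]

print(list_of_forks)  # ['D | E', 'B C (D | E) | F']
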
test
|
insanity_checks
|
Wrapper that performs all sanity checks on the pipeline string
Parameters
----------
pipeline_str : str
String with the pipeline definition
|
flowcraft/generator/pipeline_parser.py
|
def insanity_checks(pipeline_str):
"""Wrapper that performs all sanity checks on the pipeline string
Parameters
----------
pipeline_str : str
String with the pipeline definition
"""
# Gets rid of all spaces in string
p_string = pipeline_str.replace(" ", "").strip()
# some of the check functions use the pipeline_str as the user provided but
# the majority uses the parsed p_string.
checks = [
[p_string, [
empty_tasks,
brackets_but_no_lanes,
brackets_insanity_check,
lane_char_insanity_check,
final_char_insanity_check,
fork_procs_insanity_check,
start_proc_insanity_check,
late_proc_insanity_check
]],
[pipeline_str, [
inner_fork_insanity_checks
]]
]
# executes sanity checks in pipeline string before parsing it.
for param, func_list in checks:
for func in func_list:
func(param)
|
def insanity_checks(pipeline_str):
"""Wrapper that performs all sanity checks on the pipeline string
Parameters
----------
pipeline_str : str
String with the pipeline definition
"""
# Gets rid of all spaces in string
p_string = pipeline_str.replace(" ", "").strip()
# some of the check functions use the pipeline_str as the user provided but
# the majority uses the parsed p_string.
checks = [
[p_string, [
empty_tasks,
brackets_but_no_lanes,
brackets_insanity_check,
lane_char_insanity_check,
final_char_insanity_check,
fork_procs_insanity_check,
start_proc_insanity_check,
late_proc_insanity_check
]],
[pipeline_str, [
inner_fork_insanity_checks
]]
]
# executes sanity checks in pipeline string before parsing it.
for param, func_list in checks:
for func in func_list:
func(param)
|
[
"Wrapper",
"that",
"performs",
"all",
"sanity",
"checks",
"on",
"the",
"pipeline",
"string"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L305-L338
|
[
"def",
"insanity_checks",
"(",
"pipeline_str",
")",
":",
"# Gets rid of all spaces in string",
"p_string",
"=",
"pipeline_str",
".",
"replace",
"(",
"\" \"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
"# some of the check functions use the pipeline_str as the user provided but",
"# the majority uses the parsed p_string.",
"checks",
"=",
"[",
"[",
"p_string",
",",
"[",
"empty_tasks",
",",
"brackets_but_no_lanes",
",",
"brackets_insanity_check",
",",
"lane_char_insanity_check",
",",
"final_char_insanity_check",
",",
"fork_procs_insanity_check",
",",
"start_proc_insanity_check",
",",
"late_proc_insanity_check",
"]",
"]",
",",
"[",
"pipeline_str",
",",
"[",
"inner_fork_insanity_checks",
"]",
"]",
"]",
"# executes sanity checks in pipeline string before parsing it.",
"for",
"param",
",",
"func_list",
"in",
"checks",
":",
"for",
"func",
"in",
"func_list",
":",
"func",
"(",
"param",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
parse_pipeline
|
Parses a pipeline string into a list of dictionaries with the connections
between processes
Parameters
----------
pipeline_str : str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
Returns
-------
pipeline_links : list
|
flowcraft/generator/pipeline_parser.py
|
def parse_pipeline(pipeline_str):
"""Parses a pipeline string into a list of dictionaries with the connections
between processes
Parameters
----------
pipeline_str : str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
Returns
-------
pipeline_links : list
"""
if os.path.exists(pipeline_str):
logger.debug("Found pipeline file: {}".format(pipeline_str))
with open(pipeline_str) as fh:
pipeline_str = "".join([x.strip() for x in fh.readlines()])
logger.info(colored_print("Resulting pipeline string:\n"))
logger.info(colored_print(pipeline_str + "\n"))
# Perform pipeline insanity checks
insanity_checks(pipeline_str)
logger.debug("Parsing pipeline string: {}".format(pipeline_str))
pipeline_links = []
lane = 1
# Add unique identifiers to each process to allow a correct connection
# between forks with same processes
pipeline_str_modified, identifiers_to_tags = add_unique_identifiers(
pipeline_str)
# Get number of forks in the pipeline
nforks = pipeline_str_modified.count(FORK_TOKEN)
logger.debug("Found {} fork(s)".format(nforks))
# If there are no forks, connect the pipeline as purely linear
if not nforks:
logger.debug("Detected linear pipeline string : {}".format(
pipeline_str))
linear_pipeline = ["__init__"] + pipeline_str_modified.split()
pipeline_links.extend(linear_connection(linear_pipeline, lane))
# Removes unique identifiers used to correctly assign fork parents with
# a possible same process name
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links
for i in range(nforks):
logger.debug("Processing fork {} in lane {}".format(i, lane))
# Split the pipeline at each fork start position. fields[-1] will
# hold the process after the fork. fields[-2] will hold the processes
# before the fork.
fields = pipeline_str_modified.split(FORK_TOKEN, i + 1)
# Get the processes before the fork. This may be empty when the
# fork is at the beginning of the pipeline.
previous_process = fields[-2].split(LANE_TOKEN)[-1].split()
logger.debug("Previous processes string: {}".format(fields[-2]))
logger.debug("Previous processes list: {}".format(previous_process))
# Get lanes after the fork
next_lanes = get_lanes(fields[-1])
logger.debug("Next lanes object: {}".format(next_lanes))
# Get the immediate targets of the fork
fork_sink = [x[0] for x in next_lanes]
logger.debug("The fork sinks into the processes: {}".format(fork_sink))
# The first fork is a special case, where the processes before AND
# after the fork (until the start of another fork) are added to
# the ``pipeline_links`` variable. Otherwise, only the processes
# after the fork will be added
if i == 0:
# If there are no previous process, the fork is at the beginning
# of the pipeline string. In this case, inject the special
# "init" process.
if not previous_process:
previous_process = ["__init__"]
lane = 0
else:
previous_process = ["__init__"] + previous_process
# Add the linear modules before the fork
pipeline_links.extend(
linear_connection(previous_process, lane))
fork_source = previous_process[-1]
logger.debug("Fork source is set to: {}".format(fork_source))
fork_lane = get_source_lane(previous_process, pipeline_links)
logger.debug("Fork lane is set to: {}".format(fork_lane))
# Add the forking modules
pipeline_links.extend(
fork_connection(fork_source, fork_sink, fork_lane, lane))
# Add the linear connections in the subsequent lanes
pipeline_links.extend(
linear_lane_connection(next_lanes, lane))
lane += len(fork_sink)
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links
|
def parse_pipeline(pipeline_str):
"""Parses a pipeline string into a list of dictionaries with the connections
between processes
Parameters
----------
pipeline_str : str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
Returns
-------
pipeline_links : list
"""
if os.path.exists(pipeline_str):
logger.debug("Found pipeline file: {}".format(pipeline_str))
with open(pipeline_str) as fh:
pipeline_str = "".join([x.strip() for x in fh.readlines()])
logger.info(colored_print("Resulting pipeline string:\n"))
logger.info(colored_print(pipeline_str + "\n"))
# Perform pipeline insanity checks
insanity_checks(pipeline_str)
logger.debug("Parsing pipeline string: {}".format(pipeline_str))
pipeline_links = []
lane = 1
# Add unique identifiers to each process to allow a correct connection
# between forks with same processes
pipeline_str_modified, identifiers_to_tags = add_unique_identifiers(
pipeline_str)
# Get number of forks in the pipeline
nforks = pipeline_str_modified.count(FORK_TOKEN)
logger.debug("Found {} fork(s)".format(nforks))
# If there are no forks, connect the pipeline as purely linear
if not nforks:
logger.debug("Detected linear pipeline string : {}".format(
pipeline_str))
linear_pipeline = ["__init__"] + pipeline_str_modified.split()
pipeline_links.extend(linear_connection(linear_pipeline, lane))
# Removes unique identifiers used to correctly assign fork parents with
# a possible same process name
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links
for i in range(nforks):
logger.debug("Processing fork {} in lane {}".format(i, lane))
# Split the pipeline at each fork start position. fields[-1] will
# hold the process after the fork. fields[-2] will hold the processes
# before the fork.
fields = pipeline_str_modified.split(FORK_TOKEN, i + 1)
# Get the processes before the fork. This may be empty when the
# fork is at the beginning of the pipeline.
previous_process = fields[-2].split(LANE_TOKEN)[-1].split()
logger.debug("Previous processes string: {}".format(fields[-2]))
logger.debug("Previous processes list: {}".format(previous_process))
# Get lanes after the fork
next_lanes = get_lanes(fields[-1])
logger.debug("Next lanes object: {}".format(next_lanes))
# Get the immediate targets of the fork
fork_sink = [x[0] for x in next_lanes]
logger.debug("The fork sinks into the processes: {}".format(fork_sink))
# The first fork is a special case, where the processes before AND
# after the fork (until the start of another fork) are added to
# the ``pipeline_links`` variable. Otherwise, only the processes
# after the fork will be added
if i == 0:
# If there are no previous process, the fork is at the beginning
# of the pipeline string. In this case, inject the special
# "init" process.
if not previous_process:
previous_process = ["__init__"]
lane = 0
else:
previous_process = ["__init__"] + previous_process
# Add the linear modules before the fork
pipeline_links.extend(
linear_connection(previous_process, lane))
fork_source = previous_process[-1]
logger.debug("Fork source is set to: {}".format(fork_source))
fork_lane = get_source_lane(previous_process, pipeline_links)
logger.debug("Fork lane is set to: {}".format(fork_lane))
# Add the forking modules
pipeline_links.extend(
fork_connection(fork_source, fork_sink, fork_lane, lane))
# Add the linear connections in the subsequent lanes
pipeline_links.extend(
linear_lane_connection(next_lanes, lane))
lane += len(fork_sink)
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links
|
[
"Parses",
"a",
"pipeline",
"string",
"into",
"a",
"list",
"of",
"dictionaries",
"with",
"the",
"connections",
"between",
"processes"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L341-L447
|
[
"def",
"parse_pipeline",
"(",
"pipeline_str",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"pipeline_str",
")",
":",
"logger",
".",
"debug",
"(",
"\"Found pipeline file: {}\"",
".",
"format",
"(",
"pipeline_str",
")",
")",
"with",
"open",
"(",
"pipeline_str",
")",
"as",
"fh",
":",
"pipeline_str",
"=",
"\"\"",
".",
"join",
"(",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"fh",
".",
"readlines",
"(",
")",
"]",
")",
"logger",
".",
"info",
"(",
"colored_print",
"(",
"\"Resulting pipeline string:\\n\"",
")",
")",
"logger",
".",
"info",
"(",
"colored_print",
"(",
"pipeline_str",
"+",
"\"\\n\"",
")",
")",
"# Perform pipeline insanity checks",
"insanity_checks",
"(",
"pipeline_str",
")",
"logger",
".",
"debug",
"(",
"\"Parsing pipeline string: {}\"",
".",
"format",
"(",
"pipeline_str",
")",
")",
"pipeline_links",
"=",
"[",
"]",
"lane",
"=",
"1",
"# Add unique identifiers to each process to allow a correct connection",
"# between forks with same processes",
"pipeline_str_modified",
",",
"identifiers_to_tags",
"=",
"add_unique_identifiers",
"(",
"pipeline_str",
")",
"# Get number of forks in the pipeline",
"nforks",
"=",
"pipeline_str_modified",
".",
"count",
"(",
"FORK_TOKEN",
")",
"logger",
".",
"debug",
"(",
"\"Found {} fork(s)\"",
".",
"format",
"(",
"nforks",
")",
")",
"# If there are no forks, connect the pipeline as purely linear",
"if",
"not",
"nforks",
":",
"logger",
".",
"debug",
"(",
"\"Detected linear pipeline string : {}\"",
".",
"format",
"(",
"pipeline_str",
")",
")",
"linear_pipeline",
"=",
"[",
"\"__init__\"",
"]",
"+",
"pipeline_str_modified",
".",
"split",
"(",
")",
"pipeline_links",
".",
"extend",
"(",
"linear_connection",
"(",
"linear_pipeline",
",",
"lane",
")",
")",
"# Removes unique identifiers used for correctly assign fork parents with",
"# a possible same process name",
"pipeline_links",
"=",
"remove_unique_identifiers",
"(",
"identifiers_to_tags",
",",
"pipeline_links",
")",
"return",
"pipeline_links",
"for",
"i",
"in",
"range",
"(",
"nforks",
")",
":",
"logger",
".",
"debug",
"(",
"\"Processing fork {} in lane {}\"",
".",
"format",
"(",
"i",
",",
"lane",
")",
")",
"# Split the pipeline at each fork start position. fields[-1] will",
"# hold the process after the fork. fields[-2] will hold the processes",
"# before the fork.",
"fields",
"=",
"pipeline_str_modified",
".",
"split",
"(",
"FORK_TOKEN",
",",
"i",
"+",
"1",
")",
"# Get the processes before the fork. This may be empty when the",
"# fork is at the beginning of the pipeline.",
"previous_process",
"=",
"fields",
"[",
"-",
"2",
"]",
".",
"split",
"(",
"LANE_TOKEN",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Previous processes string: {}\"",
".",
"format",
"(",
"fields",
"[",
"-",
"2",
"]",
")",
")",
"logger",
".",
"debug",
"(",
"\"Previous processes list: {}\"",
".",
"format",
"(",
"previous_process",
")",
")",
"# Get lanes after the fork",
"next_lanes",
"=",
"get_lanes",
"(",
"fields",
"[",
"-",
"1",
"]",
")",
"logger",
".",
"debug",
"(",
"\"Next lanes object: {}\"",
".",
"format",
"(",
"next_lanes",
")",
")",
"# Get the immediate targets of the fork",
"fork_sink",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"next_lanes",
"]",
"logger",
".",
"debug",
"(",
"\"The fork sinks into the processes: {}\"",
".",
"format",
"(",
"fork_sink",
")",
")",
"# The first fork is a special case, where the processes before AND",
"# after the fork (until the start of another fork) are added to",
"# the ``pipeline_links`` variable. Otherwise, only the processes",
"# after the fork will be added",
"if",
"i",
"==",
"0",
":",
"# If there are no previous process, the fork is at the beginning",
"# of the pipeline string. In this case, inject the special",
"# \"init\" process.",
"if",
"not",
"previous_process",
":",
"previous_process",
"=",
"[",
"\"__init__\"",
"]",
"lane",
"=",
"0",
"else",
":",
"previous_process",
"=",
"[",
"\"__init__\"",
"]",
"+",
"previous_process",
"# Add the linear modules before the fork",
"pipeline_links",
".",
"extend",
"(",
"linear_connection",
"(",
"previous_process",
",",
"lane",
")",
")",
"fork_source",
"=",
"previous_process",
"[",
"-",
"1",
"]",
"logger",
".",
"debug",
"(",
"\"Fork source is set to: {}\"",
".",
"format",
"(",
"fork_source",
")",
")",
"fork_lane",
"=",
"get_source_lane",
"(",
"previous_process",
",",
"pipeline_links",
")",
"logger",
".",
"debug",
"(",
"\"Fork lane is set to: {}\"",
".",
"format",
"(",
"fork_lane",
")",
")",
"# Add the forking modules",
"pipeline_links",
".",
"extend",
"(",
"fork_connection",
"(",
"fork_source",
",",
"fork_sink",
",",
"fork_lane",
",",
"lane",
")",
")",
"# Add the linear connections in the subsequent lanes",
"pipeline_links",
".",
"extend",
"(",
"linear_lane_connection",
"(",
"next_lanes",
",",
"lane",
")",
")",
"lane",
"+=",
"len",
"(",
"fork_sink",
")",
"pipeline_links",
"=",
"remove_unique_identifiers",
"(",
"identifiers_to_tags",
",",
"pipeline_links",
")",
"return",
"pipeline_links"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
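
For orientation, a hand-written illustration of the kind of connection list parse_pipeline returns for a pipeline such as 'A B (C | D)'. The input/output and process/lane keys are inferred from how get_source_lane (next record) reads the list; the real entries may carry additional fields and different lane numbering.

# Illustrative only; the exact structure is an assumption based on get_source_lane
pipeline_links = [
    {"input": {"process": "__init__", "lane": 1}, "output": {"process": "A", "lane": 1}},
    {"input": {"process": "A", "lane": 1},        "output": {"process": "B", "lane": 1}},
    {"input": {"process": "B", "lane": 1},        "output": {"process": "C", "lane": 2}},
    {"input": {"process": "B", "lane": 1},        "output": {"process": "D", "lane": 3}},
]
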
test
|
get_source_lane
|
Returns the lane of the last process that matches fork_process
Parameters
----------
fork_process : list
List of processes before the fork.
pipeline_list : list
List with the pipeline connection dictionaries.
Returns
-------
int
Lane of the last process that matches fork_process
|
flowcraft/generator/pipeline_parser.py
|
def get_source_lane(fork_process, pipeline_list):
"""Returns the lane of the last process that matches fork_process
Parameters
----------
fork_process : list
List of processes before the fork.
pipeline_list : list
List with the pipeline connection dictionaries.
Returns
-------
int
Lane of the last process that matches fork_process
"""
fork_source = fork_process[-1]
fork_sig = [x for x in fork_process if x != "__init__"]
for position, p in enumerate(pipeline_list[::-1]):
if p["output"]["process"] == fork_source:
lane = p["output"]["lane"]
logger.debug("Possible source match found in position {} in lane"
" {}".format(position, lane))
lane_sequence = [x["output"]["process"] for x in pipeline_list
if x["output"]["lane"] == lane]
logger.debug("Testing lane sequence '{}' against fork signature"
" '{}'".format(lane_sequence, fork_sig))
if lane_sequence == fork_sig:
return p["output"]["lane"]
return 0
|
def get_source_lane(fork_process, pipeline_list):
"""Returns the lane of the last process that matches fork_process
Parameters
----------
fork_process : list
List of processes before the fork.
pipeline_list : list
List with the pipeline connection dictionaries.
Returns
-------
int
Lane of the last process that matches fork_process
"""
fork_source = fork_process[-1]
fork_sig = [x for x in fork_process if x != "__init__"]
for position, p in enumerate(pipeline_list[::-1]):
if p["output"]["process"] == fork_source:
lane = p["output"]["lane"]
logger.debug("Possible source match found in position {} in lane"
" {}".format(position, lane))
lane_sequence = [x["output"]["process"] for x in pipeline_list
if x["output"]["lane"] == lane]
logger.debug("Testing lane sequence '{}' against fork signature"
" '{}'".format(lane_sequence, fork_sig))
if lane_sequence == fork_sig:
return p["output"]["lane"]
return 0
|
[
"Returns",
"the",
"lane",
"of",
"the",
"last",
"process",
"that",
"matches",
"fork_process"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L450-L483
|
[
"def",
"get_source_lane",
"(",
"fork_process",
",",
"pipeline_list",
")",
":",
"fork_source",
"=",
"fork_process",
"[",
"-",
"1",
"]",
"fork_sig",
"=",
"[",
"x",
"for",
"x",
"in",
"fork_process",
"if",
"x",
"!=",
"\"__init__\"",
"]",
"for",
"position",
",",
"p",
"in",
"enumerate",
"(",
"pipeline_list",
"[",
":",
":",
"-",
"1",
"]",
")",
":",
"if",
"p",
"[",
"\"output\"",
"]",
"[",
"\"process\"",
"]",
"==",
"fork_source",
":",
"lane",
"=",
"p",
"[",
"\"output\"",
"]",
"[",
"\"lane\"",
"]",
"logger",
".",
"debug",
"(",
"\"Possible source match found in position {} in lane\"",
"\" {}\"",
".",
"format",
"(",
"position",
",",
"lane",
")",
")",
"lane_sequence",
"=",
"[",
"x",
"[",
"\"output\"",
"]",
"[",
"\"process\"",
"]",
"for",
"x",
"in",
"pipeline_list",
"if",
"x",
"[",
"\"output\"",
"]",
"[",
"\"lane\"",
"]",
"==",
"lane",
"]",
"logger",
".",
"debug",
"(",
"\"Testing lane sequence '{}' against fork signature\"",
"\" '{}'\"",
".",
"format",
"(",
"lane_sequence",
",",
"fork_sig",
")",
")",
"if",
"lane_sequence",
"==",
"fork_sig",
":",
"return",
"p",
"[",
"\"output\"",
"]",
"[",
"\"lane\"",
"]",
"return",
"0"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
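
A standalone version of the lane-signature test performed inside get_source_lane, on a hand-written connection list with the same assumed structure as the sketch above. Lane 2 also ends in 'B', but its process sequence does not match the fork signature, so lane 1 is the one returned.

pipeline_list = [
    {"output": {"process": "A", "lane": 1}},
    {"output": {"process": "B", "lane": 1}},
    {"output": {"process": "B", "lane": 2}},
]
fork_process = ["__init__", "A", "B"]

fork_source = fork_process[-1]
fork_sig = [x for x in fork_process if x != "__init__"]   # ['A', 'B']

for p in pipeline_list[::-1]:
    if p["output"]["process"] == fork_source:
        lane = p["output"]["lane"]
        lane_sequence = [x["output"]["process"] for x in pipeline_list
                         if x["output"]["lane"] == lane]
        if lane_sequence == fork_sig:
            print(lane)  # 1
            break
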
test
|
get_lanes
|
From a raw pipeline string, get a list of lanes from the start
of the current fork.
When the pipeline is being parsed, it will be split at every fork
position. The string at the right of the fork position will be provided
to this function. Its job is to retrieve the lanes that result
from that fork, ignoring any nested forks.
Parameters
----------
lanes_str : str
Pipeline string after a fork split
Returns
-------
lanes : list
List of lists, with the list of processes for each lane
|
flowcraft/generator/pipeline_parser.py
|
def get_lanes(lanes_str):
"""From a raw pipeline string, get a list of lanes from the start
of the current fork.
When the pipeline is being parsed, it will be split at every fork
position. The string at the right of the fork position will be provided
to this function. Its job is to retrieve the lanes that result
from that fork, ignoring any nested forks.
Parameters
----------
lanes_str : str
Pipeline string after a fork split
Returns
-------
lanes : list
List of lists, with the list of processes for each lane
"""
logger.debug("Parsing lanes from raw string: {}".format(lanes_str))
# Temporarily stores the lanes string after removal of nested forks
parsed_lanes = ""
# Flag used to determine whether the cursor is inside or outside the
# right fork
infork = 0
for i in lanes_str:
# Nested fork started
if i == FORK_TOKEN:
infork += 1
# Nested fork stopped
if i == CLOSE_TOKEN:
infork -= 1
if infork < 0:
break
# Save only when in the right fork
if infork == 0:
# Ignore forking syntax tokens
if i not in [FORK_TOKEN, CLOSE_TOKEN]:
parsed_lanes += i
return [x.split() for x in parsed_lanes.split(LANE_TOKEN)]
|
def get_lanes(lanes_str):
"""From a raw pipeline string, get a list of lanes from the start
of the current fork.
When the pipeline is being parsed, it will be split at every fork
position. The string at the right of the fork position will be provided
to this function. Its job is to retrieve the lanes that result
from that fork, ignoring any nested forks.
Parameters
----------
lanes_str : str
Pipeline string after a fork split
Returns
-------
lanes : list
List of lists, with the list of processes for each lane
"""
logger.debug("Parsing lanes from raw string: {}".format(lanes_str))
# Temporarily stores the lanes string after removal of nested forks
parsed_lanes = ""
# Flag used to determine whether the cursor is inside or outside the
# right fork
infork = 0
for i in lanes_str:
# Nested fork started
if i == FORK_TOKEN:
infork += 1
# Nested fork stopped
if i == CLOSE_TOKEN:
infork -= 1
if infork < 0:
break
# Save only when in the right fork
if infork == 0:
# Ignore forking syntax tokens
if i not in [FORK_TOKEN, CLOSE_TOKEN]:
parsed_lanes += i
return [x.split() for x in parsed_lanes.split(LANE_TOKEN)]
|
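An illustrative call for the record above. It assumes flowcraft is installed and that the module-level FORK_TOKEN, LANE_TOKEN and CLOSE_TOKEN constants are "(", "|" and ")" respectively, which is not shown in this record; the expected result only holds under that assumption.

from flowcraft.generator.pipeline_parser import get_lanes

# String to the right of a fork split; the nested fork "(E | F)" is skipped.
lanes_str = "A B | C (E | F) | D )"
print(get_lanes(lanes_str))
# Expected under the token assumption above: [['A', 'B'], ['C'], ['D']]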
[
"From",
"a",
"raw",
"pipeline",
"string",
"get",
"a",
"list",
"of",
"lanes",
"from",
"the",
"start",
"of",
"the",
"current",
"fork",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L486-L532
|
[
"def",
"get_lanes",
"(",
"lanes_str",
")",
":",
"logger",
".",
"debug",
"(",
"\"Parsing lanes from raw string: {}\"",
".",
"format",
"(",
"lanes_str",
")",
")",
"# Temporarily stores the lanes string after removal of nested forks",
"parsed_lanes",
"=",
"\"\"",
"# Flag used to determined whether the cursor is inside or outside the",
"# right fork",
"infork",
"=",
"0",
"for",
"i",
"in",
"lanes_str",
":",
"# Nested fork started",
"if",
"i",
"==",
"FORK_TOKEN",
":",
"infork",
"+=",
"1",
"# Nested fork stopped",
"if",
"i",
"==",
"CLOSE_TOKEN",
":",
"infork",
"-=",
"1",
"if",
"infork",
"<",
"0",
":",
"break",
"# Save only when in the right fork",
"if",
"infork",
"==",
"0",
":",
"# Ignore forking syntax tokens",
"if",
"i",
"not",
"in",
"[",
"FORK_TOKEN",
",",
"CLOSE_TOKEN",
"]",
":",
"parsed_lanes",
"+=",
"i",
"return",
"[",
"x",
".",
"split",
"(",
")",
"for",
"x",
"in",
"parsed_lanes",
".",
"split",
"(",
"LANE_TOKEN",
")",
"]"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
linear_connection
|
Connects a linear list of processes into a list of dictionaries
Parameters
----------
plist : list
List with process names. This list should contain at least two entries.
lane : int
Corresponding lane of the processes
Returns
-------
res : list
List of dictionaries with the links between processes
|
flowcraft/generator/pipeline_parser.py
|
def linear_connection(plist, lane):
"""Connects a linear list of processes into a list of dictionaries
Parameters
----------
plist : list
List with process names. This list should contain at least two entries.
lane : int
Corresponding lane of the processes
Returns
-------
res : list
List of dictionaries with the links between processes
"""
logger.debug(
"Establishing linear connection with processes: {}".format(plist))
res = []
previous = None
for p in plist:
# Skip first process
if not previous:
previous = p
continue
res.append({
"input": {
"process": previous,
"lane": lane
},
"output": {
"process": p,
"lane": lane
}
})
previous = p
return res
|
def linear_connection(plist, lane):
"""Connects a linear list of processes into a list of dictionaries
Parameters
----------
plist : list
List with process names. This list should contain at least two entries.
lane : int
Corresponding lane of the processes
Returns
-------
res : list
List of dictionaries with the links between processes
"""
logger.debug(
"Establishing linear connection with processes: {}".format(plist))
res = []
previous = None
for p in plist:
# Skip first process
if not previous:
previous = p
continue
res.append({
"input": {
"process": previous,
"lane": lane
},
"output": {
"process": p,
"lane": lane
}
})
previous = p
return res
|
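A small usage sketch for the record above (assumes flowcraft is installed; the component names are arbitrary examples).

from flowcraft.generator.pipeline_parser import linear_connection

# Three processes in lane 1; every consecutive pair becomes one link dict.
links = linear_connection(["fastqc", "trimmomatic", "spades"], lane=1)
for link in links:
    print(link["input"]["process"], "->", link["output"]["process"])
# fastqc -> trimmomatic
# trimmomatic -> spades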
[
"Connects",
"a",
"linear",
"list",
"of",
"processes",
"into",
"a",
"list",
"of",
"dictionaries"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L535-L575
|
[
"def",
"linear_connection",
"(",
"plist",
",",
"lane",
")",
":",
"logger",
".",
"debug",
"(",
"\"Establishing linear connection with processes: {}\"",
".",
"format",
"(",
"plist",
")",
")",
"res",
"=",
"[",
"]",
"previous",
"=",
"None",
"for",
"p",
"in",
"plist",
":",
"# Skip first process",
"if",
"not",
"previous",
":",
"previous",
"=",
"p",
"continue",
"res",
".",
"append",
"(",
"{",
"\"input\"",
":",
"{",
"\"process\"",
":",
"previous",
",",
"\"lane\"",
":",
"lane",
"}",
",",
"\"output\"",
":",
"{",
"\"process\"",
":",
"p",
",",
"\"lane\"",
":",
"lane",
"}",
"}",
")",
"previous",
"=",
"p",
"return",
"res"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
fork_connection
|
Makes the connection between a process and the first processes in the
lanes to which it forks.
The ``lane`` argument should correspond to the lane of the source process.
For each lane in ``sink``, the lane counter will increase.
Parameters
----------
source : str
Name of the process that is forking
sink : list
List of the processes where the source will fork to. Each element
corresponds to the start of a lane.
source_lane : int
Lane of the forking process
lane : int
Lane of the source process
Returns
-------
res : list
List of dictionaries with the links between processes
|
flowcraft/generator/pipeline_parser.py
|
def fork_connection(source, sink, source_lane, lane):
"""Makes the connection between a process and the first processes in the
lanes to which it forks.
The ``lane`` argument should correspond to the lane of the source process.
For each lane in ``sink``, the lane counter will increase.
Parameters
----------
source : str
Name of the process that is forking
sink : list
List of the processes where the source will fork to. Each element
corresponds to the start of a lane.
source_lane : int
Lane of the forking process
lane : int
Lane of the source process
Returns
-------
res : list
List of dictionaries with the links between processes
"""
logger.debug("Establishing forking of source '{}' into processes"
" '{}'. Source lane set to '{}' and lane set to '{}'".format(
source, sink, source_lane, lane))
res = []
# Increase the lane counter for the first lane
lane_counter = lane + 1
for p in sink:
res.append({
"input": {
"process": source,
"lane": source_lane
},
"output": {
"process": p,
"lane": lane_counter
}
})
lane_counter += 1
return res
|
def fork_connection(source, sink, source_lane, lane):
"""Makes the connection between a process and the first processes in the
lanes to which it forks.
The ``lane`` argument should correspond to the lane of the source process.
For each lane in ``sink``, the lane counter will increase.
Parameters
----------
source : str
Name of the process that is forking
sink : list
List of the processes where the source will fork to. Each element
corresponds to the start of a lane.
source_lane : int
Lane of the forking process
lane : int
Lane of the source process
Returns
-------
res : list
List of dictionaries with the links between processes
"""
logger.debug("Establishing forking of source '{}' into processes"
" '{}'. Source lane set to '{}' and lane set to '{}'".format(
source, sink, source_lane, lane))
res = []
# Increase the lane counter for the first lane
lane_counter = lane + 1
for p in sink:
res.append({
"input": {
"process": source,
"lane": source_lane
},
"output": {
"process": p,
"lane": lane_counter
}
})
lane_counter += 1
return res
|
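Usage sketch for the record above (assumes flowcraft is installed; process names are examples). A source in lane 1 forks into two sink processes, which are placed in lanes 2 and 3.

from flowcraft.generator.pipeline_parser import fork_connection

links = fork_connection(source="spades", sink=["abricate", "prokka"],
                        source_lane=1, lane=1)
for link in links:
    # Each sink process starts a new lane, numbered from lane + 1 upwards.
    print(link["input"], "->", link["output"])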
[
"Makes",
"the",
"connection",
"between",
"a",
"process",
"and",
"the",
"first",
"processes",
"in",
"the",
"lanes",
"to",
"which",
"it",
"forks",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L578-L624
|
[
"def",
"fork_connection",
"(",
"source",
",",
"sink",
",",
"source_lane",
",",
"lane",
")",
":",
"logger",
".",
"debug",
"(",
"\"Establishing forking of source '{}' into processes\"",
"\" '{}'. Source lane set to '{}' and lane set to '{}'\"",
".",
"format",
"(",
"source",
",",
"sink",
",",
"source_lane",
",",
"lane",
")",
")",
"res",
"=",
"[",
"]",
"# Increase the lane counter for the first lane",
"lane_counter",
"=",
"lane",
"+",
"1",
"for",
"p",
"in",
"sink",
":",
"res",
".",
"append",
"(",
"{",
"\"input\"",
":",
"{",
"\"process\"",
":",
"source",
",",
"\"lane\"",
":",
"source_lane",
"}",
",",
"\"output\"",
":",
"{",
"\"process\"",
":",
"p",
",",
"\"lane\"",
":",
"lane_counter",
"}",
"}",
")",
"lane_counter",
"+=",
"1",
"return",
"res"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
add_unique_identifiers
|
Returns the pipeline string with unique identifiers and a dictionary with
references between the unique keys and the original values
Parameters
----------
pipeline_str : str
Pipeline string
Returns
-------
str
Pipeline string with unique identifiers
dict
Match between process unique values and original names
|
flowcraft/generator/pipeline_parser.py
|
def add_unique_identifiers(pipeline_str):
"""Returns the pipeline string with unique identifiers and a dictionary with
references between the unique keys and the original values
Parameters
----------
pipeline_str : str
Pipeline string
Returns
-------
str
Pipeline string with unique identifiers
dict
Match between process unique values and original names
"""
# Add space at beginning and end of pipeline to allow regex mapping of final
# process in linear pipelines
pipeline_str_modified = " {} ".format(pipeline_str)
# Regex to get all process names. Catch all words without spaces and that
# are not fork tokens or pipes
reg_find_proc = r"[^\s{}{}{}]+".format(LANE_TOKEN, FORK_TOKEN, CLOSE_TOKEN)
process_names = re.findall(reg_find_proc, pipeline_str_modified)
identifiers_to_tags = {}
"""
dict: Matches new process names (identifiers) with original process
names
"""
new_process_names = []
"""
list: New process names used to replace in the pipeline string
"""
# Assigns the new process names by appending a numeric id at the end of
# the process name
for index, val in enumerate(process_names):
if "=" in val:
parts = val.split("=")
new_id = "{}_{}={}".format(parts[0], index, parts[1])
else:
new_id = "{}_{}".format(val, index)
# add new process with id
new_process_names.append(new_id)
# makes a match between new process name and original process name
identifiers_to_tags[new_id] = val
# Add space between forks, pipes and the process names for the replace
# regex to work
match_result = lambda match: " {} ".format(match.group())
# force to add a space between each token so that regex modification can
# be applied
find = r'[{}{}{}]+'.format(FORK_TOKEN, LANE_TOKEN, CLOSE_TOKEN)
pipeline_str_modified = re.sub(find, match_result, pipeline_str_modified)
# Replace original process names by the unique identifiers
for index, val in enumerate(process_names):
# regex to replace process names with non assigned process ids
# escape characters are required to match to the dict keys
# (identifiers_to_tags), since python keys with escape characters
# must be escaped
find = r'{}[^_]'.format(val).replace("\\", "\\\\")
pipeline_str_modified = re.sub(find, new_process_names[index] + " ",
pipeline_str_modified, 1)
return pipeline_str_modified, identifiers_to_tags
|
def add_unique_identifiers(pipeline_str):
"""Returns the pipeline string with unique identifiers and a dictionary with
references between the unique keys and the original values
Parameters
----------
pipeline_str : str
Pipeline string
Returns
-------
str
Pipeline string with unique identifiers
dict
Match between process unique values and original names
"""
# Add space at beginning and end of pipeline to allow regex mapping of final
# process in linear pipelines
pipeline_str_modified = " {} ".format(pipeline_str)
# Regex to get all process names. Catch all words without spaces and that
# are not fork tokens or pipes
reg_find_proc = r"[^\s{}{}{}]+".format(LANE_TOKEN, FORK_TOKEN, CLOSE_TOKEN)
process_names = re.findall(reg_find_proc, pipeline_str_modified)
identifiers_to_tags = {}
"""
dict: Matches new process names (identifiers) with original process
names
"""
new_process_names = []
"""
list: New process names used to replace in the pipeline string
"""
# Assigns the new process names by appending a numeric id at the end of
# the process name
for index, val in enumerate(process_names):
if "=" in val:
parts = val.split("=")
new_id = "{}_{}={}".format(parts[0], index, parts[1])
else:
new_id = "{}_{}".format(val, index)
# add new process with id
new_process_names.append(new_id)
# makes a match between new process name and original process name
identifiers_to_tags[new_id] = val
# Add space between forks, pipes and the process names for the replace
# regex to work
match_result = lambda match: " {} ".format(match.group())
# force to add a space between each token so that regex modification can
# be applied
find = r'[{}{}{}]+'.format(FORK_TOKEN, LANE_TOKEN, CLOSE_TOKEN)
pipeline_str_modified = re.sub(find, match_result, pipeline_str_modified)
# Replace original process names by the unique identifiers
for index, val in enumerate(process_names):
# regex to replace process names with non assigned process ids
# escape characters are required to match to the dict keys
# (identifiers_to_tags), since python keys with escape characters
# must be escaped
find = r'{}[^_]'.format(val).replace("\\", "\\\\")
pipeline_str_modified = re.sub(find, new_process_names[index] + " ",
pipeline_str_modified, 1)
return pipeline_str_modified, identifiers_to_tags
|
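Usage sketch for the record above (assumes flowcraft is installed; the pipeline string is an arbitrary example and the exact numeric suffixes depend on the parsing).

from flowcraft.generator.pipeline_parser import add_unique_identifiers

# Tag each process occurrence with a numeric suffix so repeated component
# names can be told apart while the pipeline string is parsed.
tagged_str, id_map = add_unique_identifiers("fastqc trimmomatic spades")
print(tagged_str)  # process names now carry numeric identifiers
print(id_map)      # maps each unique identifier back to the original name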
[
"Returns",
"the",
"pipeline",
"string",
"with",
"unique",
"identifiers",
"and",
"a",
"dictionary",
"with",
"references",
"between",
"the",
"unique",
"keys",
"and",
"the",
"original",
"values"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L657-L727
|
[
"def",
"add_unique_identifiers",
"(",
"pipeline_str",
")",
":",
"# Add space at beginning and end of pipeline to allow regex mapping of final",
"# process in linear pipelines",
"pipeline_str_modified",
"=",
"\" {} \"",
".",
"format",
"(",
"pipeline_str",
")",
"# Regex to get all process names. Catch all words without spaces and that",
"# are not fork tokens or pipes",
"reg_find_proc",
"=",
"r\"[^\\s{}{}{}]+\"",
".",
"format",
"(",
"LANE_TOKEN",
",",
"FORK_TOKEN",
",",
"CLOSE_TOKEN",
")",
"process_names",
"=",
"re",
".",
"findall",
"(",
"reg_find_proc",
",",
"pipeline_str_modified",
")",
"identifiers_to_tags",
"=",
"{",
"}",
"\"\"\"\n dict: Matches new process names (identifiers) with original process \n names\n \"\"\"",
"new_process_names",
"=",
"[",
"]",
"\"\"\"\n list: New process names used to replace in the pipeline string\n \"\"\"",
"# Assigns the new process names by appending a numeric id at the end of",
"# the process name",
"for",
"index",
",",
"val",
"in",
"enumerate",
"(",
"process_names",
")",
":",
"if",
"\"=\"",
"in",
"val",
":",
"parts",
"=",
"val",
".",
"split",
"(",
"\"=\"",
")",
"new_id",
"=",
"\"{}_{}={}\"",
".",
"format",
"(",
"parts",
"[",
"0",
"]",
",",
"index",
",",
"parts",
"[",
"1",
"]",
")",
"else",
":",
"new_id",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"val",
",",
"index",
")",
"# add new process with id",
"new_process_names",
".",
"append",
"(",
"new_id",
")",
"# makes a match between new process name and original process name",
"identifiers_to_tags",
"[",
"new_id",
"]",
"=",
"val",
"# Add space between forks, pipes and the process names for the replace",
"# regex to work",
"match_result",
"=",
"lambda",
"match",
":",
"\" {} \"",
".",
"format",
"(",
"match",
".",
"group",
"(",
")",
")",
"# force to add a space between each token so that regex modification can",
"# be applied",
"find",
"=",
"r'[{}{}{}]+'",
".",
"format",
"(",
"FORK_TOKEN",
",",
"LANE_TOKEN",
",",
"CLOSE_TOKEN",
")",
"pipeline_str_modified",
"=",
"re",
".",
"sub",
"(",
"find",
",",
"match_result",
",",
"pipeline_str_modified",
")",
"# Replace original process names by the unique identifiers",
"for",
"index",
",",
"val",
"in",
"enumerate",
"(",
"process_names",
")",
":",
"# regex to replace process names with non assigned process ids",
"# escape characters are required to match to the dict keys",
"# (identifiers_to_tags), since python keys with escape characters",
"# must be escaped",
"find",
"=",
"r'{}[^_]'",
".",
"format",
"(",
"val",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"\\\\\\\\\"",
")",
"pipeline_str_modified",
"=",
"re",
".",
"sub",
"(",
"find",
",",
"new_process_names",
"[",
"index",
"]",
"+",
"\" \"",
",",
"pipeline_str_modified",
",",
"1",
")",
"return",
"pipeline_str_modified",
",",
"identifiers_to_tags"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
remove_unique_identifiers
|
Removes unique identifiers and adds the original process names to the
already parsed pipelines
Parameters
----------
identifiers_to_tags : dict
Match between unique process identifiers and process names
pipeline_links: list
Parsed pipeline list with unique identifiers
Returns
-------
list
Pipeline list with original identifiers
|
flowcraft/generator/pipeline_parser.py
|
def remove_unique_identifiers(identifiers_to_tags, pipeline_links):
"""Removes unique identifiers and add the original process names to the
already parsed pipelines
Parameters
----------
identifiers_to_tags : dict
Match between unique process identifiers and process names
pipeline_links: list
Parsed pipeline list with unique identifiers
Returns
-------
list
Pipeline list with original identifiers
"""
# Replaces the unique identifiers by the original process names
for index, val in enumerate(pipeline_links):
if val["input"]["process"] != "__init__":
val["input"]["process"] = identifiers_to_tags[
val["input"]["process"]]
if val["output"]["process"] != "__init__":
val["output"]["process"] = identifiers_to_tags[
val["output"]["process"]]
return pipeline_links
|
def remove_unique_identifiers(identifiers_to_tags, pipeline_links):
"""Removes unique identifiers and add the original process names to the
already parsed pipelines
Parameters
----------
identifiers_to_tags : dict
Match between unique process identifiers and process names
pipeline_links: list
Parsed pipeline list with unique identifiers
Returns
-------
list
Pipeline list with original identifiers
"""
# Replaces the unique identifiers by the original process names
for index, val in enumerate(pipeline_links):
if val["input"]["process"] != "__init__":
val["input"]["process"] = identifiers_to_tags[
val["input"]["process"]]
if val["output"]["process"] != "__init__":
val["output"]["process"] = identifiers_to_tags[
val["output"]["process"]]
return pipeline_links
|
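A round-trip sketch combining this record with the two parser functions shown earlier (assumes flowcraft is installed): processes are tagged, connected linearly, and the original names are then restored in the parsed links.

from flowcraft.generator.pipeline_parser import (add_unique_identifiers,
                                                 linear_connection,
                                                 remove_unique_identifiers)

tagged_str, id_map = add_unique_identifiers("fastqc trimmomatic spades")
links = linear_connection(tagged_str.split(), lane=1)
# The parsed links use the unique identifiers; map them back to the originals.
print(remove_unique_identifiers(id_map, links))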
[
"Removes",
"unique",
"identifiers",
"and",
"add",
"the",
"original",
"process",
"names",
"to",
"the",
"already",
"parsed",
"pipelines"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/pipeline_parser.py#L730-L756
|
[
"def",
"remove_unique_identifiers",
"(",
"identifiers_to_tags",
",",
"pipeline_links",
")",
":",
"# Replaces the unique identifiers by the original process names",
"for",
"index",
",",
"val",
"in",
"enumerate",
"(",
"pipeline_links",
")",
":",
"if",
"val",
"[",
"\"input\"",
"]",
"[",
"\"process\"",
"]",
"!=",
"\"__init__\"",
":",
"val",
"[",
"\"input\"",
"]",
"[",
"\"process\"",
"]",
"=",
"identifiers_to_tags",
"[",
"val",
"[",
"\"input\"",
"]",
"[",
"\"process\"",
"]",
"]",
"if",
"val",
"[",
"\"output\"",
"]",
"[",
"\"process\"",
"]",
"!=",
"\"__init__\"",
":",
"val",
"[",
"\"output\"",
"]",
"[",
"\"process\"",
"]",
"=",
"identifiers_to_tags",
"[",
"val",
"[",
"\"output\"",
"]",
"[",
"\"process\"",
"]",
"]",
"return",
"pipeline_links"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
signal_handler
|
This function is bound to the SIGINT signal (like ctrl+c) to gracefully
exit the program and reset the curses options.
|
flowcraft/generator/inspect.py
|
def signal_handler(screen):
"""This function is bound to the SIGINT signal (like ctrl+c) to graciously
exit the program and reset the curses options.
"""
if screen:
screen.clear()
screen.refresh()
curses.nocbreak()
screen.keypad(0)
curses.echo()
curses.endwin()
print("Exiting flowcraft inspection... Bye")
sys.exit(0)
|
def signal_handler(screen):
"""This function is bound to the SIGINT signal (like ctrl+c) to graciously
exit the program and reset the curses options.
"""
if screen:
screen.clear()
screen.refresh()
curses.nocbreak()
screen.keypad(0)
curses.echo()
curses.endwin()
print("Exiting flowcraft inspection... Bye")
sys.exit(0)
|
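A minimal wiring sketch for the record above (assumes flowcraft is installed). How the handler is actually registered inside the inspector is not shown here; this simply binds SIGINT to it with no curses screen created yet, so only the exit path runs.

import signal
from flowcraft.generator.inspect import signal_handler

# Ctrl+C now calls the handler; with screen=None the curses teardown is
# skipped and the process exits cleanly via sys.exit(0).
signal.signal(signal.SIGINT, lambda sig, frame: signal_handler(None))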
[
"This",
"function",
"is",
"bound",
"to",
"the",
"SIGINT",
"signal",
"(",
"like",
"ctrl",
"+",
"c",
")",
"to",
"graciously",
"exit",
"the",
"program",
"and",
"reset",
"the",
"curses",
"options",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L35-L50
|
[
"def",
"signal_handler",
"(",
"screen",
")",
":",
"if",
"screen",
":",
"screen",
".",
"clear",
"(",
")",
"screen",
".",
"refresh",
"(",
")",
"curses",
".",
"nocbreak",
"(",
")",
"screen",
".",
"keypad",
"(",
"0",
")",
"curses",
".",
"echo",
"(",
")",
"curses",
".",
"endwin",
"(",
")",
"print",
"(",
"\"Exiting flowcraft inspection... Bye\"",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._check_required_files
|
Checks whether the trace and log files are available
|
flowcraft/generator/inspect.py
|
def _check_required_files(self):
"""Checks whetner the trace and log files are available
"""
if not os.path.exists(self.trace_file):
raise eh.InspectionError("The provided trace file could not be "
"opened: {}".format(self.trace_file))
if not os.path.exists(self.log_file):
raise eh.InspectionError("The .nextflow.log files could not be "
"opened. Are you sure you are in a "
"nextflow project directory?")
|
def _check_required_files(self):
"""Checks whetner the trace and log files are available
"""
if not os.path.exists(self.trace_file):
raise eh.InspectionError("The provided trace file could not be "
"opened: {}".format(self.trace_file))
if not os.path.exists(self.log_file):
raise eh.InspectionError("The .nextflow.log files could not be "
"opened. Are you sure you are in a "
"nextflow project directory?")
|
[
"Checks",
"whetner",
"the",
"trace",
"and",
"log",
"files",
"are",
"available"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L267-L278
|
[
"def",
"_check_required_files",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"trace_file",
")",
":",
"raise",
"eh",
".",
"InspectionError",
"(",
"\"The provided trace file could not be \"",
"\"opened: {}\"",
".",
"format",
"(",
"self",
".",
"trace_file",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"log_file",
")",
":",
"raise",
"eh",
".",
"InspectionError",
"(",
"\"The .nextflow.log files could not be \"",
"\"opened. Are you sure you are in a \"",
"\"nextflow project directory?\"",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._header_mapping
|
Parses the trace file header and retrieves the positions of each
column key.
Parameters
----------
header : str
The header line of nextflow's trace file
Returns
-------
dict
Mapping the column ID to its position (e.g.: {"tag":2})
|
flowcraft/generator/inspect.py
|
def _header_mapping(header):
"""Parses the trace file header and retrieves the positions of each
column key.
Parameters
----------
header : str
The header line of nextflow's trace file
Returns
-------
dict
Mapping the column ID to its position (e.g.: {"tag":2})
"""
return dict(
(x.strip(), pos) for pos, x in enumerate(header.split("\t"))
)
|
def _header_mapping(header):
"""Parses the trace file header and retrieves the positions of each
column key.
Parameters
----------
header : str
The header line of nextflow's trace file
Returns
-------
dict
Mapping the column ID to its position (e.g.: {"tag":2})
"""
return dict(
(x.strip(), pos) for pos, x in enumerate(header.split("\t"))
)
|
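A self-contained sketch of the same mapping applied to an illustrative trace header (the column names here are examples, not a guaranteed nextflow layout).

header = "task_id\thash\tprocess\ttag\tstatus\trealtime\trss"
# Same expression as the method above: column name -> column position.
mapping = dict((x.strip(), pos) for pos, x in enumerate(header.split("\t")))
print(mapping["tag"])  # 3 -> index of the column holding the 'tag' values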
[
"Parses",
"the",
"trace",
"file",
"header",
"and",
"retrieves",
"the",
"positions",
"of",
"each",
"column",
"key",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L281-L298
|
[
"def",
"_header_mapping",
"(",
"header",
")",
":",
"return",
"dict",
"(",
"(",
"x",
".",
"strip",
"(",
")",
",",
"pos",
")",
"for",
"pos",
",",
"x",
"in",
"enumerate",
"(",
"header",
".",
"split",
"(",
"\"\\t\"",
")",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._expand_path
|
Expands the hash string of a process (ae/1dasjdm) into a full
working directory
Parameters
----------
hash_str : str
Nextflow process hash with the beginning of the work directory
Returns
-------
str
Path to working directory of the hash string
|
flowcraft/generator/inspect.py
|
def _expand_path(hash_str):
"""Expands the hash string of a process (ae/1dasjdm) into a full
working directory
Parameters
----------
hash_str : str
Nextflow process hash with the beginning of the work directory
Returns
-------
str
Path to working directory of the hash string
"""
try:
first_hash, second_hash = hash_str.split("/")
first_hash_path = join(abspath("work"), first_hash)
for l in os.listdir(first_hash_path):
if l.startswith(second_hash):
return join(first_hash_path, l)
except FileNotFoundError:
return None
|
def _expand_path(hash_str):
"""Expands the hash string of a process (ae/1dasjdm) into a full
working directory
Parameters
----------
hash_str : str
Nextflow process hash with the beginning of the work directory
Returns
-------
str
Path to working directory of the hash string
"""
try:
first_hash, second_hash = hash_str.split("/")
first_hash_path = join(abspath("work"), first_hash)
for l in os.listdir(first_hash_path):
if l.startswith(second_hash):
return join(first_hash_path, l)
except FileNotFoundError:
return None
|
[
"Expands",
"the",
"hash",
"string",
"of",
"a",
"process",
"(",
"ae",
"/",
"1dasjdm",
")",
"into",
"a",
"full",
"working",
"directory"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L301-L324
|
[
"def",
"_expand_path",
"(",
"hash_str",
")",
":",
"try",
":",
"first_hash",
",",
"second_hash",
"=",
"hash_str",
".",
"split",
"(",
"\"/\"",
")",
"first_hash_path",
"=",
"join",
"(",
"abspath",
"(",
"\"work\"",
")",
",",
"first_hash",
")",
"for",
"l",
"in",
"os",
".",
"listdir",
"(",
"first_hash_path",
")",
":",
"if",
"l",
".",
"startswith",
"(",
"second_hash",
")",
":",
"return",
"join",
"(",
"first_hash_path",
",",
"l",
")",
"except",
"FileNotFoundError",
":",
"return",
"None"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._hms
|
Converts a hms string into seconds.
Parameters
----------
s : str
The hms string can be something like '20s', '1m30s' or '300ms'.
Returns
-------
float
Time in seconds.
|
flowcraft/generator/inspect.py
|
def _hms(s):
"""Converts a hms string into seconds.
Parameters
----------
s : str
The hms string can be something like '20s', '1m30s' or '300ms'.
Returns
-------
float
Time in seconds.
"""
if s == "-":
return 0
if s.endswith("ms"):
return float(s.rstrip("ms")) / 1000
fields = list(map(float, re.split("[dhms]", s)[:-1]))
if len(fields) == 4:
return fields[0] * 24 * 3600 + fields[1] * 3600 + fields[2] * 60 +\
fields[3]
if len(fields) == 3:
return fields[0] * 3600 + fields[1] * 60 + fields[2]
elif len(fields) == 2:
return fields[0] * 60 + fields[1]
else:
return fields[0]
|
def _hms(s):
"""Converts a hms string into seconds.
Parameters
----------
s : str
The hms string can be something like '20s', '1m30s' or '300ms'.
Returns
-------
float
Time in seconds.
"""
if s == "-":
return 0
if s.endswith("ms"):
return float(s.rstrip("ms")) / 1000
fields = list(map(float, re.split("[dhms]", s)[:-1]))
if len(fields) == 4:
return fields[0] * 24 * 3600 + fields[1] * 3600 + fields[2] * 60 +\
fields[3]
if len(fields) == 3:
return fields[0] * 3600 + fields[1] * 60 + fields[2]
elif len(fields) == 2:
return fields[0] * 60 + fields[1]
else:
return fields[0]
|
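A standalone sketch of the same duration arithmetic, written outside the class so it can be run directly; it mirrors the field handling above with a single multiplier table.

import re

def hms_to_seconds(s):
    """Convert nextflow duration strings such as '20s', '1m30s' or '300ms'."""
    if s == "-":
        return 0
    if s.endswith("ms"):
        return float(s.rstrip("ms")) / 1000
    fields = list(map(float, re.split("[dhms]", s)[:-1]))
    # Align the rightmost fields with days, hours, minutes and seconds.
    multipliers = [24 * 3600, 3600, 60, 1][-len(fields):]
    return sum(f * m for f, m in zip(fields, multipliers))

print(hms_to_seconds("1m30s"))  # 90.0
print(hms_to_seconds("300ms"))  # 0.3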
[
"Converts",
"a",
"hms",
"string",
"into",
"seconds",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L327-L357
|
[
"def",
"_hms",
"(",
"s",
")",
":",
"if",
"s",
"==",
"\"-\"",
":",
"return",
"0",
"if",
"s",
".",
"endswith",
"(",
"\"ms\"",
")",
":",
"return",
"float",
"(",
"s",
".",
"rstrip",
"(",
"\"ms\"",
")",
")",
"/",
"1000",
"fields",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"re",
".",
"split",
"(",
"\"[dhms]\"",
",",
"s",
")",
"[",
":",
"-",
"1",
"]",
")",
")",
"if",
"len",
"(",
"fields",
")",
"==",
"4",
":",
"return",
"fields",
"[",
"0",
"]",
"*",
"24",
"*",
"3600",
"+",
"fields",
"[",
"1",
"]",
"*",
"3600",
"+",
"fields",
"[",
"2",
"]",
"*",
"60",
"+",
"fields",
"[",
"3",
"]",
"if",
"len",
"(",
"fields",
")",
"==",
"3",
":",
"return",
"fields",
"[",
"0",
"]",
"*",
"3600",
"+",
"fields",
"[",
"1",
"]",
"*",
"60",
"+",
"fields",
"[",
"2",
"]",
"elif",
"len",
"(",
"fields",
")",
"==",
"2",
":",
"return",
"fields",
"[",
"0",
"]",
"*",
"60",
"+",
"fields",
"[",
"1",
"]",
"else",
":",
"return",
"fields",
"[",
"0",
"]"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._size_coverter
|
Converts size string into megabytes
Parameters
----------
s : str
The size string can be '30KB', '20MB' or '1GB'
Returns
-------
float
With the size in megabytes
|
flowcraft/generator/inspect.py
|
def _size_coverter(s):
"""Converts size string into megabytes
Parameters
----------
s : str
The size string can be '30KB', '20MB' or '1GB'
Returns
-------
float
With the size in megabytes
"""
if s.upper().endswith("KB"):
return float(s.rstrip("KB")) / 1024
elif s.upper().endswith(" B"):
return float(s.rstrip("B")) / 1024 / 1024
elif s.upper().endswith("MB"):
return float(s.rstrip("MB"))
elif s.upper().endswith("GB"):
return float(s.rstrip("GB")) * 1024
elif s.upper().endswith("TB"):
return float(s.rstrip("TB")) * 1024 * 1024
else:
return float(s)
|
def _size_coverter(s):
"""Converts size string into megabytes
Parameters
----------
s : str
The size string can be '30KB', '20MB' or '1GB'
Returns
-------
float
With the size in megabytes
"""
if s.upper().endswith("KB"):
return float(s.rstrip("KB")) / 1024
elif s.upper().endswith(" B"):
return float(s.rstrip("B")) / 1024 / 1024
elif s.upper().endswith("MB"):
return float(s.rstrip("MB"))
elif s.upper().endswith("GB"):
return float(s.rstrip("GB")) * 1024
elif s.upper().endswith("TB"):
return float(s.rstrip("TB")) * 1024 * 1024
else:
return float(s)
|
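A standalone sketch of the same unit normalisation (sizes reported by the trace file are converted to megabytes); the suffix table mirrors the branches above.

def size_to_mb(s):
    """Convert trace size strings such as '30KB', '20MB' or '1GB' to MB."""
    factors = {"KB": 1 / 1024, " B": 1 / (1024 * 1024), "MB": 1,
               "GB": 1024, "TB": 1024 * 1024}
    for suffix, factor in factors.items():
        if s.upper().endswith(suffix):
            return float(s[:-len(suffix)].strip()) * factor
    return float(s)

print(size_to_mb("30KB"))  # ~0.029 MB
print(size_to_mb("1GB"))   # 1024.0 MB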
[
"Converts",
"size",
"string",
"into",
"megabytes"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L360-L391
|
[
"def",
"_size_coverter",
"(",
"s",
")",
":",
"if",
"s",
".",
"upper",
"(",
")",
".",
"endswith",
"(",
"\"KB\"",
")",
":",
"return",
"float",
"(",
"s",
".",
"rstrip",
"(",
"\"KB\"",
")",
")",
"/",
"1024",
"elif",
"s",
".",
"upper",
"(",
")",
".",
"endswith",
"(",
"\" B\"",
")",
":",
"return",
"float",
"(",
"s",
".",
"rstrip",
"(",
"\"B\"",
")",
")",
"/",
"1024",
"/",
"1024",
"elif",
"s",
".",
"upper",
"(",
")",
".",
"endswith",
"(",
"\"MB\"",
")",
":",
"return",
"float",
"(",
"s",
".",
"rstrip",
"(",
"\"MB\"",
")",
")",
"elif",
"s",
".",
"upper",
"(",
")",
".",
"endswith",
"(",
"\"GB\"",
")",
":",
"return",
"float",
"(",
"s",
".",
"rstrip",
"(",
"\"GB\"",
")",
")",
"*",
"1024",
"elif",
"s",
".",
"upper",
"(",
")",
".",
"endswith",
"(",
"\"TB\"",
")",
":",
"return",
"float",
"(",
"s",
".",
"rstrip",
"(",
"\"TB\"",
")",
")",
"*",
"1024",
"*",
"1024",
"else",
":",
"return",
"float",
"(",
"s",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._get_pipeline_processes
|
Parses the .nextflow.log file and retrieves the complete list
of processes
This method searches for specific signatures at the beginning of the
.nextflow.log file::
Apr-19 19:07:32.660 [main] DEBUG nextflow.processor
TaskProcessor - Creating operator > report_corrupt_1_1 --
maxForks: 4
When a line with the .*Creating operator.* signature is found, the
process name is retrieved and populates the :attr:`processes` attribute
|
flowcraft/generator/inspect.py
|
def _get_pipeline_processes(self):
"""Parses the .nextflow.log file and retrieves the complete list
of processes
This method searches for specific signatures at the beginning of the
.nextflow.log file::
Apr-19 19:07:32.660 [main] DEBUG nextflow.processor
TaskProcessor - Creating operator > report_corrupt_1_1 --
maxForks: 4
When a line with the .*Creating operator.* signature is found, the
process name is retrieved and populates the :attr:`processes` attribute
"""
with open(self.log_file) as fh:
for line in fh:
if re.match(".*Creating operator.*", line):
# Retrieves the process name from the string
match = re.match(".*Creating operator > (.*) --", line)
process = match.group(1)
if any([process.startswith(x) for x in self._blacklist]):
continue
if process not in self.skip_processes:
self.processes[match.group(1)] = {
"barrier": "W",
"submitted": set(),
"finished": set(),
"failed": set(),
"retry": set(),
"cpus": None,
"memory": None
}
self.process_tags[process] = {}
# Retrieves the pipeline name from the string
if re.match(".*Launching `.*` \[.*\] ", line):
tag_match = re.match(".*Launching `.*` \[(.*)\] ", line)
self.pipeline_tag = tag_match.group(1) if tag_match else \
"?"
name_match = re.match(".*Launching `(.*)` \[.*\] ", line)
self.pipeline_name = name_match.group(1) if name_match \
else "?"
self.content_lines = len(self.processes)
|
def _get_pipeline_processes(self):
"""Parses the .nextflow.log file and retrieves the complete list
of processes
This method searches for specific signatures at the beginning of the
.nextflow.log file::
Apr-19 19:07:32.660 [main] DEBUG nextflow.processor
TaskProcessor - Creating operator > report_corrupt_1_1 --
maxForks: 4
When a line with the .*Creating operator.* signature is found, the
process name is retrieved and populates the :attr:`processes` attribute
"""
with open(self.log_file) as fh:
for line in fh:
if re.match(".*Creating operator.*", line):
# Retrieves the process name from the string
match = re.match(".*Creating operator > (.*) --", line)
process = match.group(1)
if any([process.startswith(x) for x in self._blacklist]):
continue
if process not in self.skip_processes:
self.processes[match.group(1)] = {
"barrier": "W",
"submitted": set(),
"finished": set(),
"failed": set(),
"retry": set(),
"cpus": None,
"memory": None
}
self.process_tags[process] = {}
# Retrieves the pipeline name from the string
if re.match(".*Launching `.*` \[.*\] ", line):
tag_match = re.match(".*Launching `.*` \[(.*)\] ", line)
self.pipeline_tag = tag_match.group(1) if tag_match else \
"?"
name_match = re.match(".*Launching `(.*)` \[.*\] ", line)
self.pipeline_name = name_match.group(1) if name_match \
else "?"
self.content_lines = len(self.processes)
|
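A regex sketch showing how the process name is pulled from the "Creating operator" log signature quoted in the docstring above (the log line is reproduced from that example).

import re

line = ("Apr-19 19:07:32.660 [main] DEBUG nextflow.processor "
        "TaskProcessor - Creating operator > report_corrupt_1_1 -- maxForks: 4")
match = re.match(".*Creating operator > (.*) --", line)
print(match.group(1))  # report_corrupt_1_1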
[
"Parses",
"the",
".",
"nextflow",
".",
"log",
"file",
"and",
"retrieves",
"the",
"complete",
"list",
"of",
"processes"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L407-L454
|
[
"def",
"_get_pipeline_processes",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"log_file",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"if",
"re",
".",
"match",
"(",
"\".*Creating operator.*\"",
",",
"line",
")",
":",
"# Retrieves the process name from the string",
"match",
"=",
"re",
".",
"match",
"(",
"\".*Creating operator > (.*) --\"",
",",
"line",
")",
"process",
"=",
"match",
".",
"group",
"(",
"1",
")",
"if",
"any",
"(",
"[",
"process",
".",
"startswith",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"_blacklist",
"]",
")",
":",
"continue",
"if",
"process",
"not",
"in",
"self",
".",
"skip_processes",
":",
"self",
".",
"processes",
"[",
"match",
".",
"group",
"(",
"1",
")",
"]",
"=",
"{",
"\"barrier\"",
":",
"\"W\"",
",",
"\"submitted\"",
":",
"set",
"(",
")",
",",
"\"finished\"",
":",
"set",
"(",
")",
",",
"\"failed\"",
":",
"set",
"(",
")",
",",
"\"retry\"",
":",
"set",
"(",
")",
",",
"\"cpus\"",
":",
"None",
",",
"\"memory\"",
":",
"None",
"}",
"self",
".",
"process_tags",
"[",
"process",
"]",
"=",
"{",
"}",
"# Retrieves the pipeline name from the string",
"if",
"re",
".",
"match",
"(",
"\".*Launching `.*` \\[.*\\] \"",
",",
"line",
")",
":",
"tag_match",
"=",
"re",
".",
"match",
"(",
"\".*Launching `.*` \\[(.*)\\] \"",
",",
"line",
")",
"self",
".",
"pipeline_tag",
"=",
"tag_match",
".",
"group",
"(",
"1",
")",
"if",
"tag_match",
"else",
"\"?\"",
"name_match",
"=",
"re",
".",
"match",
"(",
"\".*Launching `(.*)` \\[.*\\] \"",
",",
"line",
")",
"self",
".",
"pipeline_name",
"=",
"name_match",
".",
"group",
"(",
"1",
")",
"if",
"name_match",
"else",
"\"?\"",
"self",
".",
"content_lines",
"=",
"len",
"(",
"self",
".",
"processes",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._clear_inspect
|
Clears inspect attributes when re-executing a pipeline
|
flowcraft/generator/inspect.py
|
def _clear_inspect(self):
"""Clears inspect attributes when re-executing a pipeline"""
self.trace_info = defaultdict(list)
self.process_tags = {}
self.process_stats = {}
self.samples = []
self.stored_ids = []
self.stored_log_ids = []
self.time_start = None
self.time_stop = None
self.execution_command = None
self.nextflow_version = None
self.abort_cause = None
self._c = 0
# Clean up of tag running status
for p in self.processes.values():
p["barrier"] = "W"
for i in ["submitted", "finished", "failed", "retry"]:
p[i] = set()
|
def _clear_inspect(self):
"""Clears inspect attributes when re-executing a pipeline"""
self.trace_info = defaultdict(list)
self.process_tags = {}
self.process_stats = {}
self.samples = []
self.stored_ids = []
self.stored_log_ids = []
self.time_start = None
self.time_stop = None
self.execution_command = None
self.nextflow_version = None
self.abort_cause = None
self._c = 0
# Clean up of tag running status
for p in self.processes.values():
p["barrier"] = "W"
for i in ["submitted", "finished", "failed", "retry"]:
p[i] = set()
|
[
"Clears",
"inspect",
"attributes",
"when",
"re",
"-",
"executing",
"a",
"pipeline"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L456-L475
|
[
"def",
"_clear_inspect",
"(",
"self",
")",
":",
"self",
".",
"trace_info",
"=",
"defaultdict",
"(",
"list",
")",
"self",
".",
"process_tags",
"=",
"{",
"}",
"self",
".",
"process_stats",
"=",
"{",
"}",
"self",
".",
"samples",
"=",
"[",
"]",
"self",
".",
"stored_ids",
"=",
"[",
"]",
"self",
".",
"stored_log_ids",
"=",
"[",
"]",
"self",
".",
"time_start",
"=",
"None",
"self",
".",
"time_stop",
"=",
"None",
"self",
".",
"execution_command",
"=",
"None",
"self",
".",
"nextflow_version",
"=",
"None",
"self",
".",
"abort_cause",
"=",
"None",
"self",
".",
"_c",
"=",
"0",
"# Clean up of tag running status",
"for",
"p",
"in",
"self",
".",
"processes",
".",
"values",
"(",
")",
":",
"p",
"[",
"\"barrier\"",
"]",
"=",
"\"W\"",
"for",
"i",
"in",
"[",
"\"submitted\"",
",",
"\"finished\"",
",",
"\"failed\"",
",",
"\"retry\"",
"]",
":",
"p",
"[",
"i",
"]",
"=",
"set",
"(",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._update_pipeline_status
|
Parses the .nextflow.log file for signatures of pipeline status.
It sets the :attr:`status_info` attribute.
|
flowcraft/generator/inspect.py
|
def _update_pipeline_status(self):
"""Parses the .nextflow.log file for signatures of pipeline status.
It sets the :attr:`status_info` attribute.
"""
with open(self.log_file) as fh:
try:
first_line = next(fh)
except StopIteration:
raise eh.InspectionError("Could not read .nextflow.log file. Is file empty?")
time_str = " ".join(first_line.split()[:2])
self.time_start = time_str
if not self.execution_command:
try:
self.execution_command = re.match(
".*nextflow run (.*)", first_line).group(1)
except AttributeError:
self.execution_command = "Unknown"
for line in fh:
if "DEBUG nextflow.cli.CmdRun" in line:
if not self.nextflow_version:
try:
vline = next(fh)
self.nextflow_version = re.match(
".*Version: (.*)", vline).group(1)
except AttributeError:
self.nextflow_version = "Unknown"
if "Session aborted" in line:
self.run_status = "aborted"
# Get abort cause
try:
self.abort_cause = re.match(
".*Cause: (.*)", line).group(1)
except AttributeError:
self.abort_cause = "Unknown"
# Get time of pipeline stop
time_str = " ".join(line.split()[:2])
self.time_stop = time_str
self.send = True
return
if "Execution complete -- Goodbye" in line:
self.run_status = "complete"
# Get time of pipeline stop
time_str = " ".join(line.split()[:2])
self.time_stop = time_str
self.send = True
return
if self.run_status not in ["running", ""]:
self._clear_inspect()
# Take a break to allow nextflow to restart before refreshing
# pipeline processes
sleep(5)
self._get_pipeline_processes()
self.run_status = "running"
|
def _update_pipeline_status(self):
"""Parses the .nextflow.log file for signatures of pipeline status.
It sets the :attr:`status_info` attribute.
"""
with open(self.log_file) as fh:
try:
first_line = next(fh)
except StopIteration:
raise eh.InspectionError("Could not read .nextflow.log file. Is file empty?")
time_str = " ".join(first_line.split()[:2])
self.time_start = time_str
if not self.execution_command:
try:
self.execution_command = re.match(
".*nextflow run (.*)", first_line).group(1)
except AttributeError:
self.execution_command = "Unknown"
for line in fh:
if "DEBUG nextflow.cli.CmdRun" in line:
if not self.nextflow_version:
try:
vline = next(fh)
self.nextflow_version = re.match(
".*Version: (.*)", vline).group(1)
except AttributeError:
self.nextflow_version = "Unknown"
if "Session aborted" in line:
self.run_status = "aborted"
# Get abort cause
try:
self.abort_cause = re.match(
".*Cause: (.*)", line).group(1)
except AttributeError:
self.abort_cause = "Unknown"
# Get time of pipeline stop
time_str = " ".join(line.split()[:2])
self.time_stop = time_str
self.send = True
return
if "Execution complete -- Goodbye" in line:
self.run_status = "complete"
# Get time of pipeline stop
time_str = " ".join(line.split()[:2])
self.time_stop = time_str
self.send = True
return
if self.run_status not in ["running", ""]:
self._clear_inspect()
# Take a break to allow nextflow to restart before refreshing
# pipeline processes
sleep(5)
self._get_pipeline_processes()
self.run_status = "running"
|
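A regex sketch for the abort-cause extraction above, applied to a hypothetical "Session aborted" line (the exact wording of real .nextflow.log entries may differ).

import re

line = ("Apr-19 19:20:01.123 [main] DEBUG nextflow.Session - "
        "Session aborted -- Cause: Missing output file(s)")
try:
    cause = re.match(".*Cause: (.*)", line).group(1)
except AttributeError:
    cause = "Unknown"
print(cause)  # Missing output file(s)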
[
"Parses",
"the",
".",
"nextflow",
".",
"log",
"file",
"for",
"signatures",
"of",
"pipeline",
"status",
".",
"It",
"sets",
"the",
":",
"attr",
":",
"status_info",
"attribute",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L477-L537
|
[
"def",
"_update_pipeline_status",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"log_file",
")",
"as",
"fh",
":",
"try",
":",
"first_line",
"=",
"next",
"(",
"fh",
")",
"except",
":",
"raise",
"eh",
".",
"InspectionError",
"(",
"\"Could not read .nextflow.log file. Is file empty?\"",
")",
"time_str",
"=",
"\" \"",
".",
"join",
"(",
"first_line",
".",
"split",
"(",
")",
"[",
":",
"2",
"]",
")",
"self",
".",
"time_start",
"=",
"time_str",
"if",
"not",
"self",
".",
"execution_command",
":",
"try",
":",
"self",
".",
"execution_command",
"=",
"re",
".",
"match",
"(",
"\".*nextflow run (.*)\"",
",",
"first_line",
")",
".",
"group",
"(",
"1",
")",
"except",
"AttributeError",
":",
"self",
".",
"execution_command",
"=",
"\"Unknown\"",
"for",
"line",
"in",
"fh",
":",
"if",
"\"DEBUG nextflow.cli.CmdRun\"",
"in",
"line",
":",
"if",
"not",
"self",
".",
"nextflow_version",
":",
"try",
":",
"vline",
"=",
"next",
"(",
"fh",
")",
"self",
".",
"nextflow_version",
"=",
"re",
".",
"match",
"(",
"\".*Version: (.*)\"",
",",
"vline",
")",
".",
"group",
"(",
"1",
")",
"except",
"AttributeError",
":",
"self",
".",
"nextflow_version",
"=",
"\"Unknown\"",
"if",
"\"Session aborted\"",
"in",
"line",
":",
"self",
".",
"run_status",
"=",
"\"aborted\"",
"# Get abort cause",
"try",
":",
"self",
".",
"abort_cause",
"=",
"re",
".",
"match",
"(",
"\".*Cause: (.*)\"",
",",
"line",
")",
".",
"group",
"(",
"1",
")",
"except",
"AttributeError",
":",
"self",
".",
"abort_cause",
"=",
"\"Unknown\"",
"# Get time of pipeline stop",
"time_str",
"=",
"\" \"",
".",
"join",
"(",
"line",
".",
"split",
"(",
")",
"[",
":",
"2",
"]",
")",
"self",
".",
"time_stop",
"=",
"time_str",
"self",
".",
"send",
"=",
"True",
"return",
"if",
"\"Execution complete -- Goodbye\"",
"in",
"line",
":",
"self",
".",
"run_status",
"=",
"\"complete\"",
"# Get time of pipeline stop",
"time_str",
"=",
"\" \"",
".",
"join",
"(",
"line",
".",
"split",
"(",
")",
"[",
":",
"2",
"]",
")",
"self",
".",
"time_stop",
"=",
"time_str",
"self",
".",
"send",
"=",
"True",
"return",
"if",
"self",
".",
"run_status",
"not",
"in",
"[",
"\"running\"",
",",
"\"\"",
"]",
":",
"self",
".",
"_clear_inspect",
"(",
")",
"# Take a break to allow nextflow to restart before refreshing",
"# pipeine processes",
"sleep",
"(",
"5",
")",
"self",
".",
"_get_pipeline_processes",
"(",
")",
"self",
".",
"run_status",
"=",
"\"running\""
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._update_tag_status
|
Updates the 'submitted', 'finished', 'failed' and 'retry' status
of each process/tag combination.
Process/tag combinations provided to this method already appear on
the trace file, so their submission status is updated based on their
execution status from nextflow.
For instance, if a tag is successfully
complete, it is moved from the 'submitted' to the 'finished' list.
If not, it is moved to the 'failed' list.
Parameters
----------
process : str
Name of the current process. Must be present in attr:`processes`
vals : list
List of tags for this process that have been gathered in the
trace file.
|
flowcraft/generator/inspect.py
|
def _update_tag_status(self, process, vals):
""" Updates the 'submitted', 'finished', 'failed' and 'retry' status
of each process/tag combination.
Process/tag combinations provided to this method already appear on
the trace file, so their submission status is updated based on their
execution status from nextflow.
For instance, if a tag is successfully
complete, it is moved from the 'submitted' to the 'finished' list.
If not, it is moved to the 'failed' list.
Parameters
----------
process : str
Name of the current process. Must be present in attr:`processes`
vals : list
List of tags for this process that have been gathered in the
trace file.
"""
good_status = ["COMPLETED", "CACHED"]
# Update status of each process
for v in list(vals)[::-1]:
p = self.processes[process]
tag = v["tag"]
# If the process/tag is in the submitted list, move it to the
# complete or failed list
if tag in p["submitted"]:
p["submitted"].remove(tag)
if v["status"] in good_status:
p["finished"].add(tag)
elif v["status"] == "FAILED":
if not v["work_dir"]:
v["work_dir"] = ""
self.process_tags[process][tag]["log"] = \
self._retrieve_log(join(v["work_dir"], ".command.log"))
p["failed"].add(tag)
# If the process/tag is in the retry list and it completed
# successfully, remove it from the retry and fail lists. Otherwise
# maintain it in the retry/failed lists
elif tag in p["retry"]:
if v["status"] in good_status:
p["retry"].remove(tag)
p["failed"].remove(tag)
del self.process_tags[process][tag]["log"]
elif self.run_status == "aborted":
p["retry"].remove(tag)
elif v["status"] in good_status:
p["finished"].add(tag)
# Filter tags without a successful status.
if v["status"] not in good_status:
if v["tag"] in list(p["submitted"]) + list(p["finished"]):
vals.remove(v)
return vals
|
def _update_tag_status(self, process, vals):
""" Updates the 'submitted', 'finished', 'failed' and 'retry' status
of each process/tag combination.
Process/tag combinations provided to this method already appear on
the trace file, so their submission status is updated based on their
execution status from nextflow.
For instance, if a tag is successfully
complete, it is moved from the 'submitted' to the 'finished' list.
If not, it is moved to the 'failed' list.
Parameters
----------
process : str
Name of the current process. Must be present in attr:`processes`
vals : list
List of tags for this process that have been gathered in the
trace file.
"""
good_status = ["COMPLETED", "CACHED"]
# Update status of each process
for v in list(vals)[::-1]:
p = self.processes[process]
tag = v["tag"]
# If the process/tag is in the submitted list, move it to the
# complete or failed list
if tag in p["submitted"]:
p["submitted"].remove(tag)
if v["status"] in good_status:
p["finished"].add(tag)
elif v["status"] == "FAILED":
if not v["work_dir"]:
v["work_dir"] = ""
self.process_tags[process][tag]["log"] = \
self._retrieve_log(join(v["work_dir"], ".command.log"))
p["failed"].add(tag)
# If the process/tag is in the retry list and it completed
# successfully, remove it from the retry and fail lists. Otherwise
# maintain it in the retry/failed lists
elif tag in p["retry"]:
if v["status"] in good_status:
p["retry"].remove(tag)
p["failed"].remove(tag)
del self.process_tags[process][tag]["log"]
elif self.run_status == "aborted":
p["retry"].remove(tag)
elif v["status"] in good_status:
p["finished"].add(tag)
# Filter tags without a successful status.
if v["status"] not in good_status:
if v["tag"] in list(p["submitted"]) + list(p["finished"]):
vals.remove(v)
return vals
|
[
"Updates",
"the",
"submitted",
"finished",
"failed",
"and",
"retry",
"status",
"of",
"each",
"process",
"/",
"tag",
"combination",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L539-L599
|
[
"def",
"_update_tag_status",
"(",
"self",
",",
"process",
",",
"vals",
")",
":",
"good_status",
"=",
"[",
"\"COMPLETED\"",
",",
"\"CACHED\"",
"]",
"# Update status of each process",
"for",
"v",
"in",
"list",
"(",
"vals",
")",
"[",
":",
":",
"-",
"1",
"]",
":",
"p",
"=",
"self",
".",
"processes",
"[",
"process",
"]",
"tag",
"=",
"v",
"[",
"\"tag\"",
"]",
"# If the process/tag is in the submitted list, move it to the",
"# complete or failed list",
"if",
"tag",
"in",
"p",
"[",
"\"submitted\"",
"]",
":",
"p",
"[",
"\"submitted\"",
"]",
".",
"remove",
"(",
"tag",
")",
"if",
"v",
"[",
"\"status\"",
"]",
"in",
"good_status",
":",
"p",
"[",
"\"finished\"",
"]",
".",
"add",
"(",
"tag",
")",
"elif",
"v",
"[",
"\"status\"",
"]",
"==",
"\"FAILED\"",
":",
"if",
"not",
"v",
"[",
"\"work_dir\"",
"]",
":",
"v",
"[",
"\"work_dir\"",
"]",
"=",
"\"\"",
"self",
".",
"process_tags",
"[",
"process",
"]",
"[",
"tag",
"]",
"[",
"\"log\"",
"]",
"=",
"self",
".",
"_retrieve_log",
"(",
"join",
"(",
"v",
"[",
"\"work_dir\"",
"]",
",",
"\".command.log\"",
")",
")",
"p",
"[",
"\"failed\"",
"]",
".",
"add",
"(",
"tag",
")",
"# It the process/tag is in the retry list and it completed",
"# successfully, remove it from the retry and fail lists. Otherwise",
"# maintain it in the retry/failed lists",
"elif",
"tag",
"in",
"p",
"[",
"\"retry\"",
"]",
":",
"if",
"v",
"[",
"\"status\"",
"]",
"in",
"good_status",
":",
"p",
"[",
"\"retry\"",
"]",
".",
"remove",
"(",
"tag",
")",
"p",
"[",
"\"failed\"",
"]",
".",
"remove",
"(",
"tag",
")",
"del",
"self",
".",
"process_tags",
"[",
"process",
"]",
"[",
"tag",
"]",
"[",
"\"log\"",
"]",
"elif",
"self",
".",
"run_status",
"==",
"\"aborted\"",
":",
"p",
"[",
"\"retry\"",
"]",
".",
"remove",
"(",
"tag",
")",
"elif",
"v",
"[",
"\"status\"",
"]",
"in",
"good_status",
":",
"p",
"[",
"\"finished\"",
"]",
".",
"add",
"(",
"tag",
")",
"# Filter tags without a successfull status.",
"if",
"v",
"[",
"\"status\"",
"]",
"not",
"in",
"good_status",
":",
"if",
"v",
"[",
"\"tag\"",
"]",
"in",
"list",
"(",
"p",
"[",
"\"submitted\"",
"]",
")",
"+",
"list",
"(",
"p",
"[",
"\"finished\"",
"]",
")",
":",
"vals",
".",
"remove",
"(",
"v",
")",
"return",
"vals"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._update_barrier_status
|
Checks whether the channels to each process have been closed.
|
flowcraft/generator/inspect.py
|
def _update_barrier_status(self):
"""Checks whether the channels to each process have been closed.
"""
with open(self.log_file) as fh:
for line in fh:
# Exit barrier update after session abort signal
if "Session aborted" in line:
return
if "<<< barrier arrive" in line:
# Retrieve process name from string
process_m = re.match(".*process: (.*)\)", line)
if process_m:
process = process_m.group(1)
# Updates process channel to complete
if process in self.processes:
self.processes[process]["barrier"] = "C"
|
def _update_barrier_status(self):
"""Checks whether the channels to each process have been closed.
"""
with open(self.log_file) as fh:
for line in fh:
# Exit barrier update after session abort signal
if "Session aborted" in line:
return
if "<<< barrier arrive" in line:
# Retrieve process name from string
process_m = re.match(".*process: (.*)\)", line)
if process_m:
process = process_m.group(1)
# Updates process channel to complete
if process in self.processes:
self.processes[process]["barrier"] = "C"
|
[
"Checks",
"whether",
"the",
"channels",
"to",
"each",
"process",
"have",
"been",
"closed",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L601-L620
|
[
"def",
"_update_barrier_status",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"log_file",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"# Exit barrier update after session abort signal",
"if",
"\"Session aborted\"",
"in",
"line",
":",
"return",
"if",
"\"<<< barrier arrive\"",
"in",
"line",
":",
"# Retrieve process name from string",
"process_m",
"=",
"re",
".",
"match",
"(",
"\".*process: (.*)\\)\"",
",",
"line",
")",
"if",
"process_m",
":",
"process",
"=",
"process_m",
".",
"group",
"(",
"1",
")",
"# Updates process channel to complete",
"if",
"process",
"in",
"self",
".",
"processes",
":",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"\"barrier\"",
"]",
"=",
"\"C\""
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._retrieve_log
|
Method used to retrieve the contents of a log file into a list.
Parameters
----------
path
Returns
-------
list or None
Contents of the provided file, each line as a list entry
|
flowcraft/generator/inspect.py
|
def _retrieve_log(path):
"""Method used to retrieve the contents of a log file into a list.
Parameters
----------
path
Returns
-------
list or None
Contents of the provided file, each line as a list entry
"""
if not os.path.exists(path):
return None
with open(path) as fh:
return fh.readlines()
|
def _retrieve_log(path):
"""Method used to retrieve the contents of a log file into a list.
Parameters
----------
path
Returns
-------
list or None
Contents of the provided file, each line as a list entry
"""
if not os.path.exists(path):
return None
with open(path) as fh:
return fh.readlines()
|
[
"Method",
"used",
"to",
"retrieve",
"the",
"contents",
"of",
"a",
"log",
"file",
"into",
"a",
"list",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L623-L640
|
[
"def",
"_retrieve_log",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"None",
"with",
"open",
"(",
"path",
")",
"as",
"fh",
":",
"return",
"fh",
".",
"readlines",
"(",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._update_trace_info
|
Parses a trace line and updates the :attr:`status_info` attribute.
Parameters
----------
fields : list
            List of the tab-separated elements of the trace line
        hm : dict
            Maps the column IDs to their position in the fields argument.
            This dictionary object is retrieved from :func:`_header_mapping`.
|
flowcraft/generator/inspect.py
|
def _update_trace_info(self, fields, hm):
"""Parses a trace line and updates the :attr:`status_info` attribute.
Parameters
----------
fields : list
            List of the tab-separated elements of the trace line
        hm : dict
            Maps the column IDs to their position in the fields argument.
            This dictionary object is retrieved from :func:`_header_mapping`.
"""
process = fields[hm["process"]]
if process not in self.processes:
return
# Get information from a single line of trace file
info = dict((column, fields[pos]) for column, pos in hm.items())
# The headers that will be used to populate the process
process_tag_headers = ["realtime", "rss", "rchar", "wchar"]
for h in process_tag_headers:
            # In the rare occasion that the tag is parsed in the trace
            # file before the log file, add the new tag.
if info["tag"] not in self.process_tags[process]:
# If the 'start' tag is present in the trace, use that
# information. If not, it will be parsed in the log file.
try:
timestart = info["start"].split()[1]
except KeyError:
timestart = None
self.process_tags[process][info["tag"]] = {
"workdir": self._expand_path(info["hash"]),
"start": timestart
}
if h in info and info["tag"] != "-":
if h != "realtime" and info[h] != "-":
self.process_tags[process][info["tag"]][h] = \
round(self._size_coverter(info[h]), 2)
else:
self.process_tags[process][info["tag"]][h] = info[h]
# Set allocated cpu and memory information to process
if "cpus" in info and not self.processes[process]["cpus"]:
self.processes[process]["cpus"] = info["cpus"]
if "memory" in info and not self.processes[process]["memory"]:
try:
self.processes[process]["memory"] = self._size_coverter(
info["memory"])
except ValueError:
self.processes[process]["memory"] = None
if info["hash"] in self.stored_ids:
return
# If the task hash code is provided, expand it to the work directory
# and add a new entry
if "hash" in info:
hs = info["hash"]
info["work_dir"] = self._expand_path(hs)
if "tag" in info:
tag = info["tag"]
if tag != "-" and tag not in self.samples and \
tag.split()[0] not in self.samples:
self.samples.append(tag)
self.trace_info[process].append(info)
self.stored_ids.append(info["hash"])
|
def _update_trace_info(self, fields, hm):
"""Parses a trace line and updates the :attr:`status_info` attribute.
Parameters
----------
fields : list
            List of the tab-separated elements of the trace line
        hm : dict
            Maps the column IDs to their position in the fields argument.
            This dictionary object is retrieved from :func:`_header_mapping`.
"""
process = fields[hm["process"]]
if process not in self.processes:
return
# Get information from a single line of trace file
info = dict((column, fields[pos]) for column, pos in hm.items())
# The headers that will be used to populate the process
process_tag_headers = ["realtime", "rss", "rchar", "wchar"]
for h in process_tag_headers:
            # In the rare occasion that the tag is parsed in the trace
            # file before the log file, add the new tag.
if info["tag"] not in self.process_tags[process]:
# If the 'start' tag is present in the trace, use that
# information. If not, it will be parsed in the log file.
try:
timestart = info["start"].split()[1]
except KeyError:
timestart = None
self.process_tags[process][info["tag"]] = {
"workdir": self._expand_path(info["hash"]),
"start": timestart
}
if h in info and info["tag"] != "-":
if h != "realtime" and info[h] != "-":
self.process_tags[process][info["tag"]][h] = \
round(self._size_coverter(info[h]), 2)
else:
self.process_tags[process][info["tag"]][h] = info[h]
# Set allocated cpu and memory information to process
if "cpus" in info and not self.processes[process]["cpus"]:
self.processes[process]["cpus"] = info["cpus"]
if "memory" in info and not self.processes[process]["memory"]:
try:
self.processes[process]["memory"] = self._size_coverter(
info["memory"])
except ValueError:
self.processes[process]["memory"] = None
if info["hash"] in self.stored_ids:
return
# If the task hash code is provided, expand it to the work directory
# and add a new entry
if "hash" in info:
hs = info["hash"]
info["work_dir"] = self._expand_path(hs)
if "tag" in info:
tag = info["tag"]
if tag != "-" and tag not in self.samples and \
tag.split()[0] not in self.samples:
self.samples.append(tag)
self.trace_info[process].append(info)
self.stored_ids.append(info["hash"])
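
To make the `hm`/`fields`/`info` interplay concrete, here is a small standalone sketch; the column names, positions and values are invented, and `hm` stands in for whatever `_header_mapping` (not shown in this excerpt) returns:

# Hypothetical header mapping: trace column name -> position in the split line.
hm = {"task_id": 0, "hash": 1, "process": 2, "tag": 3, "status": 4,
      "realtime": 5, "rss": 6, "cpus": 7}

# Hypothetical trace line, already split on tabs.
fields = ["1", "a1/b2c3d4", "integrity_coverage_1_1", "sampleA",
          "COMPLETED", "1m 30s", "120 MB", "4"]

# Same construction as in _update_trace_info: one flat dict per trace entry.
info = dict((column, fields[pos]) for column, pos in hm.items())
print(info["process"], info["tag"], info["rss"])
# -> integrity_coverage_1_1 sampleA 120 MB
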
|
[
"Parses",
"a",
"trace",
"line",
"and",
"updates",
"the",
":",
"attr",
":",
"status_info",
"attribute",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L642-L713
|
[
"def",
"_update_trace_info",
"(",
"self",
",",
"fields",
",",
"hm",
")",
":",
"process",
"=",
"fields",
"[",
"hm",
"[",
"\"process\"",
"]",
"]",
"if",
"process",
"not",
"in",
"self",
".",
"processes",
":",
"return",
"# Get information from a single line of trace file",
"info",
"=",
"dict",
"(",
"(",
"column",
",",
"fields",
"[",
"pos",
"]",
")",
"for",
"column",
",",
"pos",
"in",
"hm",
".",
"items",
"(",
")",
")",
"# The headers that will be used to populate the process",
"process_tag_headers",
"=",
"[",
"\"realtime\"",
",",
"\"rss\"",
",",
"\"rchar\"",
",",
"\"wchar\"",
"]",
"for",
"h",
"in",
"process_tag_headers",
":",
"# In the rare occasion the tag is parsed first in the trace",
"# file than the log file, add the new tag.",
"if",
"info",
"[",
"\"tag\"",
"]",
"not",
"in",
"self",
".",
"process_tags",
"[",
"process",
"]",
":",
"# If the 'start' tag is present in the trace, use that",
"# information. If not, it will be parsed in the log file.",
"try",
":",
"timestart",
"=",
"info",
"[",
"\"start\"",
"]",
".",
"split",
"(",
")",
"[",
"1",
"]",
"except",
"KeyError",
":",
"timestart",
"=",
"None",
"self",
".",
"process_tags",
"[",
"process",
"]",
"[",
"info",
"[",
"\"tag\"",
"]",
"]",
"=",
"{",
"\"workdir\"",
":",
"self",
".",
"_expand_path",
"(",
"info",
"[",
"\"hash\"",
"]",
")",
",",
"\"start\"",
":",
"timestart",
"}",
"if",
"h",
"in",
"info",
"and",
"info",
"[",
"\"tag\"",
"]",
"!=",
"\"-\"",
":",
"if",
"h",
"!=",
"\"realtime\"",
"and",
"info",
"[",
"h",
"]",
"!=",
"\"-\"",
":",
"self",
".",
"process_tags",
"[",
"process",
"]",
"[",
"info",
"[",
"\"tag\"",
"]",
"]",
"[",
"h",
"]",
"=",
"round",
"(",
"self",
".",
"_size_coverter",
"(",
"info",
"[",
"h",
"]",
")",
",",
"2",
")",
"else",
":",
"self",
".",
"process_tags",
"[",
"process",
"]",
"[",
"info",
"[",
"\"tag\"",
"]",
"]",
"[",
"h",
"]",
"=",
"info",
"[",
"h",
"]",
"# Set allocated cpu and memory information to process",
"if",
"\"cpus\"",
"in",
"info",
"and",
"not",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"\"cpus\"",
"]",
":",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"\"cpus\"",
"]",
"=",
"info",
"[",
"\"cpus\"",
"]",
"if",
"\"memory\"",
"in",
"info",
"and",
"not",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"\"memory\"",
"]",
":",
"try",
":",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"\"memory\"",
"]",
"=",
"self",
".",
"_size_coverter",
"(",
"info",
"[",
"\"memory\"",
"]",
")",
"except",
"ValueError",
":",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"\"memory\"",
"]",
"=",
"None",
"if",
"info",
"[",
"\"hash\"",
"]",
"in",
"self",
".",
"stored_ids",
":",
"return",
"# If the task hash code is provided, expand it to the work directory",
"# and add a new entry",
"if",
"\"hash\"",
"in",
"info",
":",
"hs",
"=",
"info",
"[",
"\"hash\"",
"]",
"info",
"[",
"\"work_dir\"",
"]",
"=",
"self",
".",
"_expand_path",
"(",
"hs",
")",
"if",
"\"tag\"",
"in",
"info",
":",
"tag",
"=",
"info",
"[",
"\"tag\"",
"]",
"if",
"tag",
"!=",
"\"-\"",
"and",
"tag",
"not",
"in",
"self",
".",
"samples",
"and",
"tag",
".",
"split",
"(",
")",
"[",
"0",
"]",
"not",
"in",
"self",
".",
"samples",
":",
"self",
".",
"samples",
".",
"append",
"(",
"tag",
")",
"self",
".",
"trace_info",
"[",
"process",
"]",
".",
"append",
"(",
"info",
")",
"self",
".",
"stored_ids",
".",
"append",
"(",
"info",
"[",
"\"hash\"",
"]",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._update_process_resources
|
Updates the resources info in :attr:`processes` dictionary.
|
flowcraft/generator/inspect.py
|
def _update_process_resources(self, process, vals):
"""Updates the resources info in :attr:`processes` dictionary.
"""
resources = ["cpus"]
for r in resources:
if not self.processes[process][r]:
try:
self.processes[process][r] = vals[0]["cpus"]
# When the trace column is not present
except KeyError:
pass
|
def _update_process_resources(self, process, vals):
"""Updates the resources info in :attr:`processes` dictionary.
"""
resources = ["cpus"]
for r in resources:
if not self.processes[process][r]:
try:
self.processes[process][r] = vals[0]["cpus"]
# When the trace column is not present
except KeyError:
pass
|
[
"Updates",
"the",
"resources",
"info",
"in",
":",
"attr",
":",
"processes",
"dictionary",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L715-L727
|
[
"def",
"_update_process_resources",
"(",
"self",
",",
"process",
",",
"vals",
")",
":",
"resources",
"=",
"[",
"\"cpus\"",
"]",
"for",
"r",
"in",
"resources",
":",
"if",
"not",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"r",
"]",
":",
"try",
":",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"r",
"]",
"=",
"vals",
"[",
"0",
"]",
"[",
"\"cpus\"",
"]",
"# When the trace column is not present",
"except",
"KeyError",
":",
"pass"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._cpu_load_parser
|
Parses the cpu load from the number of cpus and its usage
        percentage and returns the cpu/hour measure
Parameters
----------
cpus : str
Number of cpus allocated.
cpu_per : str
Percentage of cpu load measured (e.g.: 200,5%).
t : str
The time string can be something like '20s', '1m30s' or '300ms'.
|
flowcraft/generator/inspect.py
|
def _cpu_load_parser(self, cpus, cpu_per, t):
"""Parses the cpu load from the number of cpus and its usage
        percentage and returns the cpu/hour measure
Parameters
----------
cpus : str
Number of cpus allocated.
cpu_per : str
Percentage of cpu load measured (e.g.: 200,5%).
t : str
The time string can be something like '20s', '1m30s' or '300ms'.
"""
try:
_cpus = float(cpus)
_cpu_per = float(cpu_per.replace(",", ".").replace("%", ""))
hours = self._hms(t) / 60 / 24
return ((_cpu_per / (100 * _cpus)) * _cpus) * hours
except ValueError:
return 0
|
def _cpu_load_parser(self, cpus, cpu_per, t):
"""Parses the cpu load from the number of cpus and its usage
        percentage and returns the cpu/hour measure
Parameters
----------
cpus : str
Number of cpus allocated.
cpu_per : str
Percentage of cpu load measured (e.g.: 200,5%).
t : str
The time string can be something like '20s', '1m30s' or '300ms'.
"""
try:
_cpus = float(cpus)
_cpu_per = float(cpu_per.replace(",", ".").replace("%", ""))
hours = self._hms(t) / 60 / 24
return ((_cpu_per / (100 * _cpus)) * _cpus) * hours
except ValueError:
return 0
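
Note that `(_cpu_per / (100 * _cpus)) * _cpus` reduces algebraically to `_cpu_per / 100`, i.e. the measured load expressed as a fraction of one CPU at 100%. The sketch below reproduces the arithmetic with plain numbers; the `/ 60 / 24` time conversion is copied exactly as written above, and the assumption that `_hms` returns seconds is made here only for illustration:

# Worked example of the arithmetic in _cpu_load_parser (all values invented).
cpus = "4"
cpu_per = "200,5%"   # comma used as the decimal separator, as in the docstring example
seconds = 90.0       # assumed return value of _hms("1m30s")

_cpus = float(cpus)                                           # 4.0
_cpu_per = float(cpu_per.replace(",", ".").replace("%", ""))  # 200.5
t_converted = seconds / 60 / 24                               # same conversion as above

load = ((_cpu_per / (100 * _cpus)) * _cpus) * t_converted
# The bracketed expression simplifies to _cpu_per / 100.
assert abs(load - (_cpu_per / 100) * t_converted) < 1e-9
print(round(load, 4))
# -> 0.1253
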
|
[
"Parses",
"the",
"cpu",
"load",
"from",
"the",
"number",
"of",
"cpus",
"and",
"its",
"usage",
"percentage",
"and",
"returnsde",
"cpu",
"/",
"hour",
"measure"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L729-L751
|
[
"def",
"_cpu_load_parser",
"(",
"self",
",",
"cpus",
",",
"cpu_per",
",",
"t",
")",
":",
"try",
":",
"_cpus",
"=",
"float",
"(",
"cpus",
")",
"_cpu_per",
"=",
"float",
"(",
"cpu_per",
".",
"replace",
"(",
"\",\"",
",",
"\".\"",
")",
".",
"replace",
"(",
"\"%\"",
",",
"\"\"",
")",
")",
"hours",
"=",
"self",
".",
"_hms",
"(",
"t",
")",
"/",
"60",
"/",
"24",
"return",
"(",
"(",
"_cpu_per",
"/",
"(",
"100",
"*",
"_cpus",
")",
")",
"*",
"_cpus",
")",
"*",
"hours",
"except",
"ValueError",
":",
"return",
"0"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._assess_resource_warnings
|
Assess whether the cpu load or memory usage is above the allocation
Parameters
----------
process : str
Process name
        vals : list
List of trace information for each tag of that process
Returns
-------
cpu_warnings : dict
Keys are tags and values are the excessive cpu load
mem_warnings : dict
Keys are tags and values are the excessive rss
|
flowcraft/generator/inspect.py
|
def _assess_resource_warnings(self, process, vals):
"""Assess whether the cpu load or memory usage is above the allocation
Parameters
----------
process : str
Process name
        vals : list
List of trace information for each tag of that process
Returns
-------
cpu_warnings : dict
Keys are tags and values are the excessive cpu load
mem_warnings : dict
Keys are tags and values are the excessive rss
"""
cpu_warnings = {}
mem_warnings = {}
for i in vals:
try:
expected_load = float(i["cpus"]) * 100
cpu_load = float(i["%cpu"].replace(",", ".").replace("%", ""))
if expected_load * 0.9 > cpu_load > expected_load * 1.10:
cpu_warnings[i["tag"]] = {
"expected": expected_load,
"value": cpu_load
}
except (ValueError, KeyError):
pass
try:
rss = self._size_coverter(i["rss"])
mem_allocated = self._size_coverter(i["memory"])
if rss > mem_allocated * 1.10:
mem_warnings[i["tag"]] = {
"expected": mem_allocated,
"value": rss
}
except (ValueError, KeyError):
pass
return cpu_warnings, mem_warnings
|
def _assess_resource_warnings(self, process, vals):
"""Assess whether the cpu load or memory usage is above the allocation
Parameters
----------
process : str
Process name
        vals : list
List of trace information for each tag of that process
Returns
-------
cpu_warnings : dict
Keys are tags and values are the excessive cpu load
mem_warnings : dict
Keys are tags and values are the excessive rss
"""
cpu_warnings = {}
mem_warnings = {}
for i in vals:
try:
expected_load = float(i["cpus"]) * 100
cpu_load = float(i["%cpu"].replace(",", ".").replace("%", ""))
if expected_load * 0.9 > cpu_load > expected_load * 1.10:
cpu_warnings[i["tag"]] = {
"expected": expected_load,
"value": cpu_load
}
except (ValueError, KeyError):
pass
try:
rss = self._size_coverter(i["rss"])
mem_allocated = self._size_coverter(i["memory"])
if rss > mem_allocated * 1.10:
mem_warnings[i["tag"]] = {
"expected": mem_allocated,
"value": rss
}
except (ValueError, KeyError):
pass
return cpu_warnings, mem_warnings
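
Two remarks may help here. First, for a non-negative `expected_load` the chained comparison `expected_load * 0.9 > cpu_load > expected_load * 1.10` can never hold, so as written only the memory branch can produce warnings. Second, the sketch below exercises that memory branch with invented numbers; treating the converted sizes as megabytes is an assumption, not something guaranteed by `_size_coverter`:

# Invented example of the 10% memory tolerance used above.
tag = "sampleA"
rss = 2300.0            # e.g. what _size_coverter("2.3 GB") might yield under the MB assumption
mem_allocated = 2048.0  # e.g. _size_coverter("2 GB") under the same assumption

mem_warnings = {}
if rss > mem_allocated * 1.10:
    mem_warnings[tag] = {"expected": mem_allocated, "value": rss}

print(mem_warnings)
# -> {'sampleA': {'expected': 2048.0, 'value': 2300.0}}
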
|
[
"Assess",
"whether",
"the",
"cpu",
"load",
"or",
"memory",
"usage",
"is",
"above",
"the",
"allocation"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L753-L799
|
[
"def",
"_assess_resource_warnings",
"(",
"self",
",",
"process",
",",
"vals",
")",
":",
"cpu_warnings",
"=",
"{",
"}",
"mem_warnings",
"=",
"{",
"}",
"for",
"i",
"in",
"vals",
":",
"try",
":",
"expected_load",
"=",
"float",
"(",
"i",
"[",
"\"cpus\"",
"]",
")",
"*",
"100",
"cpu_load",
"=",
"float",
"(",
"i",
"[",
"\"%cpu\"",
"]",
".",
"replace",
"(",
"\",\"",
",",
"\".\"",
")",
".",
"replace",
"(",
"\"%\"",
",",
"\"\"",
")",
")",
"if",
"expected_load",
"*",
"0.9",
">",
"cpu_load",
">",
"expected_load",
"*",
"1.10",
":",
"cpu_warnings",
"[",
"i",
"[",
"\"tag\"",
"]",
"]",
"=",
"{",
"\"expected\"",
":",
"expected_load",
",",
"\"value\"",
":",
"cpu_load",
"}",
"except",
"(",
"ValueError",
",",
"KeyError",
")",
":",
"pass",
"try",
":",
"rss",
"=",
"self",
".",
"_size_coverter",
"(",
"i",
"[",
"\"rss\"",
"]",
")",
"mem_allocated",
"=",
"self",
".",
"_size_coverter",
"(",
"i",
"[",
"\"memory\"",
"]",
")",
"if",
"rss",
">",
"mem_allocated",
"*",
"1.10",
":",
"mem_warnings",
"[",
"i",
"[",
"\"tag\"",
"]",
"]",
"=",
"{",
"\"expected\"",
":",
"mem_allocated",
",",
"\"value\"",
":",
"rss",
"}",
"except",
"(",
"ValueError",
",",
"KeyError",
")",
":",
"pass",
"return",
"cpu_warnings",
",",
"mem_warnings"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._update_process_stats
|
Updates the process stats with the information from the processes
This method is called at the end of each static parsing of the nextflow
trace file. It re-populates the :attr:`process_stats` dictionary
with the new stat metrics.
|
flowcraft/generator/inspect.py
|
def _update_process_stats(self):
"""Updates the process stats with the information from the processes
This method is called at the end of each static parsing of the nextflow
trace file. It re-populates the :attr:`process_stats` dictionary
with the new stat metrics.
"""
good_status = ["COMPLETED", "CACHED"]
for process, vals in self.trace_info.items():
# Update submission status of tags for each process
vals = self._update_tag_status(process, vals)
# Update process resources
self._update_process_resources(process, vals)
self.process_stats[process] = {}
inst = self.process_stats[process]
# Get number of completed samples
inst["completed"] = "{}".format(
len([x for x in vals if x["status"] in good_status]))
# Get average time
try:
time_array = [self._hms(x["realtime"]) for x in vals]
mean_time = round(sum(time_array) / len(time_array), 1)
mean_time_str = strftime('%H:%M:%S', gmtime(mean_time))
inst["realtime"] = mean_time_str
# When the realtime column is not present
except KeyError:
inst["realtime"] = "-"
# Get cumulative cpu/hours
try:
cpu_hours = [self._cpu_load_parser(
x["cpus"], x["%cpu"], x["realtime"]) for x in vals]
inst["cpuhour"] = round(sum(cpu_hours), 2)
# When the realtime, cpus or %cpus column are not present
except KeyError:
inst["cpuhour"] = "-"
# Assess resource warnings
inst["cpu_warnings"], inst["mem_warnings"] = \
self._assess_resource_warnings(process, vals)
# Get maximum memory
try:
rss_values = [self._size_coverter(x["rss"]) for x in vals
if x["rss"] != "-"]
if rss_values:
max_rss = round(max(rss_values))
rss_str = self._size_compress(max_rss)
else:
rss_str = "-"
inst["maxmem"] = rss_str
except KeyError:
inst["maxmem"] = "-"
# Get read size
try:
rchar_values = [self._size_coverter(x["rchar"]) for x in vals
if x["rchar"] != "-"]
if rchar_values:
avg_rchar = round(sum(rchar_values) / len(rchar_values))
rchar_str = self._size_compress(avg_rchar)
else:
rchar_str = "-"
except KeyError:
rchar_str = "-"
inst["avgread"] = rchar_str
# Get write size
try:
wchar_values = [self._size_coverter(x["wchar"]) for x in vals
if x["wchar"] != "-"]
if wchar_values:
avg_wchar = round(sum(wchar_values) / len(wchar_values))
wchar_str = self._size_compress(avg_wchar)
else:
wchar_str = "-"
except KeyError:
wchar_str = "-"
inst["avgwrite"] = wchar_str
|
def _update_process_stats(self):
"""Updates the process stats with the information from the processes
This method is called at the end of each static parsing of the nextflow
trace file. It re-populates the :attr:`process_stats` dictionary
with the new stat metrics.
"""
good_status = ["COMPLETED", "CACHED"]
for process, vals in self.trace_info.items():
# Update submission status of tags for each process
vals = self._update_tag_status(process, vals)
# Update process resources
self._update_process_resources(process, vals)
self.process_stats[process] = {}
inst = self.process_stats[process]
# Get number of completed samples
inst["completed"] = "{}".format(
len([x for x in vals if x["status"] in good_status]))
# Get average time
try:
time_array = [self._hms(x["realtime"]) for x in vals]
mean_time = round(sum(time_array) / len(time_array), 1)
mean_time_str = strftime('%H:%M:%S', gmtime(mean_time))
inst["realtime"] = mean_time_str
# When the realtime column is not present
except KeyError:
inst["realtime"] = "-"
# Get cumulative cpu/hours
try:
cpu_hours = [self._cpu_load_parser(
x["cpus"], x["%cpu"], x["realtime"]) for x in vals]
inst["cpuhour"] = round(sum(cpu_hours), 2)
# When the realtime, cpus or %cpus column are not present
except KeyError:
inst["cpuhour"] = "-"
# Assess resource warnings
inst["cpu_warnings"], inst["mem_warnings"] = \
self._assess_resource_warnings(process, vals)
# Get maximum memory
try:
rss_values = [self._size_coverter(x["rss"]) for x in vals
if x["rss"] != "-"]
if rss_values:
max_rss = round(max(rss_values))
rss_str = self._size_compress(max_rss)
else:
rss_str = "-"
inst["maxmem"] = rss_str
except KeyError:
inst["maxmem"] = "-"
# Get read size
try:
rchar_values = [self._size_coverter(x["rchar"]) for x in vals
if x["rchar"] != "-"]
if rchar_values:
avg_rchar = round(sum(rchar_values) / len(rchar_values))
rchar_str = self._size_compress(avg_rchar)
else:
rchar_str = "-"
except KeyError:
rchar_str = "-"
inst["avgread"] = rchar_str
# Get write size
try:
wchar_values = [self._size_coverter(x["wchar"]) for x in vals
if x["wchar"] != "-"]
if wchar_values:
avg_wchar = round(sum(wchar_values) / len(wchar_values))
wchar_str = self._size_compress(avg_wchar)
else:
wchar_str = "-"
except KeyError:
wchar_str = "-"
inst["avgwrite"] = wchar_str
|
[
"Updates",
"the",
"process",
"stats",
"with",
"the",
"information",
"from",
"the",
"processes"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L801-L887
|
[
"def",
"_update_process_stats",
"(",
"self",
")",
":",
"good_status",
"=",
"[",
"\"COMPLETED\"",
",",
"\"CACHED\"",
"]",
"for",
"process",
",",
"vals",
"in",
"self",
".",
"trace_info",
".",
"items",
"(",
")",
":",
"# Update submission status of tags for each process",
"vals",
"=",
"self",
".",
"_update_tag_status",
"(",
"process",
",",
"vals",
")",
"# Update process resources",
"self",
".",
"_update_process_resources",
"(",
"process",
",",
"vals",
")",
"self",
".",
"process_stats",
"[",
"process",
"]",
"=",
"{",
"}",
"inst",
"=",
"self",
".",
"process_stats",
"[",
"process",
"]",
"# Get number of completed samples",
"inst",
"[",
"\"completed\"",
"]",
"=",
"\"{}\"",
".",
"format",
"(",
"len",
"(",
"[",
"x",
"for",
"x",
"in",
"vals",
"if",
"x",
"[",
"\"status\"",
"]",
"in",
"good_status",
"]",
")",
")",
"# Get average time",
"try",
":",
"time_array",
"=",
"[",
"self",
".",
"_hms",
"(",
"x",
"[",
"\"realtime\"",
"]",
")",
"for",
"x",
"in",
"vals",
"]",
"mean_time",
"=",
"round",
"(",
"sum",
"(",
"time_array",
")",
"/",
"len",
"(",
"time_array",
")",
",",
"1",
")",
"mean_time_str",
"=",
"strftime",
"(",
"'%H:%M:%S'",
",",
"gmtime",
"(",
"mean_time",
")",
")",
"inst",
"[",
"\"realtime\"",
"]",
"=",
"mean_time_str",
"# When the realtime column is not present",
"except",
"KeyError",
":",
"inst",
"[",
"\"realtime\"",
"]",
"=",
"\"-\"",
"# Get cumulative cpu/hours",
"try",
":",
"cpu_hours",
"=",
"[",
"self",
".",
"_cpu_load_parser",
"(",
"x",
"[",
"\"cpus\"",
"]",
",",
"x",
"[",
"\"%cpu\"",
"]",
",",
"x",
"[",
"\"realtime\"",
"]",
")",
"for",
"x",
"in",
"vals",
"]",
"inst",
"[",
"\"cpuhour\"",
"]",
"=",
"round",
"(",
"sum",
"(",
"cpu_hours",
")",
",",
"2",
")",
"# When the realtime, cpus or %cpus column are not present",
"except",
"KeyError",
":",
"inst",
"[",
"\"cpuhour\"",
"]",
"=",
"\"-\"",
"# Assess resource warnings",
"inst",
"[",
"\"cpu_warnings\"",
"]",
",",
"inst",
"[",
"\"mem_warnings\"",
"]",
"=",
"self",
".",
"_assess_resource_warnings",
"(",
"process",
",",
"vals",
")",
"# Get maximum memory",
"try",
":",
"rss_values",
"=",
"[",
"self",
".",
"_size_coverter",
"(",
"x",
"[",
"\"rss\"",
"]",
")",
"for",
"x",
"in",
"vals",
"if",
"x",
"[",
"\"rss\"",
"]",
"!=",
"\"-\"",
"]",
"if",
"rss_values",
":",
"max_rss",
"=",
"round",
"(",
"max",
"(",
"rss_values",
")",
")",
"rss_str",
"=",
"self",
".",
"_size_compress",
"(",
"max_rss",
")",
"else",
":",
"rss_str",
"=",
"\"-\"",
"inst",
"[",
"\"maxmem\"",
"]",
"=",
"rss_str",
"except",
"KeyError",
":",
"inst",
"[",
"\"maxmem\"",
"]",
"=",
"\"-\"",
"# Get read size",
"try",
":",
"rchar_values",
"=",
"[",
"self",
".",
"_size_coverter",
"(",
"x",
"[",
"\"rchar\"",
"]",
")",
"for",
"x",
"in",
"vals",
"if",
"x",
"[",
"\"rchar\"",
"]",
"!=",
"\"-\"",
"]",
"if",
"rchar_values",
":",
"avg_rchar",
"=",
"round",
"(",
"sum",
"(",
"rchar_values",
")",
"/",
"len",
"(",
"rchar_values",
")",
")",
"rchar_str",
"=",
"self",
".",
"_size_compress",
"(",
"avg_rchar",
")",
"else",
":",
"rchar_str",
"=",
"\"-\"",
"except",
"KeyError",
":",
"rchar_str",
"=",
"\"-\"",
"inst",
"[",
"\"avgread\"",
"]",
"=",
"rchar_str",
"# Get write size",
"try",
":",
"wchar_values",
"=",
"[",
"self",
".",
"_size_coverter",
"(",
"x",
"[",
"\"wchar\"",
"]",
")",
"for",
"x",
"in",
"vals",
"if",
"x",
"[",
"\"wchar\"",
"]",
"!=",
"\"-\"",
"]",
"if",
"wchar_values",
":",
"avg_wchar",
"=",
"round",
"(",
"sum",
"(",
"wchar_values",
")",
"/",
"len",
"(",
"wchar_values",
")",
")",
"wchar_str",
"=",
"self",
".",
"_size_compress",
"(",
"avg_wchar",
")",
"else",
":",
"wchar_str",
"=",
"\"-\"",
"except",
"KeyError",
":",
"wchar_str",
"=",
"\"-\"",
"inst",
"[",
"\"avgwrite\"",
"]",
"=",
"wchar_str"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector.trace_parser
|
Method that parses the trace file once and updates the
:attr:`status_info` attribute with the new entries.
|
flowcraft/generator/inspect.py
|
def trace_parser(self):
"""Method that parses the trace file once and updates the
:attr:`status_info` attribute with the new entries.
"""
        # Check the size stamp of the trace file. Only proceed with the parsing
# if it changed from the previous time.
size_stamp = os.path.getsize(self.trace_file)
self.trace_retry = 0
if size_stamp and size_stamp == self.trace_sizestamp:
return
else:
logger.debug("Updating trace size stamp to: {}".format(size_stamp))
self.trace_sizestamp = size_stamp
with open(self.trace_file) as fh:
# Skip potential empty lines at the start of file
header = next(fh).strip()
while not header:
header = next(fh).strip()
# Get header mappings before parsing the file
hm = self._header_mapping(header)
for line in fh:
# Skip empty lines
if line.strip() == "":
continue
fields = line.strip().split("\t")
                # Skip if task ID was already processed
if fields[hm["task_id"]] in self.stored_ids:
continue
# Parse trace entry and update status_info attribute
self._update_trace_info(fields, hm)
self.send = True
self._update_process_stats()
self._update_barrier_status()
|
def trace_parser(self):
"""Method that parses the trace file once and updates the
:attr:`status_info` attribute with the new entries.
"""
        # Check the size stamp of the trace file. Only proceed with the parsing
# if it changed from the previous time.
size_stamp = os.path.getsize(self.trace_file)
self.trace_retry = 0
if size_stamp and size_stamp == self.trace_sizestamp:
return
else:
logger.debug("Updating trace size stamp to: {}".format(size_stamp))
self.trace_sizestamp = size_stamp
with open(self.trace_file) as fh:
# Skip potential empty lines at the start of file
header = next(fh).strip()
while not header:
header = next(fh).strip()
# Get header mappings before parsing the file
hm = self._header_mapping(header)
for line in fh:
# Skip empty lines
if line.strip() == "":
continue
fields = line.strip().split("\t")
                # Skip if task ID was already processed
if fields[hm["task_id"]] in self.stored_ids:
continue
# Parse trace entry and update status_info attribute
self._update_trace_info(fields, hm)
self.send = True
self._update_process_stats()
self._update_barrier_status()
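
`_header_mapping` is not part of this excerpt; the sketch below assumes it simply maps each tab-separated column name of the header to its index, which is how `hm` is consumed by the parser. The header and data line are invented:

# Hypothetical trace header and entry; the real columns depend on the trace configuration.
header = "task_id\thash\tprocess\ttag\tstatus\trealtime\trss"

# Assumed behaviour of _header_mapping: column name -> column position.
hm = {name: pos for pos, name in enumerate(header.strip().split("\t"))}

line = "1\ta1/b2c3d4\tintegrity_coverage_1_1\tsampleA\tCOMPLETED\t1m 30s\t120 MB\n"
fields = line.strip().split("\t")
print(fields[hm["task_id"]], fields[hm["process"]])
# -> 1 integrity_coverage_1_1
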
|
[
"Method",
"that",
"parses",
"the",
"trace",
"file",
"once",
"and",
"updates",
"the",
":",
"attr",
":",
"status_info",
"attribute",
"with",
"the",
"new",
"entries",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L893-L935
|
[
"def",
"trace_parser",
"(",
"self",
")",
":",
"# Check the timestamp of the tracefile. Only proceed with the parsing",
"# if it changed from the previous time.",
"size_stamp",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"trace_file",
")",
"self",
".",
"trace_retry",
"=",
"0",
"if",
"size_stamp",
"and",
"size_stamp",
"==",
"self",
".",
"trace_sizestamp",
":",
"return",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Updating trace size stamp to: {}\"",
".",
"format",
"(",
"size_stamp",
")",
")",
"self",
".",
"trace_sizestamp",
"=",
"size_stamp",
"with",
"open",
"(",
"self",
".",
"trace_file",
")",
"as",
"fh",
":",
"# Skip potential empty lines at the start of file",
"header",
"=",
"next",
"(",
"fh",
")",
".",
"strip",
"(",
")",
"while",
"not",
"header",
":",
"header",
"=",
"next",
"(",
"fh",
")",
".",
"strip",
"(",
")",
"# Get header mappings before parsing the file",
"hm",
"=",
"self",
".",
"_header_mapping",
"(",
"header",
")",
"for",
"line",
"in",
"fh",
":",
"# Skip empty lines",
"if",
"line",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"continue",
"fields",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"# Skip if task ID was already processes",
"if",
"fields",
"[",
"hm",
"[",
"\"task_id\"",
"]",
"]",
"in",
"self",
".",
"stored_ids",
":",
"continue",
"# Parse trace entry and update status_info attribute",
"self",
".",
"_update_trace_info",
"(",
"fields",
",",
"hm",
")",
"self",
".",
"send",
"=",
"True",
"self",
".",
"_update_process_stats",
"(",
")",
"self",
".",
"_update_barrier_status",
"(",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector.log_parser
|
Method that parses the nextflow log file once and updates the
        number of submitted samples for each process
|
flowcraft/generator/inspect.py
|
def log_parser(self):
"""Method that parses the nextflow log file once and updates the
        number of submitted samples for each process
"""
        # Check the size stamp of the log file. Only proceed with the parsing
# if it changed from the previous time.
size_stamp = os.path.getsize(self.log_file)
self.log_retry = 0
if size_stamp and size_stamp == self.log_sizestamp:
return
else:
logger.debug("Updating log size stamp to: {}".format(size_stamp))
self.log_sizestamp = size_stamp
# Regular expression to catch four groups:
# 1. Start timestamp
# 2. Work directory hash
# 3. Process name
# 4. Tag name
r = ".* (.*) \[.*\].*\[(.*)\].*process > (.*) \((.*)\).*"
with open(self.log_file) as fh:
for line in fh:
if "Submitted process >" in line or \
"Re-submitted process >" in line or \
"Cached process >" in line:
m = re.match(r, line)
if not m:
continue
time_start = m.group(1)
workdir = m.group(2)
process = m.group(3)
tag = m.group(4)
# Skip if this line has already been parsed
if time_start + tag not in self.stored_log_ids:
self.stored_log_ids.append(time_start + tag)
else:
continue
# For first time processes
if process not in self.processes:
continue
p = self.processes[process]
                    # Skip if the process/tag combination has finished or is retrying
if tag in list(p["finished"]) + list(p["retry"]):
continue
# Update failed process/tags when they have been re-submitted
if tag in list(p["failed"]) and \
"Re-submitted process >" in line:
p["retry"].add(tag)
self.send = True
continue
                    # Set process barrier to running. Checks for barrier status
# are performed at the end of the trace parsing in the
# _update_barrier_status method.
p["barrier"] = "R"
if tag not in p["submitted"]:
p["submitted"].add(tag)
# Update the process_tags attribute with the new tag.
# Update only when the tag does not exist. This may rarely
# occur when the tag is parsed first in the trace file
if tag not in self.process_tags[process]:
self.process_tags[process][tag] = {
"workdir": self._expand_path(workdir),
"start": time_start
}
self.send = True
# When the tag is filled in the trace file parsing,
# the timestamp may not be present in the trace. In
# those cases, fill that information here.
elif not self.process_tags[process][tag]["start"]:
self.process_tags[process][tag]["start"] = time_start
self.send = True
self._update_pipeline_status()
|
def log_parser(self):
"""Method that parses the nextflow log file once and updates the
        number of submitted samples for each process
"""
        # Check the size stamp of the log file. Only proceed with the parsing
# if it changed from the previous time.
size_stamp = os.path.getsize(self.log_file)
self.log_retry = 0
if size_stamp and size_stamp == self.log_sizestamp:
return
else:
logger.debug("Updating log size stamp to: {}".format(size_stamp))
self.log_sizestamp = size_stamp
# Regular expression to catch four groups:
# 1. Start timestamp
# 2. Work directory hash
# 3. Process name
# 4. Tag name
r = ".* (.*) \[.*\].*\[(.*)\].*process > (.*) \((.*)\).*"
with open(self.log_file) as fh:
for line in fh:
if "Submitted process >" in line or \
"Re-submitted process >" in line or \
"Cached process >" in line:
m = re.match(r, line)
if not m:
continue
time_start = m.group(1)
workdir = m.group(2)
process = m.group(3)
tag = m.group(4)
# Skip if this line has already been parsed
if time_start + tag not in self.stored_log_ids:
self.stored_log_ids.append(time_start + tag)
else:
continue
# For first time processes
if process not in self.processes:
continue
p = self.processes[process]
                    # Skip if the process/tag combination has finished or is retrying
if tag in list(p["finished"]) + list(p["retry"]):
continue
# Update failed process/tags when they have been re-submitted
if tag in list(p["failed"]) and \
"Re-submitted process >" in line:
p["retry"].add(tag)
self.send = True
continue
                    # Set process barrier to running. Checks for barrier status
# are performed at the end of the trace parsing in the
# _update_barrier_status method.
p["barrier"] = "R"
if tag not in p["submitted"]:
p["submitted"].add(tag)
# Update the process_tags attribute with the new tag.
# Update only when the tag does not exist. This may rarely
# occur when the tag is parsed first in the trace file
if tag not in self.process_tags[process]:
self.process_tags[process][tag] = {
"workdir": self._expand_path(workdir),
"start": time_start
}
self.send = True
# When the tag is filled in the trace file parsing,
# the timestamp may not be present in the trace. In
# those cases, fill that information here.
elif not self.process_tags[process][tag]["start"]:
self.process_tags[process][tag]["start"] = time_start
self.send = True
self._update_pipeline_status()
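
A sketch of the four-group regular expression above applied to an invented submission line; the line is only shaped to satisfy the pattern, so real .nextflow.log entries may differ in their exact layout:

import re

# Raw-string form of the pattern used in log_parser.
r = r".* (.*) \[.*\].*\[(.*)\].*process > (.*) \((.*)\).*"

# Invented log line, built to contain a timestamp, two bracketed fields,
# a process name and a tag.
line = ("Apr-15 10:23:45.123 [Task submitter] INFO nextflow.Session - "
        "[a1/b2c3d4] Submitted process > integrity_coverage_1_1 (sampleA)")

m = re.match(r, line)
if m:
    time_start, workdir, process, tag = m.groups()
    print(time_start, workdir, process, tag)
    # -> 10:23:45.123 a1/b2c3d4 integrity_coverage_1_1 sampleA
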
|
[
"Method",
"that",
"parses",
"the",
"nextflow",
"log",
"file",
"once",
"and",
"updates",
"the",
"submitted",
"number",
"of",
"samples",
"for",
"each",
"process"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L937-L1018
|
[
"def",
"log_parser",
"(",
"self",
")",
":",
"# Check the timestamp of the log file. Only proceed with the parsing",
"# if it changed from the previous time.",
"size_stamp",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"log_file",
")",
"self",
".",
"log_retry",
"=",
"0",
"if",
"size_stamp",
"and",
"size_stamp",
"==",
"self",
".",
"log_sizestamp",
":",
"return",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Updating log size stamp to: {}\"",
".",
"format",
"(",
"size_stamp",
")",
")",
"self",
".",
"log_sizestamp",
"=",
"size_stamp",
"# Regular expression to catch four groups:",
"# 1. Start timestamp",
"# 2. Work directory hash",
"# 3. Process name",
"# 4. Tag name",
"r",
"=",
"\".* (.*) \\[.*\\].*\\[(.*)\\].*process > (.*) \\((.*)\\).*\"",
"with",
"open",
"(",
"self",
".",
"log_file",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"if",
"\"Submitted process >\"",
"in",
"line",
"or",
"\"Re-submitted process >\"",
"in",
"line",
"or",
"\"Cached process >\"",
"in",
"line",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r",
",",
"line",
")",
"if",
"not",
"m",
":",
"continue",
"time_start",
"=",
"m",
".",
"group",
"(",
"1",
")",
"workdir",
"=",
"m",
".",
"group",
"(",
"2",
")",
"process",
"=",
"m",
".",
"group",
"(",
"3",
")",
"tag",
"=",
"m",
".",
"group",
"(",
"4",
")",
"# Skip if this line has already been parsed",
"if",
"time_start",
"+",
"tag",
"not",
"in",
"self",
".",
"stored_log_ids",
":",
"self",
".",
"stored_log_ids",
".",
"append",
"(",
"time_start",
"+",
"tag",
")",
"else",
":",
"continue",
"# For first time processes",
"if",
"process",
"not",
"in",
"self",
".",
"processes",
":",
"continue",
"p",
"=",
"self",
".",
"processes",
"[",
"process",
"]",
"# Skip is process/tag combination has finished or is retrying",
"if",
"tag",
"in",
"list",
"(",
"p",
"[",
"\"finished\"",
"]",
")",
"+",
"list",
"(",
"p",
"[",
"\"retry\"",
"]",
")",
":",
"continue",
"# Update failed process/tags when they have been re-submitted",
"if",
"tag",
"in",
"list",
"(",
"p",
"[",
"\"failed\"",
"]",
")",
"and",
"\"Re-submitted process >\"",
"in",
"line",
":",
"p",
"[",
"\"retry\"",
"]",
".",
"add",
"(",
"tag",
")",
"self",
".",
"send",
"=",
"True",
"continue",
"# Set process barrier to running. Check for barrier status",
"# are performed at the end of the trace parsing in the",
"# _update_barrier_status method.",
"p",
"[",
"\"barrier\"",
"]",
"=",
"\"R\"",
"if",
"tag",
"not",
"in",
"p",
"[",
"\"submitted\"",
"]",
":",
"p",
"[",
"\"submitted\"",
"]",
".",
"add",
"(",
"tag",
")",
"# Update the process_tags attribute with the new tag.",
"# Update only when the tag does not exist. This may rarely",
"# occur when the tag is parsed first in the trace file",
"if",
"tag",
"not",
"in",
"self",
".",
"process_tags",
"[",
"process",
"]",
":",
"self",
".",
"process_tags",
"[",
"process",
"]",
"[",
"tag",
"]",
"=",
"{",
"\"workdir\"",
":",
"self",
".",
"_expand_path",
"(",
"workdir",
")",
",",
"\"start\"",
":",
"time_start",
"}",
"self",
".",
"send",
"=",
"True",
"# When the tag is filled in the trace file parsing,",
"# the timestamp may not be present in the trace. In",
"# those cases, fill that information here.",
"elif",
"not",
"self",
".",
"process_tags",
"[",
"process",
"]",
"[",
"tag",
"]",
"[",
"\"start\"",
"]",
":",
"self",
".",
"process_tags",
"[",
"process",
"]",
"[",
"tag",
"]",
"[",
"\"start\"",
"]",
"=",
"time_start",
"self",
".",
"send",
"=",
"True",
"self",
".",
"_update_pipeline_status",
"(",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector.update_inspection
|
Wrapper method that calls the appropriate main updating methods of
the inspection.
        It is meant to be used inside a loop (like a while loop), so that it can
        continuously update the class attributes from the trace and log files.
        It already implements checks to parse these files only when they
        change, and to ignore entries that have been previously processed.
|
flowcraft/generator/inspect.py
|
def update_inspection(self):
"""Wrapper method that calls the appropriate main updating methods of
the inspection.
        It is meant to be used inside a loop (like a while loop), so that it can
        continuously update the class attributes from the trace and log files.
        It already implements checks to parse these files only when they
        change, and to ignore entries that have been previously processed.
"""
try:
self.log_parser()
except (FileNotFoundError, StopIteration) as e:
logger.debug("ERROR: " + str(sys.exc_info()[0]))
self.log_retry += 1
if self.log_retry == self.MAX_RETRIES:
raise e
try:
self.trace_parser()
except (FileNotFoundError, StopIteration) as e:
logger.debug("ERROR: " + str(sys.exc_info()[0]))
self.trace_retry += 1
if self.trace_retry == self.MAX_RETRIES:
raise e
|
def update_inspection(self):
"""Wrapper method that calls the appropriate main updating methods of
the inspection.
        It is meant to be used inside a loop (like a while loop), so that it can
        continuously update the class attributes from the trace and log files.
        It already implements checks to parse these files only when they
        change, and to ignore entries that have been previously processed.
"""
try:
self.log_parser()
except (FileNotFoundError, StopIteration) as e:
logger.debug("ERROR: " + str(sys.exc_info()[0]))
self.log_retry += 1
if self.log_retry == self.MAX_RETRIES:
raise e
try:
self.trace_parser()
except (FileNotFoundError, StopIteration) as e:
logger.debug("ERROR: " + str(sys.exc_info()[0]))
self.trace_retry += 1
if self.trace_retry == self.MAX_RETRIES:
raise e
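
A hedged sketch of the polling loop this method is designed for; the inspector construction and the refresh interval are placeholders, since only the update logic is shown in this excerpt:

from time import sleep

def poll(inspector, refresh_rate=1.0):
    """Drive update_inspection from a loop, as its docstring suggests.

    `inspector` is assumed to be an already constructed NextflowInspector.
    """
    while True:
        # Re-raises the underlying error once MAX_RETRIES consecutive
        # read failures have accumulated for the log or trace file.
        inspector.update_inspection()
        sleep(refresh_rate)
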
|
[
"Wrapper",
"method",
"that",
"calls",
"the",
"appropriate",
"main",
"updating",
"methods",
"of",
"the",
"inspection",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1020-L1043
|
[
"def",
"update_inspection",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"log_parser",
"(",
")",
"except",
"(",
"FileNotFoundError",
",",
"StopIteration",
")",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"ERROR: \"",
"+",
"str",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
")",
")",
"self",
".",
"log_retry",
"+=",
"1",
"if",
"self",
".",
"log_retry",
"==",
"self",
".",
"MAX_RETRIES",
":",
"raise",
"e",
"try",
":",
"self",
".",
"trace_parser",
"(",
")",
"except",
"(",
"FileNotFoundError",
",",
"StopIteration",
")",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"ERROR: \"",
"+",
"str",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
")",
")",
"self",
".",
"trace_retry",
"+=",
"1",
"if",
"self",
".",
"trace_retry",
"==",
"self",
".",
"MAX_RETRIES",
":",
"raise",
"e"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector.display_overview
|
Displays the default pipeline inspection overview
|
flowcraft/generator/inspect.py
|
def display_overview(self):
"""Displays the default pipeline inspection overview
"""
stay_alive = True
self.screen = curses.initscr()
self.screen.keypad(True)
self.screen.nodelay(-1)
curses.cbreak()
curses.noecho()
curses.start_color()
self.screen_lines = self.screen.getmaxyx()[0]
# self.screen_width = self.screen.getmaxyx()[1]
try:
while stay_alive:
# Provide functionality to certain keybindings
self._curses_keybindings()
# Updates main inspector attributes
self.update_inspection()
# Display curses interface
self.flush_overview()
sleep(self.refresh_rate)
except FileNotFoundError:
sys.stderr.write(colored_print(
"ERROR: nextflow log and/or trace files are no longer "
"reachable!", "red_bold"))
except Exception as e:
sys.stderr.write(str(e))
finally:
curses.nocbreak()
self.screen.keypad(0)
curses.echo()
curses.endwin()
|
def display_overview(self):
"""Displays the default pipeline inspection overview
"""
stay_alive = True
self.screen = curses.initscr()
self.screen.keypad(True)
self.screen.nodelay(-1)
curses.cbreak()
curses.noecho()
curses.start_color()
self.screen_lines = self.screen.getmaxyx()[0]
# self.screen_width = self.screen.getmaxyx()[1]
try:
while stay_alive:
# Provide functionality to certain keybindings
self._curses_keybindings()
# Updates main inspector attributes
self.update_inspection()
# Display curses interface
self.flush_overview()
sleep(self.refresh_rate)
except FileNotFoundError:
sys.stderr.write(colored_print(
"ERROR: nextflow log and/or trace files are no longer "
"reachable!", "red_bold"))
except Exception as e:
sys.stderr.write(str(e))
finally:
curses.nocbreak()
self.screen.keypad(0)
curses.echo()
curses.endwin()
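
As a side note on the design, the manual initscr/cbreak/noecho setup and the try/finally teardown above could also be delegated to the standard library's curses.wrapper, which restores the terminal even when an exception escapes. This is a sketch, not the project's implementation; the attribute assignments only mirror what display_overview sets up itself:

import curses
from time import sleep

def _overview_body(stdscr, inspector, refresh_rate=1.0):
    # Mirror the attributes display_overview prepares manually.
    inspector.screen = stdscr
    inspector.screen_lines = stdscr.getmaxyx()[0]
    stdscr.nodelay(True)
    while True:
        inspector.update_inspection()
        inspector.flush_overview()
        sleep(refresh_rate)

# curses.wrapper(_overview_body, inspector) would perform the terminal setup,
# call the body, and restore the terminal on exit or error.
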
|
[
"Displays",
"the",
"default",
"pipeline",
"inspection",
"overview"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1049-L1087
|
[
"def",
"display_overview",
"(",
"self",
")",
":",
"stay_alive",
"=",
"True",
"self",
".",
"screen",
"=",
"curses",
".",
"initscr",
"(",
")",
"self",
".",
"screen",
".",
"keypad",
"(",
"True",
")",
"self",
".",
"screen",
".",
"nodelay",
"(",
"-",
"1",
")",
"curses",
".",
"cbreak",
"(",
")",
"curses",
".",
"noecho",
"(",
")",
"curses",
".",
"start_color",
"(",
")",
"self",
".",
"screen_lines",
"=",
"self",
".",
"screen",
".",
"getmaxyx",
"(",
")",
"[",
"0",
"]",
"# self.screen_width = self.screen.getmaxyx()[1]",
"try",
":",
"while",
"stay_alive",
":",
"# Provide functionality to certain keybindings",
"self",
".",
"_curses_keybindings",
"(",
")",
"# Updates main inspector attributes",
"self",
".",
"update_inspection",
"(",
")",
"# Display curses interface",
"self",
".",
"flush_overview",
"(",
")",
"sleep",
"(",
"self",
".",
"refresh_rate",
")",
"except",
"FileNotFoundError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"colored_print",
"(",
"\"ERROR: nextflow log and/or trace files are no longer \"",
"\"reachable!\"",
",",
"\"red_bold\"",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"str",
"(",
"e",
")",
")",
"finally",
":",
"curses",
".",
"nocbreak",
"(",
")",
"self",
".",
"screen",
".",
"keypad",
"(",
"0",
")",
"curses",
".",
"echo",
"(",
")",
"curses",
".",
"endwin",
"(",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._updown
|
Provides curses scroll functionality.
|
flowcraft/generator/inspect.py
|
def _updown(self, direction):
"""Provides curses scroll functionality.
"""
if direction == "up" and self.top_line != 0:
self.top_line -= 1
elif direction == "down" and \
self.screen.getmaxyx()[0] + self.top_line\
<= self.content_lines + 3:
self.top_line += 1
|
def _updown(self, direction):
"""Provides curses scroll functionality.
"""
if direction == "up" and self.top_line != 0:
self.top_line -= 1
elif direction == "down" and \
self.screen.getmaxyx()[0] + self.top_line\
<= self.content_lines + 3:
self.top_line += 1
|
[
"Provides",
"curses",
"scroll",
"functionality",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1108-L1117
|
[
"def",
"_updown",
"(",
"self",
",",
"direction",
")",
":",
"if",
"direction",
"==",
"\"up\"",
"and",
"self",
".",
"top_line",
"!=",
"0",
":",
"self",
".",
"top_line",
"-=",
"1",
"elif",
"direction",
"==",
"\"down\"",
"and",
"self",
".",
"screen",
".",
"getmaxyx",
"(",
")",
"[",
"0",
"]",
"+",
"self",
".",
"top_line",
"<=",
"self",
".",
"content_lines",
"+",
"3",
":",
"self",
".",
"top_line",
"+=",
"1"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._rightleft
|
Provides curses horizontal padding
|
flowcraft/generator/inspect.py
|
def _rightleft(self, direction):
"""Provides curses horizontal padding"""
if direction == "left" and self.padding != 0:
self.padding -= 1
if direction == "right" and \
self.screen.getmaxyx()[1] + self.padding < self.max_width:
self.padding += 1
|
def _rightleft(self, direction):
"""Provides curses horizontal padding"""
if direction == "left" and self.padding != 0:
self.padding -= 1
if direction == "right" and \
self.screen.getmaxyx()[1] + self.padding < self.max_width:
self.padding += 1
|
[
"Provides",
"curses",
"horizontal",
"padding"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1119-L1127
|
[
"def",
"_rightleft",
"(",
"self",
",",
"direction",
")",
":",
"if",
"direction",
"==",
"\"left\"",
"and",
"self",
".",
"padding",
"!=",
"0",
":",
"self",
".",
"padding",
"-=",
"1",
"if",
"direction",
"==",
"\"right\"",
"and",
"self",
".",
"screen",
".",
"getmaxyx",
"(",
")",
"[",
"1",
"]",
"+",
"self",
".",
"padding",
"<",
"self",
".",
"max_width",
":",
"self",
".",
"padding",
"+=",
"1"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector.flush_overview
|
Displays the default overview of the pipeline execution from the
:attr:`status_info`, :attr:`processes` and :attr:`run_status`
attributes into stdout.
|
flowcraft/generator/inspect.py
|
def flush_overview(self):
"""Displays the default overview of the pipeline execution from the
:attr:`status_info`, :attr:`processes` and :attr:`run_status`
attributes into stdout.
"""
colors = {
"W": 1,
"R": 2,
"C": 3
}
pc = {
"running": 3,
"complete": 3,
"aborted": 4,
"error": 4
}
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
# self.screen.erase()
height, width = self.screen.getmaxyx()
win = curses.newpad(height, 2000)
# Add static header
header = "Pipeline [{}] inspection at {}. Status: ".format(
self.pipeline_tag, strftime("%Y-%m-%d %H:%M:%S", gmtime()))
win.addstr(0, 0, header)
win.addstr(0, len(header), self.run_status,
curses.color_pair(pc[self.run_status]))
submission_str = "{0:23.23} {1:23.23} {2:23.23} {3:23.23}".format(
"Running: {}".format(
sum([len(x["submitted"]) for x in self.processes.values()])
),
"Failed: {}".format(
sum([len(x["failed"]) for x in self.processes.values()])
),
"Retrying: {}".format(
sum([len(x["retry"]) for x in self.processes.values()])
),
"Completed: {}".format(
sum([len(x["finished"]) for x in self.processes.values()])
)
)
win.addstr(
1, 0, submission_str, curses.color_pair(1)
)
headers = ["", "Process", "Running", "Complete", "Error",
"Avg Time", "Max Mem", "Avg Read", "Avg Write"]
header_str = "{0: ^1} " \
"{1: ^25} " \
"{2: ^7} " \
"{3: ^7} " \
"{4: ^7} " \
"{5: ^10} " \
"{6: ^10} " \
"{7: ^10} " \
"{8: ^10} ".format(*headers)
self.max_width = len(header_str)
win.addstr(3, 0, header_str, curses.A_UNDERLINE | curses.A_REVERSE)
# Get display size
top = self.top_line
bottom = self.screen_lines - 4 + self.top_line
# Fetch process information
for p, process in enumerate(
list(self.processes.keys())[top:bottom]):
if process not in self.process_stats:
vals = ["-"] * 8
txt_fmt = curses.A_NORMAL
else:
ref = self.process_stats[process]
vals = [ref["completed"],
len(self.processes[process]["failed"]),
ref["realtime"],
ref["maxmem"], ref["avgread"],
ref["avgwrite"]]
txt_fmt = curses.A_BOLD
proc = self.processes[process]
if proc["retry"]:
completed = "{}({})".format(len(proc["submitted"]),
len(proc["retry"]))
else:
completed = "{}".format(len(proc["submitted"]))
win.addstr(
4 + p, 0, "{0: ^1} "
"{1:25.25} "
"{2: ^7} "
"{3: ^7} "
"{4: ^7} "
"{5: ^10} "
"{6: ^10} "
"{7: ^10} "
"{8: ^10} ".format(
proc["barrier"],
process,
completed,
*vals),
curses.color_pair(colors[proc["barrier"]]) | txt_fmt)
win.clrtoeol()
win.refresh(0, self.padding, 0, 0, height-1, width-1)
|
def flush_overview(self):
"""Displays the default overview of the pipeline execution from the
:attr:`status_info`, :attr:`processes` and :attr:`run_status`
attributes into stdout.
"""
colors = {
"W": 1,
"R": 2,
"C": 3
}
pc = {
"running": 3,
"complete": 3,
"aborted": 4,
"error": 4
}
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
# self.screen.erase()
height, width = self.screen.getmaxyx()
win = curses.newpad(height, 2000)
# Add static header
header = "Pipeline [{}] inspection at {}. Status: ".format(
self.pipeline_tag, strftime("%Y-%m-%d %H:%M:%S", gmtime()))
win.addstr(0, 0, header)
win.addstr(0, len(header), self.run_status,
curses.color_pair(pc[self.run_status]))
submission_str = "{0:23.23} {1:23.23} {2:23.23} {3:23.23}".format(
"Running: {}".format(
sum([len(x["submitted"]) for x in self.processes.values()])
),
"Failed: {}".format(
sum([len(x["failed"]) for x in self.processes.values()])
),
"Retrying: {}".format(
sum([len(x["retry"]) for x in self.processes.values()])
),
"Completed: {}".format(
sum([len(x["finished"]) for x in self.processes.values()])
)
)
win.addstr(
1, 0, submission_str, curses.color_pair(1)
)
headers = ["", "Process", "Running", "Complete", "Error",
"Avg Time", "Max Mem", "Avg Read", "Avg Write"]
header_str = "{0: ^1} " \
"{1: ^25} " \
"{2: ^7} " \
"{3: ^7} " \
"{4: ^7} " \
"{5: ^10} " \
"{6: ^10} " \
"{7: ^10} " \
"{8: ^10} ".format(*headers)
self.max_width = len(header_str)
win.addstr(3, 0, header_str, curses.A_UNDERLINE | curses.A_REVERSE)
# Get display size
top = self.top_line
bottom = self.screen_lines - 4 + self.top_line
# Fetch process information
for p, process in enumerate(
list(self.processes.keys())[top:bottom]):
if process not in self.process_stats:
vals = ["-"] * 8
txt_fmt = curses.A_NORMAL
else:
ref = self.process_stats[process]
vals = [ref["completed"],
len(self.processes[process]["failed"]),
ref["realtime"],
ref["maxmem"], ref["avgread"],
ref["avgwrite"]]
txt_fmt = curses.A_BOLD
proc = self.processes[process]
if proc["retry"]:
completed = "{}({})".format(len(proc["submitted"]),
len(proc["retry"]))
else:
completed = "{}".format(len(proc["submitted"]))
win.addstr(
4 + p, 0, "{0: ^1} "
"{1:25.25} "
"{2: ^7} "
"{3: ^7} "
"{4: ^7} "
"{5: ^10} "
"{6: ^10} "
"{7: ^10} "
"{8: ^10} ".format(
proc["barrier"],
process,
completed,
*vals),
curses.color_pair(colors[proc["barrier"]]) | txt_fmt)
win.clrtoeol()
win.refresh(0, self.padding, 0, 0, height-1, width-1)
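
The fixed-width table comes from the chained format specifications; the snippet below reproduces just the header row and shows how `max_width` is derived from it (this is the only part of the layout that runs standalone, without a curses screen):

headers = ["", "Process", "Running", "Complete", "Error",
           "Avg Time", "Max Mem", "Avg Read", "Avg Write"]

# Adjacent string literals concatenate into a single format string,
# exactly as in flush_overview; "^N" centres each column in a minimum width of N.
header_str = "{0: ^1} " \
             "{1: ^25} " \
             "{2: ^7} " \
             "{3: ^7} " \
             "{4: ^7} " \
             "{5: ^10} " \
             "{6: ^10} " \
             "{7: ^10} " \
             "{8: ^10} ".format(*headers)

print(repr(header_str))
print(len(header_str))   # this length is what max_width is set to for horizontal scrolling
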
|
[
"Displays",
"the",
"default",
"overview",
"of",
"the",
"pipeline",
"execution",
"from",
"the",
":",
"attr",
":",
"status_info",
":",
"attr",
":",
"processes",
"and",
":",
"attr",
":",
"run_status",
"attributes",
"into",
"stdout",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1129-L1242
|
[
"def",
"flush_overview",
"(",
"self",
")",
":",
"colors",
"=",
"{",
"\"W\"",
":",
"1",
",",
"\"R\"",
":",
"2",
",",
"\"C\"",
":",
"3",
"}",
"pc",
"=",
"{",
"\"running\"",
":",
"3",
",",
"\"complete\"",
":",
"3",
",",
"\"aborted\"",
":",
"4",
",",
"\"error\"",
":",
"4",
"}",
"curses",
".",
"init_pair",
"(",
"1",
",",
"curses",
".",
"COLOR_WHITE",
",",
"curses",
".",
"COLOR_BLACK",
")",
"curses",
".",
"init_pair",
"(",
"2",
",",
"curses",
".",
"COLOR_BLUE",
",",
"curses",
".",
"COLOR_BLACK",
")",
"curses",
".",
"init_pair",
"(",
"3",
",",
"curses",
".",
"COLOR_GREEN",
",",
"curses",
".",
"COLOR_BLACK",
")",
"curses",
".",
"init_pair",
"(",
"4",
",",
"curses",
".",
"COLOR_MAGENTA",
",",
"curses",
".",
"COLOR_BLACK",
")",
"# self.screen.erase()",
"height",
",",
"width",
"=",
"self",
".",
"screen",
".",
"getmaxyx",
"(",
")",
"win",
"=",
"curses",
".",
"newpad",
"(",
"height",
",",
"2000",
")",
"# Add static header",
"header",
"=",
"\"Pipeline [{}] inspection at {}. Status: \"",
".",
"format",
"(",
"self",
".",
"pipeline_tag",
",",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
",",
"gmtime",
"(",
")",
")",
")",
"win",
".",
"addstr",
"(",
"0",
",",
"0",
",",
"header",
")",
"win",
".",
"addstr",
"(",
"0",
",",
"len",
"(",
"header",
")",
",",
"self",
".",
"run_status",
",",
"curses",
".",
"color_pair",
"(",
"pc",
"[",
"self",
".",
"run_status",
"]",
")",
")",
"submission_str",
"=",
"\"{0:23.23} {1:23.23} {2:23.23} {3:23.23}\"",
".",
"format",
"(",
"\"Running: {}\"",
".",
"format",
"(",
"sum",
"(",
"[",
"len",
"(",
"x",
"[",
"\"submitted\"",
"]",
")",
"for",
"x",
"in",
"self",
".",
"processes",
".",
"values",
"(",
")",
"]",
")",
")",
",",
"\"Failed: {}\"",
".",
"format",
"(",
"sum",
"(",
"[",
"len",
"(",
"x",
"[",
"\"failed\"",
"]",
")",
"for",
"x",
"in",
"self",
".",
"processes",
".",
"values",
"(",
")",
"]",
")",
")",
",",
"\"Retrying: {}\"",
".",
"format",
"(",
"sum",
"(",
"[",
"len",
"(",
"x",
"[",
"\"retry\"",
"]",
")",
"for",
"x",
"in",
"self",
".",
"processes",
".",
"values",
"(",
")",
"]",
")",
")",
",",
"\"Completed: {}\"",
".",
"format",
"(",
"sum",
"(",
"[",
"len",
"(",
"x",
"[",
"\"finished\"",
"]",
")",
"for",
"x",
"in",
"self",
".",
"processes",
".",
"values",
"(",
")",
"]",
")",
")",
")",
"win",
".",
"addstr",
"(",
"1",
",",
"0",
",",
"submission_str",
",",
"curses",
".",
"color_pair",
"(",
"1",
")",
")",
"headers",
"=",
"[",
"\"\"",
",",
"\"Process\"",
",",
"\"Running\"",
",",
"\"Complete\"",
",",
"\"Error\"",
",",
"\"Avg Time\"",
",",
"\"Max Mem\"",
",",
"\"Avg Read\"",
",",
"\"Avg Write\"",
"]",
"header_str",
"=",
"\"{0: ^1} \"",
"\"{1: ^25} \"",
"\"{2: ^7} \"",
"\"{3: ^7} \"",
"\"{4: ^7} \"",
"\"{5: ^10} \"",
"\"{6: ^10} \"",
"\"{7: ^10} \"",
"\"{8: ^10} \"",
".",
"format",
"(",
"*",
"headers",
")",
"self",
".",
"max_width",
"=",
"len",
"(",
"header_str",
")",
"win",
".",
"addstr",
"(",
"3",
",",
"0",
",",
"header_str",
",",
"curses",
".",
"A_UNDERLINE",
"|",
"curses",
".",
"A_REVERSE",
")",
"# Get display size",
"top",
"=",
"self",
".",
"top_line",
"bottom",
"=",
"self",
".",
"screen_lines",
"-",
"4",
"+",
"self",
".",
"top_line",
"# Fetch process information",
"for",
"p",
",",
"process",
"in",
"enumerate",
"(",
"list",
"(",
"self",
".",
"processes",
".",
"keys",
"(",
")",
")",
"[",
"top",
":",
"bottom",
"]",
")",
":",
"if",
"process",
"not",
"in",
"self",
".",
"process_stats",
":",
"vals",
"=",
"[",
"\"-\"",
"]",
"*",
"8",
"txt_fmt",
"=",
"curses",
".",
"A_NORMAL",
"else",
":",
"ref",
"=",
"self",
".",
"process_stats",
"[",
"process",
"]",
"vals",
"=",
"[",
"ref",
"[",
"\"completed\"",
"]",
",",
"len",
"(",
"self",
".",
"processes",
"[",
"process",
"]",
"[",
"\"failed\"",
"]",
")",
",",
"ref",
"[",
"\"realtime\"",
"]",
",",
"ref",
"[",
"\"maxmem\"",
"]",
",",
"ref",
"[",
"\"avgread\"",
"]",
",",
"ref",
"[",
"\"avgwrite\"",
"]",
"]",
"txt_fmt",
"=",
"curses",
".",
"A_BOLD",
"proc",
"=",
"self",
".",
"processes",
"[",
"process",
"]",
"if",
"proc",
"[",
"\"retry\"",
"]",
":",
"completed",
"=",
"\"{}({})\"",
".",
"format",
"(",
"len",
"(",
"proc",
"[",
"\"submitted\"",
"]",
")",
",",
"len",
"(",
"proc",
"[",
"\"retry\"",
"]",
")",
")",
"else",
":",
"completed",
"=",
"\"{}\"",
".",
"format",
"(",
"len",
"(",
"proc",
"[",
"\"submitted\"",
"]",
")",
")",
"win",
".",
"addstr",
"(",
"4",
"+",
"p",
",",
"0",
",",
"\"{0: ^1} \"",
"\"{1:25.25} \"",
"\"{2: ^7} \"",
"\"{3: ^7} \"",
"\"{4: ^7} \"",
"\"{5: ^10} \"",
"\"{6: ^10} \"",
"\"{7: ^10} \"",
"\"{8: ^10} \"",
".",
"format",
"(",
"proc",
"[",
"\"barrier\"",
"]",
",",
"process",
",",
"completed",
",",
"*",
"vals",
")",
",",
"curses",
".",
"color_pair",
"(",
"colors",
"[",
"proc",
"[",
"\"barrier\"",
"]",
"]",
")",
"|",
"txt_fmt",
")",
"win",
".",
"clrtoeol",
"(",
")",
"win",
".",
"refresh",
"(",
"0",
",",
"self",
".",
"padding",
",",
"0",
",",
"0",
",",
"height",
"-",
"1",
",",
"width",
"-",
"1",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._get_log_lines
|
Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
|
flowcraft/generator/inspect.py
|
def _get_log_lines(self, n=300):
"""Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
"""
with open(self.log_file) as fh:
last_lines = fh.readlines()[-n:]
return last_lines
|
def _get_log_lines(self, n=300):
"""Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
"""
with open(self.log_file) as fh:
last_lines = fh.readlines()[-n:]
return last_lines
|
[
"Returns",
"a",
"list",
"with",
"the",
"last",
"n",
"lines",
"of",
"the",
"nextflow",
"log",
"file"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1356-L1373
|
[
"def",
"_get_log_lines",
"(",
"self",
",",
"n",
"=",
"300",
")",
":",
"with",
"open",
"(",
"self",
".",
"log_file",
")",
"as",
"fh",
":",
"last_lines",
"=",
"fh",
".",
"readlines",
"(",
")",
"[",
"-",
"n",
":",
"]",
"return",
"last_lines"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
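A small stand-alone sketch of the tail-read pattern used in the _get_log_lines entry above; the log file name is a hypothetical example and not taken from the repository:

def tail_lines(path, n=300):
    # Read the whole file and keep only the last n lines, mirroring the
    # slice-based approach of _get_log_lines (adequate for modestly sized logs).
    with open(path) as fh:
        return fh.readlines()[-n:]

# Hypothetical usage, assuming a .nextflow.log file exists in the working directory:
# print("".join(tail_lines(".nextflow.log", n=5)))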
|
test
|
NextflowInspector._prepare_static_info
|
Prepares the first batch of information, containing static
information such as the pipeline file, and configuration files
Returns
-------
dict
Dict with the static information for the first POST request
|
flowcraft/generator/inspect.py
|
def _prepare_static_info(self):
"""Prepares the first batch of information, containing static
information such as the pipeline file, and configuration files
Returns
-------
dict
Dict with the static information for the first POST request
"""
pipeline_files = {}
with open(join(self.workdir, self.pipeline_name)) as fh:
pipeline_files["pipelineFile"] = fh.readlines()
nf_config = join(self.workdir, "nextflow.config")
if os.path.exists(nf_config):
with open(nf_config) as fh:
pipeline_files["configFile"] = fh.readlines()
# Check for specific flowcraft configuration files
configs = {
"params.config": "paramsFile",
"resources.config": "resourcesFile",
"containers.config": "containersFile",
"user.config": "userFile",
}
for config, key in configs.items():
cfile = join(self.workdir, config)
if os.path.exists(cfile):
with open(cfile) as fh:
pipeline_files[key] = fh.readlines()
return pipeline_files
|
def _prepare_static_info(self):
"""Prepares the first batch of information, containing static
information such as the pipeline file, and configuration files
Returns
-------
dict
Dict with the static information for the first POST request
"""
pipeline_files = {}
with open(join(self.workdir, self.pipeline_name)) as fh:
pipeline_files["pipelineFile"] = fh.readlines()
nf_config = join(self.workdir, "nextflow.config")
if os.path.exists(nf_config):
with open(nf_config) as fh:
pipeline_files["configFile"] = fh.readlines()
# Check for specific flowcraft configuration files
configs = {
"params.config": "paramsFile",
"resources.config": "resourcesFile",
"containers.config": "containersFile",
"user.config": "userFile",
}
for config, key in configs.items():
cfile = join(self.workdir, config)
if os.path.exists(cfile):
with open(cfile) as fh:
pipeline_files[key] = fh.readlines()
return pipeline_files
|
[
"Prepares",
"the",
"first",
"batch",
"of",
"information",
"containing",
"static",
"information",
"such",
"as",
"the",
"pipeline",
"file",
"and",
"configuration",
"files"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1432-L1465
|
[
"def",
"_prepare_static_info",
"(",
"self",
")",
":",
"pipeline_files",
"=",
"{",
"}",
"with",
"open",
"(",
"join",
"(",
"self",
".",
"workdir",
",",
"self",
".",
"pipeline_name",
")",
")",
"as",
"fh",
":",
"pipeline_files",
"[",
"\"pipelineFile\"",
"]",
"=",
"fh",
".",
"readlines",
"(",
")",
"nf_config",
"=",
"join",
"(",
"self",
".",
"workdir",
",",
"\"nextflow.config\"",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"nf_config",
")",
":",
"with",
"open",
"(",
"nf_config",
")",
"as",
"fh",
":",
"pipeline_files",
"[",
"\"configFile\"",
"]",
"=",
"fh",
".",
"readlines",
"(",
")",
"# Check for specific flowcraft configurations files",
"configs",
"=",
"{",
"\"params.config\"",
":",
"\"paramsFile\"",
",",
"\"resources.config\"",
":",
"\"resourcesFile\"",
",",
"\"containers.config\"",
":",
"\"containersFile\"",
",",
"\"user.config\"",
":",
"\"userFile\"",
",",
"}",
"for",
"config",
",",
"key",
"in",
"configs",
".",
"items",
"(",
")",
":",
"cfile",
"=",
"join",
"(",
"self",
".",
"workdir",
",",
"config",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cfile",
")",
":",
"with",
"open",
"(",
"cfile",
")",
"as",
"fh",
":",
"pipeline_files",
"[",
"key",
"]",
"=",
"fh",
".",
"readlines",
"(",
")",
"return",
"pipeline_files"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
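A reduced sketch of the optional-file collection pattern used by _prepare_static_info above; the directory and the name-to-key mapping below are assumptions for illustration only:

import os
from os.path import join

def collect_optional_files(workdir, name_to_key):
    # Read every file from name_to_key that exists in workdir into the payload;
    # absent files are silently skipped, as in _prepare_static_info.
    payload = {}
    for fname, key in name_to_key.items():
        path = join(workdir, fname)
        if os.path.exists(path):
            with open(path) as fh:
                payload[key] = fh.readlines()
    return payload

# Hypothetical usage:
# collect_optional_files(".", {"params.config": "paramsFile", "user.config": "userFile"})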
|
test
|
NextflowInspector._dag_file_to_dict
|
Function that opens the dotfile named .treeDag.json in the current
working directory
Returns
-------
Returns a dictionary with the DAG object to be used in the POST request
made through the method _establish_connection
|
flowcraft/generator/inspect.py
|
def _dag_file_to_dict(self):
"""Function that opens the dotfile named .treeDag.json in the current
working directory
Returns
-------
Returns a dictionary with the DAG object to be used in the POST request
made through the method _establish_connection
"""
try:
dag_file = open(os.path.join(self.workdir, ".treeDag.json"))
dag_json = json.load(dag_file)
except (FileNotFoundError, json.decoder.JSONDecodeError):
logger.warning(colored_print(
"WARNING: dotfile named .treeDag.json not found or corrupted",
"red_bold"))
dag_json = {}
return dag_json
|
def _dag_file_to_dict(self):
"""Function that opens the dotfile named .treeDag.json in the current
working directory
Returns
-------
Returns a dictionary with the DAG object to be used in the POST request
made through the method _establish_connection
"""
try:
dag_file = open(os.path.join(self.workdir, ".treeDag.json"))
dag_json = json.load(dag_file)
except (FileNotFoundError, json.decoder.JSONDecodeError):
logger.warning(colored_print(
"WARNING: dotfile named .treeDag.json not found or corrupted",
"red_bold"))
dag_json = {}
return dag_json
|
[
"Function",
"that",
"opens",
"the",
"dotfile",
"named",
".",
"treeDag",
".",
"json",
"in",
"the",
"current",
"working",
"directory"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1467-L1486
|
[
"def",
"_dag_file_to_dict",
"(",
"self",
")",
":",
"try",
":",
"dag_file",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"workdir",
",",
"\".treeDag.json\"",
")",
")",
"dag_json",
"=",
"json",
".",
"load",
"(",
"dag_file",
")",
"except",
"(",
"FileNotFoundError",
",",
"json",
".",
"decoder",
".",
"JSONDecodeError",
")",
":",
"logger",
".",
"warning",
"(",
"colored_print",
"(",
"\"WARNING: dotfile named .treeDag.json not found or corrupted\"",
",",
"\"red_bold\"",
")",
")",
"dag_json",
"=",
"{",
"}",
"return",
"dag_json"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
NextflowInspector._get_run_hash
|
Gets the hash of the nextflow file
|
flowcraft/generator/inspect.py
|
def _get_run_hash(self):
"""Gets the hash of the nextflow file"""
# Get name and path of the pipeline from the log file
pipeline_path = get_nextflow_filepath(self.log_file)
# Get hash from the entire pipeline file
pipeline_hash = hashlib.md5()
with open(pipeline_path, "rb") as fh:
for chunk in iter(lambda: fh.read(4096), b""):
pipeline_hash.update(chunk)
# Get hash from the current working dir and hostname
workdir = self.workdir.encode("utf8")
hostname = socket.gethostname().encode("utf8")
hardware_addr = str(uuid.getnode()).encode("utf8")
dir_hash = hashlib.md5(workdir + hostname + hardware_addr)
return pipeline_hash.hexdigest() + dir_hash.hexdigest()
|
def _get_run_hash(self):
"""Gets the hash of the nextflow file"""
# Get name and path of the pipeline from the log file
pipeline_path = get_nextflow_filepath(self.log_file)
# Get hash from the entire pipeline file
pipeline_hash = hashlib.md5()
with open(pipeline_path, "rb") as fh:
for chunk in iter(lambda: fh.read(4096), b""):
pipeline_hash.update(chunk)
# Get hash from the current working dir and hostname
workdir = self.workdir.encode("utf8")
hostname = socket.gethostname().encode("utf8")
hardware_addr = str(uuid.getnode()).encode("utf8")
dir_hash = hashlib.md5(workdir + hostname + hardware_addr)
return pipeline_hash.hexdigest() + dir_hash.hexdigest()
|
[
"Gets",
"the",
"hash",
"of",
"the",
"nextflow",
"file"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/inspect.py#L1533-L1550
|
[
"def",
"_get_run_hash",
"(",
"self",
")",
":",
"# Get name and path of the pipeline from the log file",
"pipeline_path",
"=",
"get_nextflow_filepath",
"(",
"self",
".",
"log_file",
")",
"# Get hash from the entire pipeline file",
"pipeline_hash",
"=",
"hashlib",
".",
"md5",
"(",
")",
"with",
"open",
"(",
"pipeline_path",
",",
"\"rb\"",
")",
"as",
"fh",
":",
"for",
"chunk",
"in",
"iter",
"(",
"lambda",
":",
"fh",
".",
"read",
"(",
"4096",
")",
",",
"b\"\"",
")",
":",
"pipeline_hash",
".",
"update",
"(",
"chunk",
")",
"# Get hash from the current working dir and hostname",
"workdir",
"=",
"self",
".",
"workdir",
".",
"encode",
"(",
"\"utf8\"",
")",
"hostname",
"=",
"socket",
".",
"gethostname",
"(",
")",
".",
"encode",
"(",
"\"utf8\"",
")",
"hardware_addr",
"=",
"str",
"(",
"uuid",
".",
"getnode",
"(",
")",
")",
".",
"encode",
"(",
"\"utf8\"",
")",
"dir_hash",
"=",
"hashlib",
".",
"md5",
"(",
"workdir",
"+",
"hostname",
"+",
"hardware_addr",
")",
"return",
"pipeline_hash",
".",
"hexdigest",
"(",
")",
"+",
"dir_hash",
".",
"hexdigest",
"(",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
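A self-contained sketch of the two-part hashing scheme in _get_run_hash above, showing how the two MD5 hexdigests are concatenated into one run identifier; all input values are fabricated:

import hashlib

def two_part_hash(pipeline_bytes, workdir, hostname, node_id):
    # First digest: the pipeline file content.
    content_hash = hashlib.md5(pipeline_bytes)
    # Second digest: the execution context (working directory, hostname, hardware address).
    context = (workdir + hostname + str(node_id)).encode("utf8")
    context_hash = hashlib.md5(context)
    # The run hash is the concatenation of both 32-character hexdigests.
    return content_hash.hexdigest() + context_hash.hexdigest()

# Fabricated example values:
# two_part_hash(b"#!/usr/bin/env nextflow", "/data/run1", "node01", 123456789)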
|
test
|
get_nextflow_filepath
|
Gets the nextflow file path from the nextflow log file. It searches for
the nextflow run command throughout the file.
Parameters
----------
log_file : str
Path for the .nextflow.log file
Returns
-------
str
Path for the nextflow file
|
flowcraft/generator/utils.py
|
def get_nextflow_filepath(log_file):
"""Gets the nextflow file path from the nextflow log file. It searches for
the nextflow run command throughout the file.
Parameters
----------
log_file : str
Path for the .nextflow.log file
Returns
-------
str
Path for the nextflow file
"""
with open(log_file) as fh:
# Searches for the first occurrence of the nextflow pipeline
# file name in the .nextflow.log file
while 1:
line = fh.readline()
if not line:
# file is empty
raise eh.LogError("Nextflow command path could not be found - Is "
".nextflow.log empty?")
try:
# Regex supports absolute paths and relative paths
pipeline_path = re.match(".*\s(.*.nf).*", line) \
.group(1)
return pipeline_path
except AttributeError:
continue
|
def get_nextflow_filepath(log_file):
"""Gets the nextflow file path from the nextflow log file. It searches for
the nextflow run command throughout the file.
Parameters
----------
log_file : str
Path for the .nextflow.log file
Returns
-------
str
Path for the nextflow file
"""
with open(log_file) as fh:
# Searches for the first occurrence of the nextflow pipeline
# file name in the .nextflow.log file
while 1:
line = fh.readline()
if not line:
# file is empty
raise eh.LogError("Nextflow command path could not be found - Is "
".nextflow.log empty?")
try:
# Regex supports absolute paths and relative paths
pipeline_path = re.match(".*\s(.*.nf).*", line) \
.group(1)
return pipeline_path
except AttributeError:
continue
|
[
"Gets",
"the",
"nextflow",
"file",
"path",
"from",
"the",
"nextflow",
"log",
"file",
".",
"It",
"searches",
"for",
"the",
"nextflow",
"run",
"command",
"throughout",
"the",
"file",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/utils.py#L9-L39
|
[
"def",
"get_nextflow_filepath",
"(",
"log_file",
")",
":",
"with",
"open",
"(",
"log_file",
")",
"as",
"fh",
":",
"# Searches for the first occurence of the nextflow pipeline",
"# file name in the .nextflow.log file",
"while",
"1",
":",
"line",
"=",
"fh",
".",
"readline",
"(",
")",
"if",
"not",
"line",
":",
"# file is empty",
"raise",
"eh",
".",
"LogError",
"(",
"\"Nextflow command path could not be found - Is \"",
"\".nextflow.log empty?\"",
")",
"try",
":",
"# Regex supports absolute paths and relative paths",
"pipeline_path",
"=",
"re",
".",
"match",
"(",
"\".*\\s(.*.nf).*\"",
",",
"line",
")",
".",
"group",
"(",
"1",
")",
"return",
"pipeline_path",
"except",
"AttributeError",
":",
"continue"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
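To make the regular expression in get_nextflow_filepath concrete, here is a small stand-alone check against a fabricated log line (the line content is an assumption, not an actual .nextflow.log excerpt):

import re

# Fabricated log line containing a `nextflow run` command with a .nf file
line = "Apr-20 10:00:00.000 [main] DEBUG nextflow.cli.Launcher - $> nextflow run pipeline.nf -profile standard"

# Same pattern as above, written as a raw string; group(1) captures the .nf path
match = re.match(r".*\s(.*.nf).*", line)
if match:
    print(match.group(1))  # prints: pipeline.nf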
|
test
|
main
|
Main executor of the split_fasta template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly : list
Assembly file.
min_size : int
Minimum contig size.
|
flowcraft/templates/split_fasta.py
|
def main(sample_id, assembly, min_size):
"""Main executor of the split_fasta template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly : list
Assembly file.
min_size : int
Minimum contig size.
"""
logger.info("Starting script")
f_open = open(assembly, "r")
entry = (x[1] for x in groupby(f_open, lambda line: line[0] == ">"))
success = 0
for header in entry:
headerStr = header.__next__()[1:].strip()
seq = "".join(s.strip() for s in entry.__next__())
if len(seq) >= min_size:
with open(sample_id + '_' + headerStr.replace(" ","_").replace("=","_") + '.fasta', "w") as output_file:
output_file.write(">" + sample_id + "_" + headerStr.replace(" ","_").replace("=","_") + "\\n" + seq + "\\n")
success += 1
f_open.close()
logger.info("{} sequences sucessfully splitted.".format(success))
|
def main(sample_id, assembly, min_size):
"""Main executor of the split_fasta template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly : list
Assembly file.
min_size : int
Minimum contig size.
"""
logger.info("Starting script")
f_open = open(assembly, "r")
entry = (x[1] for x in groupby(f_open, lambda line: line[0] == ">"))
success = 0
for header in entry:
headerStr = header.__next__()[1:].strip()
seq = "".join(s.strip() for s in entry.__next__())
if len(seq) >= min_size:
with open(sample_id + '_' + headerStr.replace(" ","_").replace("=","_") + '.fasta', "w") as output_file:
output_file.write(">" + sample_id + "_" + headerStr.replace(" ","_").replace("=","_") + "\\n" + seq + "\\n")
success += 1
f_open.close()
logger.info("{} sequences sucessfully splitted.".format(success))
|
[
"Main",
"executor",
"of",
"the",
"split_fasta",
"template",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/split_fasta.py#L52-L84
|
[
"def",
"main",
"(",
"sample_id",
",",
"assembly",
",",
"min_size",
")",
":",
"logger",
".",
"info",
"(",
"\"Starting script\"",
")",
"f_open",
"=",
"open",
"(",
"assembly",
",",
"\"rU\"",
")",
"entry",
"=",
"(",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"groupby",
"(",
"f_open",
",",
"lambda",
"line",
":",
"line",
"[",
"0",
"]",
"==",
"\">\"",
")",
")",
"success",
"=",
"0",
"for",
"header",
"in",
"entry",
":",
"headerStr",
"=",
"header",
".",
"__next__",
"(",
")",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"seq",
"=",
"\"\"",
".",
"join",
"(",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"entry",
".",
"__next__",
"(",
")",
")",
"if",
"len",
"(",
"seq",
")",
">=",
"min_size",
":",
"with",
"open",
"(",
"sample_id",
"+",
"'_'",
"+",
"headerStr",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
".",
"replace",
"(",
"\"=\"",
",",
"\"_\"",
")",
"+",
"'.fasta'",
",",
"\"w\"",
")",
"as",
"output_file",
":",
"output_file",
".",
"write",
"(",
"\">\"",
"+",
"sample_id",
"+",
"\"_\"",
"+",
"headerStr",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
".",
"replace",
"(",
"\"=\"",
",",
"\"_\"",
")",
"+",
"\"\\\\n\"",
"+",
"seq",
"+",
"\"\\\\n\"",
")",
"success",
"+=",
"1",
"f_open",
".",
"close",
"(",
")",
"logger",
".",
"info",
"(",
"\"{} sequences sucessfully splitted.\"",
".",
"format",
"(",
"success",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
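A reduced sketch of the header/sequence pairing with itertools.groupby used in the split_fasta main template above; the FASTA content is fabricated for illustration:

from itertools import groupby

# Fabricated two-record FASTA provided as a list of lines rather than a file
fasta_lines = [">contig_1 len=5\n", "ACGTA\n", ">contig_2 len=3\n", "ACG\n"]

# groupby alternates between header groups (lines starting with ">") and
# sequence groups, so consuming them in pairs yields (header, sequence) records.
entry = (group for _, group in groupby(fasta_lines, lambda line: line[0] == ">"))
for header_group in entry:
    header = next(header_group)[1:].strip()
    seq = "".join(line.strip() for line in next(entry))
    print(header, len(seq))  # contig_1 len=5 5, then contig_2 len=3 3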
|
test
|
main
|
Parses a nextflow trace file, searches for processes with a specific tag
and sends a JSON report with the relevant information
The expected fields for the trace file are::
0. task_id
1. process
2. tag
3. status
4. exit code
5. start timestamp
6. container
7. cpus
8. duration
9. realtime
10. queue
11. cpu percentage
12. memory percentage
13. real memory size of the process
14. virtual memory size of the process
Parameters
----------
trace_file : str
Path to the nextflow trace file
|
flowcraft/templates/pipeline_status.py
|
def main(sample_id, trace_file, workdir):
"""
Parses a nextflow trace file, searches for processes with a specific tag
and sends a JSON report with the relevant information
The expected fields for the trace file are::
0. task_id
1. process
2. tag
3. status
4. exit code
5. start timestamp
6. container
7. cpus
8. duration
9. realtime
10. queue
11. cpu percentage
12. memory percentage
13. real memory size of the process
14. virtual memory size of the process
Parameters
----------
trace_file : str
Path to the nextflow trace file
"""
# Determine the path of the stored JSON for the sample_id
stats_suffix = ".stats.json"
stats_path = join(workdir, sample_id + stats_suffix)
trace_path = join(workdir, trace_file)
logger.info("Starting pipeline status routine")
logger.debug("Checking for previous pipeline status data")
stats_array = get_previous_stats(stats_path)
logger.info("Stats JSON object set to : {}".format(stats_array))
# Search for this substring in the tags field. Only lines with this
# tag will be processed for the reports
tag = " getStats"
logger.debug("Tag variable set to: {}".format(tag))
logger.info("Starting parsing of trace file: {}".format(trace_path))
with open(trace_path) as fh:
header = next(fh).strip().split()
logger.debug("Header set to: {}".format(header))
for line in fh:
fields = line.strip().split("\t")
# Check if tag substring is in the tag field of the nextflow trace
if tag in fields[2] and fields[3] == "COMPLETED":
logger.debug(
"Parsing trace line with COMPLETED status: {}".format(
line))
current_json = get_json_info(fields, header)
stats_array[fields[0]] = current_json
else:
logger.debug(
"Ignoring trace line without COMPLETED status"
" or stats specific tag: {}".format(
line))
with open(join(stats_path), "w") as fh, open(".report.json", "w") as rfh:
fh.write(json.dumps(stats_array, separators=(",", ":")))
rfh.write(json.dumps(stats_array, separators=(",", ":")))
|
def main(sample_id, trace_file, workdir):
"""
Parses a nextflow trace file, searches for processes with a specific tag
and sends a JSON report with the relevant information
The expected fields for the trace file are::
0. task_id
1. process
2. tag
3. status
4. exit code
5. start timestamp
6. container
7. cpus
8. duration
9. realtime
10. queue
11. cpu percentage
12. memory percentage
13. real memory size of the process
14. virtual memory size of the process
Parameters
----------
trace_file : str
Path to the nextflow trace file
"""
# Determine the path of the stored JSON for the sample_id
stats_suffix = ".stats.json"
stats_path = join(workdir, sample_id + stats_suffix)
trace_path = join(workdir, trace_file)
logger.info("Starting pipeline status routine")
logger.debug("Checking for previous pipeline status data")
stats_array = get_previous_stats(stats_path)
logger.info("Stats JSON object set to : {}".format(stats_array))
# Search for this substring in the tags field. Only lines with this
# tag will be processed for the reports
tag = " getStats"
logger.debug("Tag variable set to: {}".format(tag))
logger.info("Starting parsing of trace file: {}".format(trace_path))
with open(trace_path) as fh:
header = next(fh).strip().split()
logger.debug("Header set to: {}".format(header))
for line in fh:
fields = line.strip().split("\t")
# Check if tag substring is in the tag field of the nextflow trace
if tag in fields[2] and fields[3] == "COMPLETED":
logger.debug(
"Parsing trace line with COMPLETED status: {}".format(
line))
current_json = get_json_info(fields, header)
stats_array[fields[0]] = current_json
else:
logger.debug(
"Ignoring trace line without COMPLETED status"
" or stats specific tag: {}".format(
line))
with open(join(stats_path), "w") as fh, open(".report.json", "w") as rfh:
fh.write(json.dumps(stats_array, separators=(",", ":")))
rfh.write(json.dumps(stats_array, separators=(",", ":")))
|
[
"Parses",
"a",
"nextflow",
"trace",
"file",
"searches",
"for",
"processes",
"with",
"a",
"specific",
"tag",
"and",
"sends",
"a",
"JSON",
"report",
"with",
"the",
"relevant",
"information"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/pipeline_status.py#L88-L157
|
[
"def",
"main",
"(",
"sample_id",
",",
"trace_file",
",",
"workdir",
")",
":",
"# Determine the path of the stored JSON for the sample_id",
"stats_suffix",
"=",
"\".stats.json\"",
"stats_path",
"=",
"join",
"(",
"workdir",
",",
"sample_id",
"+",
"stats_suffix",
")",
"trace_path",
"=",
"join",
"(",
"workdir",
",",
"trace_file",
")",
"logger",
".",
"info",
"(",
"\"Starting pipeline status routine\"",
")",
"logger",
".",
"debug",
"(",
"\"Checking for previous pipeline status data\"",
")",
"stats_array",
"=",
"get_previous_stats",
"(",
"stats_path",
")",
"logger",
".",
"info",
"(",
"\"Stats JSON object set to : {}\"",
".",
"format",
"(",
"stats_array",
")",
")",
"# Search for this substring in the tags field. Only lines with this",
"# tag will be processed for the reports",
"tag",
"=",
"\" getStats\"",
"logger",
".",
"debug",
"(",
"\"Tag variable set to: {}\"",
".",
"format",
"(",
"tag",
")",
")",
"logger",
".",
"info",
"(",
"\"Starting parsing of trace file: {}\"",
".",
"format",
"(",
"trace_path",
")",
")",
"with",
"open",
"(",
"trace_path",
")",
"as",
"fh",
":",
"header",
"=",
"next",
"(",
"fh",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Header set to: {}\"",
".",
"format",
"(",
"header",
")",
")",
"for",
"line",
"in",
"fh",
":",
"fields",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"# Check if tag substring is in the tag field of the nextflow trace",
"if",
"tag",
"in",
"fields",
"[",
"2",
"]",
"and",
"fields",
"[",
"3",
"]",
"==",
"\"COMPLETED\"",
":",
"logger",
".",
"debug",
"(",
"\"Parsing trace line with COMPLETED status: {}\"",
".",
"format",
"(",
"line",
")",
")",
"current_json",
"=",
"get_json_info",
"(",
"fields",
",",
"header",
")",
"stats_array",
"[",
"fields",
"[",
"0",
"]",
"]",
"=",
"current_json",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Ignoring trace line without COMPLETED status\"",
"\" or stats specific tag: {}\"",
".",
"format",
"(",
"line",
")",
")",
"with",
"open",
"(",
"join",
"(",
"stats_path",
")",
",",
"\"w\"",
")",
"as",
"fh",
",",
"open",
"(",
"\".report.json\"",
",",
"\"w\"",
")",
"as",
"rfh",
":",
"fh",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"stats_array",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")",
"rfh",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"stats_array",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
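A minimal sketch of the tab-separated trace filtering performed by main above; the field layout follows the docstring, but the example rows are fabricated and truncated to five columns:

# Fabricated trace lines: a header row followed by two task rows
trace = [
    "task_id\tprocess\ttag\tstatus\texit\n",
    "1\tintegrity_coverage\tsampleA getStats\tCOMPLETED\t0\n",
    "2\tfastqc\tsampleA\tFAILED\t1\n",
]

tag = " getStats"
header = trace[0].strip().split()
selected = {}
for line in trace[1:]:
    fields = line.strip().split("\t")
    # Keep only completed tasks whose tag field carries the getStats marker
    if tag in fields[2] and fields[3] == "COMPLETED":
        selected[fields[0]] = dict(zip(header, fields))

print(selected)  # only task_id "1" passes the filter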
|
test
|
brew_innuendo
|
Brews a given list of processes according to the recipe
Parameters
----------
args : argparse.Namespace
The arguments passed through argparse that will be used to check
the recipe, tasks and brew the process
Returns
-------
str
The final pipeline string, ready for the engine.
|
flowcraft/generator/recipe.py
|
def brew_innuendo(args):
"""Brews a given list of processes according to the recipe
Parameters
----------
args : argparse.Namespace
The arguments passed through argparse that will be used to check
the recipe, tasks and brew the process
Returns
-------
str
The final pipeline string, ready for the engine.
"""
# Create recipe class instance
automatic_pipeline = Innuendo()
if not args.tasks:
input_processes = " ".join(
automatic_pipeline.process_descriptions.keys())
else:
input_processes = args.tasks
# Validate the provided pipeline processes
validated = automatic_pipeline.validate_pipeline(input_processes)
if not validated:
sys.exit(1)
# Get the final pipeline string
pipeline_string = automatic_pipeline.run_auto_pipeline(input_processes)
return pipeline_string
|
def brew_innuendo(args):
"""Brews a given list of processes according to the recipe
Parameters
----------
args : argparse.Namespace
The arguments passed through argparse that will be used to check
the recipe, tasks and brew the process
Returns
-------
str
The final pipeline string, ready for the engine.
"""
# Create recipe class instance
automatic_pipeline = Innuendo()
if not args.tasks:
input_processes = " ".join(
automatic_pipeline.process_descriptions.keys())
else:
input_processes = args.tasks
# Validate the provided pipeline processes
validated = automatic_pipeline.validate_pipeline(input_processes)
if not validated:
sys.exit(1)
# Get the final pipeline string
pipeline_string = automatic_pipeline.run_auto_pipeline(input_processes)
return pipeline_string
|
[
"Brews",
"a",
"given",
"list",
"of",
"processes",
"according",
"to",
"the",
"recipe"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L528-L561
|
[
"def",
"brew_innuendo",
"(",
"args",
")",
":",
"# Create recipe class instance",
"automatic_pipeline",
"=",
"Innuendo",
"(",
")",
"if",
"not",
"args",
".",
"tasks",
":",
"input_processes",
"=",
"\" \"",
".",
"join",
"(",
"automatic_pipeline",
".",
"process_descriptions",
".",
"keys",
"(",
")",
")",
"else",
":",
"input_processes",
"=",
"args",
".",
"tasks",
"# Validate the provided pipeline processes",
"validated",
"=",
"automatic_pipeline",
".",
"validate_pipeline",
"(",
"input_processes",
")",
"if",
"not",
"validated",
":",
"sys",
".",
"exit",
"(",
"1",
")",
"# Get the final pipeline string",
"pipeline_string",
"=",
"automatic_pipeline",
".",
"run_auto_pipeline",
"(",
"input_processes",
")",
"return",
"pipeline_string"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
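The fallback from an empty task list to every known process in brew_innuendo can be shown with a tiny stand-alone sketch; the namespace and the process table here are hypothetical placeholders:

import argparse

# Namespace mimicking the parsed command-line arguments expected by brew_innuendo
args = argparse.Namespace(tasks="")

# Hypothetical stand-in for automatic_pipeline.process_descriptions
process_descriptions = {"taskA": None, "taskB": None}

# When no tasks are given, every known process name is joined into one string
input_processes = args.tasks if args.tasks else " ".join(process_descriptions.keys())
print(input_processes)  # prints: taskA taskB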
|
test
|
brew_recipe
|
Returns a pipeline string from a recipe name.
Parameters
----------
recipe_name : str
Name of the recipe. Must match the name attribute in one of the classes
defined in :mod:`flowcraft.generator.recipes`
Returns
-------
str
Pipeline string ready for parsing and processing by flowcraft engine
|
flowcraft/generator/recipe.py
|
def brew_recipe(recipe_name):
"""Returns a pipeline string from a recipe name.
Parameters
----------
recipe_name : str
Name of the recipe. Must match the name attribute in one of the classes
defined in :mod:`flowcraft.generator.recipes`
Returns
-------
str
Pipeline string ready for parsing and processing by flowcraft engine
"""
# This will iterate over all modules included in the recipes subpackage
# It will return the import class and the module name, along with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
# Create instance of class to allow fetching the name attribute
recipe_cls = cls()
if getattr(recipe_cls, "name", None) == recipe_name:
return recipe_cls.brew()
logger.error(
colored_print("Recipe name '{}' does not exist.".format(recipe_name))
)
sys.exit(1)
|
def brew_recipe(recipe_name):
"""Returns a pipeline string from a recipe name.
Parameters
----------
recipe_name : str
Name of the recipe. Must match the name attribute in one of the classes
defined in :mod:`flowcraft.generator.recipes`
Returns
-------
str
Pipeline string ready for parsing and processing by flowcraft engine
"""
# This will iterate over all modules included in the recipes subpackage
# It will return the import class and the module name, along with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
# Create instance of class to allow fetching the name attribute
recipe_cls = cls()
if getattr(recipe_cls, "name", None) == recipe_name:
return recipe_cls.brew()
logger.error(
colored_print("Recipe name '{}' does not exist.".format(recipe_name))
)
sys.exit(1)
|
[
"Returns",
"a",
"pipeline",
"string",
"from",
"a",
"recipe",
"name",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L649-L688
|
[
"def",
"brew_recipe",
"(",
"recipe_name",
")",
":",
"# This will iterate over all modules included in the recipes subpackage",
"# It will return the import class and the module name, algon with the",
"# correct prefix",
"prefix",
"=",
"\"{}.\"",
".",
"format",
"(",
"recipes",
".",
"__name__",
")",
"for",
"importer",
",",
"modname",
",",
"_",
"in",
"pkgutil",
".",
"iter_modules",
"(",
"recipes",
".",
"__path__",
",",
"prefix",
")",
":",
"# Import the current module",
"_module",
"=",
"importer",
".",
"find_module",
"(",
"modname",
")",
".",
"load_module",
"(",
"modname",
")",
"# Fetch all available classes in module",
"_recipe_classes",
"=",
"[",
"cls",
"for",
"cls",
"in",
"_module",
".",
"__dict__",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"cls",
",",
"type",
")",
"]",
"# Iterate over each Recipe class, and check for a match with the",
"# provided recipe name.",
"for",
"cls",
"in",
"_recipe_classes",
":",
"# Create instance of class to allow fetching the name attribute",
"recipe_cls",
"=",
"cls",
"(",
")",
"if",
"getattr",
"(",
"recipe_cls",
",",
"\"name\"",
",",
"None",
")",
"==",
"recipe_name",
":",
"return",
"recipe_cls",
".",
"brew",
"(",
")",
"logger",
".",
"error",
"(",
"colored_print",
"(",
"\"Recipe name '{}' does not exist.\"",
".",
"format",
"(",
"recipe_name",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
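A simplified sketch of the name-matching idea behind brew_recipe, using an in-memory class instead of the pkgutil module scan; the class and the pipeline string it returns are made up:

class DemoRecipe:
    # Hypothetical stand-in for a recipe class defined under flowcraft.generator.recipes
    name = "demo"

    def brew(self):
        return "fastqc trimmomatic"  # made-up pipeline string

def find_and_brew(recipe_name, recipe_classes):
    # Instantiate each candidate and compare its name attribute, as brew_recipe does
    for cls in recipe_classes:
        recipe = cls()
        if getattr(recipe, "name", None) == recipe_name:
            return recipe.brew()
    raise ValueError("Recipe name '{}' does not exist.".format(recipe_name))

print(find_and_brew("demo", [DemoRecipe]))  # prints: fastqc trimmomatic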
|
test
|
list_recipes
|
Method that iterates over all available recipes and prints their
information to the standard output
Parameters
----------
full : bool
If true, it will provide the pipeline string along with the recipe name
|
flowcraft/generator/recipe.py
|
def list_recipes(full=False):
"""Method that iterates over all available recipes and prints their
information to the standard output
Parameters
----------
full : bool
If true, it will provide the pipeline string along with the recipe name
"""
logger.info(colored_print(
"\n===== L I S T O F R E C I P E S =====\n",
"green_bold"))
# This will iterate over all modules included in the recipes subpackage
# It will return the import class and the module name, along with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
recipe_cls = cls()
if hasattr(recipe_cls, "name"):
logger.info(colored_print("=> {}".format(recipe_cls.name), "blue_bold"))
if full:
logger.info(colored_print("\t {}".format(recipe_cls.__doc__), "purple_bold"))
logger.info(colored_print("Pipeline string: {}\n".format(recipe_cls.pipeline_str), "yellow_bold"))
sys.exit(0)
|
def list_recipes(full=False):
"""Method that iterates over all available recipes and prints their
information to the standard output
Parameters
----------
full : bool
If true, it will provide the pipeline string along with the recipe name
"""
logger.info(colored_print(
"\n===== L I S T O F R E C I P E S =====\n",
"green_bold"))
# This will iterate over all modules included in the recipes subpackage
# It will return the import class and the module name, along with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
recipe_cls = cls()
if hasattr(recipe_cls, "name"):
logger.info(colored_print("=> {}".format(recipe_cls.name), "blue_bold"))
if full:
logger.info(colored_print("\t {}".format(recipe_cls.__doc__), "purple_bold"))
logger.info(colored_print("Pipeline string: {}\n".format(recipe_cls.pipeline_str), "yellow_bold"))
sys.exit(0)
|
[
"Method",
"that",
"iterates",
"over",
"all",
"available",
"recipes",
"and",
"prints",
"their",
"information",
"to",
"the",
"standard",
"output"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L691-L730
|
[
"def",
"list_recipes",
"(",
"full",
"=",
"False",
")",
":",
"logger",
".",
"info",
"(",
"colored_print",
"(",
"\"\\n===== L I S T O F R E C I P E S =====\\n\"",
",",
"\"green_bold\"",
")",
")",
"# This will iterate over all modules included in the recipes subpackage",
"# It will return the import class and the module name, algon with the",
"# correct prefix",
"prefix",
"=",
"\"{}.\"",
".",
"format",
"(",
"recipes",
".",
"__name__",
")",
"for",
"importer",
",",
"modname",
",",
"_",
"in",
"pkgutil",
".",
"iter_modules",
"(",
"recipes",
".",
"__path__",
",",
"prefix",
")",
":",
"# Import the current module",
"_module",
"=",
"importer",
".",
"find_module",
"(",
"modname",
")",
".",
"load_module",
"(",
"modname",
")",
"# Fetch all available classes in module",
"_recipe_classes",
"=",
"[",
"cls",
"for",
"cls",
"in",
"_module",
".",
"__dict__",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"cls",
",",
"type",
")",
"]",
"# Iterate over each Recipe class, and check for a match with the",
"# provided recipe name.",
"for",
"cls",
"in",
"_recipe_classes",
":",
"recipe_cls",
"=",
"cls",
"(",
")",
"if",
"hasattr",
"(",
"recipe_cls",
",",
"\"name\"",
")",
":",
"logger",
".",
"info",
"(",
"colored_print",
"(",
"\"=> {}\"",
".",
"format",
"(",
"recipe_cls",
".",
"name",
")",
",",
"\"blue_bold\"",
")",
")",
"if",
"full",
":",
"logger",
".",
"info",
"(",
"colored_print",
"(",
"\"\\t {}\"",
".",
"format",
"(",
"recipe_cls",
".",
"__doc__",
")",
",",
"\"purple_bold\"",
")",
")",
"logger",
".",
"info",
"(",
"colored_print",
"(",
"\"Pipeline string: {}\\n\"",
".",
"format",
"(",
"recipe_cls",
".",
"pipeline_str",
")",
",",
"\"yellow_bold\"",
")",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
InnuendoRecipe.validate_pipeline
|
Validate pipeline string
Validates the pipeline string by searching for forbidden characters
Parameters
----------
pipeline_string : str
String with the processes provided
Returns
-------
bool
True if the pipeline string contains no forbidden characters, False otherwise
|
flowcraft/generator/recipe.py
|
def validate_pipeline(pipeline_string):
"""Validate pipeline string
Validates the pipeline string by searching for forbidden characters
Parameters
----------
pipeline_string : str
String with the processes provided
Returns
-------
bool
True if the pipeline string contains no forbidden characters, False otherwise
"""
if "(" in pipeline_string or ")" in pipeline_string or "|" in \
pipeline_string:
logger.error(
colored_print("Please provide a valid task list!", "red_bold")
)
return False
return True
|
def validate_pipeline(pipeline_string):
"""Validate pipeline string
Validates the pipeline string by searching for forbidden characters
Parameters
----------
pipeline_string : str
String with the processes provided
Returns
-------
bool
True if the pipeline string contains no forbidden characters, False otherwise
"""
if "(" in pipeline_string or ")" in pipeline_string or "|" in \
pipeline_string:
logger.error(
colored_print("Please provide a valid task list!", "red_bold")
)
return False
return True
|
[
"Validate",
"pipeline",
"string"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L54-L75
|
[
"def",
"validate_pipeline",
"(",
"pipeline_string",
")",
":",
"if",
"\"(\"",
"in",
"pipeline_string",
"or",
"\")\"",
"in",
"pipeline_string",
"or",
"\"|\"",
"in",
"pipeline_string",
":",
"logger",
".",
"error",
"(",
"colored_print",
"(",
"\"Please provide a valid task list!\"",
",",
"\"red_bold\"",
")",
")",
"return",
"False",
"return",
"True"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
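A short stand-alone sketch of the forbidden-character rule enforced by validate_pipeline above; the helper name and the task strings are made up for illustration:

def is_valid_task_list(pipeline_string):
    # Reject task lists containing fork/pipe syntax characters, mirroring the check above
    return not any(ch in pipeline_string for ch in "()|")

print(is_valid_task_list("integrity_coverage fastqc"))  # True
print(is_valid_task_list("fastqc (spades | skesa)"))    # False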
|
test
|
InnuendoRecipe.build_upstream
|
Builds the upstream pipeline of the current process
Checks for the upstream processes to the current process and
adds them to the current pipeline fragment if they were provided in
the process list.
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
|
flowcraft/generator/recipe.py
|
def build_upstream(self, process_descriptions, task, all_tasks,
task_pipeline,
count_forks, total_tasks, forks):
"""Builds the upstream pipeline of the current process
Checks for the upstream processes to the current process and
adds them to the current pipeline fragment if they were provided in
the process list.
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
"""
if task in process_descriptions:
if process_descriptions[task][1] is not None:
if len(process_descriptions[task][1].split("|")) > 1:
local_forks = process_descriptions[task][1].split("|")
# Produces a new pipeline fragment for each forkable
# process
for local_fork in local_forks:
if local_fork in total_tasks:
count_forks += 1
task_pipeline.insert(
0,
process_descriptions[task][1]
)
self.define_pipeline_string(
process_descriptions,
local_fork,
False,
True,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
# Adds the process to the pipeline fragment in case it is
# provided in the task list
if process_descriptions[task][1] in total_tasks:
task_pipeline.insert(
0,
process_descriptions[task][1].split("|")[0]
)
# Proceeds building upstream until the input for a
# process is None
self.build_upstream(
process_descriptions,
process_descriptions[task][1].split("|")[0],
all_tasks,
task_pipeline,
count_forks,
total_tasks,
forks
)
else:
logger.error(
colored_print("{} not in provided protocols as "
"input for {}".format(
process_descriptions[task][1], task), "red_bold"
)
)
sys.exit()
return task_pipeline
else:
return task_pipeline
|
def build_upstream(self, process_descriptions, task, all_tasks,
task_pipeline,
count_forks, total_tasks, forks):
"""Builds the upstream pipeline of the current process
Checks for the upstream processes to the current process and
adds them to the current pipeline fragment if they were provided in
the process list.
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
"""
if task in process_descriptions:
if process_descriptions[task][1] is not None:
if len(process_descriptions[task][1].split("|")) > 1:
local_forks = process_descriptions[task][1].split("|")
# Produces a new pipeline fragment for each forkable
# process
for local_fork in local_forks:
if local_fork in total_tasks:
count_forks += 1
task_pipeline.insert(
0,
process_descriptions[task][1]
)
self.define_pipeline_string(
process_descriptions,
local_fork,
False,
True,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
# Adds the process to the pipeline fragment in case it is
# provided in the task list
if process_descriptions[task][1] in total_tasks:
task_pipeline.insert(
0,
process_descriptions[task][1].split("|")[0]
)
# Proceeds building upstream until the input for a
# process is None
self.build_upstream(
process_descriptions,
process_descriptions[task][1].split("|")[0],
all_tasks,
task_pipeline,
count_forks,
total_tasks,
forks
)
else:
logger.error(
colored_print("{} not in provided protocols as "
"input for {}".format(
process_descriptions[task][1], task), "red_bold"
)
)
sys.exit()
return task_pipeline
else:
return task_pipeline
|
[
"Builds",
"the",
"upstream",
"pipeline",
"of",
"the",
"current",
"process"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L77-L163
|
[
"def",
"build_upstream",
"(",
"self",
",",
"process_descriptions",
",",
"task",
",",
"all_tasks",
",",
"task_pipeline",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
":",
"if",
"task",
"in",
"process_descriptions",
":",
"if",
"process_descriptions",
"[",
"task",
"]",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"process_descriptions",
"[",
"task",
"]",
"[",
"1",
"]",
".",
"split",
"(",
"\"|\"",
")",
")",
">",
"1",
":",
"local_forks",
"=",
"process_descriptions",
"[",
"task",
"]",
"[",
"1",
"]",
".",
"split",
"(",
"\"|\"",
")",
"# Produces a new pipeline fragment for each forkable",
"# process",
"for",
"local_fork",
"in",
"local_forks",
":",
"if",
"local_fork",
"in",
"total_tasks",
":",
"count_forks",
"+=",
"1",
"task_pipeline",
".",
"insert",
"(",
"0",
",",
"process_descriptions",
"[",
"task",
"]",
"[",
"1",
"]",
")",
"self",
".",
"define_pipeline_string",
"(",
"process_descriptions",
",",
"local_fork",
",",
"False",
",",
"True",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
"return",
"task_pipeline",
"else",
":",
"# Adds the process to the pipeline fragment in case it is",
"# provided in the task list",
"if",
"process_descriptions",
"[",
"task",
"]",
"[",
"1",
"]",
"in",
"total_tasks",
":",
"task_pipeline",
".",
"insert",
"(",
"0",
",",
"process_descriptions",
"[",
"task",
"]",
"[",
"1",
"]",
".",
"split",
"(",
"\"|\"",
")",
"[",
"0",
"]",
")",
"# Proceeds building upstream until the input for a",
"# process is None",
"self",
".",
"build_upstream",
"(",
"process_descriptions",
",",
"process_descriptions",
"[",
"task",
"]",
"[",
"1",
"]",
".",
"split",
"(",
"\"|\"",
")",
"[",
"0",
"]",
",",
"all_tasks",
",",
"task_pipeline",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
"else",
":",
"logger",
".",
"error",
"(",
"colored_print",
"(",
"\"{} not in provided protocols as \"",
"\"input for {}\"",
".",
"format",
"(",
"process_descriptions",
"[",
"task",
"]",
"[",
"1",
"]",
",",
"task",
")",
",",
"\"red_bold\"",
")",
")",
"sys",
".",
"exit",
"(",
")",
"return",
"task_pipeline",
"else",
":",
"return",
"task_pipeline"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
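The recursion in build_upstream is easier to follow with a concrete picture of the process_descriptions layout it assumes: index 1 holds the upstream input, index 2 the downstream output, and "|" separates forkable alternatives. The table below is a fabricated example, not the real INNUENDO description:

# Hypothetical process description table: task -> (forkable flag, input, output)
process_descriptions = {
    "integrity_coverage": (True, None, "trimmomatic"),
    "trimmomatic": (True, "integrity_coverage", "spades|skesa"),
}

# Walking upstream from "trimmomatic" until a task whose input is None is reached
task, chain = "trimmomatic", ["trimmomatic"]
while process_descriptions[task][1] is not None:
    task = process_descriptions[task][1].split("|")[0]
    chain.insert(0, task)
print(chain)  # prints: ['integrity_coverage', 'trimmomatic']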
|
test
|
InnuendoRecipe.build_downstream
|
Builds the downstream pipeline of the current process
Checks for the downstream processes to the current process and
adds them to the current pipeline fragment.
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
|
flowcraft/generator/recipe.py
|
def build_downstream(self, process_descriptions, task, all_tasks,
task_pipeline,
count_forks, total_tasks, forks):
"""Builds the downstream pipeline of the current process
Checks for the downstream processes to the current process and
adds them to the current pipeline fragment.
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
"""
if task in process_descriptions:
if process_descriptions[task][2] is not None:
if len(process_descriptions[task][2].split("|")) > 1:
local_forks = process_descriptions[task][2].split("|")
# Adds the process to the pipeline fragment downstream
# and defines a new pipeline fragment for each fork.
# Those will only look for downstream processes
for local_fork in local_forks:
if local_fork in total_tasks:
count_forks += 1
task_pipeline.append(process_descriptions[task][2])
self.define_pipeline_string(
process_descriptions,
local_fork,
False,
True,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
if process_descriptions[task][2] in total_tasks:
task_pipeline.append(process_descriptions[task][2].split("|")[0])
# Proceeds building downstream until the output for a
# process is None
self.build_downstream(
process_descriptions,
process_descriptions[task][2].split("|")[0],
all_tasks,
task_pipeline,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
return task_pipeline
|
def build_downstream(self, process_descriptions, task, all_tasks,
task_pipeline,
count_forks, total_tasks, forks):
"""Builds the downstream pipeline of the current process
Checks for the downstream processes to the current process and
adds them to the current pipeline fragment.
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
"""
if task in process_descriptions:
if process_descriptions[task][2] is not None:
if len(process_descriptions[task][2].split("|")) > 1:
local_forks = process_descriptions[task][2].split("|")
# Adds the process to the pipeline fragment downstream
# and defines a new pipeline fragment for each fork.
# Those will only look for downstream processes
for local_fork in local_forks:
if local_fork in total_tasks:
count_forks += 1
task_pipeline.append(process_descriptions[task][2])
self.define_pipeline_string(
process_descriptions,
local_fork,
False,
True,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
if process_descriptions[task][2] in total_tasks:
task_pipeline.append(process_descriptions[task][2].split("|")[0])
# Proceeds building downstream until the output for a
# process is None
self.build_downstream(
process_descriptions,
process_descriptions[task][2].split("|")[0],
all_tasks,
task_pipeline,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
return task_pipeline
|
[
"Builds",
"the",
"downstream",
"pipeline",
"of",
"the",
"current",
"process"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L165-L235
|
[
"def",
"build_downstream",
"(",
"self",
",",
"process_descriptions",
",",
"task",
",",
"all_tasks",
",",
"task_pipeline",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
":",
"if",
"task",
"in",
"process_descriptions",
":",
"if",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
".",
"split",
"(",
"\"|\"",
")",
")",
">",
"1",
":",
"local_forks",
"=",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
".",
"split",
"(",
"\"|\"",
")",
"# Adds the process to the pipeline fragment downstream",
"# and defines a new pipeline fragment for each fork.",
"# Those will only look for downstream processes",
"for",
"local_fork",
"in",
"local_forks",
":",
"if",
"local_fork",
"in",
"total_tasks",
":",
"count_forks",
"+=",
"1",
"task_pipeline",
".",
"append",
"(",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
")",
"self",
".",
"define_pipeline_string",
"(",
"process_descriptions",
",",
"local_fork",
",",
"False",
",",
"True",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
"return",
"task_pipeline",
"else",
":",
"if",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
"in",
"total_tasks",
":",
"task_pipeline",
".",
"append",
"(",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
".",
"split",
"(",
"\"|\"",
")",
"[",
"0",
"]",
")",
"# Proceeds building downstream until the output for a",
"# process is None",
"self",
".",
"build_downstream",
"(",
"process_descriptions",
",",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
".",
"split",
"(",
"\"|\"",
")",
"[",
"0",
"]",
",",
"all_tasks",
",",
"task_pipeline",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
"return",
"task_pipeline",
"else",
":",
"return",
"task_pipeline"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
InnuendoRecipe.define_pipeline_string
|
Builds the possible forks and connections between the provided
processes
This method loops through all the provided tasks and builds the
upstream and downstream pipeline if required. It then returns all
possible forks that need to be merged a posteriori
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
tasks : str
Space separated processes
check_upstream : bool
Whether to build the upstream pipeline of the current task
check_downstream : bool
Whether to build the downstream pipeline of the current task
count_forks : int
Number of current forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : List with all the possible pipeline forks
|
flowcraft/generator/recipe.py
|
def define_pipeline_string(self, process_descriptions, tasks,
check_upstream,
check_downstream, count_forks, total_tasks,
forks):
"""Builds the possible forks and connections between the provided
processes
This method loops through all the provided tasks and builds the
upstream and downstream pipeline if required. It then returns all
possible forks that need to be merged a posteriori
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
tasks : str
Space separated processes
check_upstream : bool
Whether to build the upstream pipeline of the current task
check_downstream : bool
Whether to build the downstream pipeline of the current task
count_forks : int
Number of current forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : List with all the possible pipeline forks
"""
tasks_array = tasks.split()
for task_unsplit in tasks_array:
task = task_unsplit.split("=")[0]
if task not in process_descriptions.keys():
logger.error(
colored_print(
"{} not in the possible processes".format(task),
"red_bold"
)
)
sys.exit()
else:
process_split = task_unsplit.split("=")
if len(process_split) > 1:
self.process_to_id[process_split[0]] = process_split[1]
# Only uses the process if it is not already in the possible forks
if not bool([x for x in forks if task in x]) and not bool([y for y in forks if process_descriptions[task][2] in y]):
task_pipeline = []
if task in process_descriptions:
if check_upstream:
task_pipeline = self.build_upstream(
process_descriptions,
task,
tasks_array,
task_pipeline,
count_forks,
total_tasks,
forks
)
task_pipeline.append(task)
if check_downstream:
task_pipeline = self.build_downstream(
process_descriptions,
task,
tasks_array,
task_pipeline,
count_forks,
total_tasks,
forks
)
# Adds the pipeline fragment to the list of possible forks
forks.append(list(OrderedDict.fromkeys(task_pipeline)))
# Checks for task in fork. Case order of input processes is reversed
elif bool([y for y in forks if process_descriptions[task][2] in y]):
for fork in forks:
if task not in fork:
try:
dependent_index = fork.index(process_descriptions[task][2])
fork.insert(dependent_index, task)
except ValueError:
continue
for i in range(0, len(forks)):
for j in range(0, len(forks[i])):
try:
if len(forks[i][j].split("|")) > 1:
forks[i][j] = forks[i][j].split("|")
tmp_fork = []
for s in forks[i][j]:
if s in total_tasks:
tmp_fork.append(s)
forks[i][j] = tmp_fork
except AttributeError as e:
continue
return forks
|
def define_pipeline_string(self, process_descriptions, tasks,
check_upstream,
check_downstream, count_forks, total_tasks,
forks):
"""Builds the possible forks and connections between the provided
processes
This method loops through all the provided tasks and builds the
upstream and downstream pipeline if required. It then returns all
possible forks that need to be merged a posteriori
Parameters
----------
process_descriptions : dict
Information on each process's input, output and whether it is forkable
tasks : str
Space separated processes
check_upstream : bool
Whether to build the upstream pipeline of the current task
check_downstream : bool
Whether to build the downstream pipeline of the current task
count_forks : int
Number of current forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : List with all the possible pipeline forks
"""
tasks_array = tasks.split()
for task_unsplit in tasks_array:
task = task_unsplit.split("=")[0]
if task not in process_descriptions.keys():
logger.error(
colored_print(
"{} not in the possible processes".format(task),
"red_bold"
)
)
sys.exit()
else:
process_split = task_unsplit.split("=")
if len(process_split) > 1:
self.process_to_id[process_split[0]] = process_split[1]
# Only uses the process if it is not already in the possible forks
if not bool([x for x in forks if task in x]) and not bool([y for y in forks if process_descriptions[task][2] in y]):
task_pipeline = []
if task in process_descriptions:
if check_upstream:
task_pipeline = self.build_upstream(
process_descriptions,
task,
tasks_array,
task_pipeline,
count_forks,
total_tasks,
forks
)
task_pipeline.append(task)
if check_downstream:
task_pipeline = self.build_downstream(
process_descriptions,
task,
tasks_array,
task_pipeline,
count_forks,
total_tasks,
forks
)
# Adds the pipeline fragment to the list of possible forks
forks.append(list(OrderedDict.fromkeys(task_pipeline)))
# Checks for task in fork. Case order of input processes is reversed
elif bool([y for y in forks if process_descriptions[task][2] in y]):
for fork in forks:
if task not in fork:
try:
dependent_index = fork.index(process_descriptions[task][2])
fork.insert(dependent_index, task)
except ValueError:
continue
for i in range(0, len(forks)):
for j in range(0, len(forks[i])):
try:
if len(forks[i][j].split("|")) > 1:
forks[i][j] = forks[i][j].split("|")
tmp_fork = []
for s in forks[i][j]:
if s in total_tasks:
tmp_fork.append(s)
forks[i][j] = tmp_fork
except AttributeError as e:
continue
return forks
|
[
"Builds",
"the",
"possible",
"forks",
"and",
"connections",
"between",
"the",
"provided",
"processes"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L237-L348
|
[
"def",
"define_pipeline_string",
"(",
"self",
",",
"process_descriptions",
",",
"tasks",
",",
"check_upstream",
",",
"check_downstream",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
":",
"tasks_array",
"=",
"tasks",
".",
"split",
"(",
")",
"for",
"task_unsplit",
"in",
"tasks_array",
":",
"task",
"=",
"task_unsplit",
".",
"split",
"(",
"\"=\"",
")",
"[",
"0",
"]",
"if",
"task",
"not",
"in",
"process_descriptions",
".",
"keys",
"(",
")",
":",
"logger",
".",
"error",
"(",
"colored_print",
"(",
"\"{} not in the possible processes\"",
".",
"format",
"(",
"task",
")",
",",
"\"red_bold\"",
")",
")",
"sys",
".",
"exit",
"(",
")",
"else",
":",
"process_split",
"=",
"task_unsplit",
".",
"split",
"(",
"\"=\"",
")",
"if",
"len",
"(",
"process_split",
")",
">",
"1",
":",
"self",
".",
"process_to_id",
"[",
"process_split",
"[",
"0",
"]",
"]",
"=",
"process_split",
"[",
"1",
"]",
"# Only uses the process if it is not already in the possible forks",
"if",
"not",
"bool",
"(",
"[",
"x",
"for",
"x",
"in",
"forks",
"if",
"task",
"in",
"x",
"]",
")",
"and",
"not",
"bool",
"(",
"[",
"y",
"for",
"y",
"in",
"forks",
"if",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
"in",
"y",
"]",
")",
":",
"task_pipeline",
"=",
"[",
"]",
"if",
"task",
"in",
"process_descriptions",
":",
"if",
"check_upstream",
":",
"task_pipeline",
"=",
"self",
".",
"build_upstream",
"(",
"process_descriptions",
",",
"task",
",",
"tasks_array",
",",
"task_pipeline",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
"task_pipeline",
".",
"append",
"(",
"task",
")",
"if",
"check_downstream",
":",
"task_pipeline",
"=",
"self",
".",
"build_downstream",
"(",
"process_descriptions",
",",
"task",
",",
"tasks_array",
",",
"task_pipeline",
",",
"count_forks",
",",
"total_tasks",
",",
"forks",
")",
"# Adds the pipeline fragment to the list of possible forks",
"forks",
".",
"append",
"(",
"list",
"(",
"OrderedDict",
".",
"fromkeys",
"(",
"task_pipeline",
")",
")",
")",
"# Checks for task in fork. Case order of input processes is reversed",
"elif",
"bool",
"(",
"[",
"y",
"for",
"y",
"in",
"forks",
"if",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
"in",
"y",
"]",
")",
":",
"for",
"fork",
"in",
"forks",
":",
"if",
"task",
"not",
"in",
"fork",
":",
"try",
":",
"dependent_index",
"=",
"fork",
".",
"index",
"(",
"process_descriptions",
"[",
"task",
"]",
"[",
"2",
"]",
")",
"fork",
".",
"insert",
"(",
"dependent_index",
",",
"task",
")",
"except",
"ValueError",
":",
"continue",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"forks",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"forks",
"[",
"i",
"]",
")",
")",
":",
"try",
":",
"if",
"len",
"(",
"forks",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"split",
"(",
"\"|\"",
")",
")",
">",
"1",
":",
"forks",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"forks",
"[",
"i",
"]",
"[",
"j",
"]",
".",
"split",
"(",
"\"|\"",
")",
"tmp_fork",
"=",
"[",
"]",
"for",
"s",
"in",
"forks",
"[",
"i",
"]",
"[",
"j",
"]",
":",
"if",
"s",
"in",
"total_tasks",
":",
"tmp_fork",
".",
"append",
"(",
"s",
")",
"forks",
"[",
"i",
"]",
"[",
"j",
"]",
"=",
"tmp_fork",
"except",
"AttributeError",
"as",
"e",
":",
"continue",
"return",
"forks"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
InnuendoRecipe.build_pipeline_string
|
Parses, filters and merges all possible pipeline forks into the
final pipeline string
This method checks for shared start and end sections between forks
and merges them according to the shared processes::
[[spades, ...], [skesa, ...], [...,[spades, skesa]]]
-> [..., [[spades, ...], [skesa, ...]]]
Then it defines the pipeline string by replacing the arrays levels
to the flowcraft fork format::
[..., [[spades, ...], [skesa, ...]]]
-> ( ... ( spades ... | skesa ... ) )
Parameters
----------
forks : list
List with all the possible pipeline forks.
Returns
-------
str : String with the pipeline definition used as input for
parse_pipeline
|
flowcraft/generator/recipe.py
|
def build_pipeline_string(self, forks):
"""Parses, filters and merge all possible pipeline forks into the
final pipeline string
This method checks for shared start and end sections between forks
and merges them according to the shared processes::
[[spades, ...], [skesa, ...], [...,[spades, skesa]]]
-> [..., [[spades, ...], [skesa, ...]]]
Then it defines the pipeline string by replacing the arrays levels
to the flowcraft fork format::
[..., [[spades, ...], [skesa, ...]]]
-> ( ... ( spades ... | skesa ... ) )
Parameters
----------
forks : list
List with all the possible pipeline forks.
Returns
-------
str : String with the pipeline definition used as input for
parse_pipeline
"""
final_forks = []
for i in range(0, len(forks)):
needs_merge = [False, 0, 0, 0, 0, ""]
is_merged = False
for i2 in range(0, len(forks[i])):
for j in range(i, len(forks)):
needs_merge[0] = False
for j2 in range(0, len(forks[j])):
try:
j2_fork = forks[j][j2].split("|")
except AttributeError:
j2_fork = forks[j][j2]
# Gets the indexes of the forks matrix that need to
# be merged
if forks[i][i2] in j2_fork and (i2 == 0 or j2 == 0) and i != j:
needs_merge[0] = True
needs_merge[1] = i
needs_merge[2] = i2
needs_merge[3] = j
needs_merge[4] = j2
needs_merge[5] = forks[i][i2]
if needs_merge[0]:
index_merge_point = forks[needs_merge[3]][-1].index(needs_merge[5])
# Merges the forks. If only one fork is possible,
# that fork is neglected and it merges into a single
# channel.
if needs_merge[2] == 0:
if len(forks[needs_merge[3]][-1]) < 2:
forks[needs_merge[3]] = forks[needs_merge[3]][:-1] + forks[needs_merge[1]][::]
else:
forks[needs_merge[3]][-1][index_merge_point] = forks[needs_merge[1]]
elif needs_merge[4] == 0:
if len(forks[needs_merge[3]][-1]) < 2:
forks[needs_merge[3]] = forks[needs_merge[3]][:-1] + forks[needs_merge[1]][::]
else:
forks[needs_merge[3]][-1][index_merge_point] = forks[needs_merge[1]]
is_merged = True
# Adds forks that dont need merge to the final forks
if needs_merge[0] is not None and not is_merged:
if bool([nf for nf in forks[i] if "|" in nf]):
continue
final_forks.append(forks[i])
if len(final_forks) == 1:
final_forks = str(final_forks[0])
# parses the string array to the flowcraft nomenclature
pipeline_string = " " + str(final_forks)\
.replace("[[", "( ")\
.replace("]]", " )")\
.replace("]", " |")\
.replace(", [", " ")\
.replace("'", "")\
.replace(",", "")\
.replace("[", "")
if pipeline_string[-1] == "|":
pipeline_string = pipeline_string[:-1]
to_search = " {} "
to_replace = " {}={} "
# Replace only names by names + process ids
for key, val in self.process_to_id.items():
# Case only one process in the pipeline
pipeline_string = pipeline_string\
.replace(to_search.format(key),
to_replace.format(key, val))
return pipeline_string
|
def build_pipeline_string(self, forks):
"""Parses, filters and merge all possible pipeline forks into the
final pipeline string
This method checks for shared start and end sections between forks
and merges them according to the shared processes::
[[spades, ...], [skesa, ...], [...,[spades, skesa]]]
-> [..., [[spades, ...], [skesa, ...]]]
Then it defines the pipeline string by replacing the arrays levels
to the flowcraft fork format::
[..., [[spades, ...], [skesa, ...]]]
-> ( ... ( spades ... | skesa ... ) )
Parameters
----------
forks : list
List with all the possible pipeline forks.
Returns
-------
str : String with the pipeline definition used as input for
parse_pipeline
"""
final_forks = []
for i in range(0, len(forks)):
needs_merge = [False, 0, 0, 0, 0, ""]
is_merged = False
for i2 in range(0, len(forks[i])):
for j in range(i, len(forks)):
needs_merge[0] = False
for j2 in range(0, len(forks[j])):
try:
j2_fork = forks[j][j2].split("|")
except AttributeError:
j2_fork = forks[j][j2]
# Gets the indexes of the forks matrix that need to
# be merged
if forks[i][i2] in j2_fork and (i2 == 0 or j2 == 0) and i != j:
needs_merge[0] = True
needs_merge[1] = i
needs_merge[2] = i2
needs_merge[3] = j
needs_merge[4] = j2
needs_merge[5] = forks[i][i2]
if needs_merge[0]:
index_merge_point = forks[needs_merge[3]][-1].index(needs_merge[5])
# Merges the forks. If only one fork is possible,
# that fork is neglected and it merges into a single
# channel.
if needs_merge[2] == 0:
if len(forks[needs_merge[3]][-1]) < 2:
forks[needs_merge[3]] = forks[needs_merge[3]][:-1] + forks[needs_merge[1]][::]
else:
forks[needs_merge[3]][-1][index_merge_point] = forks[needs_merge[1]]
elif needs_merge[4] == 0:
if len(forks[needs_merge[3]][-1]) < 2:
forks[needs_merge[3]] = forks[needs_merge[3]][:-1] + forks[needs_merge[1]][::]
else:
forks[needs_merge[3]][-1][index_merge_point] = forks[needs_merge[1]]
is_merged = True
# Adds forks that dont need merge to the final forks
if needs_merge[0] is not None and not is_merged:
if bool([nf for nf in forks[i] if "|" in nf]):
continue
final_forks.append(forks[i])
if len(final_forks) == 1:
final_forks = str(final_forks[0])
# parses the string array to the flowcraft nomenclature
pipeline_string = " " + str(final_forks)\
.replace("[[", "( ")\
.replace("]]", " )")\
.replace("]", " |")\
.replace(", [", " ")\
.replace("'", "")\
.replace(",", "")\
.replace("[", "")
if pipeline_string[-1] == "|":
pipeline_string = pipeline_string[:-1]
to_search = " {} "
to_replace = " {}={} "
# Replace only names by names + process ids
for key, val in self.process_to_id.items():
# Case only one process in the pipeline
pipeline_string = pipeline_string\
.replace(to_search.format(key),
to_replace.format(key, val))
return pipeline_string
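
The final replace chain above is compact but not obvious at a glance. The sketch below reproduces just that string-conversion step on a small, hypothetical merged fork (the component names are made up) to show how a nested list becomes a flowcraft fork definition; it assumes the same single-fork case handled by the `len(final_forks) == 1` branch.

def forks_to_flowcraft_string(final_forks):
    # Same replace chain as the tail of build_pipeline_string: render the
    # nested list with str() and rewrite the bracket/comma notation into
    # flowcraft's "( a | b )" fork syntax.
    pipeline_string = " " + str(final_forks)\
        .replace("[[", "( ")\
        .replace("]]", " )")\
        .replace("]", " |")\
        .replace(", [", " ")\
        .replace("'", "")\
        .replace(",", "")\
        .replace("[", "")
    if pipeline_string[-1] == "|":
        pipeline_string = pipeline_string[:-1]
    return pipeline_string

# Hypothetical single merged fork: a linear start followed by a two-way fork.
example = ['integrity_coverage', [['spades', 'pilon'], ['skesa', 'pilon']]]
print(forks_to_flowcraft_string(example))
# -> " integrity_coverage ( spades pilon | skesa pilon ) "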
|
[
"Parses",
"filters",
"and",
"merge",
"all",
"possible",
"pipeline",
"forks",
"into",
"the",
"final",
"pipeline",
"string"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L350-L453
|
[
"def",
"build_pipeline_string",
"(",
"self",
",",
"forks",
")",
":",
"final_forks",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"forks",
")",
")",
":",
"needs_merge",
"=",
"[",
"False",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"\"\"",
"]",
"is_merged",
"=",
"False",
"for",
"i2",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"forks",
"[",
"i",
"]",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"i",
",",
"len",
"(",
"forks",
")",
")",
":",
"needs_merge",
"[",
"0",
"]",
"=",
"False",
"for",
"j2",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"forks",
"[",
"j",
"]",
")",
")",
":",
"try",
":",
"j2_fork",
"=",
"forks",
"[",
"j",
"]",
"[",
"j2",
"]",
".",
"split",
"(",
"\"|\"",
")",
"except",
"AttributeError",
":",
"j2_fork",
"=",
"forks",
"[",
"j",
"]",
"[",
"j2",
"]",
"# Gets the indexes of the forks matrix that need to",
"# be merged",
"if",
"forks",
"[",
"i",
"]",
"[",
"i2",
"]",
"in",
"j2_fork",
"and",
"(",
"i2",
"==",
"0",
"or",
"j2",
"==",
"0",
")",
"and",
"i",
"!=",
"j",
":",
"needs_merge",
"[",
"0",
"]",
"=",
"True",
"needs_merge",
"[",
"1",
"]",
"=",
"i",
"needs_merge",
"[",
"2",
"]",
"=",
"i2",
"needs_merge",
"[",
"3",
"]",
"=",
"j",
"needs_merge",
"[",
"4",
"]",
"=",
"j2",
"needs_merge",
"[",
"5",
"]",
"=",
"forks",
"[",
"i",
"]",
"[",
"i2",
"]",
"if",
"needs_merge",
"[",
"0",
"]",
":",
"index_merge_point",
"=",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"[",
"-",
"1",
"]",
".",
"index",
"(",
"needs_merge",
"[",
"5",
"]",
")",
"# Merges the forks. If only one fork is possible,",
"# that fork is neglected and it merges into a single",
"# channel.",
"if",
"needs_merge",
"[",
"2",
"]",
"==",
"0",
":",
"if",
"len",
"(",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"[",
"-",
"1",
"]",
")",
"<",
"2",
":",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"=",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"[",
":",
"-",
"1",
"]",
"+",
"forks",
"[",
"needs_merge",
"[",
"1",
"]",
"]",
"[",
":",
":",
"]",
"else",
":",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"[",
"-",
"1",
"]",
"[",
"index_merge_point",
"]",
"=",
"forks",
"[",
"needs_merge",
"[",
"1",
"]",
"]",
"elif",
"needs_merge",
"[",
"4",
"]",
"==",
"0",
":",
"if",
"len",
"(",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"[",
"-",
"1",
"]",
")",
"<",
"2",
":",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"=",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"[",
":",
"-",
"1",
"]",
"+",
"forks",
"[",
"needs_merge",
"[",
"1",
"]",
"]",
"[",
":",
":",
"]",
"else",
":",
"forks",
"[",
"needs_merge",
"[",
"3",
"]",
"]",
"[",
"-",
"1",
"]",
"[",
"index_merge_point",
"]",
"=",
"forks",
"[",
"needs_merge",
"[",
"1",
"]",
"]",
"is_merged",
"=",
"True",
"# Adds forks that dont need merge to the final forks",
"if",
"needs_merge",
"[",
"0",
"]",
"is",
"not",
"None",
"and",
"not",
"is_merged",
":",
"if",
"bool",
"(",
"[",
"nf",
"for",
"nf",
"in",
"forks",
"[",
"i",
"]",
"if",
"\"|\"",
"in",
"nf",
"]",
")",
":",
"continue",
"final_forks",
".",
"append",
"(",
"forks",
"[",
"i",
"]",
")",
"if",
"len",
"(",
"final_forks",
")",
"==",
"1",
":",
"final_forks",
"=",
"str",
"(",
"final_forks",
"[",
"0",
"]",
")",
"# parses the string array to the flowcraft nomenclature",
"pipeline_string",
"=",
"\" \"",
"+",
"str",
"(",
"final_forks",
")",
".",
"replace",
"(",
"\"[[\"",
",",
"\"( \"",
")",
".",
"replace",
"(",
"\"]]\"",
",",
"\" )\"",
")",
".",
"replace",
"(",
"\"]\"",
",",
"\" |\"",
")",
".",
"replace",
"(",
"\", [\"",
",",
"\" \"",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\",\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"[\"",
",",
"\"\"",
")",
"if",
"pipeline_string",
"[",
"-",
"1",
"]",
"==",
"\"|\"",
":",
"pipeline_string",
"=",
"pipeline_string",
"[",
":",
"-",
"1",
"]",
"to_search",
"=",
"\" {} \"",
"to_replace",
"=",
"\" {}={} \"",
"# Replace only names by names + process ids",
"for",
"key",
",",
"val",
"in",
"self",
".",
"process_to_id",
".",
"items",
"(",
")",
":",
"# Case only one process in the pipeline",
"pipeline_string",
"=",
"pipeline_string",
".",
"replace",
"(",
"to_search",
".",
"format",
"(",
"key",
")",
",",
"to_replace",
".",
"format",
"(",
"key",
",",
"val",
")",
")",
"return",
"pipeline_string"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
InnuendoRecipe.run_auto_pipeline
|
Main method to run the automatic pipeline creation
This method aggregates the functions required to build the pipeline
string that can be used as input for the workflow generator.
Parameters
----------
tasks : str
A string with the space separated tasks to be included in the
pipeline
Returns
-------
str : String with the pipeline definition used as input for
parse_pipeline
|
flowcraft/generator/recipe.py
|
def run_auto_pipeline(self, tasks):
"""Main method to run the automatic pipeline creation
This method aggregates the functions required to build the pipeline
string that can be used as input for the workflow generator.
Parameters
----------
tasks : str
A string with the space separated tasks to be included in the
pipeline
Returns
-------
str : String with the pipeline definition used as input for
parse_pipeline
"""
self.forks = self.define_pipeline_string(
self.process_descriptions,
tasks,
True,
True,
self.count_forks,
tasks,
self.forks
)
self.pipeline_string = self.build_pipeline_string(self.forks)
return self.pipeline_string
|
def run_auto_pipeline(self, tasks):
"""Main method to run the automatic pipeline creation
This method aggregates the functions required to build the pipeline
string that can be used as input for the workflow generator.
Parameters
----------
tasks : str
A string with the space separated tasks to be included in the
pipeline
Returns
-------
str : String with the pipeline definition used as input for
parse_pipeline
"""
self.forks = self.define_pipeline_string(
self.process_descriptions,
tasks,
True,
True,
self.count_forks,
tasks,
self.forks
)
self.pipeline_string = self.build_pipeline_string(self.forks)
return self.pipeline_string
|
[
"Main",
"method",
"to",
"run",
"the",
"automatic",
"pipeline",
"creation"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L455-L485
|
[
"def",
"run_auto_pipeline",
"(",
"self",
",",
"tasks",
")",
":",
"self",
".",
"forks",
"=",
"self",
".",
"define_pipeline_string",
"(",
"self",
".",
"process_descriptions",
",",
"tasks",
",",
"True",
",",
"True",
",",
"self",
".",
"count_forks",
",",
"tasks",
",",
"self",
".",
"forks",
")",
"self",
".",
"pipeline_string",
"=",
"self",
".",
"build_pipeline_string",
"(",
"self",
".",
"forks",
")",
"return",
"self",
".",
"pipeline_string"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Recipe._get_component_str
|
Generates a component string based on the provided parameters and
directives
Parameters
----------
component : str
Component name
params : dict
Dictionary with parameter information
directives : dict
Dictionary with directives information
Returns
-------
str
Component string with the parameters and directives, ready for
parsing by flowcraft engine
|
flowcraft/generator/recipe.py
|
def _get_component_str(component, params=None, directives=None):
""" Generates a component string based on the provided parameters and
directives
Parameters
----------
component : str
Component name
params : dict
Dictionary with parameter information
directives : dict
Dictionary with directives information
Returns
-------
str
Component string with the parameters and directives, ready for
parsing by flowcraft engine
"""
final_directives = {}
if directives:
final_directives = directives
if params:
final_directives["params"] = params
if final_directives:
return "{}={}".format(
component, json.dumps(final_directives, separators=(",", ":")))
else:
return component
|
def _get_component_str(component, params=None, directives=None):
""" Generates a component string based on the provided parameters and
directives
Parameters
----------
component : str
Component name
params : dict
Dictionary with parameter information
directives : dict
Dictionary with directives information
Returns
-------
str
Component string with the parameters and directives, ready for
parsing by flowcraft engine
"""
final_directives = {}
if directives:
final_directives = directives
if params:
final_directives["params"] = params
if final_directives:
return "{}={}".format(
component, json.dumps(final_directives, separators=(",", ":")))
else:
return component
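
A quick usage sketch (assuming the helper above is in scope; the component name, parameters and directives are hypothetical) showing the two return shapes:

# With directives and params, the component name is suffixed with compact JSON.
print(_get_component_str("fastqc_trimmomatic",
                         params={"adapters": "None"},
                         directives={"cpus": 4, "memory": "4GB"}))
# -> fastqc_trimmomatic={"cpus":4,"memory":"4GB","params":{"adapters":"None"}}

# With neither, the bare component name is returned.
print(_get_component_str("integrity_coverage"))
# -> integrity_coverage

Note that when both arguments are given, `params` is injected into the caller's `directives` dictionary, since `final_directives` aliases the same object.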
|
[
"Generates",
"a",
"component",
"string",
"based",
"on",
"the",
"provided",
"parameters",
"and",
"directives"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L614-L646
|
[
"def",
"_get_component_str",
"(",
"component",
",",
"params",
"=",
"None",
",",
"directives",
"=",
"None",
")",
":",
"final_directives",
"=",
"{",
"}",
"if",
"directives",
":",
"final_directives",
"=",
"directives",
"if",
"params",
":",
"final_directives",
"[",
"\"params\"",
"]",
"=",
"params",
"if",
"final_directives",
":",
"return",
"\"{}={}\"",
".",
"format",
"(",
"component",
",",
"json",
".",
"dumps",
"(",
"final_directives",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")",
"else",
":",
"return",
"component"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
write_report
|
Writes a report from multiple samples.
Parameters
----------
storage_dic : dict or :py:class:`OrderedDict`
Storage containing the trimming statistics. See :py:func:`parse_log`
for its generation.
output_file : str
Path where the output file will be generated.
sample_id : str
Id or name of the current sample.
|
flowcraft/templates/trimmomatic_report.py
|
def write_report(storage_dic, output_file, sample_id):
""" Writes a report from multiple samples.
Parameters
----------
storage_dic : dict or :py:class:`OrderedDict`
Storage containing the trimming statistics. See :py:func:`parse_log`
for its generation.
output_file : str
Path where the output file will be generated.
sample_id : str
Id or name of the current sample.
"""
with open(output_file, "w") as fh, open(".report.json", "w") as json_rep:
# Write header
fh.write("Sample,Total length,Total trimmed,%,5end Trim,3end Trim,"
"bad_reads\\n")
# Write contents
for sample, vals in storage_dic.items():
fh.write("{},{}\\n".format(
sample, ",".join([str(x) for x in vals.values()])))
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "trimmed",
"value": vals["total_trim_perc"],
"table": "qc",
"columnBar": True},
]
}],
"plotData": [{
"sample": sample_id,
"data": {
"sparkline": vals["clean_len"]
}
}],
"badReads": vals["bad_reads"]
}
json_rep.write(json.dumps(json_dic, separators=(",", ":")))
|
def write_report(storage_dic, output_file, sample_id):
""" Writes a report from multiple samples.
Parameters
----------
storage_dic : dict or :py:class:`OrderedDict`
Storage containing the trimming statistics. See :py:func:`parse_log`
for its generation.
output_file : str
Path where the output file will be generated.
sample_id : str
Id or name of the current sample.
"""
with open(output_file, "w") as fh, open(".report.json", "w") as json_rep:
# Write header
fh.write("Sample,Total length,Total trimmed,%,5end Trim,3end Trim,"
"bad_reads\\n")
# Write contents
for sample, vals in storage_dic.items():
fh.write("{},{}\\n".format(
sample, ",".join([str(x) for x in vals.values()])))
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "trimmed",
"value": vals["total_trim_perc"],
"table": "qc",
"columnBar": True},
]
}],
"plotData": [{
"sample": sample_id,
"data": {
"sparkline": vals["clean_len"]
}
}],
"badReads": vals["bad_reads"]
}
json_rep.write(json.dumps(json_dic, separators=(",", ":")))
|
[
"Writes",
"a",
"report",
"from",
"multiple",
"samples",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/trimmomatic_report.py#L117-L160
|
[
"def",
"write_report",
"(",
"storage_dic",
",",
"output_file",
",",
"sample_id",
")",
":",
"with",
"open",
"(",
"output_file",
",",
"\"w\"",
")",
"as",
"fh",
",",
"open",
"(",
"\".report.json\"",
",",
"\"w\"",
")",
"as",
"json_rep",
":",
"# Write header",
"fh",
".",
"write",
"(",
"\"Sample,Total length,Total trimmed,%,5end Trim,3end Trim,\"",
"\"bad_reads\\\\n\"",
")",
"# Write contents",
"for",
"sample",
",",
"vals",
"in",
"storage_dic",
".",
"items",
"(",
")",
":",
"fh",
".",
"write",
"(",
"\"{},{}\\\\n\"",
".",
"format",
"(",
"sample",
",",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"vals",
".",
"values",
"(",
")",
"]",
")",
")",
")",
"json_dic",
"=",
"{",
"\"tableRow\"",
":",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"data\"",
":",
"[",
"{",
"\"header\"",
":",
"\"trimmed\"",
",",
"\"value\"",
":",
"vals",
"[",
"\"total_trim_perc\"",
"]",
",",
"\"table\"",
":",
"\"qc\"",
",",
"\"columnBar\"",
":",
"True",
"}",
",",
"]",
"}",
"]",
",",
"\"plotData\"",
":",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"data\"",
":",
"{",
"\"sparkline\"",
":",
"vals",
"[",
"\"clean_len\"",
"]",
"}",
"}",
"]",
",",
"\"badReads\"",
":",
"vals",
"[",
"\"bad_reads\"",
"]",
"}",
"json_rep",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_dic",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
main
|
Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
|
flowcraft/templates/trimmomatic_report.py
|
def main(log_files):
""" Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
"""
log_storage = OrderedDict()
for log in log_files:
log_id = log.rstrip("_trimlog.txt")
# Populate storage of current sample
log_storage[log_id] = parse_log(log)
# Remove temporary trim log file
os.remove(log)
write_report(log_storage, "trimmomatic_report.csv", log_id)
|
def main(log_files):
""" Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
"""
log_storage = OrderedDict()
for log in log_files:
log_id = log.rstrip("_trimlog.txt")
# Populate storage of current sample
log_storage[log_id] = parse_log(log)
# Remove temporary trim log file
os.remove(log)
write_report(log_storage, "trimmomatic_report.csv", log_id)
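
One detail worth keeping in mind when reading the ID derivation above: `str.rstrip` removes a trailing run of *characters*, not a literal suffix, so sample names ending in any character of "_trimlog.txt" are trimmed further than the suffix itself (the sample names below are hypothetical).

print("sampleA_trimlog.txt".rstrip("_trimlog.txt"))   # -> sampleA
print("sampling_trimlog.txt".rstrip("_trimlog.txt"))  # -> samplin  (trailing "g" also stripped)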
|
[
"Main",
"executor",
"of",
"the",
"trimmomatic_report",
"template",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/trimmomatic_report.py#L164-L185
|
[
"def",
"main",
"(",
"log_files",
")",
":",
"log_storage",
"=",
"OrderedDict",
"(",
")",
"for",
"log",
"in",
"log_files",
":",
"log_id",
"=",
"log",
".",
"rstrip",
"(",
"\"_trimlog.txt\"",
")",
"# Populate storage of current sample",
"log_storage",
"[",
"log_id",
"]",
"=",
"parse_log",
"(",
"log",
")",
"# Remove temporary trim log file",
"os",
".",
"remove",
"(",
"log",
")",
"write_report",
"(",
"log_storage",
",",
"\"trimmomatic_report.csv\"",
",",
"log_id",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
fix_contig_names
|
Removes whitespace from the assembly contig names
Parameters
----------
asseembly_path : path to assembly file
Returns
-------
str:
Path to new assembly file with fixed contig names
|
flowcraft/templates/megahit.py
|
def fix_contig_names(asseembly_path):
"""Removes whitespace from the assembly contig names
Parameters
----------
asseembly_path : path to assembly file
Returns
-------
str:
Path to new assembly file with fixed contig names
"""
fixed_assembly = "fixed_assembly.fa"
with open(asseembly_path) as in_hf, open(fixed_assembly, "w") as ou_fh:
for line in in_hf:
if line.startswith(">"):
fixed_line = line.replace(" ", "_")
ou_fh.write(fixed_line)
else:
ou_fh.write(line)
return fixed_assembly
|
def fix_contig_names(asseembly_path):
"""Removes whitespace from the assembly contig names
Parameters
----------
asseembly_path : path to assembly file
Returns
-------
str:
Path to new assembly file with fixed contig names
"""
fixed_assembly = "fixed_assembly.fa"
with open(asseembly_path) as in_hf, open(fixed_assembly, "w") as ou_fh:
for line in in_hf:
if line.startswith(">"):
fixed_line = line.replace(" ", "_")
ou_fh.write(fixed_line)
else:
ou_fh.write(line)
return fixed_assembly
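
The rewrite only touches header lines, replacing every space with an underscore; a minimal illustration on a hypothetical MEGAHIT-style header:

header = ">k141_532 flag=1 multi=2.0000 len=1298"
print(header.replace(" ", "_"))
# -> >k141_532_flag=1_multi=2.0000_len=1298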
|
[
"Removes",
"whitespace",
"from",
"the",
"assembly",
"contig",
"names"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/megahit.py#L149-L174
|
[
"def",
"fix_contig_names",
"(",
"asseembly_path",
")",
":",
"fixed_assembly",
"=",
"\"fixed_assembly.fa\"",
"with",
"open",
"(",
"asseembly_path",
")",
"as",
"in_hf",
",",
"open",
"(",
"fixed_assembly",
",",
"\"w\"",
")",
"as",
"ou_fh",
":",
"for",
"line",
"in",
"in_hf",
":",
"if",
"line",
".",
"startswith",
"(",
"\">\"",
")",
":",
"fixed_line",
"=",
"line",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"ou_fh",
".",
"write",
"(",
"fixed_line",
")",
"else",
":",
"ou_fh",
".",
"write",
"(",
"line",
")",
"return",
"fixed_assembly"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
clean_up
|
Cleans the temporary fastq files. If they are symlinks, the link
source is removed
Parameters
----------
fastq : list
List of fastq files.
|
flowcraft/templates/megahit.py
|
def clean_up(fastq):
"""
Cleans the temporary fastq files. If they are symlinks, the link
source is removed
Parameters
----------
fastq : list
List of fastq files.
"""
for fq in fastq:
# Get real path of fastq files, following symlinks
rp = os.path.realpath(fq)
logger.debug("Removing temporary fastq file path: {}".format(rp))
if re.match(".*/work/.{2}/.{30}/.*", rp):
os.remove(rp)
|
def clean_up(fastq):
"""
Cleans the temporary fastq files. If they are symlinks, the link
source is removed
Parameters
----------
fastq : list
List of fastq files.
"""
for fq in fastq:
# Get real path of fastq files, following symlinks
rp = os.path.realpath(fq)
logger.debug("Removing temporary fastq file path: {}".format(rp))
if re.match(".*/work/.{2}/.{30}/.*", rp):
os.remove(rp)
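
The regex restricts deletion to files that resolve into a Nextflow-style work directory (a 2-character / 30-character hash pair), which protects the original input reads; a small check with hypothetical paths:

import re

work_pattern = ".*/work/.{2}/.{30}/.*"
inside_work = "/data/run1/work/3f/" + "a" * 30 + "/sample_1.fastq.gz"
outside_work = "/data/reads/sample_1.fastq.gz"
print(bool(re.match(work_pattern, inside_work)))   # True  -> would be removed
print(bool(re.match(work_pattern, outside_work)))  # False -> left untouched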
|
[
"Cleans",
"the",
"temporary",
"fastq",
"files",
".",
"If",
"they",
"are",
"symlinks",
"the",
"link",
"source",
"is",
"removed"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/megahit.py#L177-L193
|
[
"def",
"clean_up",
"(",
"fastq",
")",
":",
"for",
"fq",
"in",
"fastq",
":",
"# Get real path of fastq files, following symlinks",
"rp",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"fq",
")",
"logger",
".",
"debug",
"(",
"\"Removing temporary fastq file path: {}\"",
".",
"format",
"(",
"rp",
")",
")",
"if",
"re",
".",
"match",
"(",
"\".*/work/.{2}/.{30}/.*\"",
",",
"rp",
")",
":",
"os",
".",
"remove",
"(",
"rp",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Abricate.parse_files
|
Public method for parsing abricate output files.
This method is called at class instantiation for the provided
output files. Additional abricate output files can be added using
this method after the class instantiation.
Parameters
----------
fls : list
List of paths to Abricate files
|
flowcraft/templates/process_abricate.py
|
def parse_files(self, fls):
"""Public method for parsing abricate output files.
This method is called at class instantiation for the provided
output files. Additional abricate output files can be added using
this method after the class instantiation.
Parameters
----------
fls : list
List of paths to Abricate files
"""
for f in fls:
# Make sure paths exists
if os.path.exists(f):
self._parser(f)
else:
logger.warning("File {} does not exist".format(f))
|
def parse_files(self, fls):
"""Public method for parsing abricate output files.
This method is called at class instantiation for the provided
output files. Additional abricate output files can be added using
this method after the class instantiation.
Parameters
----------
fls : list
List of paths to Abricate files
"""
for f in fls:
# Make sure paths exists
if os.path.exists(f):
self._parser(f)
else:
logger.warning("File {} does not exist".format(f))
|
[
"Public",
"method",
"for",
"parsing",
"abricate",
"output",
"files",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_abricate.py#L131-L150
|
[
"def",
"parse_files",
"(",
"self",
",",
"fls",
")",
":",
"for",
"f",
"in",
"fls",
":",
"# Make sure paths exists",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"f",
")",
":",
"self",
".",
"_parser",
"(",
"f",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"File {} does not exist\"",
".",
"format",
"(",
"f",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Abricate._parser
|
Parser for a single abricate output file.
This parser will scan a single Abricate output file and populate
the :py:attr:`Abricate.storage` attribute.
Parameters
----------
fl : str
Path to abricate output file
Notes
-----
This method will populate the :py:attr:`Abricate.storage` attribute
with all compliant lines in the abricate output file. Entries are
inserted using an arbitrary key that is set by the
:py:attr:`Abricate._key` attribute.
|
flowcraft/templates/process_abricate.py
|
def _parser(self, fl):
"""Parser for a single abricate output file.
This parser will scan a single Abricate output file and populate
the :py:attr:`Abricate.storage` attribute.
Parameters
----------
fl : str
Path to abricate output file
Notes
-----
This method will populate the :py:attr:`Abricate.storage` attribute
with all compliant lines in the abricate output file. Entries are
inserted using an arbitrary key that is set by the
:py:attr:`Abricate._key` attribute.
"""
with open(fl) as fh:
for line in fh:
# Skip header and comment lines
if line.startswith("#") or line.strip() == "":
continue
fields = line.strip().split("\t")
try:
coverage = float(fields[8])
except ValueError:
coverage = None
try:
identity = float(fields[9])
except ValueError:
identity = None
try:
accession = fields[11]
except IndexError:
accession = None
self.storage[self._key] = {
"log_file": os.path.basename(fl),
"infile": fields[0],
"reference": fields[1],
"seq_range": (int(fields[2]), int(fields[3])),
"gene": fields[4],
"accession": accession,
"database": fields[10],
"coverage": coverage,
"identity": identity
}
self._key += 1
|
def _parser(self, fl):
"""Parser for a single abricate output file.
This parser will scan a single Abricate output file and populate
the :py:attr:`Abricate.storage` attribute.
Parameters
----------
fl : str
Path to abricate output file
Notes
-----
This method will populate the :py:attr:`Abricate.storage` attribute
with all compliant lines in the abricate output file. Entries are
inserted using an arbitrary key that is set by the
:py:attr:`Abricate._key` attribute.
"""
with open(fl) as fh:
for line in fh:
# Skip header and comment lines
if line.startswith("#") or line.strip() == "":
continue
fields = line.strip().split("\t")
try:
coverage = float(fields[8])
except ValueError:
coverage = None
try:
identity = float(fields[9])
except ValueError:
identity = None
try:
accession = fields[11]
except IndexError:
accession = None
self.storage[self._key] = {
"log_file": os.path.basename(fl),
"infile": fields[0],
"reference": fields[1],
"seq_range": (int(fields[2]), int(fields[3])),
"gene": fields[4],
"accession": accession,
"database": fields[10],
"coverage": coverage,
"identity": identity
}
self._key += 1
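
To make the column handling above concrete, the mock line below (tab separated, with hypothetical values) marks which fields the parser consumes; columns 5-7 are simply ignored here.

line = "\t".join([
    "sampleA.fasta",       # fields[0]  -> "infile"
    "NODE_1_length_5000",  # fields[1]  -> "reference"
    "120", "980",          # fields[2:4] -> "seq_range"
    "blaTEM-1",            # fields[4]  -> "gene"
    "-", "-", "-",         # fields[5:8]  (not used by this parser)
    "99.4",                # fields[8]  -> "coverage"
    "100.00",              # fields[9]  -> "identity"
    "resfinder",           # fields[10] -> "database"
    "AY458016",            # fields[11] -> "accession"
])
fields = line.strip().split("\t")
print(fields[4], float(fields[8]), float(fields[9]), fields[10])
# -> blaTEM-1 99.4 100.0 resfinder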
|
[
"Parser",
"for",
"a",
"single",
"abricate",
"output",
"file",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_abricate.py#L152-L207
|
[
"def",
"_parser",
"(",
"self",
",",
"fl",
")",
":",
"with",
"open",
"(",
"fl",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"# Skip header and comment lines",
"if",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
"or",
"line",
".",
"strip",
"(",
")",
"==",
"\"\"",
":",
"continue",
"fields",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"try",
":",
"coverage",
"=",
"float",
"(",
"fields",
"[",
"8",
"]",
")",
"except",
"ValueError",
":",
"coverage",
"=",
"None",
"try",
":",
"identity",
"=",
"float",
"(",
"fields",
"[",
"9",
"]",
")",
"except",
"ValueError",
":",
"identity",
"=",
"None",
"try",
":",
"accession",
"=",
"fields",
"[",
"11",
"]",
"except",
"IndexError",
":",
"accession",
"=",
"None",
"self",
".",
"storage",
"[",
"self",
".",
"_key",
"]",
"=",
"{",
"\"log_file\"",
":",
"os",
".",
"path",
".",
"basename",
"(",
"fl",
")",
",",
"\"infile\"",
":",
"fields",
"[",
"0",
"]",
",",
"\"reference\"",
":",
"fields",
"[",
"1",
"]",
",",
"\"seq_range\"",
":",
"(",
"int",
"(",
"fields",
"[",
"2",
"]",
")",
",",
"int",
"(",
"fields",
"[",
"3",
"]",
")",
")",
",",
"\"gene\"",
":",
"fields",
"[",
"4",
"]",
",",
"\"accession\"",
":",
"accession",
",",
"\"database\"",
":",
"fields",
"[",
"10",
"]",
",",
"\"coverage\"",
":",
"coverage",
",",
"\"identity\"",
":",
"identity",
"}",
"self",
".",
"_key",
"+=",
"1"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
Abricate.iter_filter
|
General purpose filter iterator.
This general filter iterator allows the filtering of entries based
on one or more custom filters. These filters must contain
an entry of the `storage` attribute, a comparison operator, and the
test value. For example, to filter out entries with coverage below 80::
my_filter = ["coverage", ">=", 80]
Filters should always be provided as a list of lists::
iter_filter([["coverage", ">=", 80]])
# or
my_filters = [["coverage", ">=", 80],
["identity", ">=", 50]]
iter_filter(my_filters)
As a convenience, a list of the desired databases can be directly
specified using the `databases` argument, which will only report
entries for the specified databases::
iter_filter(my_filters, databases=["plasmidfinder"])
By default, this method will yield the complete entry record. However,
the returned fields can be specified using the `fields` option::
iter_filter(my_filters, fields=["reference", "coverage"])
Parameters
----------
filters : list
List of lists with the custom filter. Each list should have three
elements. (1) the key from the entry to be compared; (2) the
comparison operator; (3) the test value. Example:
``[["identity", ">", 80]]``.
databases : list
List of databases that should be reported.
fields : list
List of fields from each individual entry that are yielded.
filter_behavior : str
options: ``'and'`` ``'or'``
Sets the behaviour of the filters, if multiple filters have been
provided. By default it is set to ``'and'``, which means that an
entry has to pass all filters. It can be set to ``'or'``, in which
case only one of the filters has to pass.
Yields
------
dic : dict
Dictionary object containing a :py:attr:`Abricate.storage` entry
that passed the filters.
|
flowcraft/templates/process_abricate.py
|
def iter_filter(self, filters, databases=None, fields=None,
filter_behavior="and"):
"""General purpose filter iterator.
This general filter iterator allows the filtering of entries based
on one or more custom filters. These filters must contain
an entry of the `storage` attribute, a comparison operator, and the
test value. For example, to filter out entries with coverage below 80::
my_filter = ["coverage", ">=", 80]
Filters should always be provided as a list of lists::
iter_filter([["coverage", ">=", 80]])
# or
my_filters = [["coverage", ">=", 80],
["identity", ">=", 50]]
iter_filter(my_filters)
As a convenience, a list of the desired databases can be directly
specified using the `databases` argument, which will only report
entries for the specified databases::
iter_filter(my_filters, databases=["plasmidfinder"])
By default, this method will yield the complete entry record. However,
the returned fields can be specified using the `fields` option::
iter_filter(my_filters, fields=["reference", "coverage"])
Parameters
----------
filters : list
List of lists with the custom filter. Each list should have three
elements. (1) the key from the entry to be compared; (2) the
comparison operator; (3) the test value. Example:
``[["identity", ">", 80]]``.
databases : list
List of databases that should be reported.
fields : list
List of fields from each individual entry that are yielded.
filter_behavior : str
options: ``'and'`` ``'or'``
Sets the behaviour of the filters, if multiple filters have been
provided. By default it is set to ``'and'``, which means that an
entry has to pass all filters. It can be set to ``'or'``, in which
case only one of the filters has to pass.
Yields
------
dic : dict
Dictionary object containing a :py:attr:`Abricate.storage` entry
that passed the filters.
"""
if filter_behavior not in ["and", "or"]:
raise ValueError("Filter behavior must be either 'and' or 'or'")
for dic in self.storage.values():
# This attribute will determine whether an entry will be yielded
# or not
_pass = False
# Stores the flags with the test results for each filter
# The results will be either True or False
flag = []
# Filter for databases
if databases:
# Skip entry if not in specified database
if dic["database"] not in databases:
continue
# Apply filters
for f in filters:
# Get value of current filter
val = dic[f[0]]
if not self._test_truth(val, f[1], f[2]):
flag.append(False)
else:
flag.append(True)
# Test whether the entry will pass based on the test results
# and the filter behaviour
if filter_behavior == "and":
if all(flag):
_pass = True
elif filter_behavior == "or":
if any(flag):
_pass = True
if _pass:
if fields:
yield dict((x, y) for x, y in dic.items() if x in fields)
else:
yield dic
|
def iter_filter(self, filters, databases=None, fields=None,
filter_behavior="and"):
"""General purpose filter iterator.
This general filter iterator allows the filtering of entries based
on one or more custom filters. These filters must contain
an entry of the `storage` attribute, a comparison operator, and the
test value. For example, to filter out entries with coverage below 80::
my_filter = ["coverage", ">=", 80]
Filters should always be provided as a list of lists::
iter_filter([["coverage", ">=", 80]])
# or
my_filters = [["coverage", ">=", 80],
["identity", ">=", 50]]
iter_filter(my_filters)
As a convenience, a list of the desired databases can be directly
specified using the `databases` argument, which will only report
entries for the specified databases::
iter_filter(my_filters, databases=["plasmidfinder"])
By default, this method will yield the complete entry record. However,
the returned fields can be specified using the `fields` option::
iter_filter(my_filters, fields=["reference", "coverage"])
Parameters
----------
filters : list
List of lists with the custom filter. Each list should have three
elements. (1) the key from the entry to be compared; (2) the
comparison operator; (3) the test value. Example:
``[["identity", ">", 80]]``.
databases : list
List of databases that should be reported.
fields : list
List of fields from each individual entry that are yielded.
filter_behavior : str
options: ``'and'`` ``'or'``
Sets the behaviour of the filters, if multiple filters have been
provided. By default it is set to ``'and'``, which means that an
entry has to pass all filters. It can be set to ``'or'``, in which
case only one of the filters has to pass.
Yields
------
dic : dict
Dictionary object containing a :py:attr:`Abricate.storage` entry
that passed the filters.
"""
if filter_behavior not in ["and", "or"]:
raise ValueError("Filter behavior must be either 'and' or 'or'")
for dic in self.storage.values():
# This attribute will determine whether an entry will be yielded
# or not
_pass = False
# Stores the flags with the test results for each filter
# The results will be either True or False
flag = []
# Filter for databases
if databases:
# Skip entry if not in specified database
if dic["database"] not in databases:
continue
# Apply filters
for f in filters:
# Get value of current filter
val = dic[f[0]]
if not self._test_truth(val, f[1], f[2]):
flag.append(False)
else:
flag.append(True)
# Test whether the entry will pass based on the test results
# and the filter behaviour
if filter_behavior == "and":
if all(flag):
_pass = True
elif filter_behavior == "or":
if any(flag):
_pass = True
if _pass:
if fields:
yield dict((x, y) for x, y in dic.items() if x in fields)
else:
yield dic
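
A usage sketch for the iterator, assuming an `Abricate` instance built from a list of output files (as the `parse_files` docstring indicates); the file name and thresholds are hypothetical.

abr = Abricate(["sampleA_abr_resfinder.txt"])

# Keep hits with coverage >= 80 AND identity >= 90 from the resfinder database,
# yielding only a trimmed record per hit.
hit_filters = [["coverage", ">=", 80], ["identity", ">=", 90]]
for hit in abr.iter_filter(hit_filters,
                           databases=["resfinder"],
                           fields=["gene", "coverage", "identity"]):
    print(hit)   # e.g. {"gene": "blaTEM-1", "coverage": 99.4, "identity": 100.0}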
|
[
"General",
"purpose",
"filter",
"iterator",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_abricate.py#L242-L340
|
[
"def",
"iter_filter",
"(",
"self",
",",
"filters",
",",
"databases",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"filter_behavior",
"=",
"\"and\"",
")",
":",
"if",
"filter_behavior",
"not",
"in",
"[",
"\"and\"",
",",
"\"or\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"Filter behavior must be either 'and' or 'or'\"",
")",
"for",
"dic",
"in",
"self",
".",
"storage",
".",
"values",
"(",
")",
":",
"# This attribute will determine whether an entry will be yielded",
"# or not",
"_pass",
"=",
"False",
"# Stores the flags with the test results for each filter",
"# The results will be either True or False",
"flag",
"=",
"[",
"]",
"# Filter for databases",
"if",
"databases",
":",
"# Skip entry if not in specified database",
"if",
"dic",
"[",
"\"database\"",
"]",
"not",
"in",
"databases",
":",
"continue",
"# Apply filters",
"for",
"f",
"in",
"filters",
":",
"# Get value of current filter",
"val",
"=",
"dic",
"[",
"f",
"[",
"0",
"]",
"]",
"if",
"not",
"self",
".",
"_test_truth",
"(",
"val",
",",
"f",
"[",
"1",
"]",
",",
"f",
"[",
"2",
"]",
")",
":",
"flag",
".",
"append",
"(",
"False",
")",
"else",
":",
"flag",
".",
"append",
"(",
"True",
")",
"# Test whether the entry will pass based on the test results",
"# and the filter behaviour",
"if",
"filter_behavior",
"==",
"\"and\"",
":",
"if",
"all",
"(",
"flag",
")",
":",
"_pass",
"=",
"True",
"elif",
"filter_behavior",
"==",
"\"or\"",
":",
"if",
"any",
"(",
"flag",
")",
":",
"_pass",
"=",
"True",
"if",
"_pass",
":",
"if",
"fields",
":",
"yield",
"dict",
"(",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"dic",
".",
"items",
"(",
")",
"if",
"x",
"in",
"fields",
")",
"else",
":",
"yield",
"dic"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
AbricateReport._get_contig_id
|
Tries to retrieve contig id. Returns the original string if it
is unable to retrieve the id.
Parameters
----------
contig_str : str
Full contig string (fasta header)
Returns
-------
str
Contig id
|
flowcraft/templates/process_abricate.py
|
def _get_contig_id(contig_str):
"""Tries to retrieve contig id. Returns the original string if it
is unable to retrieve the id.
Parameters
----------
contig_str : str
Full contig string (fasta header)
Returns
-------
str
Contig id
"""
contig_id = contig_str
try:
contig_id = re.search(".*NODE_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
try:
contig_id = re.search(".*Contig_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
return contig_id
|
def _get_contig_id(contig_str):
"""Tries to retrieve contig id. Returns the original string if it
is unable to retrieve the id.
Parameters
----------
contig_str : str
Full contig string (fasta header)
Returns
-------
str
Contig id
"""
contig_id = contig_str
try:
contig_id = re.search(".*NODE_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
try:
contig_id = re.search(".*Contig_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
return contig_id
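
A quick check of the two header patterns handled above (the contig headers are hypothetical):

import re

print(re.search(".*NODE_([0-9]*)_.*", "NODE_12_length_53918_cov_14.2").group(1))   # -> 12
print(re.search(".*Contig_([0-9]*)_.*", "Contig_7_length_20110_pilon").group(1))   # -> 7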
|
[
"Tries",
"to",
"retrieve",
"contig",
"id",
".",
"Returns",
"the",
"original",
"string",
"if",
"it",
"is",
"unable",
"to",
"retrieve",
"the",
"id",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_abricate.py#L380-L407
|
[
"def",
"_get_contig_id",
"(",
"contig_str",
")",
":",
"contig_id",
"=",
"contig_str",
"try",
":",
"contig_id",
"=",
"re",
".",
"search",
"(",
"\".*NODE_([0-9]*)_.*\"",
",",
"contig_str",
")",
".",
"group",
"(",
"1",
")",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"contig_id",
"=",
"re",
".",
"search",
"(",
"\".*Contig_([0-9]*)_.*\"",
",",
"contig_str",
")",
".",
"group",
"(",
"1",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"contig_id"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
AbricateReport.get_plot_data
|
Generates the JSON report to plot the gene boxes
Following the convention of the reports platform, this method returns
a list of JSON/dict objects with the information about each entry in
the abricate file. The information contained in this JSON is::
{contig_id: <str>,
seqRange: [<int>, <int>],
gene: <str>,
accession: <str>,
coverage: <float>,
identity: <float>
}
Note that the `seqRange` entry contains the position in the
corresponding contig, not the absolute position in the whole assembly.
Returns
-------
json_dic : list
List of JSON/dict objects with the report data.
|
flowcraft/templates/process_abricate.py
|
def get_plot_data(self):
""" Generates the JSON report to plot the gene boxes
Following the convention of the reports platform, this method returns
a list of JSON/dict objects with the information about each entry in
the abricate file. The information contained in this JSON is::
{contig_id: <str>,
seqRange: [<int>, <int>],
gene: <str>,
accession: <str>,
coverage: <float>,
identity: <float>
}
Note that the `seqRange` entry contains the position in the
corresponding contig, not the absolute position in the whole assembly.
Returns
-------
json_dic : list
List of JSON/dict objects with the report data.
"""
json_dic = {"plotData": []}
sample_dic = {}
sample_assembly_map = {}
for entry in self.storage.values():
sample_id = re.match("(.*)_abr", entry["log_file"]).groups()[0]
if sample_id not in sample_dic:
sample_dic[sample_id] = {}
# Get contig ID using the same regex as in `assembly_report.py`
# template
contig_id = self._get_contig_id(entry["reference"])
# Get database
database = entry["database"]
if database not in sample_dic[sample_id]:
sample_dic[sample_id][database] = []
# Update the sample-assembly correspondence dict
if sample_id not in sample_assembly_map:
sample_assembly_map[sample_id] = entry["infile"]
sample_dic[sample_id][database].append(
{"contig": contig_id,
"seqRange": entry["seq_range"],
"gene": entry["gene"].replace("'", ""),
"accession": entry["accession"],
"coverage": entry["coverage"],
"identity": entry["identity"],
},
)
for sample, data in sample_dic.items():
json_dic["plotData"].append(
{
"sample": sample,
"data": {"abricateXrange": data},
"assemblyFile": sample_assembly_map[sample]
}
)
return json_dic
|
def get_plot_data(self):
""" Generates the JSON report to plot the gene boxes
Following the convention of the reports platform, this method returns
a list of JSON/dict objects with the information about each entry in
the abricate file. The information contained in this JSON is::
{contig_id: <str>,
seqRange: [<int>, <int>],
gene: <str>,
accession: <str>,
coverage: <float>,
identity: <float>
}
Note that the `seqRange` entry contains the position in the
corresponding contig, not the absolute position in the whole assembly.
Returns
-------
json_dic : list
List of JSON/dict objects with the report data.
"""
json_dic = {"plotData": []}
sample_dic = {}
sample_assembly_map = {}
for entry in self.storage.values():
sample_id = re.match("(.*)_abr", entry["log_file"]).groups()[0]
if sample_id not in sample_dic:
sample_dic[sample_id] = {}
# Get contig ID using the same regex as in `assembly_report.py`
# template
contig_id = self._get_contig_id(entry["reference"])
# Get database
database = entry["database"]
if database not in sample_dic[sample_id]:
sample_dic[sample_id][database] = []
# Update the sample-assembly correspondence dict
if sample_id not in sample_assembly_map:
sample_assembly_map[sample_id] = entry["infile"]
sample_dic[sample_id][database].append(
{"contig": contig_id,
"seqRange": entry["seq_range"],
"gene": entry["gene"].replace("'", ""),
"accession": entry["accession"],
"coverage": entry["coverage"],
"identity": entry["identity"],
},
)
for sample, data in sample_dic.items():
json_dic["plotData"].append(
{
"sample": sample,
"data": {"abricateXrange": data},
"assemblyFile": sample_assembly_map[sample]
}
)
return json_dic
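
The sample name used for grouping is whatever precedes "_abr" in the abricate log file name; a minimal check with a hypothetical file name:

import re

print(re.match("(.*)_abr", "sampleA_abr.resfinder.txt").groups()[0])   # -> sampleA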
|
[
"Generates",
"the",
"JSON",
"report",
"to",
"plot",
"the",
"gene",
"boxes"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_abricate.py#L409-L474
|
[
"def",
"get_plot_data",
"(",
"self",
")",
":",
"json_dic",
"=",
"{",
"\"plotData\"",
":",
"[",
"]",
"}",
"sample_dic",
"=",
"{",
"}",
"sample_assembly_map",
"=",
"{",
"}",
"for",
"entry",
"in",
"self",
".",
"storage",
".",
"values",
"(",
")",
":",
"sample_id",
"=",
"re",
".",
"match",
"(",
"\"(.*)_abr\"",
",",
"entry",
"[",
"\"log_file\"",
"]",
")",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"if",
"sample_id",
"not",
"in",
"sample_dic",
":",
"sample_dic",
"[",
"sample_id",
"]",
"=",
"{",
"}",
"# Get contig ID using the same regex as in `assembly_report.py`",
"# template",
"contig_id",
"=",
"self",
".",
"_get_contig_id",
"(",
"entry",
"[",
"\"reference\"",
"]",
")",
"# Get database",
"database",
"=",
"entry",
"[",
"\"database\"",
"]",
"if",
"database",
"not",
"in",
"sample_dic",
"[",
"sample_id",
"]",
":",
"sample_dic",
"[",
"sample_id",
"]",
"[",
"database",
"]",
"=",
"[",
"]",
"# Update the sample-assembly correspondence dict",
"if",
"sample_id",
"not",
"in",
"sample_assembly_map",
":",
"sample_assembly_map",
"[",
"sample_id",
"]",
"=",
"entry",
"[",
"\"infile\"",
"]",
"sample_dic",
"[",
"sample_id",
"]",
"[",
"database",
"]",
".",
"append",
"(",
"{",
"\"contig\"",
":",
"contig_id",
",",
"\"seqRange\"",
":",
"entry",
"[",
"\"seq_range\"",
"]",
",",
"\"gene\"",
":",
"entry",
"[",
"\"gene\"",
"]",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
",",
"\"accession\"",
":",
"entry",
"[",
"\"accession\"",
"]",
",",
"\"coverage\"",
":",
"entry",
"[",
"\"coverage\"",
"]",
",",
"\"identity\"",
":",
"entry",
"[",
"\"identity\"",
"]",
",",
"}",
",",
")",
"for",
"sample",
",",
"data",
"in",
"sample_dic",
".",
"items",
"(",
")",
":",
"json_dic",
"[",
"\"plotData\"",
"]",
".",
"append",
"(",
"{",
"\"sample\"",
":",
"sample",
",",
"\"data\"",
":",
"{",
"\"abricateXrange\"",
":",
"data",
"}",
",",
"\"assemblyFile\"",
":",
"sample_assembly_map",
"[",
"sample",
"]",
"}",
")",
"return",
"json_dic"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
AbricateReport.write_report_data
|
Writes the JSON report to a json file
|
flowcraft/templates/process_abricate.py
|
def write_report_data(self):
"""Writes the JSON report to a json file
"""
json_plot = self.get_plot_data()
json_table = self.get_table_data()
json_dic = {**json_plot, **json_table}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":")))
|
def write_report_data(self):
"""Writes the JSON report to a json file
"""
json_plot = self.get_plot_data()
json_table = self.get_table_data()
json_dic = {**json_plot, **json_table}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":")))
|
[
"Writes",
"the",
"JSON",
"report",
"to",
"a",
"json",
"file"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_abricate.py#L527-L537
|
[
"def",
"write_report_data",
"(",
"self",
")",
":",
"json_plot",
"=",
"self",
".",
"get_plot_data",
"(",
")",
"json_table",
"=",
"self",
".",
"get_table_data",
"(",
")",
"json_dic",
"=",
"{",
"*",
"*",
"json_plot",
",",
"*",
"*",
"json_table",
"}",
"with",
"open",
"(",
"\".report.json\"",
",",
"\"w\"",
")",
"as",
"json_report",
":",
"json_report",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_dic",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
test
|
main
|
Main executor of the assembly_report template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to assembly file in Fasta format.
|
flowcraft/templates/assembly_report.py
|
def main(sample_id, assembly_file, coverage_bp_file=None):
"""Main executor of the assembly_report template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to assembly file in Fasta format.
"""
logger.info("Starting assembly report")
assembly_obj = Assembly(assembly_file, sample_id)
logger.info("Retrieving summary statistics for assembly")
assembly_obj.get_summary_stats("{}_assembly_report.csv".format(sample_id))
size_dist = [len(x) for x in assembly_obj.contigs.values()]
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs",
"value": assembly_obj.summary_info["ncontigs"],
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP",
"value": assembly_obj.summary_info["total_len"],
"table": "assembly",
"columnBar": True},
]
}],
"plotData": [{
"sample": sample_id,
"data": {
"size_dist": size_dist
}
}]
}
if coverage_bp_file:
try:
window = 2000
gc_sliding_data = assembly_obj.get_gc_sliding(window=window)
cov_sliding_data = \
assembly_obj.get_coverage_sliding(coverage_bp_file,
window=window)
# Get total basepairs based on the individual coverage of each
# contig bp
total_bp = sum(
[sum(x) for x in assembly_obj.contig_coverage.values()]
)
# Add data to json report
json_dic["plotData"][0]["data"]["genomeSliding"] = {
"gcData": gc_sliding_data,
"covData": cov_sliding_data,
"window": window,
"xbars": assembly_obj._get_window_labels(window),
"assemblyFile": os.path.basename(assembly_file)
}
json_dic["plotData"][0]["data"]["sparkline"] = total_bp
except:
logger.error("Unexpected error creating sliding window data:\\n"
"{}".format(traceback.format_exc()))
# Write json report
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
def main(sample_id, assembly_file, coverage_bp_file=None):
"""Main executor of the assembly_report template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to assembly file in Fasta format.
"""
logger.info("Starting assembly report")
assembly_obj = Assembly(assembly_file, sample_id)
logger.info("Retrieving summary statistics for assembly")
assembly_obj.get_summary_stats("{}_assembly_report.csv".format(sample_id))
size_dist = [len(x) for x in assembly_obj.contigs.values()]
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs",
"value": assembly_obj.summary_info["ncontigs"],
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP",
"value": assembly_obj.summary_info["total_len"],
"table": "assembly",
"columnBar": True},
]
}],
"plotData": [{
"sample": sample_id,
"data": {
"size_dist": size_dist
}
}]
}
if coverage_bp_file:
try:
window = 2000
gc_sliding_data = assembly_obj.get_gc_sliding(window=window)
cov_sliding_data = \
assembly_obj.get_coverage_sliding(coverage_bp_file,
window=window)
# Get total basepairs based on the individual coverage of each
# contig bpx
total_bp = sum(
[sum(x) for x in assembly_obj.contig_coverage.values()]
)
# Add data to json report
json_dic["plotData"][0]["data"]["genomeSliding"] = {
"gcData": gc_sliding_data,
"covData": cov_sliding_data,
"window": window,
"xbars": assembly_obj._get_window_labels(window),
"assemblyFile": os.path.basename(assembly_file)
}
json_dic["plotData"][0]["data"]["sparkline"] = total_bp
except:
logger.error("Unexpected error creating sliding window data:\\n"
"{}".format(traceback.format_exc()))
# Write json report
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
[
"Main",
"executor",
"of",
"the",
"assembly_report",
"template",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/assembly_report.py#L423-L498
|
[
"def",
"main",
"(",
"sample_id",
",",
"assembly_file",
",",
"coverage_bp_file",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"Starting assembly report\"",
")",
"assembly_obj",
"=",
"Assembly",
"(",
"assembly_file",
",",
"sample_id",
")",
"logger",
".",
"info",
"(",
"\"Retrieving summary statistics for assembly\"",
")",
"assembly_obj",
".",
"get_summary_stats",
"(",
"\"{}_assembly_report.csv\"",
".",
"format",
"(",
"sample_id",
")",
")",
"size_dist",
"=",
"[",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"assembly_obj",
".",
"contigs",
".",
"values",
"(",
")",
"]",
"json_dic",
"=",
"{",
"\"tableRow\"",
":",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"data\"",
":",
"[",
"{",
"\"header\"",
":",
"\"Contigs\"",
",",
"\"value\"",
":",
"assembly_obj",
".",
"summary_info",
"[",
"\"ncontigs\"",
"]",
",",
"\"table\"",
":",
"\"assembly\"",
",",
"\"columnBar\"",
":",
"True",
"}",
",",
"{",
"\"header\"",
":",
"\"Assembled BP\"",
",",
"\"value\"",
":",
"assembly_obj",
".",
"summary_info",
"[",
"\"total_len\"",
"]",
",",
"\"table\"",
":",
"\"assembly\"",
",",
"\"columnBar\"",
":",
"True",
"}",
",",
"]",
"}",
"]",
",",
"\"plotData\"",
":",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"data\"",
":",
"{",
"\"size_dist\"",
":",
"size_dist",
"}",
"}",
"]",
"}",
"if",
"coverage_bp_file",
":",
"try",
":",
"window",
"=",
"2000",
"gc_sliding_data",
"=",
"assembly_obj",
".",
"get_gc_sliding",
"(",
"window",
"=",
"window",
")",
"cov_sliding_data",
"=",
"assembly_obj",
".",
"get_coverage_sliding",
"(",
"coverage_bp_file",
",",
"window",
"=",
"window",
")",
"# Get total basepairs based on the individual coverage of each",
"# contig bpx",
"total_bp",
"=",
"sum",
"(",
"[",
"sum",
"(",
"x",
")",
"for",
"x",
"in",
"assembly_obj",
".",
"contig_coverage",
".",
"values",
"(",
")",
"]",
")",
"# Add data to json report",
"json_dic",
"[",
"\"plotData\"",
"]",
"[",
"0",
"]",
"[",
"\"data\"",
"]",
"[",
"\"genomeSliding\"",
"]",
"=",
"{",
"\"gcData\"",
":",
"gc_sliding_data",
",",
"\"covData\"",
":",
"cov_sliding_data",
",",
"\"window\"",
":",
"window",
",",
"\"xbars\"",
":",
"assembly_obj",
".",
"_get_window_labels",
"(",
"window",
")",
",",
"\"assemblyFile\"",
":",
"os",
".",
"path",
".",
"basename",
"(",
"assembly_file",
")",
"}",
"json_dic",
"[",
"\"plotData\"",
"]",
"[",
"0",
"]",
"[",
"\"data\"",
"]",
"[",
"\"sparkline\"",
"]",
"=",
"total_bp",
"except",
":",
"logger",
".",
"error",
"(",
"\"Unexpected error creating sliding window data:\\\\n\"",
"\"{}\"",
".",
"format",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"# Write json report",
"with",
"open",
"(",
"\".report.json\"",
",",
"\"w\"",
")",
"as",
"json_report",
":",
"json_report",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_dic",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")",
"with",
"open",
"(",
"\".status\"",
",",
"\"w\"",
")",
"as",
"status_fh",
":",
"status_fh",
".",
"write",
"(",
"\"pass\"",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
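A minimal sketch of the coverage-totalling step inside the assembly_report main above; contig_coverage holds made-up per-base depths rather than real data.

# Hypothetical per-contig coverage lists (one integer per base position)
contig_coverage = {
    "contig_1": [10, 12, 11, 9],
    "contig_2": [30, 28, 31],
}

# Sum every position of every contig, as done before setting the sparkline entry
total_bp = sum(sum(cov) for cov in contig_coverage.values())
print(total_bp)  # 131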
test
|
Assembly._parse_assembly
|
Parse an assembly file in fasta format.
This is a Fasta parsing method that populates the
:py:attr:`Assembly.contigs` attribute with data for each contig in the
assembly.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
|
flowcraft/templates/assembly_report.py
|
def _parse_assembly(self, assembly_file):
"""Parse an assembly file in fasta format.
This is a Fasta parsing method that populates the
:py:attr:`Assembly.contigs` attribute with data for each contig in the
assembly.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
"""
with open(assembly_file) as fh:
header = None
logger.debug("Starting iteration of assembly file: {}".format(
assembly_file))
for line in fh:
# Skip empty lines
if not line.strip():
continue
if line.startswith(">"):
# Add contig header to contig dictionary
header = line[1:].strip()
self.contigs[header] = []
else:
# Add sequence string for the current contig
self.contigs[header].append(line.strip())
# After populating the contigs dictionary, convert the values
# list into a string sequence
self.contigs = OrderedDict(
(header, "".join(seq)) for header, seq in self.contigs.items())
|
def _parse_assembly(self, assembly_file):
"""Parse an assembly file in fasta format.
This is a Fasta parsing method that populates the
:py:attr:`Assembly.contigs` attribute with data for each contig in the
assembly.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
"""
with open(assembly_file) as fh:
header = None
logger.debug("Starting iteration of assembly file: {}".format(
assembly_file))
for line in fh:
# Skip empty lines
if not line.strip():
continue
if line.startswith(">"):
# Add contig header to contig dictionary
header = line[1:].strip()
self.contigs[header] = []
else:
# Add sequence string for the current contig
self.contigs[header].append(line.strip())
# After populating the contigs dictionary, convert the values
# list into a string sequence
self.contigs = OrderedDict(
(header, "".join(seq)) for header, seq in self.contigs.items())
|
[
"Parse",
"an",
"assembly",
"file",
"in",
"fasta",
"format",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/assembly_report.py#L143-L181
|
[
"def",
"_parse_assembly",
"(",
"self",
",",
"assembly_file",
")",
":",
"with",
"open",
"(",
"assembly_file",
")",
"as",
"fh",
":",
"header",
"=",
"None",
"logger",
".",
"debug",
"(",
"\"Starting iteration of assembly file: {}\"",
".",
"format",
"(",
"assembly_file",
")",
")",
"for",
"line",
"in",
"fh",
":",
"# Skip empty lines",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"if",
"line",
".",
"startswith",
"(",
"\">\"",
")",
":",
"# Add contig header to contig dictionary",
"header",
"=",
"line",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"self",
".",
"contigs",
"[",
"header",
"]",
"=",
"[",
"]",
"else",
":",
"# Add sequence string for the current contig",
"self",
".",
"contigs",
"[",
"header",
"]",
".",
"append",
"(",
"line",
".",
"strip",
"(",
")",
")",
"# After populating the contigs dictionary, convert the values",
"# list into a string sequence",
"self",
".",
"contigs",
"=",
"OrderedDict",
"(",
"(",
"header",
",",
"\"\"",
".",
"join",
"(",
"seq",
")",
")",
"for",
"header",
",",
"seq",
"in",
"self",
".",
"contigs",
".",
"items",
"(",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
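A self-contained sketch of the header/sequence accumulation strategy used by _parse_assembly above, run on an in-memory FASTA string instead of a file.

import io
from collections import OrderedDict

fasta = io.StringIO(">contig_1\nACGT\nACGT\n>contig_2\nGGCC\n")

contigs = OrderedDict()
header = None
for line in fasta:
    # Skip empty lines
    if not line.strip():
        continue
    if line.startswith(">"):
        # A new header starts a fresh accumulator list
        header = line[1:].strip()
        contigs[header] = []
    else:
        # Sequence lines are collected under the current header
        contigs[header].append(line.strip())

# Collapse the per-line chunks into one sequence string per contig
contigs = OrderedDict((h, "".join(seq)) for h, seq in contigs.items())
print(dict(contigs))  # {'contig_1': 'ACGTACGT', 'contig_2': 'GGCC'}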
test
|
Assembly.get_summary_stats
|
Generates a CSV report with summary statistics about the assembly
The calculated statistics are:
- Number of contigs
- Average contig size
- N50
- Total assembly length
- Average GC content
- Amount of missing data
Parameters
----------
output_csv: str
Name of the output CSV file.
|
flowcraft/templates/assembly_report.py
|
def get_summary_stats(self, output_csv=None):
"""Generates a CSV report with summary statistics about the assembly
The calculated statistics are:
- Number of contigs
- Average contig size
- N50
- Total assembly length
- Average GC content
- Amount of missing data
Parameters
----------
output_csv: str
Name of the output CSV file.
"""
contig_size_list = []
self.summary_info["ncontigs"] = len(self.contigs)
for contig_id, sequence in self.contigs.items():
logger.debug("Processing contig: {}".format(contig_id))
# Get contig sequence size
contig_len = len(sequence)
# Add size for average contig size
contig_size_list.append(contig_len)
# Add to total assembly length
self.summary_info["total_len"] += contig_len
# Add to average gc
self.summary_info["avg_gc"].append(
sum(map(sequence.count, ["G", "C"])) / contig_len
)
# Add to missing data
self.summary_info["missing_data"] += sequence.count("N")
# Get average contig size
logger.debug("Getting average contig size")
self.summary_info["avg_contig_size"] = \
sum(contig_size_list) / len(contig_size_list)
# Get average gc content
logger.debug("Getting average GC content")
self.summary_info["avg_gc"] = \
sum(self.summary_info["avg_gc"]) / len(self.summary_info["avg_gc"])
# Get N50
logger.debug("Getting N50")
cum_size = 0
for l in sorted(contig_size_list, reverse=True):
cum_size += l
if cum_size >= self.summary_info["total_len"] / 2:
self.summary_info["n50"] = l
break
if output_csv:
logger.debug("Writing report to csv")
# Write summary info to CSV
with open(output_csv, "w") as fh:
summary_line = "{}, {}\\n".format(
self.sample, ",".join(
[str(x) for x in self.summary_info.values()]))
fh.write(summary_line)
|
def get_summary_stats(self, output_csv=None):
"""Generates a CSV report with summary statistics about the assembly
The calculated statistics are:
- Number of contigs
- Average contig size
- N50
- Total assembly length
- Average GC content
- Amount of missing data
Parameters
----------
output_csv: str
Name of the output CSV file.
"""
contig_size_list = []
self.summary_info["ncontigs"] = len(self.contigs)
for contig_id, sequence in self.contigs.items():
logger.debug("Processing contig: {}".format(contig_id))
# Get contig sequence size
contig_len = len(sequence)
# Add size for average contig size
contig_size_list.append(contig_len)
# Add to total assembly length
self.summary_info["total_len"] += contig_len
# Add to average gc
self.summary_info["avg_gc"].append(
sum(map(sequence.count, ["G", "C"])) / contig_len
)
# Add to missing data
self.summary_info["missing_data"] += sequence.count("N")
# Get average contig size
logger.debug("Getting average contig size")
self.summary_info["avg_contig_size"] = \
sum(contig_size_list) / len(contig_size_list)
# Get average gc content
logger.debug("Getting average GC content")
self.summary_info["avg_gc"] = \
sum(self.summary_info["avg_gc"]) / len(self.summary_info["avg_gc"])
# Get N50
logger.debug("Getting N50")
cum_size = 0
for l in sorted(contig_size_list, reverse=True):
cum_size += l
if cum_size >= self.summary_info["total_len"] / 2:
self.summary_info["n50"] = l
break
if output_csv:
logger.debug("Writing report to csv")
# Write summary info to CSV
with open(output_csv, "w") as fh:
summary_line = "{}, {}\\n".format(
self.sample, ",".join(
[str(x) for x in self.summary_info.values()]))
fh.write(summary_line)
|
[
"Generates",
"a",
"CSV",
"report",
"with",
"summary",
"statistics",
"about",
"the",
"assembly"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/assembly_report.py#L213-L282
|
[
"def",
"get_summary_stats",
"(",
"self",
",",
"output_csv",
"=",
"None",
")",
":",
"contig_size_list",
"=",
"[",
"]",
"self",
".",
"summary_info",
"[",
"\"ncontigs\"",
"]",
"=",
"len",
"(",
"self",
".",
"contigs",
")",
"for",
"contig_id",
",",
"sequence",
"in",
"self",
".",
"contigs",
".",
"items",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Processing contig: {}\"",
".",
"format",
"(",
"contig_id",
")",
")",
"# Get contig sequence size",
"contig_len",
"=",
"len",
"(",
"sequence",
")",
"# Add size for average contig size",
"contig_size_list",
".",
"append",
"(",
"contig_len",
")",
"# Add to total assembly length",
"self",
".",
"summary_info",
"[",
"\"total_len\"",
"]",
"+=",
"contig_len",
"# Add to average gc",
"self",
".",
"summary_info",
"[",
"\"avg_gc\"",
"]",
".",
"append",
"(",
"sum",
"(",
"map",
"(",
"sequence",
".",
"count",
",",
"[",
"\"G\"",
",",
"\"C\"",
"]",
")",
")",
"/",
"contig_len",
")",
"# Add to missing data",
"self",
".",
"summary_info",
"[",
"\"missing_data\"",
"]",
"+=",
"sequence",
".",
"count",
"(",
"\"N\"",
")",
"# Get average contig size",
"logger",
".",
"debug",
"(",
"\"Getting average contig size\"",
")",
"self",
".",
"summary_info",
"[",
"\"avg_contig_size\"",
"]",
"=",
"sum",
"(",
"contig_size_list",
")",
"/",
"len",
"(",
"contig_size_list",
")",
"# Get average gc content",
"logger",
".",
"debug",
"(",
"\"Getting average GC content\"",
")",
"self",
".",
"summary_info",
"[",
"\"avg_gc\"",
"]",
"=",
"sum",
"(",
"self",
".",
"summary_info",
"[",
"\"avg_gc\"",
"]",
")",
"/",
"len",
"(",
"self",
".",
"summary_info",
"[",
"\"avg_gc\"",
"]",
")",
"# Get N50",
"logger",
".",
"debug",
"(",
"\"Getting N50\"",
")",
"cum_size",
"=",
"0",
"for",
"l",
"in",
"sorted",
"(",
"contig_size_list",
",",
"reverse",
"=",
"True",
")",
":",
"cum_size",
"+=",
"l",
"if",
"cum_size",
">=",
"self",
".",
"summary_info",
"[",
"\"total_len\"",
"]",
"/",
"2",
":",
"self",
".",
"summary_info",
"[",
"\"n50\"",
"]",
"=",
"l",
"break",
"if",
"output_csv",
":",
"logger",
".",
"debug",
"(",
"\"Writing report to csv\"",
")",
"# Write summary info to CSV",
"with",
"open",
"(",
"output_csv",
",",
"\"w\"",
")",
"as",
"fh",
":",
"summary_line",
"=",
"\"{}, {}\\\\n\"",
".",
"format",
"(",
"self",
".",
"sample",
",",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"summary_info",
".",
"values",
"(",
")",
"]",
")",
")",
"fh",
".",
"write",
"(",
"summary_line",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
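The N50 step of get_summary_stats above, reproduced on a toy list of contig lengths; the numbers are invented for illustration.

contig_size_list = [400, 300, 200, 100]
total_len = sum(contig_size_list)  # 1000

n50 = None
cum_size = 0
# Walk the contigs from largest to smallest until half the assembly is covered
for length in sorted(contig_size_list, reverse=True):
    cum_size += length
    if cum_size >= total_len / 2:
        n50 = length
        break
print(n50)  # 300 (400 alone is below 500; 400 + 300 = 700 reaches the half-way point)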
test
|
Assembly._get_window_labels
|
Returns the mapping between sliding window points and their contigs,
and the x-axis position of contig
Parameters
----------
window : int
Size of the window.
Returns
-------
xbars : list
The x-axis position of the ending for each contig.
labels : list
The x-axis labels for each data point in the sliding window
|
flowcraft/templates/assembly_report.py
|
def _get_window_labels(self, window):
"""Returns the mapping between sliding window points and their contigs,
and the x-axis position of contig
Parameters
----------
window : int
Size of the window.
Returns
-------
xbars : list
The x-axis position of the ending for each contig.
labels : list
The x-axis labels for each data point in the sliding window
"""
# Get summary stats, if they have not yet been triggered
if not self.summary_info:
self.get_summary_stats()
# Get contig boundary position
c = 0
xbars = []
for contig, seq in self.contigs.items():
contig_id = self._get_contig_id(contig)
self.contig_boundaries[contig_id] = [c, c + len(seq)]
c += len(seq)
xbars.append((contig_id, c, contig))
return xbars
|
def _get_window_labels(self, window):
"""Returns the mapping between sliding window points and their contigs,
and the x-axis position of contig
Parameters
----------
window : int
Size of the window.
Returns
-------
xbars : list
The x-axis position of the ending for each contig.
labels : list
The x-axis labels for each data point in the sliding window
"""
# Get summary stats, if they have not yet been triggered
if not self.summary_info:
self.get_summary_stats()
# Get contig boundary position
c = 0
xbars = []
for contig, seq in self.contigs.items():
contig_id = self._get_contig_id(contig)
self.contig_boundaries[contig_id] = [c, c + len(seq)]
c += len(seq)
xbars.append((contig_id, c, contig))
return xbars
|
[
"Returns",
"the",
"mapping",
"between",
"sliding",
"window",
"points",
"and",
"their",
"contigs",
"and",
"the",
"x",
"-",
"axis",
"position",
"of",
"contig"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/assembly_report.py#L284-L315
|
[
"def",
"_get_window_labels",
"(",
"self",
",",
"window",
")",
":",
"# Get summary stats, if they have not yet been triggered",
"if",
"not",
"self",
".",
"summary_info",
":",
"self",
".",
"get_summary_stats",
"(",
")",
"# Get contig boundary positon",
"c",
"=",
"0",
"xbars",
"=",
"[",
"]",
"for",
"contig",
",",
"seq",
"in",
"self",
".",
"contigs",
".",
"items",
"(",
")",
":",
"contig_id",
"=",
"self",
".",
"_get_contig_id",
"(",
"contig",
")",
"self",
".",
"contig_boundaries",
"[",
"contig_id",
"]",
"=",
"[",
"c",
",",
"c",
"+",
"len",
"(",
"seq",
")",
"]",
"c",
"+=",
"len",
"(",
"seq",
")",
"xbars",
".",
"append",
"(",
"(",
"contig_id",
",",
"c",
",",
"contig",
")",
")",
"return",
"xbars"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
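A sketch of the cumulative boundary bookkeeping in _get_window_labels, using two invented contig headers; get_contig_id is a trivial stand-in for Assembly._get_contig_id, whose real implementation is not shown in this record.

contigs = {"NODE_1_length_8_cov_10": "ACGTACGT", "NODE_2_length_4_cov_9": "GGCC"}

def get_contig_id(header):
    # Stand-in: keep only the contig number from the header
    return header.split("_")[1]

contig_boundaries = {}
xbars = []
c = 0
for contig, seq in contigs.items():
    cid = get_contig_id(contig)
    contig_boundaries[cid] = [c, c + len(seq)]  # start/end in concatenated coordinates
    c += len(seq)
    xbars.append((cid, c, contig))  # x position where this contig ends
print(xbars)  # [('1', 8, 'NODE_1_length_8_cov_10'), ('2', 12, 'NODE_2_length_4_cov_9')]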
test
|
Assembly._gc_prop
|
Get proportion of GC from a string
Parameters
----------
s : str
Arbitrary string
Returns
-------
x : float
GC proportion.
|
flowcraft/templates/assembly_report.py
|
def _gc_prop(s, length):
"""Get proportion of GC from a string
Parameters
----------
s : str
Arbitrary string
Returns
-------
x : float
GC proportion.
"""
gc = sum(map(s.count, ["c", "g"]))
return gc / length
|
def _gc_prop(s, length):
"""Get proportion of GC from a string
Parameters
----------
s : str
Arbitrary string
Returns
-------
x : float
GC proportion.
"""
gc = sum(map(s.count, ["c", "g"]))
return gc / length
|
[
"Get",
"proportion",
"of",
"GC",
"from",
"a",
"string"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/assembly_report.py#L318-L334
|
[
"def",
"_gc_prop",
"(",
"s",
",",
"length",
")",
":",
"gc",
"=",
"sum",
"(",
"map",
"(",
"s",
".",
"count",
",",
"[",
"\"c\"",
",",
"\"g\"",
"]",
")",
")",
"return",
"gc",
"/",
"length"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
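The arithmetic of _gc_prop on a short lowercase string (its callers lowercase the assembly sequence before counting).

s = "acgtgc"
gc = sum(map(s.count, ["c", "g"]))  # 4 G/C characters in 6 bases
print(gc / len(s))  # 0.666...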
test
|
Assembly.get_gc_sliding
|
Calculates a sliding window of the GC content for the assembly
Returns
-------
gc_res : list
List of GC proportion floats for each data point in the sliding
window
|
flowcraft/templates/assembly_report.py
|
def get_gc_sliding(self, window=2000):
"""Calculates a sliding window of the GC content for the assembly
Returns
-------
gc_res : list
List of GC proportion floats for each data point in the sliding
window
"""
gc_res = []
# Get complete sequence to calculate sliding window values
complete_seq = "".join(self.contigs.values()).lower()
for i in range(0, len(complete_seq), window):
seq_window = complete_seq[i:i + window]
# Get GC proportion
gc_res.append(round(self._gc_prop(seq_window, len(seq_window)), 2))
return gc_res
|
def get_gc_sliding(self, window=2000):
"""Calculates a sliding window of the GC content for the assembly
Returns
-------
gc_res : list
List of GC proportion floats for each data point in the sliding
window
"""
gc_res = []
# Get complete sequence to calculate sliding window values
complete_seq = "".join(self.contigs.values()).lower()
for i in range(0, len(complete_seq), window):
seq_window = complete_seq[i:i + window]
# Get GC proportion
gc_res.append(round(self._gc_prop(seq_window, len(seq_window)), 2))
return gc_res
|
[
"Calculates",
"a",
"sliding",
"window",
"of",
"the",
"GC",
"content",
"for",
"the",
"assembly"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/assembly_report.py#L336-L359
|
[
"def",
"get_gc_sliding",
"(",
"self",
",",
"window",
"=",
"2000",
")",
":",
"gc_res",
"=",
"[",
"]",
"# Get complete sequence to calculate sliding window values",
"complete_seq",
"=",
"\"\"",
".",
"join",
"(",
"self",
".",
"contigs",
".",
"values",
"(",
")",
")",
".",
"lower",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"complete_seq",
")",
",",
"window",
")",
":",
"seq_window",
"=",
"complete_seq",
"[",
"i",
":",
"i",
"+",
"window",
"]",
"# Get GC proportion",
"gc_res",
".",
"append",
"(",
"round",
"(",
"self",
".",
"_gc_prop",
"(",
"seq_window",
",",
"len",
"(",
"seq_window",
")",
")",
",",
"2",
")",
")",
"return",
"gc_res"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
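A minimal reproduction of the non-overlapping window loop in get_gc_sliding, with a tiny invented sequence and window size so the output stays readable; gc_prop re-implements _gc_prop locally.

def gc_prop(s):
    return sum(map(s.count, ["c", "g"])) / len(s)

complete_seq = "ACGTACGTGGCCGGCC".lower()
window = 4

gc_res = []
# Step through the concatenated assembly in fixed, non-overlapping windows
for i in range(0, len(complete_seq), window):
    seq_window = complete_seq[i:i + window]
    gc_res.append(round(gc_prop(seq_window), 2))
print(gc_res)  # [0.5, 0.5, 1.0, 1.0]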
test
|
main
|
Main executor of the skesa template.
Parameters
----------
sample_id : str
Sample Identification string.
fastq_pair : list
Two element list containing the paired FastQ files.
clear : str
Can be either 'true' or 'false'. If 'true', the input fastq files will
be removed at the end of the run, IF they are in the working directory
|
flowcraft/templates/skesa.py
|
def main(sample_id, fastq_pair, clear):
"""Main executor of the skesa template.
Parameters
----------
sample_id : str
Sample Identification string.
fastq_pair : list
Two element list containing the paired FastQ files.
clear : str
Can be either 'true' or 'false'. If 'true', the input fastq files will
be removed at the end of the run, IF they are in the working directory
"""
logger.info("Starting skesa")
# Determine output file
if "_trim." in fastq_pair[0]:
sample_id += "_trim"
version = __get_version_skesa()["version"]
output_file = "{}_skesa{}.fasta".format(sample_id, version.replace(".", ""))
cli = [
"skesa",
"--fastq",
"{},{}".format(fastq_pair[0], fastq_pair[1]),
"--gz",
"--use_paired_ends",
"--cores",
"${task.cpus}"
]
logger.debug("Running Skesa subprocess with command: {}".format(cli))
with open(output_file, "w") as fh:
p = subprocess.Popen(cli, stdout=fh, stderr=PIPE)
stdout, stderr = p.communicate()
# Attempt to decode STDERR output from bytes. If unsuccessful, coerce to
# string
try:
stderr = stderr.decode("utf8")
stdout = stdout.decode("utf8")
except (UnicodeDecodeError, AttributeError):
stderr = str(stderr)
stdout = str(stdout)
logger.info("Finished Skesa subprocess with STDOUT:\\n"
"======================================\\n{}".format(stdout))
logger.info("Fished Skesa subprocess with STDERR:\\n"
"======================================\\n{}".format(stderr))
logger.info("Finished Skesa with return code: {}".format(
p.returncode))
# Remove input fastq files when clear option is specified.
# Only remove temporary input when the expected output exists.
if clear == "true" and os.path.exists(output_file):
clean_up(fastq_pair)
with open(".status", "w") as fh:
if p.returncode != 0:
fh.write("error")
raise SystemExit(p.returncode)
else:
fh.write("pass")
|
def main(sample_id, fastq_pair, clear):
"""Main executor of the skesa template.
Parameters
----------
sample_id : str
Sample Identification string.
fastq_pair : list
Two element list containing the paired FastQ files.
clear : str
Can be either 'true' or 'false'. If 'true', the input fastq files will
be removed at the end of the run, IF they are in the working directory
"""
logger.info("Starting skesa")
# Determine output file
if "_trim." in fastq_pair[0]:
sample_id += "_trim"
version = __get_version_skesa()["version"]
output_file = "{}_skesa{}.fasta".format(sample_id, version.replace(".", ""))
cli = [
"skesa",
"--fastq",
"{},{}".format(fastq_pair[0], fastq_pair[1]),
"--gz",
"--use_paired_ends",
"--cores",
"${task.cpus}"
]
logger.debug("Running Skesa subprocess with command: {}".format(cli))
with open(output_file, "w") as fh:
p = subprocess.Popen(cli, stdout=fh, stderr=PIPE)
stdout, stderr = p.communicate()
# Attempt to decode STDERR output from bytes. If unsuccessful, coerce to
# string
try:
stderr = stderr.decode("utf8")
stdout = stdout.decode("utf8")
except (UnicodeDecodeError, AttributeError):
stderr = str(stderr)
stdout = str(stdout)
logger.info("Finished Skesa subprocess with STDOUT:\\n"
"======================================\\n{}".format(stdout))
logger.info("Fished Skesa subprocess with STDERR:\\n"
"======================================\\n{}".format(stderr))
logger.info("Finished Skesa with return code: {}".format(
p.returncode))
# Remove input fastq files when clear option is specified.
# Only remove temporary input when the expected output exists.
if clear == "true" and os.path.exists(output_file):
clean_up(fastq_pair)
with open(".status", "w") as fh:
if p.returncode != 0:
fh.write("error")
raise SystemExit(p.returncode)
else:
fh.write("pass")
|
[
"Main",
"executor",
"of",
"the",
"skesa",
"template",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/skesa.py#L104-L168
|
[
"def",
"main",
"(",
"sample_id",
",",
"fastq_pair",
",",
"clear",
")",
":",
"logger",
".",
"info",
"(",
"\"Starting skesa\"",
")",
"# Determine output file",
"if",
"\"_trim.\"",
"in",
"fastq_pair",
"[",
"0",
"]",
":",
"sample_id",
"+=",
"\"_trim\"",
"version",
"=",
"__get_version_skesa",
"(",
")",
"[",
"\"version\"",
"]",
"output_file",
"=",
"\"{}_skesa{}.fasta\"",
".",
"format",
"(",
"sample_id",
",",
"version",
".",
"replace",
"(",
"\".\"",
",",
"\"\"",
")",
")",
"cli",
"=",
"[",
"\"skesa\"",
",",
"\"--fastq\"",
",",
"\"{},{}\"",
".",
"format",
"(",
"fastq_pair",
"[",
"0",
"]",
",",
"fastq_pair",
"[",
"1",
"]",
")",
",",
"\"--gz\"",
",",
"\"--use_paired_ends\"",
",",
"\"--cores\"",
",",
"\"${task.cpus}\"",
"]",
"logger",
".",
"debug",
"(",
"\"Running Skesa subprocess with command: {}\"",
".",
"format",
"(",
"cli",
")",
")",
"with",
"open",
"(",
"output_file",
",",
"\"w\"",
")",
"as",
"fh",
":",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cli",
",",
"stdout",
"=",
"fh",
",",
"stderr",
"=",
"PIPE",
")",
"stdout",
",",
"stderr",
"=",
"p",
".",
"communicate",
"(",
")",
"# Attempt to decode STDERR output from bytes. If unsuccessful, coerce to",
"# string",
"try",
":",
"stderr",
"=",
"stderr",
".",
"decode",
"(",
"\"utf8\"",
")",
"stdout",
"=",
"stdout",
".",
"decode",
"(",
"\"utf8\"",
")",
"except",
"(",
"UnicodeDecodeError",
",",
"AttributeError",
")",
":",
"stderr",
"=",
"str",
"(",
"stderr",
")",
"stdout",
"=",
"str",
"(",
"stdout",
")",
"logger",
".",
"info",
"(",
"\"Finished Skesa subprocess with STDOUT:\\\\n\"",
"\"======================================\\\\n{}\"",
".",
"format",
"(",
"stdout",
")",
")",
"logger",
".",
"info",
"(",
"\"Fished Skesa subprocess with STDERR:\\\\n\"",
"\"======================================\\\\n{}\"",
".",
"format",
"(",
"stderr",
")",
")",
"logger",
".",
"info",
"(",
"\"Finished Skesa with return code: {}\"",
".",
"format",
"(",
"p",
".",
"returncode",
")",
")",
"# Remove input fastq files when clear option is specified.",
"# Only remove temporary input when the expected output exists.",
"if",
"clear",
"==",
"\"true\"",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"output_file",
")",
":",
"clean_up",
"(",
"fastq_pair",
")",
"with",
"open",
"(",
"\".status\"",
",",
"\"w\"",
")",
"as",
"fh",
":",
"if",
"p",
".",
"returncode",
"!=",
"0",
":",
"fh",
".",
"write",
"(",
"\"error\"",
")",
"raise",
"SystemExit",
"(",
"p",
".",
"returncode",
")",
"else",
":",
"fh",
".",
"write",
"(",
"\"pass\"",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
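A generic sketch of the Popen pattern used in the skesa template above: stdout streamed straight to a file, stderr captured and decoded defensively. The command and output path are harmless placeholders, not a real skesa invocation.

import subprocess
from subprocess import PIPE

cli = ["python3", "-c", "print('>contig_1'); print('ACGT')"]  # placeholder command

with open("output.fasta", "w") as fh:
    p = subprocess.Popen(cli, stdout=fh, stderr=PIPE)
    _, stderr = p.communicate()

# stderr arrives as bytes; fall back to str() when decoding is not possible
try:
    stderr = stderr.decode("utf8")
except (UnicodeDecodeError, AttributeError):
    stderr = str(stderr)

print(p.returncode, repr(stderr))  # 0 '' on success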
test
|
write_json_report
|
Writes the report
Parameters
----------
data1
data2
Returns
-------
|
flowcraft/templates/fastqc_report.py
|
def write_json_report(sample_id, data1, data2):
"""Writes the report
Parameters
----------
data1
data2
Returns
-------
"""
parser_map = {
"base_sequence_quality": ">>Per base sequence quality",
"sequence_quality": ">>Per sequence quality scores",
"base_gc_content": ">>Per sequence GC content",
"base_n_content": ">>Per base N content",
"sequence_length_dist": ">>Sequence Length Distribution",
"per_base_sequence_content": ">>Per base sequence content"
}
json_dic = {
"plotData": [{
"sample": sample_id,
"data": {
"base_sequence_quality": {"status": None, "data": []},
"sequence_quality": {"status": None, "data": []},
"base_gc_content": {"status": None, "data": []},
"base_n_content": {"status": None, "data": []},
"sequence_length_dist": {"status": None, "data": []},
"per_base_sequence_content": {"status": None, "data": []}
}
}]
}
for cat, start_str in parser_map.items():
if cat == "per_base_sequence_content":
fs = 1
fe = 5
else:
fs = 1
fe = 2
report1, status1 = _get_quality_stats(data1, start_str,
field_start=fs, field_end=fe)
report2, status2 = _get_quality_stats(data2, start_str,
field_start=fs, field_end=fe)
status = None
for i in ["fail", "warn", "pass"]:
if i in [status1, status2]:
status = i
json_dic["plotData"][0]["data"][cat]["data"] = [report1, report2]
json_dic["plotData"][0]["data"][cat]["status"] = status
return json_dic
|
def write_json_report(sample_id, data1, data2):
"""Writes the report
Parameters
----------
data1
data2
Returns
-------
"""
parser_map = {
"base_sequence_quality": ">>Per base sequence quality",
"sequence_quality": ">>Per sequence quality scores",
"base_gc_content": ">>Per sequence GC content",
"base_n_content": ">>Per base N content",
"sequence_length_dist": ">>Sequence Length Distribution",
"per_base_sequence_content": ">>Per base sequence content"
}
json_dic = {
"plotData": [{
"sample": sample_id,
"data": {
"base_sequence_quality": {"status": None, "data": []},
"sequence_quality": {"status": None, "data": []},
"base_gc_content": {"status": None, "data": []},
"base_n_content": {"status": None, "data": []},
"sequence_length_dist": {"status": None, "data": []},
"per_base_sequence_content": {"status": None, "data": []}
}
}]
}
for cat, start_str in parser_map.items():
if cat == "per_base_sequence_content":
fs = 1
fe = 5
else:
fs = 1
fe = 2
report1, status1 = _get_quality_stats(data1, start_str,
field_start=fs, field_end=fe)
report2, status2 = _get_quality_stats(data2, start_str,
field_start=fs, field_end=fe)
status = None
for i in ["fail", "warn", "pass"]:
if i in [status1, status2]:
status = i
json_dic["plotData"][0]["data"][cat]["data"] = [report1, report2]
json_dic["plotData"][0]["data"][cat]["status"] = status
return json_dic
|
[
"Writes",
"the",
"report"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/fastqc_report.py#L131-L189
|
[
"def",
"write_json_report",
"(",
"sample_id",
",",
"data1",
",",
"data2",
")",
":",
"parser_map",
"=",
"{",
"\"base_sequence_quality\"",
":",
"\">>Per base sequence quality\"",
",",
"\"sequence_quality\"",
":",
"\">>Per sequence quality scores\"",
",",
"\"base_gc_content\"",
":",
"\">>Per sequence GC content\"",
",",
"\"base_n_content\"",
":",
"\">>Per base N content\"",
",",
"\"sequence_length_dist\"",
":",
"\">>Sequence Length Distribution\"",
",",
"\"per_base_sequence_content\"",
":",
"\">>Per base sequence content\"",
"}",
"json_dic",
"=",
"{",
"\"plotData\"",
":",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"data\"",
":",
"{",
"\"base_sequence_quality\"",
":",
"{",
"\"status\"",
":",
"None",
",",
"\"data\"",
":",
"[",
"]",
"}",
",",
"\"sequence_quality\"",
":",
"{",
"\"status\"",
":",
"None",
",",
"\"data\"",
":",
"[",
"]",
"}",
",",
"\"base_gc_content\"",
":",
"{",
"\"status\"",
":",
"None",
",",
"\"data\"",
":",
"[",
"]",
"}",
",",
"\"base_n_content\"",
":",
"{",
"\"status\"",
":",
"None",
",",
"\"data\"",
":",
"[",
"]",
"}",
",",
"\"sequence_length_dist\"",
":",
"{",
"\"status\"",
":",
"None",
",",
"\"data\"",
":",
"[",
"]",
"}",
",",
"\"per_base_sequence_content\"",
":",
"{",
"\"status\"",
":",
"None",
",",
"\"data\"",
":",
"[",
"]",
"}",
"}",
"}",
"]",
"}",
"for",
"cat",
",",
"start_str",
"in",
"parser_map",
".",
"items",
"(",
")",
":",
"if",
"cat",
"==",
"\"per_base_sequence_content\"",
":",
"fs",
"=",
"1",
"fe",
"=",
"5",
"else",
":",
"fs",
"=",
"1",
"fe",
"=",
"2",
"report1",
",",
"status1",
"=",
"_get_quality_stats",
"(",
"data1",
",",
"start_str",
",",
"field_start",
"=",
"fs",
",",
"field_end",
"=",
"fe",
")",
"report2",
",",
"status2",
"=",
"_get_quality_stats",
"(",
"data2",
",",
"start_str",
",",
"field_start",
"=",
"fs",
",",
"field_end",
"=",
"fe",
")",
"status",
"=",
"None",
"for",
"i",
"in",
"[",
"\"fail\"",
",",
"\"warn\"",
",",
"\"pass\"",
"]",
":",
"if",
"i",
"in",
"[",
"status1",
",",
"status2",
"]",
":",
"status",
"=",
"i",
"json_dic",
"[",
"\"plotData\"",
"]",
"[",
"0",
"]",
"[",
"\"data\"",
"]",
"[",
"cat",
"]",
"[",
"\"data\"",
"]",
"=",
"[",
"report1",
",",
"report2",
"]",
"json_dic",
"[",
"\"plotData\"",
"]",
"[",
"0",
"]",
"[",
"\"data\"",
"]",
"[",
"cat",
"]",
"[",
"\"status\"",
"]",
"=",
"status",
"return",
"json_dic"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
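The status-reconciliation loop from write_json_report in isolation, wrapped in a hypothetical helper so it can be exercised on invented per-pair statuses; because the loop walks fail, warn and pass in that order, the last matching value is the one kept.

def combined_status(status1, status2):
    # Keep the last matching entry in the fixed fail -> warn -> pass order
    status = None
    for i in ["fail", "warn", "pass"]:
        if i in [status1, status2]:
            status = i
    return status

print(combined_status("fail", "pass"))  # 'pass' (later entry in the order wins)
print(combined_status("warn", "warn"))  # 'warn'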
test
|
get_trim_index
|
Returns the trim index from a ``bool`` list
Provided with a list of ``bool`` elements (``[False, False, True, True]``),
this function will assess the index of the list that minimizes the number
of True elements (biased positions) at the extremities. To do so,
it will iterate over the boolean list and find an index position where
there are two consecutive ``False`` elements after a ``True`` element. This
will be considered as an optimal trim position. For example, in the
following list::
[True, True, False, True, True, False, False, False, False, ...]
The optimal trim index will be the 4th position, since it is the first
occurrence of a ``True`` element with two False elements after it.
If the provided ``bool`` list has no ``True`` elements, then the 0 index is
returned.
Parameters
----------
biased_list: list
List of ``bool`` elements, where ``True`` means a biased site.
Returns
-------
x : index position of the biased list for the optimal trim.
|
flowcraft/templates/fastqc_report.py
|
def get_trim_index(biased_list):
"""Returns the trim index from a ``bool`` list
Provided with a list of ``bool`` elements (``[False, False, True, True]``),
this function will assess the index of the list that minimizes the number
of True elements (biased positions) at the extremities. To do so,
it will iterate over the boolean list and find an index position where
there are two consecutive ``False`` elements after a ``True`` element. This
will be considered as an optimal trim position. For example, in the
following list::
[True, True, False, True, True, False, False, False, False, ...]
The optimal trim index will be the 4th position, since it is the first
occurrence of a ``True`` element with two False elements after it.
If the provided ``bool`` list has no ``True`` elements, then the 0 index is
returned.
Parameters
----------
biased_list: list
List of ``bool`` elements, where ``True`` means a biased site.
Returns
-------
x : index position of the biased list for the optimal trim.
"""
# Return index 0 if there are no biased positions
if set(biased_list) == {False}:
return 0
if set(biased_list[:5]) == {False}:
return 0
# Iterate over the biased_list array. Keep the iteration going until
# we find a biased position with the two following positions unbiased
# (e.g.: True, False, False).
# When this condition is verified, return the last biased position
# index for subsequent trimming.
for i, val in enumerate(biased_list):
if val and set(biased_list[i+1:i+3]) == {False}:
return i + 1
# If the previous iteration could not find an index to trim, it means
# that the whole list is basically biased. Return the length of the
# biased_list
return len(biased_list)
|
def get_trim_index(biased_list):
"""Returns the trim index from a ``bool`` list
Provided with a list of ``bool`` elements (``[False, False, True, True]``),
this function will assess the index of the list that minimizes the number
of True elements (biased positions) at the extremities. To do so,
it will iterate over the boolean list and find an index position where
there are two consecutive ``False`` elements after a ``True`` element. This
will be considered as an optimal trim position. For example, in the
following list::
[True, True, False, True, True, False, False, False, False, ...]
The optimal trim index will be the 4th position, since it is the first
occurrence of a ``True`` element with two False elements after it.
If the provided ``bool`` list has no ``True`` elements, then the 0 index is
returned.
Parameters
----------
biased_list: list
List of ``bool`` elements, where ``True`` means a biased site.
Returns
-------
x : index position of the biased list for the optimal trim.
"""
# Return index 0 if there are no biased positions
if set(biased_list) == {False}:
return 0
if set(biased_list[:5]) == {False}:
return 0
# Iterate over the biased_list array. Keep the iteration going until
# we find a biased position with the two following positions unbiased
# (e.g.: True, False, False).
# When this condition is verified, return the last biased position
# index for subsequent trimming.
for i, val in enumerate(biased_list):
if val and set(biased_list[i+1:i+3]) == {False}:
return i + 1
# If the previous iteration could not find an index to trim, it means
# that the whole list is basically biased. Return the length of the
# biased_list
return len(biased_list)
|
[
"Returns",
"the",
"trim",
"index",
"from",
"a",
"bool",
"list"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/fastqc_report.py#L192-L241
|
[
"def",
"get_trim_index",
"(",
"biased_list",
")",
":",
"# Return index 0 if there are no biased positions",
"if",
"set",
"(",
"biased_list",
")",
"==",
"{",
"False",
"}",
":",
"return",
"0",
"if",
"set",
"(",
"biased_list",
"[",
":",
"5",
"]",
")",
"==",
"{",
"False",
"}",
":",
"return",
"0",
"# Iterate over the biased_list array. Keep the iteration going until",
"# we find a biased position with the two following positions unbiased",
"# (e.g.: True, False, False).",
"# When this condition is verified, return the last biased position",
"# index for subsequent trimming.",
"for",
"i",
",",
"val",
"in",
"enumerate",
"(",
"biased_list",
")",
":",
"if",
"val",
"and",
"set",
"(",
"biased_list",
"[",
"i",
"+",
"1",
":",
"i",
"+",
"3",
"]",
")",
"==",
"{",
"False",
"}",
":",
"return",
"i",
"+",
"1",
"# If the previous iteration could not find and index to trim, it means",
"# that the whole list is basically biased. Return the length of the",
"# biased_list",
"return",
"len",
"(",
"biased_list",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
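A standalone restatement (not an import from fastqc_report.py) of the trim-index logic described above, applied to the example list from the docstring; the helper name trim_index is made up for this sketch.

def trim_index(biased_list):
    # No biased positions at all, or none in the first five: no trimming needed
    if set(biased_list) == {False} or set(biased_list[:5]) == {False}:
        return 0
    # The first biased position followed by two unbiased ones marks the cut point
    for i, val in enumerate(biased_list):
        if val and set(biased_list[i + 1:i + 3]) == {False}:
            return i + 1
    # The whole list is biased
    return len(biased_list)

example = [True, True, False, True, True, False, False, False, False]
print(trim_index(example))  # 5: everything up to and including index 4 is trimmed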
test
|
trim_range
|
Assess the optimal trim range for a given FastQC data file.
This function will parse a single FastQC data file, namely the
*'Per base sequence content'* category. It will retrieve the A/T and G/C
content for each nucleotide position in the reads, and check whether the
G/C and A/T proportions are between 80% and 120%. If they are, that
nucleotide position is marked as biased for future removal.
Parameters
----------
data_file: str
Path to FastQC data file.
Returns
-------
trim_nt: list
List containing the range with the best trimming positions for the
corresponding FastQ file. The first element is the 5' end trim index
and the second element is the 3' end trim index.
|
flowcraft/templates/fastqc_report.py
|
def trim_range(data_file):
"""Assess the optimal trim range for a given FastQC data file.
This function will parse a single FastQC data file, namely the
*'Per base sequence content'* category. It will retrieve the A/T and G/C
content for each nucleotide position in the reads, and check whether the
G/C and A/T proportions are between 80% and 120%. If they are, that
nucleotide position is marked as biased for future removal.
Parameters
----------
data_file: str
Path to FastQC data file.
Returns
-------
trim_nt: list
List containing the range with the best trimming positions for the
corresponding FastQ file. The first element is the 5' end trim index
and the second element is the 3' end trim index.
"""
logger.debug("Starting trim range assessment")
# Target string for nucleotide bias assessment
target_nuc_bias = ">>Per base sequence content"
logger.debug("Target string to start nucleotide bias assessment set to "
"{}".format(target_nuc_bias))
# This flag will become True when gathering base proportion data
# from file.
gather = False
# This variable will store a boolean array on the biased/unbiased
# positions. Biased position will be True, while unbiased positions
# will be False
biased = []
with open(data_file) as fh:
for line in fh:
# Start assessment of nucleotide bias
if line.startswith(target_nuc_bias):
# Skip comment line
logger.debug("Found target string at line: {}".format(line))
next(fh)
gather = True
# Stop assessment when reaching end of target module
elif line.startswith(">>END_MODULE") and gather:
logger.debug("Stopping parsing at line: {}".format(line))
break
elif gather:
# Get proportions of each nucleotide
g, a, t, c = [float(x) for x in line.strip().split()[1:]]
# Get 'GC' and 'AT' content
gc = (g + 0.1) / (c + 0.1)
at = (a + 0.1) / (t + 0.1)
# Assess bias
if 0.8 <= gc <= 1.2 and 0.8 <= at <= 1.2:
biased.append(False)
else:
biased.append(True)
logger.debug("Finished bias assessment with result: {}".format(biased))
# Split biased list in half to get the 5' and 3' ends
biased_5end, biased_3end = biased[:int(len(biased)/2)],\
biased[int(len(biased)/2):][::-1]
logger.debug("Getting optimal trim range from biased list")
trim_nt = [0, 0]
# Assess number of nucleotides to clip at 5' end
trim_nt[0] = get_trim_index(biased_5end)
logger.debug("Optimal trim range at 5' end set to: {}".format(trim_nt[0]))
# Assess number of nucleotides to clip at 3' end
trim_nt[1] = len(biased) - get_trim_index(biased_3end)
logger.debug("Optimal trim range at 3' end set to: {}".format(trim_nt[1]))
return trim_nt
|
def trim_range(data_file):
"""Assess the optimal trim range for a given FastQC data file.
This function will parse a single FastQC data file, namely the
*'Per base sequence content'* category. It will retrieve the A/T and G/C
content for each nucleotide position in the reads, and check whether the
G/C and A/T proportions are between 80% and 120%. If they are, that
nucleotide position is marked as biased for future removal.
Parameters
----------
data_file: str
Path to FastQC data file.
Returns
-------
trim_nt: list
List containing the range with the best trimming positions for the
corresponding FastQ file. The first element is the 5' end trim index
and the second element is the 3' end trim index.
"""
logger.debug("Starting trim range assessment")
# Target string for nucleotide bias assessment
target_nuc_bias = ">>Per base sequence content"
logger.debug("Target string to start nucleotide bias assessment set to "
"{}".format(target_nuc_bias))
# This flag will become True when gathering base proportion data
# from file.
gather = False
# This variable will store a boolean array on the biased/unbiased
# positions. Biased position will be True, while unbiased positions
# will be False
biased = []
with open(data_file) as fh:
for line in fh:
# Start assessment of nucleotide bias
if line.startswith(target_nuc_bias):
# Skip comment line
logger.debug("Found target string at line: {}".format(line))
next(fh)
gather = True
# Stop assessment when reaching end of target module
elif line.startswith(">>END_MODULE") and gather:
logger.debug("Stopping parsing at line: {}".format(line))
break
elif gather:
# Get proportions of each nucleotide
g, a, t, c = [float(x) for x in line.strip().split()[1:]]
# Get 'GC' and 'AT' content
gc = (g + 0.1) / (c + 0.1)
at = (a + 0.1) / (t + 0.1)
# Assess bias
if 0.8 <= gc <= 1.2 and 0.8 <= at <= 1.2:
biased.append(False)
else:
biased.append(True)
logger.debug("Finished bias assessment with result: {}".format(biased))
# Split biased list in half to get the 5' and 3' ends
biased_5end, biased_3end = biased[:int(len(biased)/2)],\
biased[int(len(biased)/2):][::-1]
logger.debug("Getting optimal trim range from biased list")
trim_nt = [0, 0]
# Assess number of nucleotides to clip at 5' end
trim_nt[0] = get_trim_index(biased_5end)
logger.debug("Optimal trim range at 5' end set to: {}".format(trim_nt[0]))
# Assess number of nucleotides to clip at 3' end
trim_nt[1] = len(biased) - get_trim_index(biased_3end)
logger.debug("Optimal trim range at 3' end set to: {}".format(trim_nt[1]))
return trim_nt
|
[
"Assess",
"the",
"optimal",
"trim",
"range",
"for",
"a",
"given",
"FastQC",
"data",
"file",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/fastqc_report.py#L244-L321
|
[
"def",
"trim_range",
"(",
"data_file",
")",
":",
"logger",
".",
"debug",
"(",
"\"Starting trim range assessment\"",
")",
"# Target string for nucleotide bias assessment",
"target_nuc_bias",
"=",
"\">>Per base sequence content\"",
"logger",
".",
"debug",
"(",
"\"Target string to start nucleotide bias assessment set to \"",
"\"{}\"",
".",
"format",
"(",
"target_nuc_bias",
")",
")",
"# This flag will become True when gathering base proportion data",
"# from file.",
"gather",
"=",
"False",
"# This variable will store a boolean array on the biased/unbiased",
"# positions. Biased position will be True, while unbiased positions",
"# will be False",
"biased",
"=",
"[",
"]",
"with",
"open",
"(",
"data_file",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"# Start assessment of nucleotide bias",
"if",
"line",
".",
"startswith",
"(",
"target_nuc_bias",
")",
":",
"# Skip comment line",
"logger",
".",
"debug",
"(",
"\"Found target string at line: {}\"",
".",
"format",
"(",
"line",
")",
")",
"next",
"(",
"fh",
")",
"gather",
"=",
"True",
"# Stop assessment when reaching end of target module",
"elif",
"line",
".",
"startswith",
"(",
"\">>END_MODULE\"",
")",
"and",
"gather",
":",
"logger",
".",
"debug",
"(",
"\"Stopping parsing at line: {}\"",
".",
"format",
"(",
"line",
")",
")",
"break",
"elif",
"gather",
":",
"# Get proportions of each nucleotide",
"g",
",",
"a",
",",
"t",
",",
"c",
"=",
"[",
"float",
"(",
"x",
")",
"for",
"x",
"in",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
"]",
"# Get 'GC' and 'AT content",
"gc",
"=",
"(",
"g",
"+",
"0.1",
")",
"/",
"(",
"c",
"+",
"0.1",
")",
"at",
"=",
"(",
"a",
"+",
"0.1",
")",
"/",
"(",
"t",
"+",
"0.1",
")",
"# Assess bias",
"if",
"0.8",
"<=",
"gc",
"<=",
"1.2",
"and",
"0.8",
"<=",
"at",
"<=",
"1.2",
":",
"biased",
".",
"append",
"(",
"False",
")",
"else",
":",
"biased",
".",
"append",
"(",
"True",
")",
"logger",
".",
"debug",
"(",
"\"Finished bias assessment with result: {}\"",
".",
"format",
"(",
"biased",
")",
")",
"# Split biased list in half to get the 5' and 3' ends",
"biased_5end",
",",
"biased_3end",
"=",
"biased",
"[",
":",
"int",
"(",
"len",
"(",
"biased",
")",
"/",
"2",
")",
"]",
",",
"biased",
"[",
"int",
"(",
"len",
"(",
"biased",
")",
"/",
"2",
")",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
"logger",
".",
"debug",
"(",
"\"Getting optimal trim range from biased list\"",
")",
"trim_nt",
"=",
"[",
"0",
",",
"0",
"]",
"# Assess number of nucleotides to clip at 5' end",
"trim_nt",
"[",
"0",
"]",
"=",
"get_trim_index",
"(",
"biased_5end",
")",
"logger",
".",
"debug",
"(",
"\"Optimal trim range at 5' end set to: {}\"",
".",
"format",
"(",
"trim_nt",
"[",
"0",
"]",
")",
")",
"# Assess number of nucleotides to clip at 3' end",
"trim_nt",
"[",
"1",
"]",
"=",
"len",
"(",
"biased",
")",
"-",
"get_trim_index",
"(",
"biased_3end",
")",
"logger",
".",
"debug",
"(",
"\"Optimal trim range at 3' end set to: {}\"",
".",
"format",
"(",
"trim_nt",
"[",
"1",
"]",
")",
")",
"return",
"trim_nt"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
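The per-position bias test from trim_range in isolation, using invented base percentages; the 0.1 pseudocounts and the 0.8 to 1.2 acceptance band follow the code above.

# Hypothetical per-base percentages for one read position (G, A, T, C)
g, a, t, c = 24.0, 26.0, 25.0, 25.0

# The small pseudocounts avoid a division by zero when a base is absent
gc = (g + 0.1) / (c + 0.1)
at = (a + 0.1) / (t + 0.1)

# Both ratios inside the band means the position is considered unbiased
biased = not (0.8 <= gc <= 1.2 and 0.8 <= at <= 1.2)
print(round(gc, 2), round(at, 2), biased)  # 0.96 1.04 False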
test
|
get_sample_trim
|
Get the optimal read trim range from data files of paired FastQ reads.
Given the FastQC data report files for paired-end FastQ reads, this
function will assess the optimal trim range for the 3' and 5' ends of
the paired-end reads. This assessment will be based on the *'Per sequence
GC content'*.
Parameters
----------
p1_data: str
Path to FastQC data report file from pair 1
p2_data: str
Path to FastQC data report file from pair 2
Returns
-------
optimal_5trim: int
Optimal trim index for the 5' end of the reads
optima_3trim: int
Optimal trim index for the 3' end of the reads
See Also
--------
trim_range
|
flowcraft/templates/fastqc_report.py
|
def get_sample_trim(p1_data, p2_data):
"""Get the optimal read trim range from data files of paired FastQ reads.
Given the FastQC data report files for paired-end FastQ reads, this
function will assess the optimal trim range for the 3' and 5' ends of
the paired-end reads. This assessment will be based on the *'Per sequence
GC content'*.
Parameters
----------
p1_data: str
Path to FastQC data report file from pair 1
p2_data: str
Path to FastQC data report file from pair 2
Returns
-------
optimal_5trim: int
Optimal trim index for the 5' end of the reads
optima_3trim: int
Optimal trim index for the 3' end of the reads
See Also
--------
trim_range
"""
sample_ranges = [trim_range(x) for x in [p1_data, p2_data]]
# Get the optimal trim position for 5' end
optimal_5trim = max([x[0] for x in sample_ranges])
# Get optimal trim position for 3' end
optimal_3trim = min([x[1] for x in sample_ranges])
return optimal_5trim, optimal_3trim
|
def get_sample_trim(p1_data, p2_data):
"""Get the optimal read trim range from data files of paired FastQ reads.
Given the FastQC data report files for paired-end FastQ reads, this
function will assess the optimal trim range for the 3' and 5' ends of
the paired-end reads. This assessment will be based on the *'Per sequence
GC content'*.
Parameters
----------
p1_data: str
Path to FastQC data report file from pair 1
p2_data: str
Path to FastQC data report file from pair 2
Returns
-------
optimal_5trim: int
Optimal trim index for the 5' end of the reads
optima_3trim: int
Optimal trim index for the 3' end of the reads
See Also
--------
trim_range
"""
sample_ranges = [trim_range(x) for x in [p1_data, p2_data]]
# Get the optimal trim position for 5' end
optimal_5trim = max([x[0] for x in sample_ranges])
# Get optimal trim position for 3' end
optimal_3trim = min([x[1] for x in sample_ranges])
return optimal_5trim, optimal_3trim
|
[
"Get",
"the",
"optimal",
"read",
"trim",
"range",
"from",
"data",
"files",
"of",
"paired",
"FastQ",
"reads",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/fastqc_report.py#L324-L359
|
[
"def",
"get_sample_trim",
"(",
"p1_data",
",",
"p2_data",
")",
":",
"sample_ranges",
"=",
"[",
"trim_range",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"p1_data",
",",
"p2_data",
"]",
"]",
"# Get the optimal trim position for 5' end",
"optimal_5trim",
"=",
"max",
"(",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"sample_ranges",
"]",
")",
"# Get optimal trim position for 3' end",
"optimal_3trim",
"=",
"min",
"(",
"[",
"x",
"[",
"1",
"]",
"for",
"x",
"in",
"sample_ranges",
"]",
")",
"return",
"optimal_5trim",
",",
"optimal_3trim"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
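The max/min reconciliation performed by get_sample_trim, shown on two invented [5' trim, 3' trim] ranges.

# Hypothetical trim ranges returned by trim_range() for read 1 and read 2
sample_ranges = [[3, 148], [5, 150]]

optimal_5trim = max(x[0] for x in sample_ranges)  # keep the larger 5' cut: 5
optimal_3trim = min(x[1] for x in sample_ranges)  # keep the smaller 3' cut: 148
print(optimal_5trim, optimal_3trim)  # 5 148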
test
|
get_summary
|
Parses a FastQC summary report file and returns it as a dictionary.
This function parses a typical FastQC summary report file, retrieving
only the information on the first two columns. For instance, a line could
be::
'PASS Basic Statistics SH10762A_1.fastq.gz'
This parser will build a dictionary with the string in the second column
as a key and the QC result as the value. In this case, the returned
``dict`` would be something like::
{"Basic Statistics": "PASS"}
Parameters
----------
summary_file: str
Path to FastQC summary report.
Returns
-------
summary_info: :py:data:`OrderedDict`
Returns the information of the FastQC summary report as an ordered
dictionary, with the categories as strings and the QC result as values.
|
flowcraft/templates/fastqc_report.py
|
def get_summary(summary_file):
"""Parses a FastQC summary report file and returns it as a dictionary.
This function parses a typical FastQC summary report file, retrieving
only the information on the first two columns. For instance, a line could
be::
'PASS Basic Statistics SH10762A_1.fastq.gz'
This parser will build a dictionary with the string in the second column
as a key and the QC result as the value. In this case, the returned
``dict`` would be something like::
{"Basic Statistics": "PASS"}
Parameters
----------
summary_file: str
Path to FastQC summary report.
Returns
-------
summary_info: :py:data:`OrderedDict`
Returns the information of the FastQC summary report as an ordered
dictionary, with the categories as strings and the QC result as values.
"""
summary_info = OrderedDict()
logger.debug("Retrieving summary information from file: {}".format(
summary_file))
with open(summary_file) as fh:
for line in fh:
# Skip empty lines
if not line.strip():
continue
# Populate summary info
fields = [x.strip() for x in line.split("\t")]
summary_info[fields[1]] = fields[0]
logger.debug("Retrieved summary information from file: {}".format(
summary_info))
return summary_info
|
def get_summary(summary_file):
"""Parses a FastQC summary report file and returns it as a dictionary.
This function parses a typical FastQC summary report file, retrieving
only the information on the first two columns. For instance, a line could
be::
'PASS Basic Statistics SH10762A_1.fastq.gz'
This parser will build a dictionary with the string in the second column
as a key and the QC result as the value. In this case, the returned
``dict`` would be something like::
{"Basic Statistics": "PASS"}
Parameters
----------
summary_file: str
Path to FastQC summary report.
Returns
-------
summary_info: :py:data:`OrderedDict`
Returns the information of the FastQC summary report as an ordered
dictionary, with the categories as strings and the QC result as values.
"""
summary_info = OrderedDict()
logger.debug("Retrieving summary information from file: {}".format(
summary_file))
with open(summary_file) as fh:
for line in fh:
# Skip empty lines
if not line.strip():
continue
# Populate summary info
fields = [x.strip() for x in line.split("\t")]
summary_info[fields[1]] = fields[0]
logger.debug("Retrieved summary information from file: {}".format(
summary_info))
return summary_info
|
[
"Parses",
"a",
"FastQC",
"summary",
"report",
"file",
"and",
"returns",
"it",
"as",
"a",
"dictionary",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/fastqc_report.py#L362-L406
|
[
"def",
"get_summary",
"(",
"summary_file",
")",
":",
"summary_info",
"=",
"OrderedDict",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Retrieving summary information from file: {}\"",
".",
"format",
"(",
"summary_file",
")",
")",
"with",
"open",
"(",
"summary_file",
")",
"as",
"fh",
":",
"for",
"line",
"in",
"fh",
":",
"# Skip empty lines",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"# Populate summary info",
"fields",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"]",
"summary_info",
"[",
"fields",
"[",
"1",
"]",
"]",
"=",
"fields",
"[",
"0",
"]",
"logger",
".",
"debug",
"(",
"\"Retrieved summary information from file: {}\"",
".",
"format",
"(",
"summary_info",
")",
")",
"return",
"summary_info"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
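The two-column parse performed by get_summary, run on an in-memory stand-in for a FastQC summary file; the WARN line is invented, the PASS line follows the docstring example.

import io
from collections import OrderedDict

summary = io.StringIO(
    "PASS\tBasic Statistics\tSH10762A_1.fastq.gz\n"
    "WARN\tPer base sequence content\tSH10762A_1.fastq.gz\n"
)

summary_info = OrderedDict()
for line in summary:
    if not line.strip():
        continue
    fields = [x.strip() for x in line.split("\t")]
    summary_info[fields[1]] = fields[0]  # category -> QC result
print(dict(summary_info))
# {'Basic Statistics': 'PASS', 'Per base sequence content': 'WARN'}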
test
|
check_summary_health
|
Checks the health of a sample from the FastQC summary file.
Parses the FastQC summary file and tests whether the sample is good
or not. There are four categories that cannot fail, and two that
must pass in order for the sample to pass this check. If the sample fails
the quality checks, a list with the failing categories is also returned.
Categories that cannot fail::
fail_sensitive = [
"Per base sequence quality",
"Overrepresented sequences",
"Sequence Length Distribution",
"Per sequence GC content"
]
Categories that must pass::
must_pass = [
"Per base N content",
"Adapter Content"
]
Parameters
----------
summary_file: str
Path to FastQC summary file.
Returns
-------
health : bool
Returns ``True`` if the sample passes all tests. ``False`` if not.
failed : list
A list with the FastQC categories that failed the tests. Is empty
if the sample passes all tests.
warning : list
A list with the FastQC categories that raised warnings, if any.
|
flowcraft/templates/fastqc_report.py
|
def check_summary_health(summary_file, **kwargs):
"""Checks the health of a sample from the FastQC summary file.
Parses the FastQC summary file and tests whether the sample is good
or not. There are four categories that cannot fail, and two that
must pass in order for the sample to pass this check. If the sample fails
the quality checks, a list with the failing categories is also returned.
Categories that cannot fail::
fail_sensitive = [
"Per base sequence quality",
"Overrepresented sequences",
"Sequence Length Distribution",
"Per sequence GC content"
]
Categories that must pass::
must_pass = [
"Per base N content",
"Adapter Content"
]
Parameters
----------
summary_file: str
Path to FastQC summary file.
Returns
-------
health : bool
Returns ``True`` if the sample passes all tests. ``False`` if not.
failed : list
A list with the FastQC categories that failed the tests. Is empty
if the sample passes all tests.
warning : list
A list with the FastQC categories that raised warnings, if any.
"""
# Store the summary categories that cannot fail. If they fail, do not
# proceed with this sample
fail_sensitive = kwargs.get("fail_sensitive", [
"Per base sequence quality",
"Overrepresented sequences",
"Sequence Length Distribution",
"Per sequence GC content"
])
logger.debug("Fail sensitive categories: {}".format(fail_sensitive))
# Store summary categories that must pass. If they do not, do not proceed
# with that sample
must_pass = kwargs.get("must_pass", [
"Per base N content",
"Adapter Content"
])
logger.debug("Must pass categories: {}".format(must_pass))
warning_fail_sensitive = kwargs.get("warning_fail_sensitive", [
"Per base sequence quality",
"Overrepresented sequences",
])
warning_must_pass = kwargs.get("warning_must_pass", [
"Per base sequence content"
])
# Get summary dictionary
summary_info = get_summary(summary_file)
# This flag will change to False if one of the tests fails
health = True
# List of failing categories
failed = []
# List of warning categories
warning = []
for cat, test in summary_info.items():
logger.debug("Assessing category {} with result {}".format(cat, test))
# FAILURES
# Check for fail sensitive
if cat in fail_sensitive and test == "FAIL":
health = False
failed.append("{}:{}".format(cat, test))
logger.error("Category {} failed a fail sensitive "
"category".format(cat))
# Check for must pass
if cat in must_pass and test != "PASS":
health = False
failed.append("{}:{}".format(cat, test))
logger.error("Category {} failed a must pass category".format(
cat))
# WARNINGS
# Check for fail sensitive
if cat in warning_fail_sensitive and test == "FAIL":
warning.append("Failed category: {}".format(cat))
logger.warning("Category {} flagged at a fail sensitive "
"category".format(cat))
if cat in warning_must_pass and test != "PASS":
warning.append("Did not pass category: {}".format(cat))
logger.warning("Category {} flagged at a must pass "
"category".format(cat))
# Passed all tests
return health, failed, warning
|
def check_summary_health(summary_file, **kwargs):
"""Checks the health of a sample from the FastQC summary file.
Parses the FastQC summary file and tests whether the sample is good
or not. There are four categories that cannot fail, and two that
must pass in order for the sample to pass this check. If the sample fails
the quality checks, a list with the failing categories is also returned.
Categories that cannot fail::
fail_sensitive = [
"Per base sequence quality",
"Overrepresented sequences",
"Sequence Length Distribution",
"Per sequence GC content"
]
Categories that must pass::
must_pass = [
"Per base N content",
"Adapter Content"
]
Parameters
----------
summary_file: str
Path to FastQC summary file.
Returns
-------
health : bool
Returns ``True`` if the sample passes all tests. ``False`` if not.
failed : list
A list with the FastQC categories that failed the tests. Is empty
if the sample passes all tests.
warning : list
A list with the FastQC categories that raised warnings, if any.
"""
# Store the summary categories that cannot fail. If they fail, do not
# proceed with this sample
fail_sensitive = kwargs.get("fail_sensitive", [
"Per base sequence quality",
"Overrepresented sequences",
"Sequence Length Distribution",
"Per sequence GC content"
])
logger.debug("Fail sensitive categories: {}".format(fail_sensitive))
# Store summary categories that must pass. If they do not, do not proceed
# with that sample
must_pass = kwargs.get("must_pass", [
"Per base N content",
"Adapter Content"
])
logger.debug("Must pass categories: {}".format(must_pass))
warning_fail_sensitive = kwargs.get("warning_fail_sensitive", [
"Per base sequence quality",
"Overrepresented sequences",
])
warning_must_pass = kwargs.get("warning_must_pass", [
"Per base sequence content"
])
# Get summary dictionary
summary_info = get_summary(summary_file)
# This flag will change to False if one of the tests fails
health = True
# List of failing categories
failed = []
# List of warning categories
warning = []
for cat, test in summary_info.items():
logger.debug("Assessing category {} with result {}".format(cat, test))
# FAILURES
# Check for fail sensitive
if cat in fail_sensitive and test == "FAIL":
health = False
failed.append("{}:{}".format(cat, test))
logger.error("Category {} failed a fail sensitive "
"category".format(cat))
# Check for must pass
if cat in must_pass and test != "PASS":
health = False
failed.append("{}:{}".format(cat, test))
logger.error("Category {} failed a must pass category".format(
cat))
# WARNINGS
# Check for fail sensitive
if cat in warning_fail_sensitive and test == "FAIL":
warning.append("Failed category: {}".format(cat))
logger.warning("Category {} flagged at a fail sensitive "
"category".format(cat))
if cat in warning_must_pass and test != "PASS":
warning.append("Did not pass category: {}".format(cat))
logger.warning("Category {} flagged at a must pass "
"category".format(cat))
# Passed all tests
return health, failed, warning
|
[
"Checks",
"the",
"health",
"of",
"a",
"sample",
"from",
"the",
"FastQC",
"summary",
"file",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/fastqc_report.py#L409-L517
|
[
"def",
"check_summary_health",
"(",
"summary_file",
",",
"*",
"*",
"kwargs",
")",
":",
"# Store the summary categories that cannot fail. If they fail, do not",
"# proceed with this sample",
"fail_sensitive",
"=",
"kwargs",
".",
"get",
"(",
"\"fail_sensitive\"",
",",
"[",
"\"Per base sequence quality\"",
",",
"\"Overrepresented sequences\"",
",",
"\"Sequence Length Distribution\"",
",",
"\"Per sequence GC content\"",
"]",
")",
"logger",
".",
"debug",
"(",
"\"Fail sensitive categories: {}\"",
".",
"format",
"(",
"fail_sensitive",
")",
")",
"# Store summary categories that must pass. If they do not, do not proceed",
"# with that sample",
"must_pass",
"=",
"kwargs",
".",
"get",
"(",
"\"must_pass\"",
",",
"[",
"\"Per base N content\"",
",",
"\"Adapter Content\"",
"]",
")",
"logger",
".",
"debug",
"(",
"\"Must pass categories: {}\"",
".",
"format",
"(",
"must_pass",
")",
")",
"warning_fail_sensitive",
"=",
"kwargs",
".",
"get",
"(",
"\"warning_fail_sensitive\"",
",",
"[",
"\"Per base sequence quality\"",
",",
"\"Overrepresented sequences\"",
",",
"]",
")",
"warning_must_pass",
"=",
"kwargs",
".",
"get",
"(",
"\"warning_must_pass\"",
",",
"[",
"\"Per base sequence content\"",
"]",
")",
"# Get summary dictionary",
"summary_info",
"=",
"get_summary",
"(",
"summary_file",
")",
"# This flag will change to False if one of the tests fails",
"health",
"=",
"True",
"# List of failing categories",
"failed",
"=",
"[",
"]",
"# List of warning categories",
"warning",
"=",
"[",
"]",
"for",
"cat",
",",
"test",
"in",
"summary_info",
".",
"items",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Assessing category {} with result {}\"",
".",
"format",
"(",
"cat",
",",
"test",
")",
")",
"# FAILURES",
"# Check for fail sensitive",
"if",
"cat",
"in",
"fail_sensitive",
"and",
"test",
"==",
"\"FAIL\"",
":",
"health",
"=",
"False",
"failed",
".",
"append",
"(",
"\"{}:{}\"",
".",
"format",
"(",
"cat",
",",
"test",
")",
")",
"logger",
".",
"error",
"(",
"\"Category {} failed a fail sensitive \"",
"\"category\"",
".",
"format",
"(",
"cat",
")",
")",
"# Check for must pass",
"if",
"cat",
"in",
"must_pass",
"and",
"test",
"!=",
"\"PASS\"",
":",
"health",
"=",
"False",
"failed",
".",
"append",
"(",
"\"{}:{}\"",
".",
"format",
"(",
"cat",
",",
"test",
")",
")",
"logger",
".",
"error",
"(",
"\"Category {} failed a must pass category\"",
".",
"format",
"(",
"cat",
")",
")",
"# WARNINGS",
"# Check for fail sensitive",
"if",
"cat",
"in",
"warning_fail_sensitive",
"and",
"test",
"==",
"\"FAIL\"",
":",
"warning",
".",
"append",
"(",
"\"Failed category: {}\"",
".",
"format",
"(",
"cat",
")",
")",
"logger",
".",
"warning",
"(",
"\"Category {} flagged at a fail sensitive \"",
"\"category\"",
".",
"format",
"(",
"cat",
")",
")",
"if",
"cat",
"in",
"warning_must_pass",
"and",
"test",
"!=",
"\"PASS\"",
":",
"warning",
".",
"append",
"(",
"\"Did not pass category: {}\"",
".",
"format",
"(",
"cat",
")",
")",
"logger",
".",
"warning",
"(",
"\"Category {} flagged at a must pass \"",
"\"category\"",
".",
"format",
"(",
"cat",
")",
")",
"# Passed all tests",
"return",
"health",
",",
"failed",
",",
"warning"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
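A minimal sketch of how the default thresholds in check_summary_health above interact with a parsed summary. The summary values are hypothetical, and the loop reproduces only the failure checks of the original function (not the warning checks):

from collections import OrderedDict

# Hypothetical parsed summary, shaped as get_summary would return it
summary_info = OrderedDict([
    ("Per base sequence quality", "PASS"),
    ("Per base N content", "WARN"),
    ("Adapter Content", "PASS"),
])

fail_sensitive = ["Per base sequence quality", "Overrepresented sequences",
                  "Sequence Length Distribution", "Per sequence GC content"]
must_pass = ["Per base N content", "Adapter Content"]

health, failed = True, []
for cat, test in summary_info.items():
    # A FAIL in a fail-sensitive category fails the sample
    if cat in fail_sensitive and test == "FAIL":
        health = False
        failed.append("{}:{}".format(cat, test))
    # Anything other than PASS in a must-pass category also fails it
    if cat in must_pass and test != "PASS":
        health = False
        failed.append("{}:{}".format(cat, test))

print(health, failed)   # False ['Per base N content:WARN']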
test
|
main
|
Main executor of the fastqc_report template.
If the "--ignore-tests" option is present in the ``opts`` argument,
the health check of the sample will be bypassed, and it will pass the
check. This option is used in the first run of FastQC. In the second
run (after filtering with trimmomatic) this option is not provided and
the samples are submitted to a health check before proceeding in the
pipeline.
Parameters
----------
sample_id : str
Sample Identification string.
result_p1 : list
Two element list containing the paths to the FastQC report files for
the first FastQ pair.
The first must be the nucleotide level report and the second the
categorical report.
result_p2 : list
Two element list containing the paths to the FastQC report files for
the second FastQ pair.
The first must be the nucleotide level report and the second the
categorical report.
opts : list
List of arbitrary options. See `Expected input`_.
|
flowcraft/templates/fastqc_report.py
|
def main(sample_id, result_p1, result_p2, opts):
"""Main executor of the fastqc_report template.
If the "--ignore-tests" option is present in the ``opts`` argument,
the health check of the sample will be bypassed, and it will pass the
check. This option is used in the first run of FastQC. In the second
run (after filtering with trimmomatic) this option is not provided and
the samples are submitted to a health check before proceeding in the
pipeline.
Parameters
----------
sample_id : str
Sample Identification string.
result_p1 : list
Two element list containing the paths to the FastQC report files for
the first FastQ pair.
The first must be the nucleotide level report and the second the
categorical report.
result_p2 : list
Two element list containing the paths to the FastQC report files for
the second FastQ pair.
The first must be the nucleotide level report and the second the
categorical report.
opts : list
List of arbitrary options. See `Expected input`_.
"""
logger.info("Starting fastqc report")
json_dic = {}
with open("{}_trim_report".format(sample_id), "w") as trep_fh, \
open("optimal_trim", "w") as trim_fh, \
open("{}_status_report".format(sample_id), "w") as rep_fh, \
open(".status", "w") as status_fh, \
open(".warning", "w") as warn_fh, \
open(".fail", "w") as fail_fh, \
open(".report.json", "w") as report_fh:
# Perform health check according to the FastQC summary report for
# each pair. If both pairs pass the check, send the 'pass' information
# to the 'fastqc_health' channel. If at least one fails, send the
# summary report.
if "--ignore-tests" not in opts:
# Get reports for each category in json format
json_dic = write_json_report(sample_id, result_p1[0],
result_p2[0])
logger.info("Performing FastQ health check")
for p, fastqc_summary in enumerate([result_p1[1], result_p2[1]]):
logger.debug("Checking files: {}".format(fastqc_summary))
# Get the boolean health variable and a list of failed
# categories, if any
health, f_cat, warnings = check_summary_health(fastqc_summary)
logger.debug("Health checked: {}".format(health))
logger.debug("Failed categories: {}".format(f_cat))
# Write any warnings
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "qc",
"value": []
}]
for w in warnings:
warn_fh.write("{}\\n".format(w))
json_dic["warnings"][0]["value"].append(w)
# Rename category summary file to the channel that will publish
# The results
output_file = "{}_{}_summary.txt".format(sample_id, p)
os.rename(fastqc_summary, output_file)
logger.debug("Setting summary file name to {}".format(
output_file))
# If one of the health flags returns False, send the summary
# report through the status channel
if not health:
fail_msg = "Sample failed quality control checks:" \
" {}".format(",".join(f_cat))
logger.warning(fail_msg)
fail_fh.write(fail_msg)
json_dic["fail"] = [{
"sample": sample_id,
"table": "qc",
"value": [fail_msg]
}]
report_fh.write(
json.dumps(json_dic, separators=(",", ":")))
status_fh.write("fail")
trim_fh.write("fail")
rep_fh.write("{}, {}\\n".format(sample_id, ",".join(f_cat)))
trep_fh.write("{},fail,fail\\n".format(sample_id))
return
logger.info("Sample passed quality control checks")
status_fh.write("pass")
rep_fh.write("{}, pass\\n".format(sample_id))
logger.info("Assessing optimal trim range for sample")
# Get optimal trimming range for sample, based on the per base sequence
# content
optimal_trim = get_sample_trim(result_p1[0], result_p2[0])
logger.info("Optimal trim range set to: {}".format(optimal_trim))
trim_fh.write("{}".format(" ".join([str(x) for x in optimal_trim])))
trep_fh.write("{},{},{}\\n".format(sample_id, optimal_trim[0],
optimal_trim[1]))
# The json dict report is only populated when the FastQC quality
# checks are performed, that is, when the --ignore-tests option
# is not provided
if json_dic:
report_fh.write(json.dumps(json_dic, separators=(",", ":")))
|
def main(sample_id, result_p1, result_p2, opts):
"""Main executor of the fastqc_report template.
If the "--ignore-tests" option is present in the ``opts`` argument,
the health check of the sample will be bypassed, and it will pass the
check. This option is used in the first run of FastQC. In the second
run (after filtering with trimmomatic) this option is not provided and
the samples are submitted to a health check before proceeding in the
pipeline.
Parameters
----------
sample_id : str
Sample Identification string.
result_p1 : list
Two element list containing the paths to the FastQC report files for
the first FastQ pair.
The first must be the nucleotide level report and the second the
categorical report.
result_p2 : list
Two element list containing the paths to the FastQC report files for
the second FastQ pair.
The first must be the nucleotide level report and the second the
categorical report.
opts : list
List of arbitrary options. See `Expected input`_.
"""
logger.info("Starting fastqc report")
json_dic = {}
with open("{}_trim_report".format(sample_id), "w") as trep_fh, \
open("optimal_trim", "w") as trim_fh, \
open("{}_status_report".format(sample_id), "w") as rep_fh, \
open(".status", "w") as status_fh, \
open(".warning", "w") as warn_fh, \
open(".fail", "w") as fail_fh, \
open(".report.json", "w") as report_fh:
# Perform health check according to the FastQC summary report for
# each pair. If both pairs pass the check, send the 'pass' information
# to the 'fastqc_health' channel. If at least one fails, send the
# summary report.
if "--ignore-tests" not in opts:
# Get reports for each category in json format
json_dic = write_json_report(sample_id, result_p1[0],
result_p2[0])
logger.info("Performing FastQ health check")
for p, fastqc_summary in enumerate([result_p1[1], result_p2[1]]):
logger.debug("Checking files: {}".format(fastqc_summary))
# Get the boolean health variable and a list of failed
# categories, if any
health, f_cat, warnings = check_summary_health(fastqc_summary)
logger.debug("Health checked: {}".format(health))
logger.debug("Failed categories: {}".format(f_cat))
# Write any warnings
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "qc",
"value": []
}]
for w in warnings:
warn_fh.write("{}\\n".format(w))
json_dic["warnings"][0]["value"].append(w)
# Rename category summary file to the channel that will publish
# The results
output_file = "{}_{}_summary.txt".format(sample_id, p)
os.rename(fastqc_summary, output_file)
logger.debug("Setting summary file name to {}".format(
output_file))
# If one of the health flags returns False, send the summary
# report through the status channel
if not health:
fail_msg = "Sample failed quality control checks:" \
" {}".format(",".join(f_cat))
logger.warning(fail_msg)
fail_fh.write(fail_msg)
json_dic["fail"] = [{
"sample": sample_id,
"table": "qc",
"value": [fail_msg]
}]
report_fh.write(
json.dumps(json_dic, separators=(",", ":")))
status_fh.write("fail")
trim_fh.write("fail")
rep_fh.write("{}, {}\\n".format(sample_id, ",".join(f_cat)))
trep_fh.write("{},fail,fail\\n".format(sample_id))
return
logger.info("Sample passed quality control checks")
status_fh.write("pass")
rep_fh.write("{}, pass\\n".format(sample_id))
logger.info("Assessing optimal trim range for sample")
# Get optimal trimming range for sample, based on the per base sequence
# content
optimal_trim = get_sample_trim(result_p1[0], result_p2[0])
logger.info("Optimal trim range set to: {}".format(optimal_trim))
trim_fh.write("{}".format(" ".join([str(x) for x in optimal_trim])))
trep_fh.write("{},{},{}\\n".format(sample_id, optimal_trim[0],
optimal_trim[1]))
# The json dict report is only populated when the FastQC quality
# checks are performed, that is, when the --ignore-tests option
# is not provided
if json_dic:
report_fh.write(json.dumps(json_dic, separators=(",", ":")))
|
[
"Main",
"executor",
"of",
"the",
"fastqc_report",
"template",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/fastqc_report.py#L521-L639
|
[
"def",
"main",
"(",
"sample_id",
",",
"result_p1",
",",
"result_p2",
",",
"opts",
")",
":",
"logger",
".",
"info",
"(",
"\"Starting fastqc report\"",
")",
"json_dic",
"=",
"{",
"}",
"with",
"open",
"(",
"\"{}_trim_report\"",
".",
"format",
"(",
"sample_id",
")",
",",
"\"w\"",
")",
"as",
"trep_fh",
",",
"open",
"(",
"\"optimal_trim\"",
",",
"\"w\"",
")",
"as",
"trim_fh",
",",
"open",
"(",
"\"{}_status_report\"",
".",
"format",
"(",
"sample_id",
")",
",",
"\"w\"",
")",
"as",
"rep_fh",
",",
"open",
"(",
"\".status\"",
",",
"\"w\"",
")",
"as",
"status_fh",
",",
"open",
"(",
"\".warning\"",
",",
"\"w\"",
")",
"as",
"warn_fh",
",",
"open",
"(",
"\".fail\"",
",",
"\"w\"",
")",
"as",
"fail_fh",
",",
"open",
"(",
"\".report.json\"",
",",
"\"w\"",
")",
"as",
"report_fh",
":",
"# Perform health check according to the FastQC summary report for",
"# each pair. If both pairs pass the check, send the 'pass' information",
"# to the 'fastqc_health' channel. If at least one fails, send the",
"# summary report.",
"if",
"\"--ignore-tests\"",
"not",
"in",
"opts",
":",
"# Get reports for each category in json format",
"json_dic",
"=",
"write_json_report",
"(",
"sample_id",
",",
"result_p1",
"[",
"0",
"]",
",",
"result_p2",
"[",
"0",
"]",
")",
"logger",
".",
"info",
"(",
"\"Performing FastQ health check\"",
")",
"for",
"p",
",",
"fastqc_summary",
"in",
"enumerate",
"(",
"[",
"result_p1",
"[",
"1",
"]",
",",
"result_p2",
"[",
"1",
"]",
"]",
")",
":",
"logger",
".",
"debug",
"(",
"\"Checking files: {}\"",
".",
"format",
"(",
"fastqc_summary",
")",
")",
"# Get the boolean health variable and a list of failed",
"# categories, if any",
"health",
",",
"f_cat",
",",
"warnings",
"=",
"check_summary_health",
"(",
"fastqc_summary",
")",
"logger",
".",
"debug",
"(",
"\"Health checked: {}\"",
".",
"format",
"(",
"health",
")",
")",
"logger",
".",
"debug",
"(",
"\"Failed categories: {}\"",
".",
"format",
"(",
"f_cat",
")",
")",
"# Write any warnings",
"if",
"warnings",
":",
"json_dic",
"[",
"\"warnings\"",
"]",
"=",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"table\"",
":",
"\"qc\"",
",",
"\"value\"",
":",
"[",
"]",
"}",
"]",
"for",
"w",
"in",
"warnings",
":",
"warn_fh",
".",
"write",
"(",
"\"{}\\\\n\"",
".",
"format",
"(",
"w",
")",
")",
"json_dic",
"[",
"\"warnings\"",
"]",
"[",
"0",
"]",
"[",
"\"value\"",
"]",
".",
"append",
"(",
"w",
")",
"# Rename category summary file to the channel that will publish",
"# The results",
"output_file",
"=",
"\"{}_{}_summary.txt\"",
".",
"format",
"(",
"sample_id",
",",
"p",
")",
"os",
".",
"rename",
"(",
"fastqc_summary",
",",
"output_file",
")",
"logger",
".",
"debug",
"(",
"\"Setting summary file name to {}\"",
".",
"format",
"(",
"output_file",
")",
")",
"# If one of the health flags returns False, send the summary",
"# report through the status channel",
"if",
"not",
"health",
":",
"fail_msg",
"=",
"\"Sample failed quality control checks:\"",
"\" {}\"",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"f_cat",
")",
")",
"logger",
".",
"warning",
"(",
"fail_msg",
")",
"fail_fh",
".",
"write",
"(",
"fail_msg",
")",
"json_dic",
"[",
"\"fail\"",
"]",
"=",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"table\"",
":",
"\"qc\"",
",",
"\"value\"",
":",
"[",
"fail_msg",
"]",
"}",
"]",
"report_fh",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_dic",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")",
"status_fh",
".",
"write",
"(",
"\"fail\"",
")",
"trim_fh",
".",
"write",
"(",
"\"fail\"",
")",
"rep_fh",
".",
"write",
"(",
"\"{}, {}\\\\n\"",
".",
"format",
"(",
"sample_id",
",",
"\",\"",
".",
"join",
"(",
"f_cat",
")",
")",
")",
"trep_fh",
".",
"write",
"(",
"\"{},fail,fail\\\\n\"",
".",
"format",
"(",
"sample_id",
")",
")",
"return",
"logger",
".",
"info",
"(",
"\"Sample passed quality control checks\"",
")",
"status_fh",
".",
"write",
"(",
"\"pass\"",
")",
"rep_fh",
".",
"write",
"(",
"\"{}, pass\\\\n\"",
".",
"format",
"(",
"sample_id",
")",
")",
"logger",
".",
"info",
"(",
"\"Assessing optimal trim range for sample\"",
")",
"# Get optimal trimming range for sample, based on the per base sequence",
"# content",
"optimal_trim",
"=",
"get_sample_trim",
"(",
"result_p1",
"[",
"0",
"]",
",",
"result_p2",
"[",
"0",
"]",
")",
"logger",
".",
"info",
"(",
"\"Optimal trim range set to: {}\"",
".",
"format",
"(",
"optimal_trim",
")",
")",
"trim_fh",
".",
"write",
"(",
"\"{}\"",
".",
"format",
"(",
"\" \"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"optimal_trim",
"]",
")",
")",
")",
"trep_fh",
".",
"write",
"(",
"\"{},{},{}\\\\n\"",
".",
"format",
"(",
"sample_id",
",",
"optimal_trim",
"[",
"0",
"]",
",",
"optimal_trim",
"[",
"1",
"]",
")",
")",
"# The json dict report is only populated when the FastQC quality",
"# checks are performed, that is, when the --ignore-tests option",
"# is not provide",
"if",
"json_dic",
":",
"report_fh",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_dic",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
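For reference, a sketch of the argument shapes that the fastqc_report main above expects. The file names are hypothetical placeholders; the call itself is left commented out because it writes several dot-files (.status, .report.json, .warning, .fail) into the working directory:

# Hypothetical argument shapes for main(); paths are illustrative only.
sample_id = "SH10762A"
result_p1 = ["SH10762A_1_fastqc_data.txt", "SH10762A_1_summary.txt"]  # [nucleotide-level, categorical]
result_p2 = ["SH10762A_2_fastqc_data.txt", "SH10762A_2_summary.txt"]
opts = ["--ignore-tests"]   # first FastQC run: bypass the health check

# main(sample_id, result_p1, result_p2, opts)  # would write .status, .report.json, etc.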
test
|
main
|
Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
bowite_log : str
Path to the log file generated by bowtie.
|
flowcraft/templates/process_mapping.py
|
def main(sample_id, bowite_log):
"""Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
bowite_log : str
Path to the log file generated by bowtie.
"""
logger.info("Starting mapping file processing")
warnings = []
fails = ""
bowtie_info = Bowtie(sample_id, bowite_log)
print(bowtie_info.overall_rate)
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Reads",
"value": int(bowtie_info.n_reads),
"table": "mapping",
"columnBar": False},
{"header": "Unmapped",
"value": int(bowtie_info.align_0x),
"table": "mapping",
"columnBar": False},
{"header": "Mapped 1x",
"value": int(bowtie_info.align_1x),
"table": "mapping",
"columnBar": False},
{"header": "Mapped >1x",
"value": int(bowtie_info.align_mt1x),
"table": "mapping",
"columnBar": False},
{"header": "Overall alignment rate (%)",
"value": float(bowtie_info.overall_rate),
"table": "mapping",
"columnBar": False}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "mapping",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "mapping",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
def main(sample_id, bowite_log):
"""Main executor of the process_mapping template.
Parameters
----------
sample_id : str
Sample Identification string.
bowite_log : str
Path to the log file generated by bowtie.
"""
logger.info("Starting mapping file processing")
warnings = []
fails = ""
bowtie_info = Bowtie(sample_id, bowite_log)
print(bowtie_info.overall_rate)
with open(".report.json", "w") as json_report:
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Reads",
"value": int(bowtie_info.n_reads),
"table": "mapping",
"columnBar": False},
{"header": "Unmapped",
"value": int(bowtie_info.align_0x),
"table": "mapping",
"columnBar": False},
{"header": "Mapped 1x",
"value": int(bowtie_info.align_1x),
"table": "mapping",
"columnBar": False},
{"header": "Mapped >1x",
"value": int(bowtie_info.align_mt1x),
"table": "mapping",
"columnBar": False},
{"header": "Overall alignment rate (%)",
"value": float(bowtie_info.overall_rate),
"table": "mapping",
"columnBar": False}
]
}],
}
if warnings:
json_dic["warnings"] = [{
"sample": sample_id,
"table": "mapping",
"value": warnings
}]
if fails:
json_dic["fail"] = [{
"sample": sample_id,
"table": "mapping",
"value": [fails]
}]
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
[
"Main",
"executor",
"of",
"the",
"process_mapping",
"template",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_mapping.py#L196-L263
|
[
"def",
"main",
"(",
"sample_id",
",",
"bowite_log",
")",
":",
"logger",
".",
"info",
"(",
"\"Starting mapping file processing\"",
")",
"warnings",
"=",
"[",
"]",
"fails",
"=",
"\"\"",
"bowtie_info",
"=",
"Bowtie",
"(",
"sample_id",
",",
"bowite_log",
")",
"print",
"(",
"bowtie_info",
".",
"overall_rate",
")",
"with",
"open",
"(",
"\".report.json\"",
",",
"\"w\"",
")",
"as",
"json_report",
":",
"json_dic",
"=",
"{",
"\"tableRow\"",
":",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"data\"",
":",
"[",
"{",
"\"header\"",
":",
"\"Reads\"",
",",
"\"value\"",
":",
"int",
"(",
"bowtie_info",
".",
"n_reads",
")",
",",
"\"table\"",
":",
"\"mapping\"",
",",
"\"columnBar\"",
":",
"False",
"}",
",",
"{",
"\"header\"",
":",
"\"Unmapped\"",
",",
"\"value\"",
":",
"int",
"(",
"bowtie_info",
".",
"align_0x",
")",
",",
"\"table\"",
":",
"\"mapping\"",
",",
"\"columnBar\"",
":",
"False",
"}",
",",
"{",
"\"header\"",
":",
"\"Mapped 1x\"",
",",
"\"value\"",
":",
"int",
"(",
"bowtie_info",
".",
"align_1x",
")",
",",
"\"table\"",
":",
"\"mapping\"",
",",
"\"columnBar\"",
":",
"False",
"}",
",",
"{",
"\"header\"",
":",
"\"Mapped >1x\"",
",",
"\"value\"",
":",
"int",
"(",
"bowtie_info",
".",
"align_mt1x",
")",
",",
"\"table\"",
":",
"\"mapping\"",
",",
"\"columnBar\"",
":",
"False",
"}",
",",
"{",
"\"header\"",
":",
"\"Overall alignment rate (%)\"",
",",
"\"value\"",
":",
"float",
"(",
"bowtie_info",
".",
"overall_rate",
")",
",",
"\"table\"",
":",
"\"mapping\"",
",",
"\"columnBar\"",
":",
"False",
"}",
"]",
"}",
"]",
",",
"}",
"if",
"warnings",
":",
"json_dic",
"[",
"\"warnings\"",
"]",
"=",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"table\"",
":",
"\"mapping\"",
",",
"\"value\"",
":",
"warnings",
"}",
"]",
"if",
"fails",
":",
"json_dic",
"[",
"\"fail\"",
"]",
"=",
"[",
"{",
"\"sample\"",
":",
"sample_id",
",",
"\"table\"",
":",
"\"mapping\"",
",",
"\"value\"",
":",
"[",
"fails",
"]",
"}",
"]",
"json_report",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_dic",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")",
"with",
"open",
"(",
"\".status\"",
",",
"\"w\"",
")",
"as",
"status_fh",
":",
"status_fh",
".",
"write",
"(",
"\"pass\"",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
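A minimal sketch of the .report.json payload built by the process_mapping main above, using hypothetical bowtie counts in place of a Bowtie instance:

import json

# Hypothetical bowtie counts; in the template these come from a Bowtie(...) instance.
sample_id = "SH10762A"
n_reads, align_0x, align_1x, align_mt1x, overall_rate = 100000, 1200, 88000, 10800, 98.8

json_dic = {
    "tableRow": [{
        "sample": sample_id,
        "data": [
            {"header": "Reads", "value": n_reads, "table": "mapping", "columnBar": False},
            {"header": "Unmapped", "value": align_0x, "table": "mapping", "columnBar": False},
            {"header": "Mapped 1x", "value": align_1x, "table": "mapping", "columnBar": False},
            {"header": "Mapped >1x", "value": align_mt1x, "table": "mapping", "columnBar": False},
            {"header": "Overall alignment rate (%)", "value": overall_rate, "table": "mapping", "columnBar": False},
        ]
    }],
}
print(json.dumps(json_dic, separators=(",", ":")))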
test
|
Bowtie.parse_log
|
Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with
data from the log file.
Disclaimer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
The insertion of data into the attributes is done by the
corresponding setter methods (e.g. :py:meth:`set_n_reads`).
Parameters
----------
bowtie_log : str
Path to the bowtie log file.
|
flowcraft/templates/process_mapping.py
|
def parse_log(self, bowtie_log):
"""Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with
data from the log file.
Disclaimer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
The insertion of data into the attributes is done by the
corresponding setter methods (e.g. :py:meth:`set_n_reads`).
Parameters
----------
bowtie_log : str
Path to the bowtie log file.
"""
print("is here!")
# Regexes - thanks to https://github.com/ewels/MultiQC/blob/master/multiqc/modules/bowtie2/bowtie2.py
regexes = {
'unpaired': {
'unpaired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times",
'unpaired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'unpaired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times"
},
'paired': {
'paired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly 0 times",
'paired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly exactly 1 time",
'paired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly >1 times",
'paired_aligned_discord_one': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly 1 time",
'paired_aligned_discord_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly >1 times",
'paired_aligned_mate_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'paired_aligned_mate_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times",
'paired_aligned_mate_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times"
}
}
#Missing parser for unpaired (not implemented in flowcraft yet)
with open(bowtie_log, "r") as f:
#Go through log file line by line
for l in f:
print(l)
#total reads
total = re.search(r"(\\d+) reads; of these:", l)
print(total)
if total:
print(total)
self.set_n_reads(total.group(1))
# Paired end reads aka the pain
paired = re.search(r"(\\d+) \\([\\d\\.]+%\\) were paired; of these:", l)
if paired:
paired_total = int(paired.group(1))
paired_numbers = {}
# Do nested loop whilst we have this level of indentation
l = f.readline()
while l.startswith(' '):
for k, r in regexes['paired'].items():
match = re.search(r, l)
if match:
paired_numbers[k] = int(match.group(1))
l = f.readline()
align_zero_times = paired_numbers['paired_aligned_none'] + paired_numbers['paired_aligned_mate_none']
if align_zero_times:
self.set_align_0x(align_zero_times)
align_one_time = paired_numbers['paired_aligned_one'] + paired_numbers['paired_aligned_mate_one']
if align_one_time:
self.set_align_1x(align_one_time)
align_more_than_one_time = paired_numbers['paired_aligned_multi'] + paired_numbers['paired_aligned_mate_multi']
if align_more_than_one_time:
self.set_align_mt1x(align_more_than_one_time)
# Overall alignment rate
overall = re.search(r"([\\d\\.]+)% overall alignment rate", l)
if overall:
self.overall_rate = float(overall.group(1))
|
def parse_log(self, bowtie_log):
"""Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with
data from the log file.
Disclaimer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
The insertion of data into the attributes is done by the
corresponding setter methods (e.g. :py:meth:`set_n_reads`).
Parameters
----------
bowtie_log : str
Path to the bowtie log file.
"""
print("is here!")
# Regexes - thanks to https://github.com/ewels/MultiQC/blob/master/multiqc/modules/bowtie2/bowtie2.py
regexes = {
'unpaired': {
'unpaired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times",
'unpaired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'unpaired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times"
},
'paired': {
'paired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly 0 times",
'paired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly exactly 1 time",
'paired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly >1 times",
'paired_aligned_discord_one': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly 1 time",
'paired_aligned_discord_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly >1 times",
'paired_aligned_mate_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'paired_aligned_mate_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times",
'paired_aligned_mate_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times"
}
}
#Missing parser for unpaired (not implemented in flowcraft yet)
with open(bowtie_log, "r") as f:
#Go through log file line by line
for l in f:
print(l)
#total reads
total = re.search(r"(\\d+) reads; of these:", l)
print(total)
if total:
print(total)
self.set_n_reads(total.group(1))
# Paired end reads aka the pain
paired = re.search(r"(\\d+) \\([\\d\\.]+%\\) were paired; of these:", l)
if paired:
paired_total = int(paired.group(1))
paired_numbers = {}
# Do nested loop whilst we have this level of indentation
l = f.readline()
while l.startswith(' '):
for k, r in regexes['paired'].items():
match = re.search(r, l)
if match:
paired_numbers[k] = int(match.group(1))
l = f.readline()
align_zero_times = paired_numbers['paired_aligned_none'] + paired_numbers['paired_aligned_mate_none']
if align_zero_times:
self.set_align_0x(align_zero_times)
align_one_time = paired_numbers['paired_aligned_one'] + paired_numbers['paired_aligned_mate_one']
if align_one_time:
self.set_align_1x(align_one_time)
align_more_than_one_time = paired_numbers['paired_aligned_multi'] + paired_numbers['paired_aligned_mate_multi']
if align_more_than_one_time:
self.set_align_mt1x(align_more_than_one_time)
# Overall alignment rate
overall = re.search(r"([\\d\\.]+)% overall alignment rate", l)
if overall:
self.overall_rate = float(overall.group(1))
|
[
"Parse",
"a",
"bowtie",
"log",
"file",
"."
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_mapping.py#L103-L192
|
[
"def",
"parse_log",
"(",
"self",
",",
"bowtie_log",
")",
":",
"print",
"(",
"\"is here!\"",
")",
"# Regexes - thanks to https://github.com/ewels/MultiQC/blob/master/multiqc/modules/bowtie2/bowtie2.py",
"regexes",
"=",
"{",
"'unpaired'",
":",
"{",
"'unpaired_aligned_none'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned 0 times\"",
",",
"'unpaired_aligned_one'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned exactly 1 time\"",
",",
"'unpaired_aligned_multi'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned >1 times\"",
"}",
",",
"'paired'",
":",
"{",
"'paired_aligned_none'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned concordantly 0 times\"",
",",
"'paired_aligned_one'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned concordantly exactly 1 time\"",
",",
"'paired_aligned_multi'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned concordantly >1 times\"",
",",
"'paired_aligned_discord_one'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned discordantly 1 time\"",
",",
"'paired_aligned_discord_multi'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned discordantly >1 times\"",
",",
"'paired_aligned_mate_one'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned exactly 1 time\"",
",",
"'paired_aligned_mate_multi'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned >1 times\"",
",",
"'paired_aligned_mate_none'",
":",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) aligned 0 times\"",
"}",
"}",
"#Missing parser for unpaired (not implemented in flowcraft yet)",
"with",
"open",
"(",
"bowtie_log",
",",
"\"r\"",
")",
"as",
"f",
":",
"#Go through log file line by line",
"for",
"l",
"in",
"f",
":",
"print",
"(",
"l",
")",
"#total reads",
"total",
"=",
"re",
".",
"search",
"(",
"r\"(\\\\d+) reads; of these:\"",
",",
"l",
")",
"print",
"(",
"total",
")",
"if",
"total",
":",
"print",
"(",
"total",
")",
"self",
".",
"set_n_reads",
"(",
"total",
".",
"group",
"(",
"1",
")",
")",
"# Paired end reads aka the pain",
"paired",
"=",
"re",
".",
"search",
"(",
"r\"(\\\\d+) \\\\([\\\\d\\\\.]+%\\\\) were paired; of these:\"",
",",
"l",
")",
"if",
"paired",
":",
"paired_total",
"=",
"int",
"(",
"paired",
".",
"group",
"(",
"1",
")",
")",
"paired_numbers",
"=",
"{",
"}",
"# Do nested loop whilst we have this level of indentation",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"while",
"l",
".",
"startswith",
"(",
"' '",
")",
":",
"for",
"k",
",",
"r",
"in",
"regexes",
"[",
"'paired'",
"]",
".",
"items",
"(",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r",
",",
"l",
")",
"if",
"match",
":",
"paired_numbers",
"[",
"k",
"]",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"l",
"=",
"f",
".",
"readline",
"(",
")",
"align_zero_times",
"=",
"paired_numbers",
"[",
"'paired_aligned_none'",
"]",
"+",
"paired_numbers",
"[",
"'paired_aligned_mate_none'",
"]",
"if",
"align_zero_times",
":",
"self",
".",
"set_align_0x",
"(",
"align_zero_times",
")",
"align_one_time",
"=",
"paired_numbers",
"[",
"'paired_aligned_one'",
"]",
"+",
"paired_numbers",
"[",
"'paired_aligned_mate_one'",
"]",
"if",
"align_one_time",
":",
"self",
".",
"set_align_1x",
"(",
"align_one_time",
")",
"align_more_than_one_time",
"=",
"paired_numbers",
"[",
"'paired_aligned_multi'",
"]",
"+",
"paired_numbers",
"[",
"'paired_aligned_mate_multi'",
"]",
"if",
"align_more_than_one_time",
":",
"self",
".",
"set_align_mt1x",
"(",
"align_more_than_one_time",
")",
"# Overall alignment rate",
"overall",
"=",
"re",
".",
"search",
"(",
"r\"([\\\\d\\\\.]+)% overall alignment rate\"",
",",
"l",
")",
"if",
"overall",
":",
"self",
".",
"overall_rate",
"=",
"float",
"(",
"overall",
".",
"group",
"(",
"1",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
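A standalone sketch of the regex logic in parse_log above, applied to a hypothetical bowtie2 log excerpt. The patterns are written with single backslashes, assuming the doubled backslashes in the stored template are an escaping artifact of the template format:

import re

# Hypothetical bowtie2 log excerpt
log = """10000 reads; of these:
  10000 (100.00%) were paired; of these:
    500 (5.00%) aligned concordantly 0 times
    9000 (90.00%) aligned concordantly exactly 1 time
    500 (5.00%) aligned concordantly >1 times
98.50% overall alignment rate
"""

# Total reads, concordant single alignments and overall rate,
# mirroring three of the regexes used in parse_log
total = re.search(r"(\d+) reads; of these:", log)
one_time = re.search(r"(\d+) \([\d\.]+%\) aligned concordantly exactly 1 time", log)
overall = re.search(r"([\d\.]+)% overall alignment rate", log)

print(total.group(1), one_time.group(1), overall.group(1))  # 10000 9000 98.50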
test
|
NextflowGenerator._parse_process_name
|
Parses the process string and returns the process name and its
directives
Process strings may contain directive information with the following
syntax::
proc_name={'directive':'val'}
This method parses this string and returns the process name as a
string and the directives information as a dictionary.
Parameters
----------
name_str : str
Raw string with process name and, potentially, directive
information
Returns
-------
str
Process name
dict or None
Process directives
|
flowcraft/generator/engine.py
|
def _parse_process_name(name_str):
"""Parses the process string and returns the process name and its
directives
Process strings may contain directive information with the following
syntax::
proc_name={'directive':'val'}
This method parses this string and returns the process name as a
string and the directives information as a dictionary.
Parameters
----------
name_str : str
Raw string with process name and, potentially, directive
information
Returns
-------
str
Process name
dict or None
Process directives
"""
directives = None
fields = name_str.split("=")
process_name = fields[0]
if len(fields) == 2:
_directives = fields[1].replace("'", '"')
try:
directives = json.loads(_directives)
except json.decoder.JSONDecodeError:
raise eh.ProcessError(
"Could not parse directives for process '{}'. The raw"
" string is: {}\n"
"Possible causes include:\n"
"\t1. Spaces inside directives\n"
"\t2. Missing '=' symbol before directives\n"
"\t3. Missing quotes (' or \") around directives\n"
"A valid example: process_name={{'cpus':'2'}}".format(
process_name, name_str))
return process_name, directives
|
def _parse_process_name(name_str):
"""Parses the process string and returns the process name and its
directives
Process strings may contain directive information with the following
syntax::
proc_name={'directive':'val'}
This method parses this string and returns the process name as a
string and the directives information as a dictionary.
Parameters
----------
name_str : str
Raw string with process name and, potentially, directive
information
Returns
-------
str
Process name
dict or None
Process directives
"""
directives = None
fields = name_str.split("=")
process_name = fields[0]
if len(fields) == 2:
_directives = fields[1].replace("'", '"')
try:
directives = json.loads(_directives)
except json.decoder.JSONDecodeError:
raise eh.ProcessError(
"Could not parse directives for process '{}'. The raw"
" string is: {}\n"
"Possible causes include:\n"
"\t1. Spaces inside directives\n"
"\t2. Missing '=' symbol before directives\n"
"\t3. Missing quotes (' or \") around directives\n"
"A valid example: process_name={{'cpus':'2'}}".format(
process_name, name_str))
return process_name, directives
|
[
"Parses",
"the",
"process",
"string",
"and",
"returns",
"the",
"process",
"name",
"and",
"its",
"directives"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/engine.py#L184-L230
|
[
"def",
"_parse_process_name",
"(",
"name_str",
")",
":",
"directives",
"=",
"None",
"fields",
"=",
"name_str",
".",
"split",
"(",
"\"=\"",
")",
"process_name",
"=",
"fields",
"[",
"0",
"]",
"if",
"len",
"(",
"fields",
")",
"==",
"2",
":",
"_directives",
"=",
"fields",
"[",
"1",
"]",
".",
"replace",
"(",
"\"'\"",
",",
"'\"'",
")",
"try",
":",
"directives",
"=",
"json",
".",
"loads",
"(",
"_directives",
")",
"except",
"json",
".",
"decoder",
".",
"JSONDecodeError",
":",
"raise",
"eh",
".",
"ProcessError",
"(",
"\"Could not parse directives for process '{}'. The raw\"",
"\" string is: {}\\n\"",
"\"Possible causes include:\\n\"",
"\"\\t1. Spaces inside directives\\n\"",
"\"\\t2. Missing '=' symbol before directives\\n\"",
"\"\\t3. Missing quotes (' or \\\") around directives\\n\"",
"\"A valid example: process_name={{'cpus':'2'}}\"",
".",
"format",
"(",
"process_name",
",",
"name_str",
")",
")",
"return",
"process_name",
",",
"directives"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
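A standalone sketch of the parsing logic in _parse_process_name above, without the original error handling; the process name 'trimmomatic' is used purely as an example:

import json

# Minimal reimplementation of the split-and-parse logic for illustration only
def parse_process_name(name_str):
    directives = None
    fields = name_str.split("=")
    process_name = fields[0]
    if len(fields) == 2:
        # Single quotes are swapped for double quotes so the string is valid JSON
        directives = json.loads(fields[1].replace("'", '"'))
    return process_name, directives

print(parse_process_name("trimmomatic"))               # ('trimmomatic', None)
print(parse_process_name("trimmomatic={'cpus':'2'}"))  # ('trimmomatic', {'cpus': '2'})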
test
|
NextflowGenerator._build_connections
|
Parses the process connections dictionaries into a process list
This method is called upon instantiation of the NextflowGenerator
class. Essentially, it sets the main input/output channel names of the
processes so that they can be linked correctly.
If a connection between two consecutive processes is not possible due
to a mismatch in the input/output types, it exits with an error.
Returns
-------
|
flowcraft/generator/engine.py
|
def _build_connections(self, process_list, ignore_dependencies,
auto_dependency):
"""Parses the process connections dictionaries into a process list
This method is called upon instantiation of the NextflowGenerator
class. Essentially, it sets the main input/output channel names of the
processes so that they can be linked correctly.
If a connection between two consecutive processes is not possible due
to a mismatch in the input/output types, it exits with an error.
Returns
-------
"""
logger.debug("=============================")
logger.debug("Building pipeline connections")
logger.debug("=============================")
logger.debug("Processing connections: {}".format(process_list))
for p, con in enumerate(process_list):
logger.debug("Processing connection '{}': {}".format(p, con))
# Get lanes
in_lane = con["input"]["lane"]
out_lane = con["output"]["lane"]
logger.debug("[{}] Input lane: {}".format(p, in_lane))
logger.debug("[{}] Output lane: {}".format(p, out_lane))
# Update the total number of lanes of the pipeline
if out_lane > self.lanes:
self.lanes = out_lane
# Get process names and directives for the output process
p_in_name, p_out_name, out_directives = self._get_process_names(
con, p)
# Check if process is available or correctly named
if p_out_name not in self.process_map:
logger.error(colored_print(
"\nThe process '{}' is not available."
.format(p_out_name), "red_bold"))
guess_process(p_out_name, self.process_map)
sys.exit(1)
# Instance output process
out_process = self.process_map[p_out_name](template=p_out_name)
# Update directives, if provided
if out_directives:
out_process.update_attributes(out_directives)
# Set suffix strings for main input/output channels. Suffixes are
# based on the lane and the arbitrary and unique process id
# e.g.: 'process_1_1'
input_suf = "{}_{}".format(in_lane, p)
output_suf = "{}_{}".format(out_lane, p)
logger.debug("[{}] Setting main channels with input suffix '{}'"
" and output suffix '{}'".format(
p, input_suf, output_suf))
out_process.set_main_channel_names(input_suf, output_suf, out_lane)
# Instance input process, if it exists. In case of init, the
# output process forks from the raw input user data
if p_in_name != "__init__":
# Create instance of input process
in_process = self.process_map[p_in_name](template=p_in_name)
# Test if two processes can be connected by input/output types
logger.debug("[{}] Testing connection between input and "
"output processes".format(p))
self._test_connection(in_process, out_process)
out_process.parent_lane = in_lane
else:
# When the input process is __init__, set the parent_lane
# to None. This will tell the engine that this process
# will receive the main input from the raw user input.
out_process.parent_lane = None
logger.debug("[{}] Parent lane: {}".format(
p, out_process.parent_lane))
# If the current connection is a fork, add it to the fork tree
if in_lane != out_lane:
logger.debug("[{}] Connection is a fork. Adding lanes to "
"fork list".format(p))
self._fork_tree[in_lane].append(out_lane)
# Update main output fork of parent process
try:
parent_process = [
x for x in self.processes if x.lane == in_lane and
x.template == p_in_name
][0]
logger.debug(
"[{}] Updating main forks of parent fork '{}' with"
" '{}'".format(p, parent_process,
out_process.input_channel))
parent_process.update_main_forks(out_process.input_channel)
except IndexError:
pass
else:
# Get parent process, naive version
parent_process = self.processes[-1]
# Check if the last process' lane matches the lane of the
# current output process. If not, get the last process
# in the same lane
if parent_process.lane and parent_process.lane != out_lane:
parent_process = [x for x in self.processes[::-1]
if x.lane == out_lane][0]
if parent_process.output_channel:
logger.debug(
"[{}] Updating input channel of output process"
" with '{}'".format(
p, parent_process.output_channel))
out_process.input_channel = parent_process.output_channel
# Check for process dependencies
if out_process.dependencies and not ignore_dependencies:
logger.debug("[{}] Dependencies found for process '{}': "
"{}".format(p, p_out_name,
out_process.dependencies))
parent_lanes = self._get_fork_tree(out_lane)
for dep in out_process.dependencies:
if not self._search_tree_backwards(dep, parent_lanes):
if auto_dependency:
self._add_dependency(
out_process, dep, in_lane, out_lane, p)
elif not self.export_parameters:
logger.error(colored_print(
"\nThe following dependency of the process"
" '{}' is missing: {}".format(p_out_name, dep),
"red_bold"))
sys.exit(1)
self.processes.append(out_process)
logger.debug("Completed connections: {}".format(self.processes))
logger.debug("Fork tree: {}".format(self._fork_tree))
|
def _build_connections(self, process_list, ignore_dependencies,
auto_dependency):
"""Parses the process connections dictionaries into a process list
This method is called upon instantiation of the NextflowGenerator
class. Essentially, it sets the main input/output channel names of the
processes so that they can be linked correctly.
If a connection between two consecutive processes is not possible due
to a mismatch in the input/output types, it exits with an error.
Returns
-------
"""
logger.debug("=============================")
logger.debug("Building pipeline connections")
logger.debug("=============================")
logger.debug("Processing connections: {}".format(process_list))
for p, con in enumerate(process_list):
logger.debug("Processing connection '{}': {}".format(p, con))
# Get lanes
in_lane = con["input"]["lane"]
out_lane = con["output"]["lane"]
logger.debug("[{}] Input lane: {}".format(p, in_lane))
logger.debug("[{}] Output lane: {}".format(p, out_lane))
# Update the total number of lanes of the pipeline
if out_lane > self.lanes:
self.lanes = out_lane
# Get process names and directives for the output process
p_in_name, p_out_name, out_directives = self._get_process_names(
con, p)
# Check if process is available or correctly named
if p_out_name not in self.process_map:
logger.error(colored_print(
"\nThe process '{}' is not available."
.format(p_out_name), "red_bold"))
guess_process(p_out_name, self.process_map)
sys.exit(1)
# Instance output process
out_process = self.process_map[p_out_name](template=p_out_name)
# Update directives, if provided
if out_directives:
out_process.update_attributes(out_directives)
# Set suffix strings for main input/output channels. Suffixes are
# based on the lane and the arbitrary and unique process id
# e.g.: 'process_1_1'
input_suf = "{}_{}".format(in_lane, p)
output_suf = "{}_{}".format(out_lane, p)
logger.debug("[{}] Setting main channels with input suffix '{}'"
" and output suffix '{}'".format(
p, input_suf, output_suf))
out_process.set_main_channel_names(input_suf, output_suf, out_lane)
# Instance input process, if it exists. In case of init, the
# output process forks from the raw input user data
if p_in_name != "__init__":
# Create instance of input process
in_process = self.process_map[p_in_name](template=p_in_name)
# Test if two processes can be connected by input/output types
logger.debug("[{}] Testing connection between input and "
"output processes".format(p))
self._test_connection(in_process, out_process)
out_process.parent_lane = in_lane
else:
# When the input process is __init__, set the parent_lane
# to None. This will tell the engine that this process
# will receive the main input from the raw user input.
out_process.parent_lane = None
logger.debug("[{}] Parent lane: {}".format(
p, out_process.parent_lane))
# If the current connection is a fork, add it to the fork tree
if in_lane != out_lane:
logger.debug("[{}] Connection is a fork. Adding lanes to "
"fork list".format(p))
self._fork_tree[in_lane].append(out_lane)
# Update main output fork of parent process
try:
parent_process = [
x for x in self.processes if x.lane == in_lane and
x.template == p_in_name
][0]
logger.debug(
"[{}] Updating main forks of parent fork '{}' with"
" '{}'".format(p, parent_process,
out_process.input_channel))
parent_process.update_main_forks(out_process.input_channel)
except IndexError:
pass
else:
# Get parent process, naive version
parent_process = self.processes[-1]
# Check if the last process' lane matches the lane of the
# current output process. If not, get the last process
# in the same lane
if parent_process.lane and parent_process.lane != out_lane:
parent_process = [x for x in self.processes[::-1]
if x.lane == out_lane][0]
if parent_process.output_channel:
logger.debug(
"[{}] Updating input channel of output process"
" with '{}'".format(
p, parent_process.output_channel))
out_process.input_channel = parent_process.output_channel
# Check for process dependencies
if out_process.dependencies and not ignore_dependencies:
logger.debug("[{}] Dependencies found for process '{}': "
"{}".format(p, p_out_name,
out_process.dependencies))
parent_lanes = self._get_fork_tree(out_lane)
for dep in out_process.dependencies:
if not self._search_tree_backwards(dep, parent_lanes):
if auto_dependency:
self._add_dependency(
out_process, dep, in_lane, out_lane, p)
elif not self.export_parameters:
logger.error(colored_print(
"\nThe following dependency of the process"
" '{}' is missing: {}".format(p_out_name, dep),
"red_bold"))
sys.exit(1)
self.processes.append(out_process)
logger.debug("Completed connections: {}".format(self.processes))
logger.debug("Fork tree: {}".format(self._fork_tree))
|
[
"Parses",
"the",
"process",
"connections",
"dictionaries",
"into",
"a",
"process",
"list"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/engine.py#L232-L372
|
[
"def",
"_build_connections",
"(",
"self",
",",
"process_list",
",",
"ignore_dependencies",
",",
"auto_dependency",
")",
":",
"logger",
".",
"debug",
"(",
"\"=============================\"",
")",
"logger",
".",
"debug",
"(",
"\"Building pipeline connections\"",
")",
"logger",
".",
"debug",
"(",
"\"=============================\"",
")",
"logger",
".",
"debug",
"(",
"\"Processing connections: {}\"",
".",
"format",
"(",
"process_list",
")",
")",
"for",
"p",
",",
"con",
"in",
"enumerate",
"(",
"process_list",
")",
":",
"logger",
".",
"debug",
"(",
"\"Processing connection '{}': {}\"",
".",
"format",
"(",
"p",
",",
"con",
")",
")",
"# Get lanes",
"in_lane",
"=",
"con",
"[",
"\"input\"",
"]",
"[",
"\"lane\"",
"]",
"out_lane",
"=",
"con",
"[",
"\"output\"",
"]",
"[",
"\"lane\"",
"]",
"logger",
".",
"debug",
"(",
"\"[{}] Input lane: {}\"",
".",
"format",
"(",
"p",
",",
"in_lane",
")",
")",
"logger",
".",
"debug",
"(",
"\"[{}] Output lane: {}\"",
".",
"format",
"(",
"p",
",",
"out_lane",
")",
")",
"# Update the total number of lines of the pipeline",
"if",
"out_lane",
">",
"self",
".",
"lanes",
":",
"self",
".",
"lanes",
"=",
"out_lane",
"# Get process names and directives for the output process",
"p_in_name",
",",
"p_out_name",
",",
"out_directives",
"=",
"self",
".",
"_get_process_names",
"(",
"con",
",",
"p",
")",
"# Check if process is available or correctly named",
"if",
"p_out_name",
"not",
"in",
"self",
".",
"process_map",
":",
"logger",
".",
"error",
"(",
"colored_print",
"(",
"\"\\nThe process '{}' is not available.\"",
".",
"format",
"(",
"p_out_name",
")",
",",
"\"red_bold\"",
")",
")",
"guess_process",
"(",
"p_out_name",
",",
"self",
".",
"process_map",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Instance output process",
"out_process",
"=",
"self",
".",
"process_map",
"[",
"p_out_name",
"]",
"(",
"template",
"=",
"p_out_name",
")",
"# Update directives, if provided",
"if",
"out_directives",
":",
"out_process",
".",
"update_attributes",
"(",
"out_directives",
")",
"# Set suffix strings for main input/output channels. Suffixes are",
"# based on the lane and the arbitrary and unique process id",
"# e.g.: 'process_1_1'",
"input_suf",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"in_lane",
",",
"p",
")",
"output_suf",
"=",
"\"{}_{}\"",
".",
"format",
"(",
"out_lane",
",",
"p",
")",
"logger",
".",
"debug",
"(",
"\"[{}] Setting main channels with input suffix '{}'\"",
"\" and output suffix '{}'\"",
".",
"format",
"(",
"p",
",",
"input_suf",
",",
"output_suf",
")",
")",
"out_process",
".",
"set_main_channel_names",
"(",
"input_suf",
",",
"output_suf",
",",
"out_lane",
")",
"# Instance input process, if it exists. In case of init, the",
"# output process forks from the raw input user data",
"if",
"p_in_name",
"!=",
"\"__init__\"",
":",
"# Create instance of input process",
"in_process",
"=",
"self",
".",
"process_map",
"[",
"p_in_name",
"]",
"(",
"template",
"=",
"p_in_name",
")",
"# Test if two processes can be connected by input/output types",
"logger",
".",
"debug",
"(",
"\"[{}] Testing connection between input and \"",
"\"output processes\"",
".",
"format",
"(",
"p",
")",
")",
"self",
".",
"_test_connection",
"(",
"in_process",
",",
"out_process",
")",
"out_process",
".",
"parent_lane",
"=",
"in_lane",
"else",
":",
"# When the input process is __init__, set the parent_lane",
"# to None. This will tell the engine that this process",
"# will receive the main input from the raw user input.",
"out_process",
".",
"parent_lane",
"=",
"None",
"logger",
".",
"debug",
"(",
"\"[{}] Parent lane: {}\"",
".",
"format",
"(",
"p",
",",
"out_process",
".",
"parent_lane",
")",
")",
"# If the current connection is a fork, add it to the fork tree",
"if",
"in_lane",
"!=",
"out_lane",
":",
"logger",
".",
"debug",
"(",
"\"[{}] Connection is a fork. Adding lanes to \"",
"\"fork list\"",
".",
"format",
"(",
"p",
")",
")",
"self",
".",
"_fork_tree",
"[",
"in_lane",
"]",
".",
"append",
"(",
"out_lane",
")",
"# Update main output fork of parent process",
"try",
":",
"parent_process",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"processes",
"if",
"x",
".",
"lane",
"==",
"in_lane",
"and",
"x",
".",
"template",
"==",
"p_in_name",
"]",
"[",
"0",
"]",
"logger",
".",
"debug",
"(",
"\"[{}] Updating main forks of parent fork '{}' with\"",
"\" '{}'\"",
".",
"format",
"(",
"p",
",",
"parent_process",
",",
"out_process",
".",
"input_channel",
")",
")",
"parent_process",
".",
"update_main_forks",
"(",
"out_process",
".",
"input_channel",
")",
"except",
"IndexError",
":",
"pass",
"else",
":",
"# Get parent process, naive version",
"parent_process",
"=",
"self",
".",
"processes",
"[",
"-",
"1",
"]",
"# Check if the last process' lane matches the lane of the",
"# current output process. If not, get the last process",
"# in the same lane",
"if",
"parent_process",
".",
"lane",
"and",
"parent_process",
".",
"lane",
"!=",
"out_lane",
":",
"parent_process",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"processes",
"[",
":",
":",
"-",
"1",
"]",
"if",
"x",
".",
"lane",
"==",
"out_lane",
"]",
"[",
"0",
"]",
"if",
"parent_process",
".",
"output_channel",
":",
"logger",
".",
"debug",
"(",
"\"[{}] Updating input channel of output process\"",
"\" with '{}'\"",
".",
"format",
"(",
"p",
",",
"parent_process",
".",
"output_channel",
")",
")",
"out_process",
".",
"input_channel",
"=",
"parent_process",
".",
"output_channel",
"# Check for process dependencies",
"if",
"out_process",
".",
"dependencies",
"and",
"not",
"ignore_dependencies",
":",
"logger",
".",
"debug",
"(",
"\"[{}] Dependencies found for process '{}': \"",
"\"{}\"",
".",
"format",
"(",
"p",
",",
"p_out_name",
",",
"out_process",
".",
"dependencies",
")",
")",
"parent_lanes",
"=",
"self",
".",
"_get_fork_tree",
"(",
"out_lane",
")",
"for",
"dep",
"in",
"out_process",
".",
"dependencies",
":",
"if",
"not",
"self",
".",
"_search_tree_backwards",
"(",
"dep",
",",
"parent_lanes",
")",
":",
"if",
"auto_dependency",
":",
"self",
".",
"_add_dependency",
"(",
"out_process",
",",
"dep",
",",
"in_lane",
",",
"out_lane",
",",
"p",
")",
"elif",
"not",
"self",
".",
"export_parameters",
":",
"logger",
".",
"error",
"(",
"colored_print",
"(",
"\"\\nThe following dependency of the process\"",
"\" '{}' is missing: {}\"",
".",
"format",
"(",
"p_out_name",
",",
"dep",
")",
",",
"\"red_bold\"",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"self",
".",
"processes",
".",
"append",
"(",
"out_process",
")",
"logger",
".",
"debug",
"(",
"\"Completed connections: {}\"",
".",
"format",
"(",
"self",
".",
"processes",
")",
")",
"logger",
".",
"debug",
"(",
"\"Fork tree: {}\"",
".",
"format",
"(",
"self",
".",
"_fork_tree",
")",
")"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|
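The tokenized code above is the tail of the connection-building loop: for each connection it tracks the total number of lanes and, when the input and output lanes differ, records the fork in a per-lane tree. Below is a minimal standalone sketch of that bookkeeping only, not the flowcraft implementation; the connection dicts are illustrative.

from collections import defaultdict

# Hypothetical connection list; real connections come from flowcraft's
# pipeline string parser and also carry "process" entries.
connections = [
    {"input": {"lane": 1}, "output": {"lane": 1}},  # same lane: no fork
    {"input": {"lane": 1}, "output": {"lane": 2}},  # fork: lane 1 -> lane 2
    {"input": {"lane": 1}, "output": {"lane": 3}},  # fork: lane 1 -> lane 3
]

fork_tree = defaultdict(list)
lanes = 0
for con in connections:
    in_lane, out_lane = con["input"]["lane"], con["output"]["lane"]
    lanes = max(lanes, out_lane)        # update the total number of lanes
    if in_lane != out_lane:             # connection crosses lanes: it is a fork
        fork_tree[in_lane].append(out_lane)

print(dict(fork_tree))  # {1: [2, 3]}
print(lanes)            # 3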
test
|
NextflowGenerator._get_process_names
|
Returns the input/output process names and output process directives
Parameters
----------
con : dict
Dictionary with the connection information between two processes.
pid : int
Index of the connection being processed; used only in log messages.
Returns
-------
input_name : str
Name of the input process
output_name : str
Name of the output process
output_directives : dict
Parsed directives from the output process
|
flowcraft/generator/engine.py
|
def _get_process_names(self, con, pid):
"""Returns the input/output process names and output process directives
Parameters
----------
con : dict
Dictionary with the connection information between two processes.
pid : int
Index of the connection being processed; used only in log messages.
Returns
-------
input_name : str
Name of the input process
output_name : str
Name of the output process
output_directives : dict
Parsed directives from the output process
"""
try:
_p_in_name = con["input"]["process"]
p_in_name, _ = self._parse_process_name(_p_in_name)
logger.debug("[{}] Input channel: {}".format(pid, p_in_name))
_p_out_name = con["output"]["process"]
p_out_name, out_directives = self._parse_process_name(
_p_out_name)
logger.debug("[{}] Output channel: {}".format(pid, p_out_name))
# Exception is triggered when the process name/directives cannot
# be processed.
except eh.ProcessError as ex:
logger.error(colored_print(ex.value, "red_bold"))
sys.exit(1)
return p_in_name, p_out_name, out_directives
|
def _get_process_names(self, con, pid):
"""Returns the input/output process names and output process directives
Parameters
----------
con : dict
Dictionary with the connection information between two processes.
pid : int
Index of the connection being processed; used only in log messages.
Returns
-------
input_name : str
Name of the input process
output_name : str
Name of the output process
output_directives : dict
Parsed directives from the output process
"""
try:
_p_in_name = con["input"]["process"]
p_in_name, _ = self._parse_process_name(_p_in_name)
logger.debug("[{}] Input channel: {}".format(pid, p_in_name))
_p_out_name = con["output"]["process"]
p_out_name, out_directives = self._parse_process_name(
_p_out_name)
logger.debug("[{}] Output channel: {}".format(pid, p_out_name))
# Exception is triggered when the process name/directives cannot
# be processed.
except eh.ProcessError as ex:
logger.error(colored_print(ex.value, "red_bold"))
sys.exit(1)
return p_in_name, p_out_name, out_directives
|
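For reference, a minimal sketch of the connection dictionary this method consumes and of its return value, based only on the field accesses visible above (con["input"]["process"], con["output"]["process"]); the process names, the inline-directive syntax, and the standalone parsing shown here are illustrative assumptions, not the flowcraft API itself.

import ast

# Hypothetical connection entry as produced by the pipeline parser.
con = {
    "input": {"process": "__init__", "lane": 1},
    "output": {"process": "integrity_coverage={'cpus':'4'}", "lane": 1},
}

# _get_process_names() reads both process strings, splits any inline
# directives off the output name, and returns
# (p_in_name, p_out_name, out_directives), e.g.:
#   ("__init__", "integrity_coverage", {"cpus": "4"})
# A simplified stand-in for that split (the real parsing lives in
# NextflowGenerator._parse_process_name):
raw = con["output"]["process"]
name, _, directives_str = raw.partition("=")
out_directives = ast.literal_eval(directives_str) if directives_str else {}
print(con["input"]["process"], name, out_directives)
# __init__ integrity_coverage {'cpus': '4'}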
[
"Returns",
"the",
"input",
"/",
"output",
"process",
"names",
"and",
"output",
"process",
"directives"
] |
assemblerflow/flowcraft
|
python
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/engine.py#L374-L406
|
[
"def",
"_get_process_names",
"(",
"self",
",",
"con",
",",
"pid",
")",
":",
"try",
":",
"_p_in_name",
"=",
"con",
"[",
"\"input\"",
"]",
"[",
"\"process\"",
"]",
"p_in_name",
",",
"_",
"=",
"self",
".",
"_parse_process_name",
"(",
"_p_in_name",
")",
"logger",
".",
"debug",
"(",
"\"[{}] Input channel: {}\"",
".",
"format",
"(",
"pid",
",",
"p_in_name",
")",
")",
"_p_out_name",
"=",
"con",
"[",
"\"output\"",
"]",
"[",
"\"process\"",
"]",
"p_out_name",
",",
"out_directives",
"=",
"self",
".",
"_parse_process_name",
"(",
"_p_out_name",
")",
"logger",
".",
"debug",
"(",
"\"[{}] Output channel: {}\"",
".",
"format",
"(",
"pid",
",",
"p_out_name",
")",
")",
"# Exception is triggered when the process name/directives cannot",
"# be processes.",
"except",
"eh",
".",
"ProcessError",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"colored_print",
"(",
"ex",
".",
"value",
",",
"\"red_bold\"",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"return",
"p_in_name",
",",
"p_out_name",
",",
"out_directives"
] |
fc3f4bddded1efc76006600016dc71a06dd908c0
|