| partition (string; 3 classes) | func_name (string; 1–134 chars) | docstring (string; 1–46.9k chars) | path (string; 4–223 chars) | original_string (string; 75–104k chars) | code (string; 75–104k chars) | docstring_tokens (list; 1–1.97k items) | repo (string; 7–55 chars) | language (string; 1 class) | url (string; 87–315 chars) | code_tokens (list; 19–28.4k items) | sha (string; 40 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
get_config_path
|
Determine the path to the config file. This will return, in this order of
precedence:
- the value of $BUGWARRIORRC if set
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorrc if exists
- ~/.bugwarriorrc if exists
- <dir>/bugwarrior/bugwarriorrc if exists, for dir in $XDG_CONFIG_DIRS
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorrc otherwise
|
bugwarrior/config.py
|
def get_config_path():
"""
Determine the path to the config file. This will return, in this order of
precedence:
- the value of $BUGWARRIORRC if set
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc if exists
- ~/.bugwarriorrc if exists
- <dir>/bugwarrior/bugwarriorc if exists, for dir in $XDG_CONFIG_DIRS
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc otherwise
"""
if os.environ.get(BUGWARRIORRC):
return os.environ[BUGWARRIORRC]
xdg_config_home = (
os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'))
xdg_config_dirs = (
(os.environ.get('XDG_CONFIG_DIRS') or '/etc/xdg').split(':'))
paths = [
os.path.join(xdg_config_home, 'bugwarrior', 'bugwarriorrc'),
os.path.expanduser("~/.bugwarriorrc")]
paths += [
os.path.join(d, 'bugwarrior', 'bugwarriorrc') for d in xdg_config_dirs]
for path in paths:
if os.path.exists(path):
return path
return paths[0]
|
def get_config_path():
"""
Determine the path to the config file. This will return, in this order of
precedence:
- the value of $BUGWARRIORRC if set
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc if exists
- ~/.bugwarriorrc if exists
- <dir>/bugwarrior/bugwarriorc if exists, for dir in $XDG_CONFIG_DIRS
- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc otherwise
"""
if os.environ.get(BUGWARRIORRC):
return os.environ[BUGWARRIORRC]
xdg_config_home = (
os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'))
xdg_config_dirs = (
(os.environ.get('XDG_CONFIG_DIRS') or '/etc/xdg').split(':'))
paths = [
os.path.join(xdg_config_home, 'bugwarrior', 'bugwarriorrc'),
os.path.expanduser("~/.bugwarriorrc")]
paths += [
os.path.join(d, 'bugwarrior', 'bugwarriorrc') for d in xdg_config_dirs]
for path in paths:
if os.path.exists(path):
return path
return paths[0]
|
[
"Determine",
"the",
"path",
"to",
"the",
"config",
"file",
".",
"This",
"will",
"return",
"in",
"this",
"order",
"of",
"precedence",
":",
"-",
"the",
"value",
"of",
"$BUGWARRIORRC",
"if",
"set",
"-",
"$XDG_CONFIG_HOME",
"/",
"bugwarrior",
"/",
"bugwarriorc",
"if",
"exists",
"-",
"~",
"/",
".",
"bugwarriorrc",
"if",
"exists",
"-",
"<dir",
">",
"/",
"bugwarrior",
"/",
"bugwarriorc",
"if",
"exists",
"for",
"dir",
"in",
"$XDG_CONFIG_DIRS",
"-",
"$XDG_CONFIG_HOME",
"/",
"bugwarrior",
"/",
"bugwarriorc",
"otherwise"
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/config.py#L186-L210
|
[
"def",
"get_config_path",
"(",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"BUGWARRIORRC",
")",
":",
"return",
"os",
".",
"environ",
"[",
"BUGWARRIORRC",
"]",
"xdg_config_home",
"=",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'XDG_CONFIG_HOME'",
")",
"or",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.config'",
")",
")",
"xdg_config_dirs",
"=",
"(",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'XDG_CONFIG_DIRS'",
")",
"or",
"'/etc/xdg'",
")",
".",
"split",
"(",
"':'",
")",
")",
"paths",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"xdg_config_home",
",",
"'bugwarrior'",
",",
"'bugwarriorrc'",
")",
",",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.bugwarriorrc\"",
")",
"]",
"paths",
"+=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"d",
",",
"'bugwarrior'",
",",
"'bugwarriorrc'",
")",
"for",
"d",
"in",
"xdg_config_dirs",
"]",
"for",
"path",
"in",
"paths",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"path",
"return",
"paths",
"[",
"0",
"]"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
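
The precedence rules above can be exercised outside bugwarrior. The following is a minimal, self-contained sketch that inlines the same logic; the real module reads the variable name from a module-level `BUGWARRIORRC` constant, and the `/tmp` path below is purely illustrative.

```python
# Sketch only: inlines get_config_path()'s precedence rules so the lookup
# can be tried without bugwarrior installed.
import os

def sketch_get_config_path():
    if os.environ.get('BUGWARRIORRC'):
        return os.environ['BUGWARRIORRC']
    xdg_config_home = (
        os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'))
    xdg_config_dirs = (
        os.environ.get('XDG_CONFIG_DIRS') or '/etc/xdg').split(':')
    paths = [
        os.path.join(xdg_config_home, 'bugwarrior', 'bugwarriorrc'),
        os.path.expanduser('~/.bugwarriorrc')]
    paths += [
        os.path.join(d, 'bugwarrior', 'bugwarriorrc') for d in xdg_config_dirs]
    for path in paths:
        if os.path.exists(path):  # first existing candidate wins
            return path
    return paths[0]  # fall back to the XDG_CONFIG_HOME location

os.environ['BUGWARRIORRC'] = '/tmp/my-bugwarriorrc'  # highest precedence
assert sketch_get_config_path() == '/tmp/my-bugwarriorrc'
```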
test
|
fix_logging_path
|
Expand environment variables and user home (~) in the log.file and return
as relative path.
|
bugwarrior/config.py
|
def fix_logging_path(config, main_section):
"""
Expand environment variables and user home (~) in the log.file and return
as relative path.
"""
log_file = config.get(main_section, 'log.file')
if log_file:
log_file = os.path.expanduser(os.path.expandvars(log_file))
if os.path.isabs(log_file):
log_file = os.path.relpath(log_file)
return log_file
|
def fix_logging_path(config, main_section):
"""
Expand environment variables and user home (~) in the log.file and return
as relative path.
"""
log_file = config.get(main_section, 'log.file')
if log_file:
log_file = os.path.expanduser(os.path.expandvars(log_file))
if os.path.isabs(log_file):
log_file = os.path.relpath(log_file)
return log_file
|
[
"Expand",
"environment",
"variables",
"and",
"user",
"home",
"(",
"~",
")",
"in",
"the",
"log",
".",
"file",
"and",
"return",
"as",
"relative",
"path",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/config.py#L213-L223
|
[
"def",
"fix_logging_path",
"(",
"config",
",",
"main_section",
")",
":",
"log_file",
"=",
"config",
".",
"get",
"(",
"main_section",
",",
"'log.file'",
")",
"if",
"log_file",
":",
"log_file",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"expandvars",
"(",
"log_file",
")",
")",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"log_file",
")",
":",
"log_file",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"log_file",
")",
"return",
"log_file"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
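
The two expansion steps and the relativization can be seen in isolation. A small demo without the ConfigParser plumbing; the paths and the `LOGDIR` variable are illustrative.

```python
# Demo of the expansion + relativization steps from fix_logging_path().
import os

os.environ['LOGDIR'] = '/var/log'          # illustrative variable
log_file = '$LOGDIR/bugwarrior.log'
log_file = os.path.expanduser(os.path.expandvars(log_file))
print(log_file)  # -> /var/log/bugwarrior.log
if os.path.isabs(log_file):
    # Absolute paths are rewritten relative to the current directory.
    log_file = os.path.relpath(log_file)
print(log_file)  # e.g. ../../var/log/bugwarrior.log (cwd-dependent)
```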
test
|
BugwarriorConfigParser.getint
|
Accepts both integers and empty values.
|
bugwarrior/config.py
|
def getint(self, section, option):
""" Accepts both integers and empty values. """
try:
return super(BugwarriorConfigParser, self).getint(section, option)
except ValueError:
if self.get(section, option) == u'':
return None
else:
raise ValueError(
"{section}.{option} must be an integer or empty.".format(
section=section, option=option))
|
def getint(self, section, option):
""" Accepts both integers and empty values. """
try:
return super(BugwarriorConfigParser, self).getint(section, option)
except ValueError:
if self.get(section, option) == u'':
return None
else:
raise ValueError(
"{section}.{option} must be an integer or empty.".format(
section=section, option=option))
|
[
"Accepts",
"both",
"integers",
"and",
"empty",
"values",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/config.py#L272-L282
|
[
"def",
"getint",
"(",
"self",
",",
"section",
",",
"option",
")",
":",
"try",
":",
"return",
"super",
"(",
"BugwarriorConfigParser",
",",
"self",
")",
".",
"getint",
"(",
"section",
",",
"option",
")",
"except",
"ValueError",
":",
"if",
"self",
".",
"get",
"(",
"section",
",",
"option",
")",
"==",
"u''",
":",
"return",
"None",
"else",
":",
"raise",
"ValueError",
"(",
"\"{section}.{option} must be an integer or empty.\"",
".",
"format",
"(",
"section",
"=",
"section",
",",
"option",
"=",
"option",
")",
")"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
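
A self-contained sketch of the same "integer or empty" behaviour, written against Python 3's `configparser` rather than bugwarrior's own parser class (whose base may differ):

```python
# Sketch assuming a plain configparser.ConfigParser base.
import configparser

class LenientConfigParser(configparser.ConfigParser):
    def getint(self, section, option):
        try:
            return super().getint(section, option)
        except ValueError:
            if self.get(section, option) == '':
                return None  # empty value -> None instead of an error
            raise ValueError(
                "{section}.{option} must be an integer or empty.".format(
                    section=section, option=option))

config = LenientConfigParser()
config.read_string("[general]\nlimit = 10\nempty =\n")
assert config.getint('general', 'limit') == 10
assert config.getint('general', 'empty') is None
```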
test
|
_get_bug_attr
|
Default longdescs/flags case to [] since they may not be present.
|
bugwarrior/services/bz.py
|
def _get_bug_attr(bug, attr):
"""Default longdescs/flags case to [] since they may not be present."""
if attr in ("longdescs", "flags"):
return getattr(bug, attr, [])
return getattr(bug, attr)
|
def _get_bug_attr(bug, attr):
"""Default longdescs/flags case to [] since they may not be present."""
if attr in ("longdescs", "flags"):
return getattr(bug, attr, [])
return getattr(bug, attr)
|
[
"Default",
"longdescs",
"/",
"flags",
"case",
"to",
"[]",
"since",
"they",
"may",
"not",
"be",
"present",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/services/bz.py#L286-L290
|
[
"def",
"_get_bug_attr",
"(",
"bug",
",",
"attr",
")",
":",
"if",
"attr",
"in",
"(",
"\"longdescs\"",
",",
"\"flags\"",
")",
":",
"return",
"getattr",
"(",
"bug",
",",
"attr",
",",
"[",
"]",
")",
"return",
"getattr",
"(",
"bug",
",",
"attr",
")"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
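
A tiny usage demo with a stand-in "bug" object (the real callers pass Bugzilla bug objects); any attribute other than `longdescs`/`flags` still raises `AttributeError` when missing.

```python
from types import SimpleNamespace

def _get_bug_attr(bug, attr):
    """Default longdescs/flags case to [] since they may not be present."""
    if attr in ("longdescs", "flags"):
        return getattr(bug, attr, [])
    return getattr(bug, attr)

bug = SimpleNamespace(summary="crash on start")
assert _get_bug_attr(bug, "flags") == []  # missing -> defaults to []
assert _get_bug_attr(bug, "summary") == "crash on start"
```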
test
|
pull
|
Pull down tasks from forges and add them to your taskwarrior tasks.
Relies on configuration in bugwarriorrc
|
bugwarrior/command.py
|
def pull(dry_run, flavor, interactive, debug):
""" Pull down tasks from forges and add them to your taskwarrior tasks.
Relies on configuration in bugwarriorrc
"""
try:
main_section = _get_section_name(flavor)
config = _try_load_config(main_section, interactive)
lockfile_path = os.path.join(get_data_path(config, main_section),
'bugwarrior.lockfile')
lockfile = PIDLockFile(lockfile_path)
lockfile.acquire(timeout=10)
try:
# Get all the issues. This can take a while.
issue_generator = aggregate_issues(config, main_section, debug)
# Stuff them in the taskwarrior db as necessary
synchronize(issue_generator, config, main_section, dry_run)
finally:
lockfile.release()
except LockTimeout:
log.critical(
'Your taskrc repository is currently locked. '
'Remove the file at %s if you are sure no other '
'bugwarrior processes are currently running.' % (
lockfile_path
)
)
except RuntimeError as e:
log.exception("Aborted (%s)" % e)
|
def pull(dry_run, flavor, interactive, debug):
""" Pull down tasks from forges and add them to your taskwarrior tasks.
Relies on configuration in bugwarriorrc
"""
try:
main_section = _get_section_name(flavor)
config = _try_load_config(main_section, interactive)
lockfile_path = os.path.join(get_data_path(config, main_section),
'bugwarrior.lockfile')
lockfile = PIDLockFile(lockfile_path)
lockfile.acquire(timeout=10)
try:
# Get all the issues. This can take a while.
issue_generator = aggregate_issues(config, main_section, debug)
# Stuff them in the taskwarrior db as necessary
synchronize(issue_generator, config, main_section, dry_run)
finally:
lockfile.release()
except LockTimeout:
log.critical(
'Your taskrc repository is currently locked. '
'Remove the file at %s if you are sure no other '
'bugwarrior processes are currently running.' % (
lockfile_path
)
)
except RuntimeError as e:
log.exception("Aborted (%s)" % e)
|
[
"Pull",
"down",
"tasks",
"from",
"forges",
"and",
"add",
"them",
"to",
"your",
"taskwarrior",
"tasks",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/command.py#L54-L85
|
[
"def",
"pull",
"(",
"dry_run",
",",
"flavor",
",",
"interactive",
",",
"debug",
")",
":",
"try",
":",
"main_section",
"=",
"_get_section_name",
"(",
"flavor",
")",
"config",
"=",
"_try_load_config",
"(",
"main_section",
",",
"interactive",
")",
"lockfile_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_data_path",
"(",
"config",
",",
"main_section",
")",
",",
"'bugwarrior.lockfile'",
")",
"lockfile",
"=",
"PIDLockFile",
"(",
"lockfile_path",
")",
"lockfile",
".",
"acquire",
"(",
"timeout",
"=",
"10",
")",
"try",
":",
"# Get all the issues. This can take a while.",
"issue_generator",
"=",
"aggregate_issues",
"(",
"config",
",",
"main_section",
",",
"debug",
")",
"# Stuff them in the taskwarrior db as necessary",
"synchronize",
"(",
"issue_generator",
",",
"config",
",",
"main_section",
",",
"dry_run",
")",
"finally",
":",
"lockfile",
".",
"release",
"(",
")",
"except",
"LockTimeout",
":",
"log",
".",
"critical",
"(",
"'Your taskrc repository is currently locked. '",
"'Remove the file at %s if you are sure no other '",
"'bugwarrior processes are currently running.'",
"%",
"(",
"lockfile_path",
")",
")",
"except",
"RuntimeError",
"as",
"e",
":",
"log",
".",
"exception",
"(",
"\"Aborted (%s)\"",
"%",
"e",
")"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
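
The locking pattern can be sketched on its own, assuming the `lockfile` package (which provides `PIDLockFile` and `LockTimeout`) is installed; the `/tmp` path below is hypothetical, not bugwarrior's real data path.

```python
# Sketch of the acquire/release-with-timeout pattern used by pull().
import os
from lockfile import LockTimeout
from lockfile.pidlockfile import PIDLockFile

lockfile_path = os.path.join('/tmp', 'bugwarrior.lockfile')  # hypothetical
lock = PIDLockFile(lockfile_path)
try:
    lock.acquire(timeout=10)
    try:
        pass  # long-running sync work would happen here
    finally:
        lock.release()  # always release, even if the sync fails
except LockTimeout:
    print('Lock at %s is held by another process.' % lockfile_path)
```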
test
|
BitbucketService.get_data
|
Perform a request to the fully qualified url and return json.
|
bugwarrior/services/bitbucket.py
|
def get_data(self, url):
""" Perform a request to the fully qualified url and return json. """
return self.json_response(requests.get(url, **self.requests_kwargs))
|
def get_data(self, url):
""" Perform a request to the fully qualified url and return json. """
return self.json_response(requests.get(url, **self.requests_kwargs))
|
[
"Perform",
"a",
"request",
"to",
"the",
"fully",
"qualified",
"url",
"and",
"return",
"json",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/services/bitbucket.py#L142-L144
|
[
"def",
"get_data",
"(",
"self",
",",
"url",
")",
":",
"return",
"self",
".",
"json_response",
"(",
"requests",
".",
"get",
"(",
"url",
",",
"*",
"*",
"self",
".",
"requests_kwargs",
")",
")"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
test
|
BitbucketService.get_collection
|
Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection.
|
bugwarrior/services/bitbucket.py
|
def get_collection(self, url):
""" Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection. """
url = self.BASE_API2 + url
while url is not None:
response = self.get_data(url)
for value in response['values']:
yield value
url = response.get('next', None)
|
def get_collection(self, url):
""" Pages through an object collection from the bitbucket API.
Returns an iterator that lazily goes through all the 'values'
of all the pages in the collection. """
url = self.BASE_API2 + url
while url is not None:
response = self.get_data(url)
for value in response['values']:
yield value
url = response.get('next', None)
|
[
"Pages",
"through",
"an",
"object",
"collection",
"from",
"the",
"bitbucket",
"API",
".",
"Returns",
"an",
"iterator",
"that",
"lazily",
"goes",
"through",
"all",
"the",
"values",
"of",
"all",
"the",
"pages",
"in",
"the",
"collection",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/services/bitbucket.py#L146-L155
|
[
"def",
"get_collection",
"(",
"self",
",",
"url",
")",
":",
"url",
"=",
"self",
".",
"BASE_API2",
"+",
"url",
"while",
"url",
"is",
"not",
"None",
":",
"response",
"=",
"self",
".",
"get_data",
"(",
"url",
")",
"for",
"value",
"in",
"response",
"[",
"'values'",
"]",
":",
"yield",
"value",
"url",
"=",
"response",
".",
"get",
"(",
"'next'",
",",
"None",
")"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
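
A self-contained sketch of the lazy pagination loop, with an in-memory stand-in for the Bitbucket API (no network; the URLs and fetcher are made up):

```python
FAKE_PAGES = {
    '/issues?page=1': {'values': [1, 2], 'next': '/issues?page=2'},
    '/issues?page=2': {'values': [3]},  # no 'next' key -> last page
}

def get_collection(url, fetch):
    while url is not None:
        response = fetch(url)
        for value in response['values']:
            yield value  # values are produced lazily, page by page
        url = response.get('next', None)

assert list(get_collection('/issues?page=1', FAKE_PAGES.__getitem__)) == [1, 2, 3]
```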
test
|
hamdist
|
Count the # of differences between equal length strings str1 and str2
|
bugwarrior/db.py
|
def hamdist(str1, str2):
"""Count the # of differences between equal length strings str1 and str2"""
diffs = 0
for ch1, ch2 in zip(str1, str2):
if ch1 != ch2:
diffs += 1
return diffs
|
def hamdist(str1, str2):
"""Count the # of differences between equal length strings str1 and str2"""
diffs = 0
for ch1, ch2 in zip(str1, str2):
if ch1 != ch2:
diffs += 1
return diffs
|
[
"Count",
"the",
"#",
"of",
"differences",
"between",
"equal",
"length",
"strings",
"str1",
"and",
"str2"
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/db.py#L91-L97
|
[
"def",
"hamdist",
"(",
"str1",
",",
"str2",
")",
":",
"diffs",
"=",
"0",
"for",
"ch1",
",",
"ch2",
"in",
"zip",
"(",
"str1",
",",
"str2",
")",
":",
"if",
"ch1",
"!=",
"ch2",
":",
"diffs",
"+=",
"1",
"return",
"diffs"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
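
Usage demo. Note that `zip()` truncates to the shorter input, so unequal-length strings silently ignore the tail; callers are expected to pass equal lengths.

```python
def hamdist(str1, str2):
    diffs = 0
    for ch1, ch2 in zip(str1, str2):
        if ch1 != ch2:
            diffs += 1
    return diffs

assert hamdist("karolin", "kathrin") == 3
assert hamdist("abc", "abc") == 0
assert hamdist("abc", "abcdef") == 0  # the truncation pitfall
```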
test
|
find_local_uuid
|
For a given issue, find its local UUID.
Assembles a list of task IDs existing in taskwarrior
matching the supplied issue (`issue`) on the combination of any
set of supplied unique identifiers (`keys`) or, optionally,
the task's description field (should `legacy_matching` be `True`).
:params:
* `tw`: An instance of `taskw.TaskWarriorShellout`
* `keys`: A list of lists of keys to use for uniquely identifying
an issue. To clarify the "list of lists" behavior, assume that
there are two services, one having a single primary key field
-- 'serviceAid' -- and another having a pair of fields composing
its primary key -- 'serviceBproject' and 'serviceBnumber' --, the
incoming data for this field would be::
[
['serviceAid'],
['serviceBproject', 'serviceBnumber'],
]
* `issue`: An instance of a subclass of `bugwarrior.services.Issue`.
* `legacy_matching`: By default, this is disabled, and it allows
the matching algorithm to -- in addition to searching by stored
issue keys -- search using the task's description for a match.
It is prone to error and should be avoided if possible.
:returns:
* A single string UUID.
:raises:
* `bugwarrior.db.MultipleMatches`: if multiple matches were found.
* `bugwarrior.db.NotFound`: if an issue was not found.
|
bugwarrior/db.py
|
def find_local_uuid(tw, keys, issue, legacy_matching=False):
""" For a given issue issue, find its local UUID.
Assembles a list of task IDs existing in taskwarrior
matching the supplied issue (`issue`) on the combination of any
set of supplied unique identifiers (`keys`) or, optionally,
the task's description field (should `legacy_matching` be `True`).
:params:
* `tw`: An instance of `taskw.TaskWarriorShellout`
* `keys`: A list of lists of keys to use for uniquely identifying
an issue. To clarify the "list of lists" behavior, assume that
there are two services, one having a single primary key field
-- 'serviceAid' -- and another having a pair of fields composing
its primary key -- 'serviceBproject' and 'serviceBnumber' --, the
incoming data for this field would be::
[
['serviceAid'],
['serviceBproject', 'serviceBnumber'],
]
* `issue`: An instance of a subclass of `bugwarrior.services.Issue`.
* `legacy_matching`: By default, this is disabled, and it allows
the matching algorithm to -- in addition to searching by stored
issue keys -- search using the task's description for a match.
It is prone to error and should avoided if possible.
:returns:
* A single string UUID.
:raises:
* `bugwarrior.db.MultipleMatches`: if multiple matches were found.
* `bugwarrior.db.NotFound`: if an issue was not found.
"""
if not issue['description']:
raise ValueError('Issue %s has no description.' % issue)
possibilities = set([])
if legacy_matching:
legacy_description = issue.get_default_description().rsplit('..', 1)[0]
# Furthermore, we have to kill off any single quotes which break in
# task-2.4.x, as much as it saddens me.
legacy_description = legacy_description.split("'")[0]
results = tw.filter_tasks({
'description.startswith': legacy_description,
'or': [
('status', 'pending'),
('status', 'waiting'),
],
})
possibilities = possibilities | set([
task['uuid'] for task in results
])
for service, key_list in six.iteritems(keys):
if any([key in issue for key in key_list]):
results = tw.filter_tasks({
'and': [("%s.is" % key, issue[key]) for key in key_list],
'or': [
('status', 'pending'),
('status', 'waiting'),
],
})
possibilities = possibilities | set([
task['uuid'] for task in results
])
if len(possibilities) == 1:
return possibilities.pop()
if len(possibilities) > 1:
raise MultipleMatches(
"Issue %s matched multiple IDs: %s" % (
issue['description'],
possibilities
)
)
raise NotFound(
"No issue was found matching %s" % issue
)
|
def find_local_uuid(tw, keys, issue, legacy_matching=False):
""" For a given issue issue, find its local UUID.
Assembles a list of task IDs existing in taskwarrior
matching the supplied issue (`issue`) on the combination of any
set of supplied unique identifiers (`keys`) or, optionally,
the task's description field (should `legacy_matching` be `True`).
:params:
* `tw`: An instance of `taskw.TaskWarriorShellout`
* `keys`: A list of lists of keys to use for uniquely identifying
an issue. To clarify the "list of lists" behavior, assume that
there are two services, one having a single primary key field
-- 'serviceAid' -- and another having a pair of fields composing
its primary key -- 'serviceBproject' and 'serviceBnumber' --, the
incoming data for this field would be::
[
['serviceAid'],
['serviceBproject', 'serviceBnumber'],
]
* `issue`: An instance of a subclass of `bugwarrior.services.Issue`.
* `legacy_matching`: By default, this is disabled, and it allows
the matching algorithm to -- in addition to searching by stored
issue keys -- search using the task's description for a match.
It is prone to error and should avoided if possible.
:returns:
* A single string UUID.
:raises:
* `bugwarrior.db.MultipleMatches`: if multiple matches were found.
* `bugwarrior.db.NotFound`: if an issue was not found.
"""
if not issue['description']:
raise ValueError('Issue %s has no description.' % issue)
possibilities = set([])
if legacy_matching:
legacy_description = issue.get_default_description().rsplit('..', 1)[0]
# Furthermore, we have to kill off any single quotes which break in
# task-2.4.x, as much as it saddens me.
legacy_description = legacy_description.split("'")[0]
results = tw.filter_tasks({
'description.startswith': legacy_description,
'or': [
('status', 'pending'),
('status', 'waiting'),
],
})
possibilities = possibilities | set([
task['uuid'] for task in results
])
for service, key_list in six.iteritems(keys):
if any([key in issue for key in key_list]):
results = tw.filter_tasks({
'and': [("%s.is" % key, issue[key]) for key in key_list],
'or': [
('status', 'pending'),
('status', 'waiting'),
],
})
possibilities = possibilities | set([
task['uuid'] for task in results
])
if len(possibilities) == 1:
return possibilities.pop()
if len(possibilities) > 1:
raise MultipleMatches(
"Issue %s matched multiple IDs: %s" % (
issue['description'],
possibilities
)
)
raise NotFound(
"No issue was found matching %s" % issue
)
|
[
"For",
"a",
"given",
"issue",
"issue",
"find",
"its",
"local",
"UUID",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/db.py#L129-L212
|
[
"def",
"find_local_uuid",
"(",
"tw",
",",
"keys",
",",
"issue",
",",
"legacy_matching",
"=",
"False",
")",
":",
"if",
"not",
"issue",
"[",
"'description'",
"]",
":",
"raise",
"ValueError",
"(",
"'Issue %s has no description.'",
"%",
"issue",
")",
"possibilities",
"=",
"set",
"(",
"[",
"]",
")",
"if",
"legacy_matching",
":",
"legacy_description",
"=",
"issue",
".",
"get_default_description",
"(",
")",
".",
"rsplit",
"(",
"'..'",
",",
"1",
")",
"[",
"0",
"]",
"# Furthermore, we have to kill off any single quotes which break in",
"# task-2.4.x, as much as it saddens me.",
"legacy_description",
"=",
"legacy_description",
".",
"split",
"(",
"\"'\"",
")",
"[",
"0",
"]",
"results",
"=",
"tw",
".",
"filter_tasks",
"(",
"{",
"'description.startswith'",
":",
"legacy_description",
",",
"'or'",
":",
"[",
"(",
"'status'",
",",
"'pending'",
")",
",",
"(",
"'status'",
",",
"'waiting'",
")",
",",
"]",
",",
"}",
")",
"possibilities",
"=",
"possibilities",
"|",
"set",
"(",
"[",
"task",
"[",
"'uuid'",
"]",
"for",
"task",
"in",
"results",
"]",
")",
"for",
"service",
",",
"key_list",
"in",
"six",
".",
"iteritems",
"(",
"keys",
")",
":",
"if",
"any",
"(",
"[",
"key",
"in",
"issue",
"for",
"key",
"in",
"key_list",
"]",
")",
":",
"results",
"=",
"tw",
".",
"filter_tasks",
"(",
"{",
"'and'",
":",
"[",
"(",
"\"%s.is\"",
"%",
"key",
",",
"issue",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"key_list",
"]",
",",
"'or'",
":",
"[",
"(",
"'status'",
",",
"'pending'",
")",
",",
"(",
"'status'",
",",
"'waiting'",
")",
",",
"]",
",",
"}",
")",
"possibilities",
"=",
"possibilities",
"|",
"set",
"(",
"[",
"task",
"[",
"'uuid'",
"]",
"for",
"task",
"in",
"results",
"]",
")",
"if",
"len",
"(",
"possibilities",
")",
"==",
"1",
":",
"return",
"possibilities",
".",
"pop",
"(",
")",
"if",
"len",
"(",
"possibilities",
")",
">",
"1",
":",
"raise",
"MultipleMatches",
"(",
"\"Issue %s matched multiple IDs: %s\"",
"%",
"(",
"issue",
"[",
"'description'",
"]",
",",
"possibilities",
")",
")",
"raise",
"NotFound",
"(",
"\"No issue was found matching %s\"",
"%",
"issue",
")"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
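
A reduced, in-memory illustration of the key-based matching (real lookups go through `tw.filter_tasks`). The tasks, keys, and issue below are made up; note that the code iterates `keys` as a dict of service name to key list, despite the docstring's "list of lists" description.

```python
tasks = [
    {'uuid': 'u-1', 'status': 'pending', 'serviceAid': '42'},
    {'uuid': 'u-2', 'status': 'pending',
     'serviceBproject': 'proj', 'serviceBnumber': '7'},
]
keys = {
    'serviceA': ['serviceAid'],
    'serviceB': ['serviceBproject', 'serviceBnumber'],
}
issue = {'serviceBproject': 'proj', 'serviceBnumber': '7'}

possibilities = set()
for service, key_list in keys.items():
    if any(key in issue for key in key_list):  # are this service's keys present?
        possibilities |= {
            t['uuid'] for t in tasks
            if t.get('status') in ('pending', 'waiting')
            and all(t.get(k) == issue[k] for k in key_list)
        }
assert possibilities == {'u-2'}  # exactly one match -> that UUID wins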
test
|
merge_left
|
Merge array field from the remote_issue into local_task
* Local 'left' entries are preserved without modification
* Remote entries are appended to the task if not present in local.
:param `field`: Task field to merge.
:param `local_task`: `taskw.task.Task` object into which to merge
remote changes.
:param `remote_issue`: `dict` instance from which to merge into
local task.
:param `hamming`: (default `False`) If `True`, compare entries by
truncating to maximum length, and comparing hamming distances.
Useful generally only for annotations.
|
bugwarrior/db.py
|
def merge_left(field, local_task, remote_issue, hamming=False):
""" Merge array field from the remote_issue into local_task
* Local 'left' entries are preserved without modification
* Remote 'left' are appended to task if not present in local.
:param `field`: Task field to merge.
:param `local_task`: `taskw.task.Task` object into which to merge
remote changes.
:param `remote_issue`: `dict` instance from which to merge into
local task.
:param `hamming`: (default `False`) If `True`, compare entries by
truncating to maximum length, and comparing hamming distances.
Useful generally only for annotations.
"""
# Ensure that empty defaults are present
local_field = local_task.get(field, [])
remote_field = remote_issue.get(field, [])
# We need to make sure an array exists for this field because
# we will be appending to it in a moment.
if field not in local_task:
local_task[field] = []
# If a remote does not appear in local, add it to the local task
new_count = 0
for remote in remote_field:
for local in local_field:
if (
# For annotations, they don't have to match *exactly*.
(
hamming
and get_annotation_hamming_distance(remote, local) == 0
)
# But for everything else, they should.
or (
remote == local
)
):
break
else:
log.debug("%s not found in %r" % (remote, local_field))
local_task[field].append(remote)
new_count += 1
if new_count > 0:
log.debug('Added %s new values to %s (total: %s)' % (
new_count, field, len(local_task[field]),))
|
def merge_left(field, local_task, remote_issue, hamming=False):
""" Merge array field from the remote_issue into local_task
* Local 'left' entries are preserved without modification
* Remote 'left' are appended to task if not present in local.
:param `field`: Task field to merge.
:param `local_task`: `taskw.task.Task` object into which to merge
remote changes.
:param `remote_issue`: `dict` instance from which to merge into
local task.
:param `hamming`: (default `False`) If `True`, compare entries by
truncating to maximum length, and comparing hamming distances.
Useful generally only for annotations.
"""
# Ensure that empty defaults are present
local_field = local_task.get(field, [])
remote_field = remote_issue.get(field, [])
# We need to make sure an array exists for this field because
# we will be appending to it in a moment.
if field not in local_task:
local_task[field] = []
# If a remote does not appear in local, add it to the local task
new_count = 0
for remote in remote_field:
for local in local_field:
if (
# For annotations, they don't have to match *exactly*.
(
hamming
and get_annotation_hamming_distance(remote, local) == 0
)
# But for everything else, they should.
or (
remote == local
)
):
break
else:
log.debug("%s not found in %r" % (remote, local_field))
local_task[field].append(remote)
new_count += 1
if new_count > 0:
log.debug('Added %s new values to %s (total: %s)' % (
new_count, field, len(local_task[field]),))
|
[
"Merge",
"array",
"field",
"from",
"the",
"remote_issue",
"into",
"local_task"
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/db.py#L215-L263
|
[
"def",
"merge_left",
"(",
"field",
",",
"local_task",
",",
"remote_issue",
",",
"hamming",
"=",
"False",
")",
":",
"# Ensure that empty defaults are present",
"local_field",
"=",
"local_task",
".",
"get",
"(",
"field",
",",
"[",
"]",
")",
"remote_field",
"=",
"remote_issue",
".",
"get",
"(",
"field",
",",
"[",
"]",
")",
"# We need to make sure an array exists for this field because",
"# we will be appending to it in a moment.",
"if",
"field",
"not",
"in",
"local_task",
":",
"local_task",
"[",
"field",
"]",
"=",
"[",
"]",
"# If a remote does not appear in local, add it to the local task",
"new_count",
"=",
"0",
"for",
"remote",
"in",
"remote_field",
":",
"for",
"local",
"in",
"local_field",
":",
"if",
"(",
"# For annotations, they don't have to match *exactly*.",
"(",
"hamming",
"and",
"get_annotation_hamming_distance",
"(",
"remote",
",",
"local",
")",
"==",
"0",
")",
"# But for everything else, they should.",
"or",
"(",
"remote",
"==",
"local",
")",
")",
":",
"break",
"else",
":",
"log",
".",
"debug",
"(",
"\"%s not found in %r\"",
"%",
"(",
"remote",
",",
"local_field",
")",
")",
"local_task",
"[",
"field",
"]",
".",
"append",
"(",
"remote",
")",
"new_count",
"+=",
"1",
"if",
"new_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'Added %s new values to %s (total: %s)'",
"%",
"(",
"new_count",
",",
"field",
",",
"len",
"(",
"local_task",
"[",
"field",
"]",
")",
",",
")",
")"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
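
A reduced sketch of `merge_left()`'s append-if-absent behaviour with hamming matching disabled (plain equality only) and no logging:

```python
def merge_left_sketch(field, local_task, remote_issue):
    local_field = local_task.get(field, [])
    local_task.setdefault(field, [])
    for remote in remote_issue.get(field, []):
        if remote not in local_field:  # local entries always win
            local_task[field].append(remote)

task = {'annotations': ['local note']}
merge_left_sketch('annotations', task,
                  {'annotations': ['local note', 'remote note']})
assert task['annotations'] == ['local note', 'remote note']
```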
test
|
build_uda_config_overrides
|
Returns a dictionary of UDAs defined by the given targets
For all targets in `targets`, build a dictionary of configuration overrides
representing the UDAs defined by the passed-in services (`targets`).
Given a hypothetical situation in which you have two services, the first
of which defines a UDA named 'serviceAid' ("Service A ID", string) and
a second service defining two UDAs named 'serviceBproject'
("Service B Project", string) and 'serviceBnumber'
("Service B Number", numeric), this would return the following structure::
{
'uda': {
'serviceAid': {
'label': 'Service A ID',
'type': 'string',
},
'serviceBproject': {
'label': 'Service B Project',
'type': 'string',
},
'serviceBnumber': {
'label': 'Service B Number',
'type': 'numeric',
}
}
}
|
bugwarrior/db.py
|
def build_uda_config_overrides(targets):
""" Returns a list of UDAs defined by given targets
For all targets in `targets`, build a dictionary of configuration overrides
representing the UDAs defined by the passed-in services (`targets`).
Given a hypothetical situation in which you have two services, the first
of which defining a UDA named 'serviceAid' ("Service A ID", string) and
a second service defining two UDAs named 'serviceBproject'
("Service B Project", string) and 'serviceBnumber'
("Service B Number", numeric), this would return the following structure::
{
'uda': {
'serviceAid': {
'label': 'Service A ID',
'type': 'string',
},
'serviceBproject': {
'label': 'Service B Project',
'type': 'string',
},
'serviceBnumber': {
'label': 'Service B Number',
'type': 'numeric',
}
}
}
"""
from bugwarrior.services import get_service
targets_udas = {}
for target in targets:
targets_udas.update(get_service(target).ISSUE_CLASS.UDAS)
return {
'uda': targets_udas
}
|
def build_uda_config_overrides(targets):
""" Returns a list of UDAs defined by given targets
For all targets in `targets`, build a dictionary of configuration overrides
representing the UDAs defined by the passed-in services (`targets`).
Given a hypothetical situation in which you have two services, the first
of which defining a UDA named 'serviceAid' ("Service A ID", string) and
a second service defining two UDAs named 'serviceBproject'
("Service B Project", string) and 'serviceBnumber'
("Service B Number", numeric), this would return the following structure::
{
'uda': {
'serviceAid': {
'label': 'Service A ID',
'type': 'string',
},
'serviceBproject': {
'label': 'Service B Project',
'type': 'string',
},
'serviceBnumber': {
'label': 'Service B Number',
'type': 'numeric',
}
}
}
"""
from bugwarrior.services import get_service
targets_udas = {}
for target in targets:
targets_udas.update(get_service(target).ISSUE_CLASS.UDAS)
return {
'uda': targets_udas
}
|
[
"Returns",
"a",
"list",
"of",
"UDAs",
"defined",
"by",
"given",
"targets"
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/db.py#L476-L514
|
[
"def",
"build_uda_config_overrides",
"(",
"targets",
")",
":",
"from",
"bugwarrior",
".",
"services",
"import",
"get_service",
"targets_udas",
"=",
"{",
"}",
"for",
"target",
"in",
"targets",
":",
"targets_udas",
".",
"update",
"(",
"get_service",
"(",
"target",
")",
".",
"ISSUE_CLASS",
".",
"UDAS",
")",
"return",
"{",
"'uda'",
":",
"targets_udas",
"}"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
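
A self-contained sketch using the docstring's hypothetical services as stand-in issue classes; the real function resolves each target via `bugwarrior.services.get_service(target)`.

```python
class ServiceAIssue:
    UDAS = {'serviceAid': {'label': 'Service A ID', 'type': 'string'}}

class ServiceBIssue:
    UDAS = {
        'serviceBproject': {'label': 'Service B Project', 'type': 'string'},
        'serviceBnumber': {'label': 'Service B Number', 'type': 'numeric'},
    }

def build_overrides(issue_classes):
    targets_udas = {}
    for cls in issue_classes:
        targets_udas.update(cls.UDAS)  # later services can shadow earlier keys
    return {'uda': targets_udas}

overrides = build_overrides([ServiceAIssue, ServiceBIssue])
assert set(overrides['uda']) == {'serviceAid', 'serviceBproject', 'serviceBnumber'}
```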
test
|
_parse_sprint_string
|
Parse the big ugly sprint string stored by JIRA.
They look like:
com.atlassian.greenhopper.service.sprint.Sprint@4c9c41a5[id=2322,rapid
ViewId=1173,state=ACTIVE,name=Sprint 1,startDate=2016-09-06T16:08:07.4
55Z,endDate=2016-09-23T16:08:00.000Z,completeDate=<null>,sequence=2322]
|
bugwarrior/services/jira.py
|
def _parse_sprint_string(sprint):
""" Parse the big ugly sprint string stored by JIRA.
They look like:
com.atlassian.greenhopper.service.sprint.Sprint@4c9c41a5[id=2322,rapid
ViewId=1173,state=ACTIVE,name=Sprint 1,startDate=2016-09-06T16:08:07.4
55Z,endDate=2016-09-23T16:08:00.000Z,completeDate=<null>,sequence=2322]
"""
entries = sprint[sprint.index('[')+1:sprint.index(']')].split('=')
fields = sum((entry.rsplit(',', 1) for entry in entries), [])
return dict(zip(fields[::2], fields[1::2]))
|
def _parse_sprint_string(sprint):
""" Parse the big ugly sprint string stored by JIRA.
They look like:
com.atlassian.greenhopper.service.sprint.Sprint@4c9c41a5[id=2322,rapid
ViewId=1173,state=ACTIVE,name=Sprint 1,startDate=2016-09-06T16:08:07.4
55Z,endDate=2016-09-23T16:08:00.000Z,completeDate=<null>,sequence=2322]
"""
entries = sprint[sprint.index('[')+1:sprint.index(']')].split('=')
fields = sum((entry.rsplit(',', 1) for entry in entries), [])
return dict(zip(fields[::2], fields[1::2]))
|
[
"Parse",
"the",
"big",
"ugly",
"sprint",
"string",
"stored",
"by",
"JIRA",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/services/jira.py#L43-L53
|
[
"def",
"_parse_sprint_string",
"(",
"sprint",
")",
":",
"entries",
"=",
"sprint",
"[",
"sprint",
".",
"index",
"(",
"'['",
")",
"+",
"1",
":",
"sprint",
".",
"index",
"(",
"']'",
")",
"]",
".",
"split",
"(",
"'='",
")",
"fields",
"=",
"sum",
"(",
"(",
"entry",
".",
"rsplit",
"(",
"','",
",",
"1",
")",
"for",
"entry",
"in",
"entries",
")",
",",
"[",
"]",
")",
"return",
"dict",
"(",
"zip",
"(",
"fields",
"[",
":",
":",
"2",
"]",
",",
"fields",
"[",
"1",
":",
":",
"2",
"]",
")",
")"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
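
A worked example of the rsplit-based parse, run on the docstring's sample value (with the docstring's line wrapping removed):

```python
def _parse_sprint_string(sprint):
    entries = sprint[sprint.index('[') + 1:sprint.index(']')].split('=')
    fields = sum((entry.rsplit(',', 1) for entry in entries), [])
    return dict(zip(fields[::2], fields[1::2]))

sprint = (
    'com.atlassian.greenhopper.service.sprint.Sprint@4c9c41a5'
    '[id=2322,rapidViewId=1173,state=ACTIVE,name=Sprint 1,'
    'startDate=2016-09-06T16:08:07.455Z,'
    'endDate=2016-09-23T16:08:00.000Z,completeDate=<null>,sequence=2322]'
)
parsed = _parse_sprint_string(sprint)
assert parsed['name'] == 'Sprint 1'
assert parsed['state'] == 'ACTIVE'
assert parsed['completeDate'] == '<null>'
```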
test
|
GmailService.get_credentials
|
Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
|
bugwarrior/services/gmail.py
|
def get_credentials(self):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
with self.AUTHENTICATION_LOCK:
log.info('Starting authentication for %s', self.target)
store = oauth2client.file.Storage(self.credentials_path)
credentials = store.get()
if not credentials or credentials.invalid:
log.info("No valid login. Starting OAUTH flow.")
flow = oauth2client.client.flow_from_clientsecrets(self.client_secret_path, self.SCOPES)
flow.user_agent = self.APPLICATION_NAME
flags = oauth2client.tools.argparser.parse_args([])
credentials = oauth2client.tools.run_flow(flow, store, flags)
log.info('Storing credentials to %r', self.credentials_path)
return credentials
|
def get_credentials(self):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
with self.AUTHENTICATION_LOCK:
log.info('Starting authentication for %s', self.target)
store = oauth2client.file.Storage(self.credentials_path)
credentials = store.get()
if not credentials or credentials.invalid:
log.info("No valid login. Starting OAUTH flow.")
flow = oauth2client.client.flow_from_clientsecrets(self.client_secret_path, self.SCOPES)
flow.user_agent = self.APPLICATION_NAME
flags = oauth2client.tools.argparser.parse_args([])
credentials = oauth2client.tools.run_flow(flow, store, flags)
log.info('Storing credentials to %r', self.credentials_path)
return credentials
|
[
"Gets",
"valid",
"user",
"credentials",
"from",
"storage",
"."
] |
ralphbean/bugwarrior
|
python
|
https://github.com/ralphbean/bugwarrior/blob/b2a5108f7b40cb0c437509b64eaa28f941f7ac8b/bugwarrior/services/gmail.py#L120-L140
|
[
"def",
"get_credentials",
"(",
"self",
")",
":",
"with",
"self",
".",
"AUTHENTICATION_LOCK",
":",
"log",
".",
"info",
"(",
"'Starting authentication for %s'",
",",
"self",
".",
"target",
")",
"store",
"=",
"oauth2client",
".",
"file",
".",
"Storage",
"(",
"self",
".",
"credentials_path",
")",
"credentials",
"=",
"store",
".",
"get",
"(",
")",
"if",
"not",
"credentials",
"or",
"credentials",
".",
"invalid",
":",
"log",
".",
"info",
"(",
"\"No valid login. Starting OAUTH flow.\"",
")",
"flow",
"=",
"oauth2client",
".",
"client",
".",
"flow_from_clientsecrets",
"(",
"self",
".",
"client_secret_path",
",",
"self",
".",
"SCOPES",
")",
"flow",
".",
"user_agent",
"=",
"self",
".",
"APPLICATION_NAME",
"flags",
"=",
"oauth2client",
".",
"tools",
".",
"argparser",
".",
"parse_args",
"(",
"[",
"]",
")",
"credentials",
"=",
"oauth2client",
".",
"tools",
".",
"run_flow",
"(",
"flow",
",",
"store",
",",
"flags",
")",
"log",
".",
"info",
"(",
"'Storing credentials to %r'",
",",
"self",
".",
"credentials_path",
")",
"return",
"credentials"
] |
b2a5108f7b40cb0c437509b64eaa28f941f7ac8b
|
test
|
multi_rouge_n
|
Efficient way to compute highly repetitive scoring
i.e. when the same sequences are involved multiple times
Args:
sequences(list[str]): list of sequences (either hyp or ref)
scores_ids(list[tuple(int)]): list of pairs (hyp_id, ref_id)
i.e. scores[i] = rouge_n(scores_ids[i][0],
scores_ids[i][1])
Returns:
scores: list of length `len(scores_ids)` containing rouge `n`
scores as a dict with 'f', 'r', 'p'
Raises:
KeyError: if there's a value of i in scores_ids that is not in
[0, len(sequences)[
|
rouge/rouge_score.py
|
def multi_rouge_n(sequences, scores_ids, n=2):
"""
Efficient way to compute highly repetitive scoring
i.e. sequences are involved multiple time
Args:
sequences(list[str]): list of sequences (either hyp or ref)
scores_ids(list[tuple(int)]): list of pairs (hyp_id, ref_id)
ie. scores[i] = rouge_n(scores_ids[i][0],
scores_ids[i][1])
Returns:
scores: list of length `len(scores_ids)` containing rouge `n`
scores as a dict with 'f', 'r', 'p'
Raises:
KeyError: if there's a value of i in scores_ids that is not in
[0, len(sequences)[
"""
ngrams = [_get_word_ngrams(n, sequence) for sequence in sequences]
counts = [len(ngram) for ngram in ngrams]
scores = []
for hyp_id, ref_id in scores_ids:
evaluated_ngrams = ngrams[hyp_id]
evaluated_count = counts[hyp_id]
reference_ngrams = ngrams[ref_id]
reference_count = counts[ref_id]
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
scores += [f_r_p_rouge_n(evaluated_count,
reference_count, overlapping_count)]
return scores
|
def multi_rouge_n(sequences, scores_ids, n=2):
"""
Efficient way to compute highly repetitive scoring
i.e. sequences are involved multiple time
Args:
sequences(list[str]): list of sequences (either hyp or ref)
scores_ids(list[tuple(int)]): list of pairs (hyp_id, ref_id)
ie. scores[i] = rouge_n(scores_ids[i][0],
scores_ids[i][1])
Returns:
scores: list of length `len(scores_ids)` containing rouge `n`
scores as a dict with 'f', 'r', 'p'
Raises:
KeyError: if there's a value of i in scores_ids that is not in
[0, len(sequences)[
"""
ngrams = [_get_word_ngrams(n, sequence) for sequence in sequences]
counts = [len(ngram) for ngram in ngrams]
scores = []
for hyp_id, ref_id in scores_ids:
evaluated_ngrams = ngrams[hyp_id]
evaluated_count = counts[hyp_id]
reference_ngrams = ngrams[ref_id]
reference_count = counts[ref_id]
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
scores += [f_r_p_rouge_n(evaluated_count,
reference_count, overlapping_count)]
return scores
|
[
"Efficient",
"way",
"to",
"compute",
"highly",
"repetitive",
"scoring",
"i",
".",
"e",
".",
"sequences",
"are",
"involved",
"multiple",
"time"
] |
pltrdy/rouge
|
python
|
https://github.com/pltrdy/rouge/blob/7bf8a83af5ca5c1677b93620b4e1f85ffd63b377/rouge/rouge_score.py#L140-L174
|
[
"def",
"multi_rouge_n",
"(",
"sequences",
",",
"scores_ids",
",",
"n",
"=",
"2",
")",
":",
"ngrams",
"=",
"[",
"_get_word_ngrams",
"(",
"n",
",",
"sequence",
")",
"for",
"sequence",
"in",
"sequences",
"]",
"counts",
"=",
"[",
"len",
"(",
"ngram",
")",
"for",
"ngram",
"in",
"ngrams",
"]",
"scores",
"=",
"[",
"]",
"for",
"hyp_id",
",",
"ref_id",
"in",
"scores_ids",
":",
"evaluated_ngrams",
"=",
"ngrams",
"[",
"hyp_id",
"]",
"evaluated_count",
"=",
"counts",
"[",
"hyp_id",
"]",
"reference_ngrams",
"=",
"ngrams",
"[",
"ref_id",
"]",
"reference_count",
"=",
"counts",
"[",
"ref_id",
"]",
"overlapping_ngrams",
"=",
"evaluated_ngrams",
".",
"intersection",
"(",
"reference_ngrams",
")",
"overlapping_count",
"=",
"len",
"(",
"overlapping_ngrams",
")",
"scores",
"+=",
"[",
"f_r_p_rouge_n",
"(",
"evaluated_count",
",",
"reference_count",
",",
"overlapping_count",
")",
"]",
"return",
"scores"
] |
7bf8a83af5ca5c1677b93620b4e1f85ffd63b377
|
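
An illustration of the precompute-once idea, using a simplified word-bigram extractor in place of rouge's internal `_get_word_ngrams()` (which applies its own tokenization):

```python
def word_ngrams(n, text):
    words = text.split()
    return {tuple(words[i:i + n]) for i in range(len(words) - n + 1)}

sequences = ['the cat sat', 'the cat ran', 'the cat sat']
scores_ids = [(0, 2), (1, 2)]  # (hyp_id, ref_id) indices into `sequences`

# N-grams and counts are computed once per sequence, then reused for
# every (hyp, ref) pair -- that is the whole optimization.
ngrams = [word_ngrams(2, s) for s in sequences]
counts = [len(g) for g in ngrams]
for hyp_id, ref_id in scores_ids:
    overlap = len(ngrams[hyp_id] & ngrams[ref_id])
    print(hyp_id, ref_id, overlap, counts[hyp_id], counts[ref_id])
```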
test
|
rouge_n
|
Computes ROUGE-N of two text collections of sentences.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/
papers/rouge-working-note-v1.3.1.pdf
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentences: The sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
A tuple (f1, precision, recall) for ROUGE-N
Raises:
ValueError: raises exception if a param has len <= 0
|
rouge/rouge_score.py
|
def rouge_n(evaluated_sentences, reference_sentences, n=2):
"""
Computes ROUGE-N of two text collections of sentences.
Sourece: http://research.microsoft.com/en-us/um/people/cyl/download/
papers/rouge-working-note-v1.3.1.pdf
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentences: The sentences from the referene set
n: Size of ngram. Defaults to 2.
Returns:
A tuple (f1, precision, recall) for ROUGE-N
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
reference_ngrams = _get_word_ngrams(n, reference_sentences)
reference_count = len(reference_ngrams)
evaluated_count = len(evaluated_ngrams)
# Gets the overlapping ngrams between evaluated and reference
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
return f_r_p_rouge_n(evaluated_count, reference_count, overlapping_count)
|
def rouge_n(evaluated_sentences, reference_sentences, n=2):
"""
Computes ROUGE-N of two text collections of sentences.
Sourece: http://research.microsoft.com/en-us/um/people/cyl/download/
papers/rouge-working-note-v1.3.1.pdf
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentences: The sentences from the referene set
n: Size of ngram. Defaults to 2.
Returns:
A tuple (f1, precision, recall) for ROUGE-N
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
evaluated_ngrams = _get_word_ngrams(n, evaluated_sentences)
reference_ngrams = _get_word_ngrams(n, reference_sentences)
reference_count = len(reference_ngrams)
evaluated_count = len(evaluated_ngrams)
# Gets the overlapping ngrams between evaluated and reference
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
return f_r_p_rouge_n(evaluated_count, reference_count, overlapping_count)
|
[
"Computes",
"ROUGE",
"-",
"N",
"of",
"two",
"text",
"collections",
"of",
"sentences",
".",
"Sourece",
":",
"http",
":",
"//",
"research",
".",
"microsoft",
".",
"com",
"/",
"en",
"-",
"us",
"/",
"um",
"/",
"people",
"/",
"cyl",
"/",
"download",
"/",
"papers",
"/",
"rouge",
"-",
"working",
"-",
"note",
"-",
"v1",
".",
"3",
".",
"1",
".",
"pdf"
] |
pltrdy/rouge
|
python
|
https://github.com/pltrdy/rouge/blob/7bf8a83af5ca5c1677b93620b4e1f85ffd63b377/rouge/rouge_score.py#L177-L207
|
[
"def",
"rouge_n",
"(",
"evaluated_sentences",
",",
"reference_sentences",
",",
"n",
"=",
"2",
")",
":",
"if",
"len",
"(",
"evaluated_sentences",
")",
"<=",
"0",
"or",
"len",
"(",
"reference_sentences",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Collections must contain at least 1 sentence.\"",
")",
"evaluated_ngrams",
"=",
"_get_word_ngrams",
"(",
"n",
",",
"evaluated_sentences",
")",
"reference_ngrams",
"=",
"_get_word_ngrams",
"(",
"n",
",",
"reference_sentences",
")",
"reference_count",
"=",
"len",
"(",
"reference_ngrams",
")",
"evaluated_count",
"=",
"len",
"(",
"evaluated_ngrams",
")",
"# Gets the overlapping ngrams between evaluated and reference",
"overlapping_ngrams",
"=",
"evaluated_ngrams",
".",
"intersection",
"(",
"reference_ngrams",
")",
"overlapping_count",
"=",
"len",
"(",
"overlapping_ngrams",
")",
"return",
"f_r_p_rouge_n",
"(",
"evaluated_count",
",",
"reference_count",
",",
"overlapping_count",
")"
] |
7bf8a83af5ca5c1677b93620b4e1f85ffd63b377
|
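
A simplified, self-contained ROUGE-N computation over whitespace tokens, to make the precision/recall/F1 arithmetic concrete; the library's `_get_word_ngrams()` applies its own tokenization, so numbers may differ slightly.

```python
def word_ngrams(n, text):
    words = text.split()
    return {tuple(words[i:i + n]) for i in range(len(words) - n + 1)}

hyp = 'the cat was found under the bed'
ref = 'the cat was under the bed'
hyp_ngrams = word_ngrams(2, hyp)
ref_ngrams = word_ngrams(2, ref)
overlap = len(hyp_ngrams & ref_ngrams)          # 4 shared bigrams
precision = overlap / len(hyp_ngrams)           # 4/6
recall = overlap / len(ref_ngrams)              # 4/5
f1 = 2 * precision * recall / (precision + recall)
print(round(precision, 3), round(recall, 3), round(f1, 3))  # 0.667 0.8 0.727
```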
test
|
_union_lcs
|
Returns LCS_u(r_i, C) which is the LCS score of the union longest common
subsequence between reference sentence ri and candidate summary C.
For example:
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8
and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1
is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5".
The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5"
and LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
Raises:
ValueError: raises exception if a param has len <= 0
|
rouge/rouge_score.py
|
def _union_lcs(evaluated_sentences, reference_sentence, prev_union=None):
"""
Returns LCS_u(r_i, C) which is the LCS score of the union longest common
subsequence between reference sentence ri and candidate summary C.
For example:
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8
and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1
is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5".
The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5"
and LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
ValueError:
Raises exception if a param has len <= 0
"""
if prev_union is None:
prev_union = set()
if len(evaluated_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
lcs_union = prev_union
prev_count = len(prev_union)
reference_words = _split_into_words([reference_sentence])
combined_lcs_length = 0
for eval_s in evaluated_sentences:
evaluated_words = _split_into_words([eval_s])
lcs = set(_recon_lcs(reference_words, evaluated_words))
combined_lcs_length += len(lcs)
lcs_union = lcs_union.union(lcs)
new_lcs_count = len(lcs_union) - prev_count
return new_lcs_count, lcs_union
|
def _union_lcs(evaluated_sentences, reference_sentence, prev_union=None):
"""
Returns LCS_u(r_i, C) which is the LCS score of the union longest common
subsequence between reference sentence ri and candidate summary C.
For example:
if r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8
and c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1
is "w1 w2" and the longest common subsequence of r_i and c2 is "w1 w3 w5".
The union longest common subsequence of r_i, c1, and c2 is "w1 w2 w3 w5"
and LCS_u(r_i, C) = 4/5.
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
float: LCS_u(r_i, C)
ValueError:
Raises exception if a param has len <= 0
"""
if prev_union is None:
prev_union = set()
if len(evaluated_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
lcs_union = prev_union
prev_count = len(prev_union)
reference_words = _split_into_words([reference_sentence])
combined_lcs_length = 0
for eval_s in evaluated_sentences:
evaluated_words = _split_into_words([eval_s])
lcs = set(_recon_lcs(reference_words, evaluated_words))
combined_lcs_length += len(lcs)
lcs_union = lcs_union.union(lcs)
new_lcs_count = len(lcs_union) - prev_count
return new_lcs_count, lcs_union
|
[
"Returns",
"LCS_u",
"(",
"r_i",
"C",
")",
"which",
"is",
"the",
"LCS",
"score",
"of",
"the",
"union",
"longest",
"common",
"subsequence",
"between",
"reference",
"sentence",
"ri",
"and",
"candidate",
"summary",
"C",
".",
"For",
"example",
":",
"if",
"r_i",
"=",
"w1",
"w2",
"w3",
"w4",
"w5",
"and",
"C",
"contains",
"two",
"sentences",
":",
"c1",
"=",
"w1",
"w2",
"w6",
"w7",
"w8",
"and",
"c2",
"=",
"w1",
"w3",
"w8",
"w9",
"w5",
"then",
"the",
"longest",
"common",
"subsequence",
"of",
"r_i",
"and",
"c1",
"is",
"w1",
"w2",
"and",
"the",
"longest",
"common",
"subsequence",
"of",
"r_i",
"and",
"c2",
"is",
"w1",
"w3",
"w5",
".",
"The",
"union",
"longest",
"common",
"subsequence",
"of",
"r_i",
"c1",
"and",
"c2",
"is",
"w1",
"w2",
"w3",
"w5",
"and",
"LCS_u",
"(",
"r_i",
"C",
")",
"=",
"4",
"/",
"5",
"."
] |
pltrdy/rouge
|
python
|
https://github.com/pltrdy/rouge/blob/7bf8a83af5ca5c1677b93620b4e1f85ffd63b377/rouge/rouge_score.py#L227-L267
|
[
"def",
"_union_lcs",
"(",
"evaluated_sentences",
",",
"reference_sentence",
",",
"prev_union",
"=",
"None",
")",
":",
"if",
"prev_union",
"is",
"None",
":",
"prev_union",
"=",
"set",
"(",
")",
"if",
"len",
"(",
"evaluated_sentences",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Collections must contain at least 1 sentence.\"",
")",
"lcs_union",
"=",
"prev_union",
"prev_count",
"=",
"len",
"(",
"prev_union",
")",
"reference_words",
"=",
"_split_into_words",
"(",
"[",
"reference_sentence",
"]",
")",
"combined_lcs_length",
"=",
"0",
"for",
"eval_s",
"in",
"evaluated_sentences",
":",
"evaluated_words",
"=",
"_split_into_words",
"(",
"[",
"eval_s",
"]",
")",
"lcs",
"=",
"set",
"(",
"_recon_lcs",
"(",
"reference_words",
",",
"evaluated_words",
")",
")",
"combined_lcs_length",
"+=",
"len",
"(",
"lcs",
")",
"lcs_union",
"=",
"lcs_union",
".",
"union",
"(",
"lcs",
")",
"new_lcs_count",
"=",
"len",
"(",
"lcs_union",
")",
"-",
"prev_count",
"return",
"new_lcs_count",
",",
"lcs_union"
] |
7bf8a83af5ca5c1677b93620b4e1f85ffd63b377
|
test
|
rouge_l_summary_level
|
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentences: The sentences from the reference summaries
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
|
rouge/rouge_score.py
|
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
# total number of words in reference sentences
m = len(set(_split_into_words(reference_sentences)))
# total number of words in evaluated sentences
n = len(set(_split_into_words(evaluated_sentences)))
# print("m,n %d %d" % (m, n))
union_lcs_sum_across_all_references = 0
union = set()
for ref_s in reference_sentences:
lcs_count, union = _union_lcs(evaluated_sentences,
ref_s,
prev_union=union)
union_lcs_sum_across_all_references += lcs_count
llcs = union_lcs_sum_across_all_references
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta**2)) * r_lcs * p_lcs
denom = r_lcs + ((beta**2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return {"f": f_lcs, "p": p_lcs, "r": r_lcs}
|
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
"""
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary
Args:
evaluated_sentences: The sentences that have been picked by the
summarizer
reference_sentence: One of the sentences in the reference summaries
Returns:
A float: F_lcs
Raises:
ValueError: raises exception if a param has len <= 0
"""
if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
raise ValueError("Collections must contain at least 1 sentence.")
# total number of words in reference sentences
m = len(set(_split_into_words(reference_sentences)))
# total number of words in evaluated sentences
n = len(set(_split_into_words(evaluated_sentences)))
# print("m,n %d %d" % (m, n))
union_lcs_sum_across_all_references = 0
union = set()
for ref_s in reference_sentences:
lcs_count, union = _union_lcs(evaluated_sentences,
ref_s,
prev_union=union)
union_lcs_sum_across_all_references += lcs_count
llcs = union_lcs_sum_across_all_references
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta**2)) * r_lcs * p_lcs
denom = r_lcs + ((beta**2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return {"f": f_lcs, "p": p_lcs, "r": r_lcs}
|
[
"Computes",
"ROUGE",
"-",
"L",
"(",
"summary",
"level",
")",
"of",
"two",
"text",
"collections",
"of",
"sentences",
".",
"http",
":",
"//",
"research",
".",
"microsoft",
".",
"com",
"/",
"en",
"-",
"us",
"/",
"um",
"/",
"people",
"/",
"cyl",
"/",
"download",
"/",
"papers",
"/",
"rouge",
"-",
"working",
"-",
"note",
"-",
"v1",
".",
"3",
".",
"1",
".",
"pdf"
] |
pltrdy/rouge
|
python
|
https://github.com/pltrdy/rouge/blob/7bf8a83af5ca5c1677b93620b4e1f85ffd63b377/rouge/rouge_score.py#L270-L324
|
[
"def",
"rouge_l_summary_level",
"(",
"evaluated_sentences",
",",
"reference_sentences",
")",
":",
"if",
"len",
"(",
"evaluated_sentences",
")",
"<=",
"0",
"or",
"len",
"(",
"reference_sentences",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Collections must contain at least 1 sentence.\"",
")",
"# total number of words in reference sentences",
"m",
"=",
"len",
"(",
"set",
"(",
"_split_into_words",
"(",
"reference_sentences",
")",
")",
")",
"# total number of words in evaluated sentences",
"n",
"=",
"len",
"(",
"set",
"(",
"_split_into_words",
"(",
"evaluated_sentences",
")",
")",
")",
"# print(\"m,n %d %d\" % (m, n))",
"union_lcs_sum_across_all_references",
"=",
"0",
"union",
"=",
"set",
"(",
")",
"for",
"ref_s",
"in",
"reference_sentences",
":",
"lcs_count",
",",
"union",
"=",
"_union_lcs",
"(",
"evaluated_sentences",
",",
"ref_s",
",",
"prev_union",
"=",
"union",
")",
"union_lcs_sum_across_all_references",
"+=",
"lcs_count",
"llcs",
"=",
"union_lcs_sum_across_all_references",
"r_lcs",
"=",
"llcs",
"/",
"m",
"p_lcs",
"=",
"llcs",
"/",
"n",
"beta",
"=",
"p_lcs",
"/",
"(",
"r_lcs",
"+",
"1e-12",
")",
"num",
"=",
"(",
"1",
"+",
"(",
"beta",
"**",
"2",
")",
")",
"*",
"r_lcs",
"*",
"p_lcs",
"denom",
"=",
"r_lcs",
"+",
"(",
"(",
"beta",
"**",
"2",
")",
"*",
"p_lcs",
")",
"f_lcs",
"=",
"num",
"/",
"(",
"denom",
"+",
"1e-12",
")",
"return",
"{",
"\"f\"",
":",
"f_lcs",
",",
"\"p\"",
":",
"p_lcs",
",",
"\"r\"",
":",
"r_lcs",
"}"
] |
7bf8a83af5ca5c1677b93620b4e1f85ffd63b377
|
test
|
FilesRouge.get_scores
|
Calculate ROUGE scores between each pair of
lines (hyp_file[i], ref_file[i]).
Args:
* hyp_path: hypothesis file path
* ref_path: references file path
* avg (False): whether to get an average score or a list
|
rouge/rouge.py
|
def get_scores(self, avg=False, ignore_empty=False):
"""Calculate ROUGE scores between each pair of
lines (hyp_file[i], ref_file[i]).
Args:
* hyp_path: hypothesis file path
* ref_path: references file path
    * avg (False): whether to get an average score or a list
"""
hyp_path, ref_path = self.hyp_path, self.ref_path
with io.open(hyp_path, encoding="utf-8", mode="r") as hyp_file:
hyps = [line[:-1] for line in hyp_file]
with io.open(ref_path, encoding="utf-8", mode="r") as ref_file:
refs = [line[:-1] for line in ref_file]
return self.rouge.get_scores(hyps, refs, avg=avg,
ignore_empty=ignore_empty)
|
def get_scores(self, avg=False, ignore_empty=False):
"""Calculate ROUGE scores between each pair of
lines (hyp_file[i], ref_file[i]).
Args:
* hyp_path: hypothesis file path
* ref_path: references file path
    * avg (False): whether to get an average score or a list
"""
hyp_path, ref_path = self.hyp_path, self.ref_path
with io.open(hyp_path, encoding="utf-8", mode="r") as hyp_file:
hyps = [line[:-1] for line in hyp_file]
with io.open(ref_path, encoding="utf-8", mode="r") as ref_file:
refs = [line[:-1] for line in ref_file]
return self.rouge.get_scores(hyps, refs, avg=avg,
ignore_empty=ignore_empty)
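
# A minimal usage sketch; assumes FilesRouge is importable from the rouge
# package and that its constructor stores hyp_path and ref_path as instance
# attributes (this method reads self.hyp_path/self.ref_path). The two file
# paths are hypothetical and must be line-aligned.
from rouge import FilesRouge
files_rouge = FilesRouge("hyp.txt", "ref.txt")
avg_scores = files_rouge.get_scores(avg=True)    # one averaged dict
all_scores = files_rouge.get_scores(avg=False)   # one dict per line pair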
|
[
"Calculate",
"ROUGE",
"scores",
"between",
"each",
"pair",
"of",
"lines",
"(",
"hyp_file",
"[",
"i",
"]",
"ref_file",
"[",
"i",
"]",
")",
".",
"Args",
":",
"*",
"hyp_path",
":",
"hypothesis",
"file",
"path",
"*",
"ref_path",
":",
"references",
"file",
"path",
"*",
"avg",
"(",
"False",
")",
":",
"whether",
"to",
"get",
"an",
"average",
"scores",
"or",
"a",
"list"
] |
pltrdy/rouge
|
python
|
https://github.com/pltrdy/rouge/blob/7bf8a83af5ca5c1677b93620b4e1f85ffd63b377/rouge/rouge.py#L34-L50
|
[
"def",
"get_scores",
"(",
"self",
",",
"avg",
"=",
"False",
",",
"ignore_empty",
"=",
"False",
")",
":",
"hyp_path",
",",
"ref_path",
"=",
"self",
".",
"hyp_path",
",",
"self",
".",
"ref_path",
"with",
"io",
".",
"open",
"(",
"hyp_path",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"hyp_file",
":",
"hyps",
"=",
"[",
"line",
"[",
":",
"-",
"1",
"]",
"for",
"line",
"in",
"hyp_file",
"]",
"with",
"io",
".",
"open",
"(",
"ref_path",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"mode",
"=",
"\"r\"",
")",
"as",
"ref_file",
":",
"refs",
"=",
"[",
"line",
"[",
":",
"-",
"1",
"]",
"for",
"line",
"in",
"ref_file",
"]",
"return",
"self",
".",
"rouge",
".",
"get_scores",
"(",
"hyps",
",",
"refs",
",",
"avg",
"=",
"avg",
",",
"ignore_empty",
"=",
"ignore_empty",
")"
] |
7bf8a83af5ca5c1677b93620b4e1f85ffd63b377
|
test
|
calc_pvalues
|
calculate pvalues for all categories in the graph
:param set query: set of identifiers for which the p value is calculated
:param dict gene_sets: gmt file dict after background was set
:param background: a set of background genes, or an int giving the total number of genes in your annotated database.
:returns: pvalues
x: overlapped gene number
n: length of gene_set which belongs to each terms
hits: overlapped gene names.
For 2*2 contingency table:
=============================================================================
| in query | not in query | row total
=> in gene_set | a | b | a+b
=> not in gene_set | c | d | c+d
column total | a+b+c+d = anno database
=============================================================================
background genes number = a + b + c + d.
Then, in R
x=a the number of white balls drawn without replacement
from an urn which contains both black and white balls.
m=a+b the number of white balls in the urn
n=c+d the number of black balls in the urn
k=a+c the number of balls drawn from the urn
In SciPy, the arguments of scipy.stats.hypergeom.sf(k, M, n, N, loc=0) are:
M: the total number of objects,
n: the total number of Type I objects,
N: the number of objects drawn without replacement,
k: the number of Type I objects among the N drawn.
Therefore, these two functions are the same when using parameters from 2*2 table:
R: > phyper(x-1, m, n, k, lower.tail=FALSE)
Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
|
gseapy/stats.py
|
def calc_pvalues(query, gene_sets, background=20000, **kwargs):
""" calculate pvalues for all categories in the graph
:param set query: set of identifiers for which the p value is calculated
:param dict gene_sets: gmt file dict after background was set
    :param background: a set of background genes, or an int giving the total number of genes in your annotated database.
:returns: pvalues
x: overlapped gene number
n: length of gene_set which belongs to each terms
hits: overlapped gene names.
For 2*2 contingency table:
=============================================================================
| in query | not in query | row total
=> in gene_set | a | b | a+b
=> not in gene_set | c | d | c+d
column total | a+b+c+d = anno database
=============================================================================
background genes number = a + b + c + d.
Then, in R
x=a the number of white balls drawn without replacement
from an urn which contains both black and white balls.
m=a+b the number of white balls in the urn
n=c+d the number of black balls in the urn
k=a+c the number of balls drawn from the urn
    In SciPy, the arguments of scipy.stats.hypergeom.sf(k, M, n, N, loc=0) are:
    M: the total number of objects,
    n: the total number of Type I objects,
    N: the number of objects drawn without replacement,
    k: the number of Type I objects among the N drawn.
Therefore, these two functions are the same when using parameters from 2*2 table:
R: > phyper(x-1, m, n, k, lower.tail=FALSE)
Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
"""
# number of genes in your query data
k = len(query)
query = set(query)
vals = []
# background should be all genes in annotated database
    # such as GO, KEGG, etc.
if isinstance(background, set):
bg = len(background) # total number in your annotated database
        # filter out genes not found in the annotated database
query = query.intersection(background)
elif isinstance(background, int):
bg = background
else:
raise ValueError("background should be set or int object")
# pval
subsets = sorted(gene_sets.keys())
for s in subsets:
category = gene_sets.get(s)
m = len(category)
hits = query.intersection(set(category))
x = len(hits)
if x < 1 : continue
# pVal = hypergeom.sf(hitCount-1,popTotal,bgHits,queryTotal)
# p(X >= hitCounts)
vals.append((s, hypergeom.sf(x-1, bg, m, k), x, m, hits))
return zip(*vals)
|
def calc_pvalues(query, gene_sets, background=20000, **kwargs):
""" calculate pvalues for all categories in the graph
:param set query: set of identifiers for which the p value is calculated
:param dict gene_sets: gmt file dict after background was set
    :param background: a set of background genes, or an int giving the total number of genes in your annotated database.
:returns: pvalues
x: overlapped gene number
n: length of gene_set which belongs to each terms
hits: overlapped gene names.
For 2*2 contingency table:
=============================================================================
| in query | not in query | row total
=> in gene_set | a | b | a+b
=> not in gene_set | c | d | c+d
column total | a+b+c+d = anno database
=============================================================================
background genes number = a + b + c + d.
Then, in R
x=a the number of white balls drawn without replacement
from an urn which contains both black and white balls.
m=a+b the number of white balls in the urn
n=c+d the number of black balls in the urn
k=a+c the number of balls drawn from the urn
    In SciPy, the arguments of scipy.stats.hypergeom.sf(k, M, n, N, loc=0) are:
    M: the total number of objects,
    n: the total number of Type I objects,
    N: the number of objects drawn without replacement,
    k: the number of Type I objects among the N drawn.
Therefore, these two functions are the same when using parameters from 2*2 table:
R: > phyper(x-1, m, n, k, lower.tail=FALSE)
Scipy: >>> hypergeom.sf(x-1, m+n, m, k)
"""
# number of genes in your query data
k = len(query)
query = set(query)
vals = []
# background should be all genes in annotated database
    # such as GO, KEGG, etc.
if isinstance(background, set):
bg = len(background) # total number in your annotated database
        # filter out genes not found in the annotated database
query = query.intersection(background)
elif isinstance(background, int):
bg = background
else:
raise ValueError("background should be set or int object")
# pval
subsets = sorted(gene_sets.keys())
for s in subsets:
category = gene_sets.get(s)
m = len(category)
hits = query.intersection(set(category))
x = len(hits)
if x < 1 : continue
# pVal = hypergeom.sf(hitCount-1,popTotal,bgHits,queryTotal)
# p(X >= hitCounts)
vals.append((s, hypergeom.sf(x-1, bg, m, k), x, m, hits))
return zip(*vals)
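
# A small sketch of the R/SciPy equivalence described in the docstring,
# with made-up numbers; assumes scipy is installed.
from scipy.stats import hypergeom
x, m, bg, k = 10, 100, 20000, 500   # hits, gene-set size, background, query
p = hypergeom.sf(x - 1, bg, m, k)   # P(X >= x), same call as in calc_pvalues
# Equivalent R call: phyper(x-1, m, bg-m, k, lower.tail=FALSE)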
|
[
"calculate",
"pvalues",
"for",
"all",
"categories",
"in",
"the",
"graph"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/stats.py#L10-L77
|
[
"def",
"calc_pvalues",
"(",
"query",
",",
"gene_sets",
",",
"background",
"=",
"20000",
",",
"*",
"*",
"kwargs",
")",
":",
"# number of genes in your query data",
"k",
"=",
"len",
"(",
"query",
")",
"query",
"=",
"set",
"(",
"query",
")",
"vals",
"=",
"[",
"]",
"# background should be all genes in annotated database",
"# such as go, kegg et.al.",
"if",
"isinstance",
"(",
"background",
",",
"set",
")",
":",
"bg",
"=",
"len",
"(",
"background",
")",
"# total number in your annotated database ",
"# filter genes that not found in annotated database",
"query",
"=",
"query",
".",
"intersection",
"(",
"background",
")",
"elif",
"isinstance",
"(",
"background",
",",
"int",
")",
":",
"bg",
"=",
"background",
"else",
":",
"raise",
"ValueError",
"(",
"\"background should be set or int object\"",
")",
"# pval",
"subsets",
"=",
"sorted",
"(",
"gene_sets",
".",
"keys",
"(",
")",
")",
"for",
"s",
"in",
"subsets",
":",
"category",
"=",
"gene_sets",
".",
"get",
"(",
"s",
")",
"m",
"=",
"len",
"(",
"category",
")",
"hits",
"=",
"query",
".",
"intersection",
"(",
"set",
"(",
"category",
")",
")",
"x",
"=",
"len",
"(",
"hits",
")",
"if",
"x",
"<",
"1",
":",
"continue",
"# pVal = hypergeom.sf(hitCount-1,popTotal,bgHits,queryTotal) ",
"# p(X >= hitCounts)",
"vals",
".",
"append",
"(",
"(",
"s",
",",
"hypergeom",
".",
"sf",
"(",
"x",
"-",
"1",
",",
"bg",
",",
"m",
",",
"k",
")",
",",
"x",
",",
"m",
",",
"hits",
")",
")",
"return",
"zip",
"(",
"*",
"vals",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
fdrcorrection
|
benjamini hochberg fdr correction. inspired by statsmodels
|
gseapy/stats.py
|
def fdrcorrection(pvals, alpha=0.05):
""" benjamini hocheberg fdr correction. inspired by statsmodels
"""
# Implement copy from GOATools.
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = np.take(pvals, pvals_sortind)
ecdffactor = _ecdf(pvals_sorted)
reject = pvals_sorted <= ecdffactor*alpha
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
del pvals_corrected_raw
pvals_corrected[pvals_corrected>1] = 1
pvals_corrected_ = np.empty_like(pvals_corrected)
pvals_corrected_[pvals_sortind] = pvals_corrected
del pvals_corrected
reject_ = np.empty_like(reject)
reject_[pvals_sortind] = reject
return reject_, pvals_corrected_
|
def fdrcorrection(pvals, alpha=0.05):
""" benjamini hocheberg fdr correction. inspired by statsmodels
"""
# Implement copy from GOATools.
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = np.take(pvals, pvals_sortind)
ecdffactor = _ecdf(pvals_sorted)
reject = pvals_sorted <= ecdffactor*alpha
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
del pvals_corrected_raw
pvals_corrected[pvals_corrected>1] = 1
pvals_corrected_ = np.empty_like(pvals_corrected)
pvals_corrected_[pvals_sortind] = pvals_corrected
del pvals_corrected
reject_ = np.empty_like(reject)
reject_[pvals_sortind] = reject
return reject_, pvals_corrected_
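
# A minimal usage sketch with made-up p-values; assumes fdrcorrection is
# importable from gseapy.stats, which also defines the _ecdf helper it uses.
from gseapy.stats import fdrcorrection
pvals = [0.001, 0.008, 0.039, 0.041, 0.042, 0.060, 0.074, 0.205]
reject, qvals = fdrcorrection(pvals, alpha=0.05)
# reject: boolean mask; qvals: BH-adjusted p-values, both in input order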
|
[
"benjamini",
"hocheberg",
"fdr",
"correction",
".",
"inspired",
"by",
"statsmodels"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/stats.py#L85-L107
|
[
"def",
"fdrcorrection",
"(",
"pvals",
",",
"alpha",
"=",
"0.05",
")",
":",
"# Implement copy from GOATools.",
"pvals",
"=",
"np",
".",
"asarray",
"(",
"pvals",
")",
"pvals_sortind",
"=",
"np",
".",
"argsort",
"(",
"pvals",
")",
"pvals_sorted",
"=",
"np",
".",
"take",
"(",
"pvals",
",",
"pvals_sortind",
")",
"ecdffactor",
"=",
"_ecdf",
"(",
"pvals_sorted",
")",
"reject",
"=",
"pvals_sorted",
"<=",
"ecdffactor",
"*",
"alpha",
"if",
"reject",
".",
"any",
"(",
")",
":",
"rejectmax",
"=",
"max",
"(",
"np",
".",
"nonzero",
"(",
"reject",
")",
"[",
"0",
"]",
")",
"reject",
"[",
":",
"rejectmax",
"]",
"=",
"True",
"pvals_corrected_raw",
"=",
"pvals_sorted",
"/",
"ecdffactor",
"pvals_corrected",
"=",
"np",
".",
"minimum",
".",
"accumulate",
"(",
"pvals_corrected_raw",
"[",
":",
":",
"-",
"1",
"]",
")",
"[",
":",
":",
"-",
"1",
"]",
"del",
"pvals_corrected_raw",
"pvals_corrected",
"[",
"pvals_corrected",
">",
"1",
"]",
"=",
"1",
"pvals_corrected_",
"=",
"np",
".",
"empty_like",
"(",
"pvals_corrected",
")",
"pvals_corrected_",
"[",
"pvals_sortind",
"]",
"=",
"pvals_corrected",
"del",
"pvals_corrected",
"reject_",
"=",
"np",
".",
"empty_like",
"(",
"reject",
")",
"reject_",
"[",
"pvals_sortind",
"]",
"=",
"reject",
"return",
"reject_",
",",
"pvals_corrected_"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
zscore
|
Standardize the mean and variance of the data along the given axis.
:param data2d: DataFrame to normalize.
:param axis: int, Which axis to normalize across. If 0, normalize across rows,
if 1, normalize across columns. If None, don't change data
:Returns: Normalized DataFrame. Normalized data with a mean of 0 and variance of 1
across the specified axis.
|
gseapy/plot.py
|
def zscore(data2d, axis=0):
"""Standardize the mean and variance of the data axis Parameters.
:param data2d: DataFrame to normalize.
:param axis: int, Which axis to normalize across. If 0, normalize across rows,
if 1, normalize across columns. If None, don't change data
:Returns: Normalized DataFrame. Normalized data with a mean of 0 and variance of 1
across the specified axis.
"""
if axis is None:
# normalized to mean and std using entire matrix
# z_scored = (data2d - data2d.values.mean()) / data2d.values.std(ddof=1)
return data2d
assert axis in [0,1]
# if axis == 1:
# z_scored = data2d
# else:
# z_scored = data2d.T
# z_scored = (z_scored - z_scored.mean()) / z_scored.std(ddof=1)
# if axis == 1:
# return z_scored
# else:
# return z_scored.T
z_scored = data2d.apply(lambda x: (x-x.mean())/x.std(ddof=1),
axis=operator.xor(1, axis))
return z_scored
|
def zscore(data2d, axis=0):
"""Standardize the mean and variance of the data axis Parameters.
:param data2d: DataFrame to normalize.
:param axis: int, Which axis to normalize across. If 0, normalize across rows,
if 1, normalize across columns. If None, don't change data
:Returns: Normalized DataFrame. Normalized data with a mean of 0 and variance of 1
across the specified axis.
"""
if axis is None:
# normalized to mean and std using entire matrix
# z_scored = (data2d - data2d.values.mean()) / data2d.values.std(ddof=1)
return data2d
assert axis in [0,1]
# if axis == 1:
# z_scored = data2d
# else:
# z_scored = data2d.T
# z_scored = (z_scored - z_scored.mean()) / z_scored.std(ddof=1)
# if axis == 1:
# return z_scored
# else:
# return z_scored.T
z_scored = data2d.apply(lambda x: (x-x.mean())/x.std(ddof=1),
axis=operator.xor(1, axis))
return z_scored
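
# A usage sketch with a toy DataFrame; assumes pandas is installed and that
# zscore is importable from gseapy.plot (per the file path above). With
# axis=0 each row is standardized, via apply(..., axis=operator.xor(1, 0)).
import pandas as pd
from gseapy.plot import zscore
expr = pd.DataFrame({'s1': [1.0, 4.0], 's2': [2.0, 5.0], 's3': [3.0, 9.0]},
                    index=['geneA', 'geneB'])
normed = zscore(expr, axis=0)   # each row now has mean 0 and std 1 (ddof=1)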
|
[
"Standardize",
"the",
"mean",
"and",
"variance",
"of",
"the",
"data",
"axis",
"Parameters",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/plot.py#L28-L57
|
[
"def",
"zscore",
"(",
"data2d",
",",
"axis",
"=",
"0",
")",
":",
"if",
"axis",
"is",
"None",
":",
"# normalized to mean and std using entire matrix",
"# z_scored = (data2d - data2d.values.mean()) / data2d.values.std(ddof=1)",
"return",
"data2d",
"assert",
"axis",
"in",
"[",
"0",
",",
"1",
"]",
"# if axis == 1:",
"# z_scored = data2d",
"# else:",
"# z_scored = data2d.T",
"# z_scored = (z_scored - z_scored.mean()) / z_scored.std(ddof=1)",
"# if axis == 1:",
"# return z_scored",
"# else:",
"# return z_scored.T",
"z_scored",
"=",
"data2d",
".",
"apply",
"(",
"lambda",
"x",
":",
"(",
"x",
"-",
"x",
".",
"mean",
"(",
")",
")",
"/",
"x",
".",
"std",
"(",
"ddof",
"=",
"1",
")",
",",
"axis",
"=",
"operator",
".",
"xor",
"(",
"1",
",",
"axis",
")",
")",
"return",
"z_scored"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
heatmap
|
Visualize the dataframe.
:param df: DataFrame from expression table.
:param z_score: z_score axis{0, 1}. If None, don't normalize data.
:param title: gene set name.
:param outdir: not used; the output location is controlled by ofname.
:param figsize: heatmap figsize.
:param cmap: matplotlib colormap.
:param ofname: output file name. If None, don't save figure
|
gseapy/plot.py
|
def heatmap(df, z_score=None, title='', figsize=(5,5), cmap='RdBu_r',
xticklabels=True, yticklabels=True, ofname=None, **kwargs):
"""Visualize the dataframe.
:param df: DataFrame from expression table.
:param z_score: z_score axis{0, 1}. If None, don't normalize data.
:param title: gene set name.
    :param outdir: not used; the output location is controlled by ofname.
:param figsize: heatmap figsize.
:param cmap: matplotlib colormap.
:param ofname: output file name. If None, don't save figure
"""
df = zscore(df, axis=z_score)
df = df.iloc[::-1]
    # Get the positions and labels used for the ticks
ny, nx = df.shape
xticks = np.arange(0, nx, 1) + .5
yticks = np.arange(0, ny, 1) + .5
# If working on commandline, don't show figure
if hasattr(sys, 'ps1') and (ofname is None):
fig = plt.figure(figsize=figsize)
else:
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
vmin = np.percentile(df.min(), 2)
vmax = np.percentile(df.max(), 98)
matrix = ax.pcolormesh(df.values, cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_ylim([0,len(df)])
ax.set(xticks=xticks, yticks=yticks)
ax.set_xticklabels(df.columns.values if xticklabels else '', fontsize=14, rotation=90)
ax.set_yticklabels(df.index.values if yticklabels else '', fontsize=14)
ax.set_title("%s\nHeatmap of the Analyzed Geneset"%title, fontsize=20)
ax.tick_params(axis='both', which='both', bottom=False, top=False,
right=False, left=False)
# cax=fig.add_axes([0.93,0.25,0.05,0.20])
# cbar = fig.colorbar(matrix, cax=cax)
cbar = colorbar(matrix)
cbar.ax.tick_params(axis='both', which='both', bottom=False, top=False,
right=False, left=False)
for side in ["top", "right", "left", "bottom"]:
ax.spines[side].set_visible(False)
cbar.ax.spines[side].set_visible(False)
# cbar.ax.set_title('',loc='left')
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
|
def heatmap(df, z_score=None, title='', figsize=(5,5), cmap='RdBu_r',
xticklabels=True, yticklabels=True, ofname=None, **kwargs):
"""Visualize the dataframe.
:param df: DataFrame from expression table.
:param z_score: z_score axis{0, 1}. If None, don't normalize data.
:param title: gene set name.
    :param outdir: not used; the output location is controlled by ofname.
:param figsize: heatmap figsize.
:param cmap: matplotlib colormap.
:param ofname: output file name. If None, don't save figure
"""
df = zscore(df, axis=z_score)
df = df.iloc[::-1]
    # Get the positions and labels used for the ticks
ny, nx = df.shape
xticks = np.arange(0, nx, 1) + .5
yticks = np.arange(0, ny, 1) + .5
# If working on commandline, don't show figure
if hasattr(sys, 'ps1') and (ofname is None):
fig = plt.figure(figsize=figsize)
else:
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
vmin = np.percentile(df.min(), 2)
vmax = np.percentile(df.max(), 98)
matrix = ax.pcolormesh(df.values, cmap=cmap, vmin=vmin, vmax=vmax)
ax.set_ylim([0,len(df)])
ax.set(xticks=xticks, yticks=yticks)
ax.set_xticklabels(df.columns.values if xticklabels else '', fontsize=14, rotation=90)
ax.set_yticklabels(df.index.values if yticklabels else '', fontsize=14)
ax.set_title("%s\nHeatmap of the Analyzed Geneset"%title, fontsize=20)
ax.tick_params(axis='both', which='both', bottom=False, top=False,
right=False, left=False)
# cax=fig.add_axes([0.93,0.25,0.05,0.20])
# cbar = fig.colorbar(matrix, cax=cax)
cbar = colorbar(matrix)
cbar.ax.tick_params(axis='both', which='both', bottom=False, top=False,
right=False, left=False)
for side in ["top", "right", "left", "bottom"]:
ax.spines[side].set_visible(False)
cbar.ax.spines[side].set_visible(False)
# cbar.ax.set_title('',loc='left')
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
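
# A usage sketch with random data; assumes numpy/pandas are installed and
# that heatmap is importable from gseapy.plot, per the file path above.
import numpy as np
import pandas as pd
from gseapy.plot import heatmap
expr = pd.DataFrame(np.random.randn(8, 4),
                    index=['g%d' % i for i in range(8)],
                    columns=['s%d' % i for i in range(4)])
heatmap(expr, z_score=0, title='TOY_SET', ofname='toy_heatmap.pdf')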
|
[
"Visualize",
"the",
"dataframe",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/plot.py#L67-L117
|
[
"def",
"heatmap",
"(",
"df",
",",
"z_score",
"=",
"None",
",",
"title",
"=",
"''",
",",
"figsize",
"=",
"(",
"5",
",",
"5",
")",
",",
"cmap",
"=",
"'RdBu_r'",
",",
"xticklabels",
"=",
"True",
",",
"yticklabels",
"=",
"True",
",",
"ofname",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"df",
"=",
"zscore",
"(",
"df",
",",
"axis",
"=",
"z_score",
")",
"df",
"=",
"df",
".",
"iloc",
"[",
":",
":",
"-",
"1",
"]",
"# Get the positions and used label for the ticks",
"ny",
",",
"nx",
"=",
"df",
".",
"shape",
"xticks",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"nx",
",",
"1",
")",
"+",
".5",
"yticks",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"ny",
",",
"1",
")",
"+",
".5",
"# If working on commandline, don't show figure",
"if",
"hasattr",
"(",
"sys",
",",
"'ps1'",
")",
"and",
"(",
"ofname",
"is",
"None",
")",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"else",
":",
"fig",
"=",
"Figure",
"(",
"figsize",
"=",
"figsize",
")",
"canvas",
"=",
"FigureCanvas",
"(",
"fig",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"vmin",
"=",
"np",
".",
"percentile",
"(",
"df",
".",
"min",
"(",
")",
",",
"2",
")",
"vmax",
"=",
"np",
".",
"percentile",
"(",
"df",
".",
"max",
"(",
")",
",",
"98",
")",
"matrix",
"=",
"ax",
".",
"pcolormesh",
"(",
"df",
".",
"values",
",",
"cmap",
"=",
"cmap",
",",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
")",
"ax",
".",
"set_ylim",
"(",
"[",
"0",
",",
"len",
"(",
"df",
")",
"]",
")",
"ax",
".",
"set",
"(",
"xticks",
"=",
"xticks",
",",
"yticks",
"=",
"yticks",
")",
"ax",
".",
"set_xticklabels",
"(",
"df",
".",
"columns",
".",
"values",
"if",
"xticklabels",
"else",
"''",
",",
"fontsize",
"=",
"14",
",",
"rotation",
"=",
"90",
")",
"ax",
".",
"set_yticklabels",
"(",
"df",
".",
"index",
".",
"values",
"if",
"yticklabels",
"else",
"''",
",",
"fontsize",
"=",
"14",
")",
"ax",
".",
"set_title",
"(",
"\"%s\\nHeatmap of the Analyzed Geneset\"",
"%",
"title",
",",
"fontsize",
"=",
"20",
")",
"ax",
".",
"tick_params",
"(",
"axis",
"=",
"'both'",
",",
"which",
"=",
"'both'",
",",
"bottom",
"=",
"False",
",",
"top",
"=",
"False",
",",
"right",
"=",
"False",
",",
"left",
"=",
"False",
")",
"# cax=fig.add_axes([0.93,0.25,0.05,0.20])",
"# cbar = fig.colorbar(matrix, cax=cax)",
"cbar",
"=",
"colorbar",
"(",
"matrix",
")",
"cbar",
".",
"ax",
".",
"tick_params",
"(",
"axis",
"=",
"'both'",
",",
"which",
"=",
"'both'",
",",
"bottom",
"=",
"False",
",",
"top",
"=",
"False",
",",
"right",
"=",
"False",
",",
"left",
"=",
"False",
")",
"for",
"side",
"in",
"[",
"\"top\"",
",",
"\"right\"",
",",
"\"left\"",
",",
"\"bottom\"",
"]",
":",
"ax",
".",
"spines",
"[",
"side",
"]",
".",
"set_visible",
"(",
"False",
")",
"cbar",
".",
"ax",
".",
"spines",
"[",
"side",
"]",
".",
"set_visible",
"(",
"False",
")",
"# cbar.ax.set_title('',loc='left')",
"if",
"ofname",
"is",
"not",
"None",
":",
"# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)",
"fig",
".",
"savefig",
"(",
"ofname",
",",
"bbox_inches",
"=",
"'tight'",
",",
"dpi",
"=",
"300",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
gseaplot
|
This is the main function for reproducing the gsea plot.
:param rank_metric: pd.Series for rankings, rank_metric.values.
:param term: gene_set name
:param hits_indices: indices of rank_metric.index that are present in gene set S.
:param nes: Normalized enrichment scores.
:param pval: nominal p-value.
:param fdr: false discovery rate.
:param RES: running enrichment scores.
:param pheno_pos: phenotype label, positively correlated.
:param pheno_neg: phenotype label, negatively correlated.
:param figsize: matplotlib figsize.
:param ofname: output file name. If None, don't save figure
|
gseapy/plot.py
|
def gseaplot(rank_metric, term, hits_indices, nes, pval, fdr, RES,
pheno_pos='', pheno_neg='', figsize=(6,5.5),
cmap='seismic', ofname=None, **kwargs):
"""This is the main function for reproducing the gsea plot.
:param rank_metric: pd.Series for rankings, rank_metric.values.
:param term: gene_set name
    :param hits_indices: indices of rank_metric.index that are present in gene set S.
:param nes: Normalized enrichment scores.
:param pval: nominal p-value.
:param fdr: false discovery rate.
:param RES: running enrichment scores.
    :param pheno_pos: phenotype label, positively correlated.
    :param pheno_neg: phenotype label, negatively correlated.
:param figsize: matplotlib figsize.
:param ofname: output file name. If None, don't save figure
"""
# plt.style.use('classic')
# center color map at midpoint = 0
norm = _MidpointNormalize(midpoint=0)
#dataFrame of ranked matrix scores
x = np.arange(len(rank_metric))
rankings = rank_metric.values
# figsize = (6,6)
phenoP_label = pheno_pos + ' (Positively Correlated)'
phenoN_label = pheno_neg + ' (Negatively Correlated)'
zero_score_ind = np.abs(rankings).argmin()
z_score_label = 'Zero score at ' + str(zero_score_ind)
nes_label = 'NES: '+ "{:.3f}".format(float(nes))
pval_label = 'Pval: '+ "{:.3f}".format(float(pval))
fdr_label = 'FDR: '+ "{:.3f}".format(float(fdr))
im_matrix = np.tile(rankings, (2,1))
# output truetype
plt.rcParams.update({'pdf.fonttype':42,'ps.fonttype':42})
    # in most cases, we will have many plots, so do not display them
    # It's also useful to run this script on the command line.
# GSEA Plots
gs = plt.GridSpec(16,1)
if hasattr(sys, 'ps1') and (ofname is None):
# working inside python console, show figure
fig = plt.figure(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
# Ranked Metric Scores Plot
ax1 = fig.add_subplot(gs[11:])
module = 'tmp' if ofname is None else ofname.split(".")[-2]
if module == 'ssgsea':
nes_label = 'ES: '+ "{:.3f}".format(float(nes))
pval_label='Pval: '
fdr_label='FDR: '
ax1.fill_between(x, y1=np.log(rankings), y2=0, color='#C9D3DB')
ax1.set_ylabel("log ranked metric", fontsize=14)
else:
ax1.fill_between(x, y1=rankings, y2=0, color='#C9D3DB')
ax1.set_ylabel("Ranked list metric", fontsize=14)
ax1.text(.05, .9, phenoP_label, color='red',
horizontalalignment='left', verticalalignment='top',
transform=ax1.transAxes)
ax1.text(.95, .05, phenoN_label, color='Blue',
horizontalalignment='right', verticalalignment='bottom',
transform=ax1.transAxes)
# the x coords of this transformation are data, and the y coord are axes
trans1 = transforms.blended_transform_factory(ax1.transData, ax1.transAxes)
if module != 'ssgsea':
ax1.vlines(zero_score_ind, 0, 1, linewidth=.5, transform=trans1, linestyles='--', color='grey')
ax1.text(zero_score_ind, 0.5, z_score_label,
horizontalalignment='center',
verticalalignment='center',
transform=trans1)
ax1.set_xlabel("Rank in Ordered Dataset", fontsize=14)
ax1.spines['top'].set_visible(False)
ax1.tick_params(axis='both', which='both', top=False, right=False, left=False)
ax1.locator_params(axis='y', nbins=5)
ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc) ))
# use round method to control float number
# ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : round(tick_loc, 1) ))
# gene hits
ax2 = fig.add_subplot(gs[8:10], sharex=ax1)
# the x coords of this transformation are data, and the y coord are axes
trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)
ax2.vlines(hits_indices, 0, 1,linewidth=.5,transform=trans2)
ax2.spines['bottom'].set_visible(False)
ax2.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
# colormap
ax3 = fig.add_subplot(gs[10], sharex=ax1)
ax3.imshow(im_matrix, aspect='auto', norm=norm, cmap=cmap, interpolation='none') # cm.coolwarm
ax3.spines['bottom'].set_visible(False)
ax3.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False,labelleft=False)
# Enrichment score plot
ax4 = fig.add_subplot(gs[:8], sharex=ax1)
ax4.plot(x, RES, linewidth=4, color ='#88C544')
ax4.text(.1, .1, fdr_label, transform=ax4.transAxes)
ax4.text(.1, .2, pval_label, transform=ax4.transAxes)
ax4.text(.1, .3, nes_label, transform=ax4.transAxes)
# the y coords of this transformation are data, and the x coord are axes
trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)
ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')
ax4.set_ylabel("Enrichment score (ES)", fontsize=14)
ax4.set_xlim(min(x), max(x))
ax4.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False)
ax4.locator_params(axis='y', nbins=5)
    # FuncFormatter expects two arguments (tick_loc, tick_num); this lambda formats the y-axis tick labels.
ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )
# fig adjustment
fig.suptitle(term, fontsize=16, fontweight='bold')
fig.subplots_adjust(hspace=0)
# fig.tight_layout()
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
|
def gseaplot(rank_metric, term, hits_indices, nes, pval, fdr, RES,
pheno_pos='', pheno_neg='', figsize=(6,5.5),
cmap='seismic', ofname=None, **kwargs):
"""This is the main function for reproducing the gsea plot.
:param rank_metric: pd.Series for rankings, rank_metric.values.
:param term: gene_set name
    :param hits_indices: indices of rank_metric.index that are present in gene set S.
:param nes: Normalized enrichment scores.
:param pval: nominal p-value.
:param fdr: false discovery rate.
:param RES: running enrichment scores.
    :param pheno_pos: phenotype label, positively correlated.
    :param pheno_neg: phenotype label, negatively correlated.
:param figsize: matplotlib figsize.
:param ofname: output file name. If None, don't save figure
"""
# plt.style.use('classic')
# center color map at midpoint = 0
norm = _MidpointNormalize(midpoint=0)
#dataFrame of ranked matrix scores
x = np.arange(len(rank_metric))
rankings = rank_metric.values
# figsize = (6,6)
phenoP_label = pheno_pos + ' (Positively Correlated)'
phenoN_label = pheno_neg + ' (Negatively Correlated)'
zero_score_ind = np.abs(rankings).argmin()
z_score_label = 'Zero score at ' + str(zero_score_ind)
nes_label = 'NES: '+ "{:.3f}".format(float(nes))
pval_label = 'Pval: '+ "{:.3f}".format(float(pval))
fdr_label = 'FDR: '+ "{:.3f}".format(float(fdr))
im_matrix = np.tile(rankings, (2,1))
# output truetype
plt.rcParams.update({'pdf.fonttype':42,'ps.fonttype':42})
    # in most cases, we will have many plots, so do not display them
    # It's also useful to run this script on the command line.
# GSEA Plots
gs = plt.GridSpec(16,1)
if hasattr(sys, 'ps1') and (ofname is None):
# working inside python console, show figure
fig = plt.figure(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
# Ranked Metric Scores Plot
ax1 = fig.add_subplot(gs[11:])
module = 'tmp' if ofname is None else ofname.split(".")[-2]
if module == 'ssgsea':
nes_label = 'ES: '+ "{:.3f}".format(float(nes))
pval_label='Pval: '
fdr_label='FDR: '
ax1.fill_between(x, y1=np.log(rankings), y2=0, color='#C9D3DB')
ax1.set_ylabel("log ranked metric", fontsize=14)
else:
ax1.fill_between(x, y1=rankings, y2=0, color='#C9D3DB')
ax1.set_ylabel("Ranked list metric", fontsize=14)
ax1.text(.05, .9, phenoP_label, color='red',
horizontalalignment='left', verticalalignment='top',
transform=ax1.transAxes)
ax1.text(.95, .05, phenoN_label, color='Blue',
horizontalalignment='right', verticalalignment='bottom',
transform=ax1.transAxes)
# the x coords of this transformation are data, and the y coord are axes
trans1 = transforms.blended_transform_factory(ax1.transData, ax1.transAxes)
if module != 'ssgsea':
ax1.vlines(zero_score_ind, 0, 1, linewidth=.5, transform=trans1, linestyles='--', color='grey')
ax1.text(zero_score_ind, 0.5, z_score_label,
horizontalalignment='center',
verticalalignment='center',
transform=trans1)
ax1.set_xlabel("Rank in Ordered Dataset", fontsize=14)
ax1.spines['top'].set_visible(False)
ax1.tick_params(axis='both', which='both', top=False, right=False, left=False)
ax1.locator_params(axis='y', nbins=5)
ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc) ))
# use round method to control float number
# ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : round(tick_loc, 1) ))
# gene hits
ax2 = fig.add_subplot(gs[8:10], sharex=ax1)
# the x coords of this transformation are data, and the y coord are axes
trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)
ax2.vlines(hits_indices, 0, 1,linewidth=.5,transform=trans2)
ax2.spines['bottom'].set_visible(False)
ax2.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
# colormap
ax3 = fig.add_subplot(gs[10], sharex=ax1)
ax3.imshow(im_matrix, aspect='auto', norm=norm, cmap=cmap, interpolation='none') # cm.coolwarm
ax3.spines['bottom'].set_visible(False)
ax3.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False,labelleft=False)
# Enrichment score plot
ax4 = fig.add_subplot(gs[:8], sharex=ax1)
ax4.plot(x, RES, linewidth=4, color ='#88C544')
ax4.text(.1, .1, fdr_label, transform=ax4.transAxes)
ax4.text(.1, .2, pval_label, transform=ax4.transAxes)
ax4.text(.1, .3, nes_label, transform=ax4.transAxes)
# the y coords of this transformation are data, and the x coord are axes
trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)
ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')
ax4.set_ylabel("Enrichment score (ES)", fontsize=14)
ax4.set_xlim(min(x), max(x))
ax4.tick_params(axis='both', which='both', bottom=False, top=False, labelbottom=False, right=False)
ax4.locator_params(axis='y', nbins=5)
    # FuncFormatter expects two arguments (tick_loc, tick_num); this lambda formats the y-axis tick labels.
ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )
# fig adjustment
fig.suptitle(term, fontsize=16, fontweight='bold')
fig.subplots_adjust(hspace=0)
# fig.tight_layout()
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
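
# A sketch with toy inputs; in practice these come from a finished GSEA run.
# Assumes gseaplot is importable from gseapy.plot. Note that ofname's
# second-to-last dot-separated field selects the module branch (here 'gsea',
# not 'ssgsea').
import numpy as np
import pandas as pd
from gseapy.plot import gseaplot
rnk = pd.Series(np.linspace(2.0, -2.0, 100))     # toy ranked metric
res = 0.6 * np.sin(np.linspace(0, np.pi, 100))   # toy running ES curve
gseaplot(rnk, term='TOY_SET', hits_indices=[3, 10, 42, 77],
         nes=1.8, pval=0.01, fdr=0.05, RES=res,
         pheno_pos='tumor', pheno_neg='normal', ofname='toy.gsea.pdf')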
|
[
"This",
"is",
"the",
"main",
"function",
"for",
"reproducing",
"the",
"gsea",
"plot",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/plot.py#L119-L243
|
[
"def",
"gseaplot",
"(",
"rank_metric",
",",
"term",
",",
"hits_indices",
",",
"nes",
",",
"pval",
",",
"fdr",
",",
"RES",
",",
"pheno_pos",
"=",
"''",
",",
"pheno_neg",
"=",
"''",
",",
"figsize",
"=",
"(",
"6",
",",
"5.5",
")",
",",
"cmap",
"=",
"'seismic'",
",",
"ofname",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# plt.style.use('classic')",
"# center color map at midpoint = 0",
"norm",
"=",
"_MidpointNormalize",
"(",
"midpoint",
"=",
"0",
")",
"#dataFrame of ranked matrix scores",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"rank_metric",
")",
")",
"rankings",
"=",
"rank_metric",
".",
"values",
"# figsize = (6,6)",
"phenoP_label",
"=",
"pheno_pos",
"+",
"' (Positively Correlated)'",
"phenoN_label",
"=",
"pheno_neg",
"+",
"' (Negatively Correlated)'",
"zero_score_ind",
"=",
"np",
".",
"abs",
"(",
"rankings",
")",
".",
"argmin",
"(",
")",
"z_score_label",
"=",
"'Zero score at '",
"+",
"str",
"(",
"zero_score_ind",
")",
"nes_label",
"=",
"'NES: '",
"+",
"\"{:.3f}\"",
".",
"format",
"(",
"float",
"(",
"nes",
")",
")",
"pval_label",
"=",
"'Pval: '",
"+",
"\"{:.3f}\"",
".",
"format",
"(",
"float",
"(",
"pval",
")",
")",
"fdr_label",
"=",
"'FDR: '",
"+",
"\"{:.3f}\"",
".",
"format",
"(",
"float",
"(",
"fdr",
")",
")",
"im_matrix",
"=",
"np",
".",
"tile",
"(",
"rankings",
",",
"(",
"2",
",",
"1",
")",
")",
"# output truetype",
"plt",
".",
"rcParams",
".",
"update",
"(",
"{",
"'pdf.fonttype'",
":",
"42",
",",
"'ps.fonttype'",
":",
"42",
"}",
")",
"# in most case, we will have many plots, so do not display plots",
"# It's also usefull to run this script on command line.",
"# GSEA Plots",
"gs",
"=",
"plt",
".",
"GridSpec",
"(",
"16",
",",
"1",
")",
"if",
"hasattr",
"(",
"sys",
",",
"'ps1'",
")",
"and",
"(",
"ofname",
"is",
"None",
")",
":",
"# working inside python console, show figure",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"else",
":",
"# If working on commandline, don't show figure",
"fig",
"=",
"Figure",
"(",
"figsize",
"=",
"figsize",
")",
"canvas",
"=",
"FigureCanvas",
"(",
"fig",
")",
"# Ranked Metric Scores Plot",
"ax1",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
"11",
":",
"]",
")",
"module",
"=",
"'tmp'",
"if",
"ofname",
"is",
"None",
"else",
"ofname",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"2",
"]",
"if",
"module",
"==",
"'ssgsea'",
":",
"nes_label",
"=",
"'ES: '",
"+",
"\"{:.3f}\"",
".",
"format",
"(",
"float",
"(",
"nes",
")",
")",
"pval_label",
"=",
"'Pval: '",
"fdr_label",
"=",
"'FDR: '",
"ax1",
".",
"fill_between",
"(",
"x",
",",
"y1",
"=",
"np",
".",
"log",
"(",
"rankings",
")",
",",
"y2",
"=",
"0",
",",
"color",
"=",
"'#C9D3DB'",
")",
"ax1",
".",
"set_ylabel",
"(",
"\"log ranked metric\"",
",",
"fontsize",
"=",
"14",
")",
"else",
":",
"ax1",
".",
"fill_between",
"(",
"x",
",",
"y1",
"=",
"rankings",
",",
"y2",
"=",
"0",
",",
"color",
"=",
"'#C9D3DB'",
")",
"ax1",
".",
"set_ylabel",
"(",
"\"Ranked list metric\"",
",",
"fontsize",
"=",
"14",
")",
"ax1",
".",
"text",
"(",
".05",
",",
".9",
",",
"phenoP_label",
",",
"color",
"=",
"'red'",
",",
"horizontalalignment",
"=",
"'left'",
",",
"verticalalignment",
"=",
"'top'",
",",
"transform",
"=",
"ax1",
".",
"transAxes",
")",
"ax1",
".",
"text",
"(",
".95",
",",
".05",
",",
"phenoN_label",
",",
"color",
"=",
"'Blue'",
",",
"horizontalalignment",
"=",
"'right'",
",",
"verticalalignment",
"=",
"'bottom'",
",",
"transform",
"=",
"ax1",
".",
"transAxes",
")",
"# the x coords of this transformation are data, and the y coord are axes",
"trans1",
"=",
"transforms",
".",
"blended_transform_factory",
"(",
"ax1",
".",
"transData",
",",
"ax1",
".",
"transAxes",
")",
"if",
"module",
"!=",
"'ssgsea'",
":",
"ax1",
".",
"vlines",
"(",
"zero_score_ind",
",",
"0",
",",
"1",
",",
"linewidth",
"=",
".5",
",",
"transform",
"=",
"trans1",
",",
"linestyles",
"=",
"'--'",
",",
"color",
"=",
"'grey'",
")",
"ax1",
".",
"text",
"(",
"zero_score_ind",
",",
"0.5",
",",
"z_score_label",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
",",
"transform",
"=",
"trans1",
")",
"ax1",
".",
"set_xlabel",
"(",
"\"Rank in Ordered Dataset\"",
",",
"fontsize",
"=",
"14",
")",
"ax1",
".",
"spines",
"[",
"'top'",
"]",
".",
"set_visible",
"(",
"False",
")",
"ax1",
".",
"tick_params",
"(",
"axis",
"=",
"'both'",
",",
"which",
"=",
"'both'",
",",
"top",
"=",
"False",
",",
"right",
"=",
"False",
",",
"left",
"=",
"False",
")",
"ax1",
".",
"locator_params",
"(",
"axis",
"=",
"'y'",
",",
"nbins",
"=",
"5",
")",
"ax1",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"plt",
".",
"FuncFormatter",
"(",
"lambda",
"tick_loc",
",",
"tick_num",
":",
"'{:.1f}'",
".",
"format",
"(",
"tick_loc",
")",
")",
")",
"# use round method to control float number",
"# ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : round(tick_loc, 1) ))",
"# gene hits",
"ax2",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
"8",
":",
"10",
"]",
",",
"sharex",
"=",
"ax1",
")",
"# the x coords of this transformation are data, and the y coord are axes",
"trans2",
"=",
"transforms",
".",
"blended_transform_factory",
"(",
"ax2",
".",
"transData",
",",
"ax2",
".",
"transAxes",
")",
"ax2",
".",
"vlines",
"(",
"hits_indices",
",",
"0",
",",
"1",
",",
"linewidth",
"=",
".5",
",",
"transform",
"=",
"trans2",
")",
"ax2",
".",
"spines",
"[",
"'bottom'",
"]",
".",
"set_visible",
"(",
"False",
")",
"ax2",
".",
"tick_params",
"(",
"axis",
"=",
"'both'",
",",
"which",
"=",
"'both'",
",",
"bottom",
"=",
"False",
",",
"top",
"=",
"False",
",",
"labelbottom",
"=",
"False",
",",
"right",
"=",
"False",
",",
"left",
"=",
"False",
",",
"labelleft",
"=",
"False",
")",
"# colormap",
"ax3",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
"10",
"]",
",",
"sharex",
"=",
"ax1",
")",
"ax3",
".",
"imshow",
"(",
"im_matrix",
",",
"aspect",
"=",
"'auto'",
",",
"norm",
"=",
"norm",
",",
"cmap",
"=",
"cmap",
",",
"interpolation",
"=",
"'none'",
")",
"# cm.coolwarm",
"ax3",
".",
"spines",
"[",
"'bottom'",
"]",
".",
"set_visible",
"(",
"False",
")",
"ax3",
".",
"tick_params",
"(",
"axis",
"=",
"'both'",
",",
"which",
"=",
"'both'",
",",
"bottom",
"=",
"False",
",",
"top",
"=",
"False",
",",
"labelbottom",
"=",
"False",
",",
"right",
"=",
"False",
",",
"left",
"=",
"False",
",",
"labelleft",
"=",
"False",
")",
"# Enrichment score plot",
"ax4",
"=",
"fig",
".",
"add_subplot",
"(",
"gs",
"[",
":",
"8",
"]",
",",
"sharex",
"=",
"ax1",
")",
"ax4",
".",
"plot",
"(",
"x",
",",
"RES",
",",
"linewidth",
"=",
"4",
",",
"color",
"=",
"'#88C544'",
")",
"ax4",
".",
"text",
"(",
".1",
",",
".1",
",",
"fdr_label",
",",
"transform",
"=",
"ax4",
".",
"transAxes",
")",
"ax4",
".",
"text",
"(",
".1",
",",
".2",
",",
"pval_label",
",",
"transform",
"=",
"ax4",
".",
"transAxes",
")",
"ax4",
".",
"text",
"(",
".1",
",",
".3",
",",
"nes_label",
",",
"transform",
"=",
"ax4",
".",
"transAxes",
")",
"# the y coords of this transformation are data, and the x coord are axes",
"trans4",
"=",
"transforms",
".",
"blended_transform_factory",
"(",
"ax4",
".",
"transAxes",
",",
"ax4",
".",
"transData",
")",
"ax4",
".",
"hlines",
"(",
"0",
",",
"0",
",",
"1",
",",
"linewidth",
"=",
".5",
",",
"transform",
"=",
"trans4",
",",
"color",
"=",
"'grey'",
")",
"ax4",
".",
"set_ylabel",
"(",
"\"Enrichment score (ES)\"",
",",
"fontsize",
"=",
"14",
")",
"ax4",
".",
"set_xlim",
"(",
"min",
"(",
"x",
")",
",",
"max",
"(",
"x",
")",
")",
"ax4",
".",
"tick_params",
"(",
"axis",
"=",
"'both'",
",",
"which",
"=",
"'both'",
",",
"bottom",
"=",
"False",
",",
"top",
"=",
"False",
",",
"labelbottom",
"=",
"False",
",",
"right",
"=",
"False",
")",
"ax4",
".",
"locator_params",
"(",
"axis",
"=",
"'y'",
",",
"nbins",
"=",
"5",
")",
"# FuncFormatter need two argument, I don't know why. this lambda function used to format yaxis tick labels.",
"ax4",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"plt",
".",
"FuncFormatter",
"(",
"lambda",
"tick_loc",
",",
"tick_num",
":",
"'{:.1f}'",
".",
"format",
"(",
"tick_loc",
")",
")",
")",
"# fig adjustment",
"fig",
".",
"suptitle",
"(",
"term",
",",
"fontsize",
"=",
"16",
",",
"fontweight",
"=",
"'bold'",
")",
"fig",
".",
"subplots_adjust",
"(",
"hspace",
"=",
"0",
")",
"# fig.tight_layout()",
"if",
"ofname",
"is",
"not",
"None",
":",
"# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)",
"fig",
".",
"savefig",
"(",
"ofname",
",",
"bbox_inches",
"=",
"'tight'",
",",
"dpi",
"=",
"300",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
dotplot
|
Visualize enrichr results.
:param df: GSEApy DataFrame results.
:param column: which column of DataFrame to show. Default: Adjusted P-value
:param title: figure title
:param cutoff: p-adjust cut-off.
:param top_term: number of enriched terms to show.
:param ascending: bool, the order of y axis.
:param sizes: tuple, (min, max) scatter size. Not functional for now
:param norm: matplotlib.colors.Normalize object.
:param legend: bool, whether to show legend.
:param figsize: tuple, figure size.
:param cmap: matplotlib colormap
:param ofname: output file name. If None, don't save figure
|
gseapy/plot.py
|
def dotplot(df, column='Adjusted P-value', title='', cutoff=0.05, top_term=10,
sizes=None, norm=None, legend=True, figsize=(6, 5.5),
cmap='RdBu_r', ofname=None, **kwargs):
"""Visualize enrichr results.
:param df: GSEApy DataFrame results.
:param column: which column of DataFrame to show. Default: Adjusted P-value
:param title: figure title
:param cutoff: p-adjust cut-off.
:param top_term: number of enriched terms to show.
:param ascending: bool, the order of y axis.
:param sizes: tuple, (min, max) scatter size. Not functional for now
    :param norm: matplotlib.colors.Normalize object.
:param legend: bool, whether to show legend.
:param figsize: tuple, figure size.
:param cmap: matplotlib colormap
:param ofname: output file name. If None, don't save figure
"""
colname = column
# sorting the dataframe for better visualization
if colname in ['Adjusted P-value', 'P-value']:
# check if any values in `df[colname]` can't be coerced to floats
can_be_coerced = df[colname].map(isfloat)
if np.sum(~can_be_coerced) > 0:
raise ValueError('some value in %s could not be typecast to `float`'%colname)
else:
df.loc[:, colname] = df[colname].map(float)
df = df[df[colname] <= cutoff]
if len(df) < 1:
msg = "Warning: No enrich terms when cutoff = %s"%cutoff
return msg
df = df.assign(logAP=lambda x: - x[colname].apply(np.log10))
colname='logAP'
df = df.sort_values(by=colname).iloc[-top_term:,:]
#
temp = df['Overlap'].str.split("/", expand=True).astype(int)
df = df.assign(Hits=temp.iloc[:,0], Background=temp.iloc[:,1])
df = df.assign(Hits_ratio=lambda x:x.Hits / x.Background)
# x axis values
x = df.loc[:, colname].values
combined_score = df['Combined Score'].round().astype('int')
# y axis index and values
y = [i for i in range(0,len(df))]
ylabels = df['Term'].values
# Normalise to [0,1]
# b = (df['Count'] - df['Count'].min())/ np.ptp(df['Count'])
# area = 100 * b
# control the size of scatter and legend marker
levels = numbers = np.sort(df.Hits.unique())
if norm is None:
norm = Normalize()
elif isinstance(norm, tuple):
norm = Normalize(*norm)
elif not isinstance(norm, Normalize):
err = ("``size_norm`` must be None, tuple, "
"or Normalize object.")
raise ValueError(err)
min_width, max_width = np.r_[20, 100] * plt.rcParams["lines.linewidth"]
norm.clip = True
if not norm.scaled():
norm(np.asarray(numbers))
size_limits = norm.vmin, norm.vmax
scl = norm(numbers)
widths = np.asarray(min_width + scl * (max_width - min_width))
if scl.mask.any():
widths[scl.mask] = 0
sizes = dict(zip(levels, widths))
df['sizes'] = df.Hits.map(sizes)
area = df['sizes'].values
    # create scatter plot
if hasattr(sys, 'ps1') and (ofname is None):
# working inside python console, show figure
fig, ax = plt.subplots(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
vmin = np.percentile(combined_score.min(), 2)
vmax = np.percentile(combined_score.max(), 98)
sc = ax.scatter(x=x, y=y, s=area, edgecolors='face', c=combined_score,
cmap=cmap, vmin=vmin, vmax=vmax)
if column in ['Adjusted P-value', 'P-value']:
xlabel = "-log$_{10}$(%s)"%column
else:
xlabel = column
ax.set_xlabel(xlabel, fontsize=14, fontweight='bold')
ax.yaxis.set_major_locator(plt.FixedLocator(y))
ax.yaxis.set_major_formatter(plt.FixedFormatter(ylabels))
ax.set_yticklabels(ylabels, fontsize=16)
# ax.set_ylim([-1, len(df)])
ax.grid()
# colorbar
cax=fig.add_axes([0.95,0.20,0.03,0.22])
cbar = fig.colorbar(sc, cax=cax,)
cbar.ax.tick_params(right=True)
cbar.ax.set_title('Combined\nScore',loc='left', fontsize=12)
    # for fewer than 3 terms
if len(df) >= 3:
        # find the index of the closest value to the mean
idx = [area.argmax(), np.abs(area - area.mean()).argmin(), area.argmin()]
idx = unique(idx)
else:
idx = df.index.values
label = df.iloc[idx, df.columns.get_loc('Hits')]
if legend:
handles, _ = ax.get_legend_handles_labels()
legend_markers = []
for ix in idx:
legend_markers.append(ax.scatter([],[], s=area[ix], c='b'))
# artist = ax.scatter([], [], s=size_levels,)
ax.legend(legend_markers, label, title='Hits')
ax.set_title(title, fontsize=20, fontweight='bold')
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
return ax
|
def dotplot(df, column='Adjusted P-value', title='', cutoff=0.05, top_term=10,
sizes=None, norm=None, legend=True, figsize=(6, 5.5),
cmap='RdBu_r', ofname=None, **kwargs):
"""Visualize enrichr results.
:param df: GSEApy DataFrame results.
:param column: which column of DataFrame to show. Default: Adjusted P-value
:param title: figure title
:param cutoff: p-adjust cut-off.
:param top_term: number of enriched terms to show.
:param ascending: bool, the order of y axis.
:param sizes: tuple, (min, max) scatter size. Not functional for now
    :param norm: matplotlib.colors.Normalize object.
:param legend: bool, whether to show legend.
:param figsize: tuple, figure size.
:param cmap: matplotlib colormap
:param ofname: output file name. If None, don't save figure
"""
colname = column
# sorting the dataframe for better visualization
if colname in ['Adjusted P-value', 'P-value']:
# check if any values in `df[colname]` can't be coerced to floats
can_be_coerced = df[colname].map(isfloat)
if np.sum(~can_be_coerced) > 0:
raise ValueError('some value in %s could not be typecast to `float`'%colname)
else:
df.loc[:, colname] = df[colname].map(float)
df = df[df[colname] <= cutoff]
if len(df) < 1:
msg = "Warning: No enrich terms when cutoff = %s"%cutoff
return msg
df = df.assign(logAP=lambda x: - x[colname].apply(np.log10))
colname='logAP'
df = df.sort_values(by=colname).iloc[-top_term:,:]
#
temp = df['Overlap'].str.split("/", expand=True).astype(int)
df = df.assign(Hits=temp.iloc[:,0], Background=temp.iloc[:,1])
df = df.assign(Hits_ratio=lambda x:x.Hits / x.Background)
# x axis values
x = df.loc[:, colname].values
combined_score = df['Combined Score'].round().astype('int')
# y axis index and values
y = [i for i in range(0,len(df))]
ylabels = df['Term'].values
# Normalise to [0,1]
# b = (df['Count'] - df['Count'].min())/ np.ptp(df['Count'])
# area = 100 * b
# control the size of scatter and legend marker
levels = numbers = np.sort(df.Hits.unique())
if norm is None:
norm = Normalize()
elif isinstance(norm, tuple):
norm = Normalize(*norm)
elif not isinstance(norm, Normalize):
err = ("``size_norm`` must be None, tuple, "
"or Normalize object.")
raise ValueError(err)
min_width, max_width = np.r_[20, 100] * plt.rcParams["lines.linewidth"]
norm.clip = True
if not norm.scaled():
norm(np.asarray(numbers))
size_limits = norm.vmin, norm.vmax
scl = norm(numbers)
widths = np.asarray(min_width + scl * (max_width - min_width))
if scl.mask.any():
widths[scl.mask] = 0
sizes = dict(zip(levels, widths))
df['sizes'] = df.Hits.map(sizes)
area = df['sizes'].values
    # create scatter plot
if hasattr(sys, 'ps1') and (ofname is None):
# working inside python console, show figure
fig, ax = plt.subplots(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
vmin = np.percentile(combined_score.min(), 2)
vmax = np.percentile(combined_score.max(), 98)
sc = ax.scatter(x=x, y=y, s=area, edgecolors='face', c=combined_score,
cmap=cmap, vmin=vmin, vmax=vmax)
if column in ['Adjusted P-value', 'P-value']:
xlabel = "-log$_{10}$(%s)"%column
else:
xlabel = column
ax.set_xlabel(xlabel, fontsize=14, fontweight='bold')
ax.yaxis.set_major_locator(plt.FixedLocator(y))
ax.yaxis.set_major_formatter(plt.FixedFormatter(ylabels))
ax.set_yticklabels(ylabels, fontsize=16)
# ax.set_ylim([-1, len(df)])
ax.grid()
# colorbar
cax=fig.add_axes([0.95,0.20,0.03,0.22])
cbar = fig.colorbar(sc, cax=cax,)
cbar.ax.tick_params(right=True)
cbar.ax.set_title('Combined\nScore',loc='left', fontsize=12)
    # for fewer than 3 terms
if len(df) >= 3:
        # find the index of the closest value to the mean
idx = [area.argmax(), np.abs(area - area.mean()).argmin(), area.argmin()]
idx = unique(idx)
else:
idx = df.index.values
label = df.iloc[idx, df.columns.get_loc('Hits')]
if legend:
handles, _ = ax.get_legend_handles_labels()
legend_markers = []
for ix in idx:
legend_markers.append(ax.scatter([],[], s=area[ix], c='b'))
# artist = ax.scatter([], [], s=size_levels,)
ax.legend(legend_markers, label, title='Hits')
ax.set_title(title, fontsize=20, fontweight='bold')
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
return ax
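
# A usage sketch against Enrichr output; assumes dotplot is importable from
# gseapy.plot and that gseapy's enrichr() returns an object whose .results
# DataFrame carries the 'Adjusted P-value', 'Overlap', 'Combined Score', and
# 'Term' columns this function reads. The gene list is made up.
import gseapy as gp
from gseapy.plot import dotplot
enr = gp.enrichr(gene_list=['TP53', 'BRCA1', 'EGFR', 'MYC'],
                 gene_sets='KEGG_2016')
ax = dotplot(enr.results, title='KEGG_2016', top_term=10,
             cutoff=0.05, ofname='enrichr.dotplot.pdf')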
|
[
"Visualize",
"enrichr",
"results",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/plot.py#L253-L380
|
[
"def",
"dotplot",
"(",
"df",
",",
"column",
"=",
"'Adjusted P-value'",
",",
"title",
"=",
"''",
",",
"cutoff",
"=",
"0.05",
",",
"top_term",
"=",
"10",
",",
"sizes",
"=",
"None",
",",
"norm",
"=",
"None",
",",
"legend",
"=",
"True",
",",
"figsize",
"=",
"(",
"6",
",",
"5.5",
")",
",",
"cmap",
"=",
"'RdBu_r'",
",",
"ofname",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"colname",
"=",
"column",
"# sorting the dataframe for better visualization",
"if",
"colname",
"in",
"[",
"'Adjusted P-value'",
",",
"'P-value'",
"]",
":",
"# check if any values in `df[colname]` can't be coerced to floats",
"can_be_coerced",
"=",
"df",
"[",
"colname",
"]",
".",
"map",
"(",
"isfloat",
")",
"if",
"np",
".",
"sum",
"(",
"~",
"can_be_coerced",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"'some value in %s could not be typecast to `float`'",
"%",
"colname",
")",
"else",
":",
"df",
".",
"loc",
"[",
":",
",",
"colname",
"]",
"=",
"df",
"[",
"colname",
"]",
".",
"map",
"(",
"float",
")",
"df",
"=",
"df",
"[",
"df",
"[",
"colname",
"]",
"<=",
"cutoff",
"]",
"if",
"len",
"(",
"df",
")",
"<",
"1",
":",
"msg",
"=",
"\"Warning: No enrich terms when cutoff = %s\"",
"%",
"cutoff",
"return",
"msg",
"df",
"=",
"df",
".",
"assign",
"(",
"logAP",
"=",
"lambda",
"x",
":",
"-",
"x",
"[",
"colname",
"]",
".",
"apply",
"(",
"np",
".",
"log10",
")",
")",
"colname",
"=",
"'logAP'",
"df",
"=",
"df",
".",
"sort_values",
"(",
"by",
"=",
"colname",
")",
".",
"iloc",
"[",
"-",
"top_term",
":",
",",
":",
"]",
"# ",
"temp",
"=",
"df",
"[",
"'Overlap'",
"]",
".",
"str",
".",
"split",
"(",
"\"/\"",
",",
"expand",
"=",
"True",
")",
".",
"astype",
"(",
"int",
")",
"df",
"=",
"df",
".",
"assign",
"(",
"Hits",
"=",
"temp",
".",
"iloc",
"[",
":",
",",
"0",
"]",
",",
"Background",
"=",
"temp",
".",
"iloc",
"[",
":",
",",
"1",
"]",
")",
"df",
"=",
"df",
".",
"assign",
"(",
"Hits_ratio",
"=",
"lambda",
"x",
":",
"x",
".",
"Hits",
"/",
"x",
".",
"Background",
")",
"# x axis values",
"x",
"=",
"df",
".",
"loc",
"[",
":",
",",
"colname",
"]",
".",
"values",
"combined_score",
"=",
"df",
"[",
"'Combined Score'",
"]",
".",
"round",
"(",
")",
".",
"astype",
"(",
"'int'",
")",
"# y axis index and values",
"y",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"df",
")",
")",
"]",
"ylabels",
"=",
"df",
"[",
"'Term'",
"]",
".",
"values",
"# Normalise to [0,1]",
"# b = (df['Count'] - df['Count'].min())/ np.ptp(df['Count'])",
"# area = 100 * b",
"# control the size of scatter and legend marker",
"levels",
"=",
"numbers",
"=",
"np",
".",
"sort",
"(",
"df",
".",
"Hits",
".",
"unique",
"(",
")",
")",
"if",
"norm",
"is",
"None",
":",
"norm",
"=",
"Normalize",
"(",
")",
"elif",
"isinstance",
"(",
"norm",
",",
"tuple",
")",
":",
"norm",
"=",
"Normalize",
"(",
"*",
"norm",
")",
"elif",
"not",
"isinstance",
"(",
"norm",
",",
"Normalize",
")",
":",
"err",
"=",
"(",
"\"``size_norm`` must be None, tuple, \"",
"\"or Normalize object.\"",
")",
"raise",
"ValueError",
"(",
"err",
")",
"min_width",
",",
"max_width",
"=",
"np",
".",
"r_",
"[",
"20",
",",
"100",
"]",
"*",
"plt",
".",
"rcParams",
"[",
"\"lines.linewidth\"",
"]",
"norm",
".",
"clip",
"=",
"True",
"if",
"not",
"norm",
".",
"scaled",
"(",
")",
":",
"norm",
"(",
"np",
".",
"asarray",
"(",
"numbers",
")",
")",
"size_limits",
"=",
"norm",
".",
"vmin",
",",
"norm",
".",
"vmax",
"scl",
"=",
"norm",
"(",
"numbers",
")",
"widths",
"=",
"np",
".",
"asarray",
"(",
"min_width",
"+",
"scl",
"*",
"(",
"max_width",
"-",
"min_width",
")",
")",
"if",
"scl",
".",
"mask",
".",
"any",
"(",
")",
":",
"widths",
"[",
"scl",
".",
"mask",
"]",
"=",
"0",
"sizes",
"=",
"dict",
"(",
"zip",
"(",
"levels",
",",
"widths",
")",
")",
"df",
"[",
"'sizes'",
"]",
"=",
"df",
".",
"Hits",
".",
"map",
"(",
"sizes",
")",
"area",
"=",
"df",
"[",
"'sizes'",
"]",
".",
"values",
"# creat scatter plot",
"if",
"hasattr",
"(",
"sys",
",",
"'ps1'",
")",
"and",
"(",
"ofname",
"is",
"None",
")",
":",
"# working inside python console, show figure",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"figsize",
")",
"else",
":",
"# If working on commandline, don't show figure",
"fig",
"=",
"Figure",
"(",
"figsize",
"=",
"figsize",
")",
"canvas",
"=",
"FigureCanvas",
"(",
"fig",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"vmin",
"=",
"np",
".",
"percentile",
"(",
"combined_score",
".",
"min",
"(",
")",
",",
"2",
")",
"vmax",
"=",
"np",
".",
"percentile",
"(",
"combined_score",
".",
"max",
"(",
")",
",",
"98",
")",
"sc",
"=",
"ax",
".",
"scatter",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"s",
"=",
"area",
",",
"edgecolors",
"=",
"'face'",
",",
"c",
"=",
"combined_score",
",",
"cmap",
"=",
"cmap",
",",
"vmin",
"=",
"vmin",
",",
"vmax",
"=",
"vmax",
")",
"if",
"column",
"in",
"[",
"'Adjusted P-value'",
",",
"'P-value'",
"]",
":",
"xlabel",
"=",
"\"-log$_{10}$(%s)\"",
"%",
"column",
"else",
":",
"xlabel",
"=",
"column",
"ax",
".",
"set_xlabel",
"(",
"xlabel",
",",
"fontsize",
"=",
"14",
",",
"fontweight",
"=",
"'bold'",
")",
"ax",
".",
"yaxis",
".",
"set_major_locator",
"(",
"plt",
".",
"FixedLocator",
"(",
"y",
")",
")",
"ax",
".",
"yaxis",
".",
"set_major_formatter",
"(",
"plt",
".",
"FixedFormatter",
"(",
"ylabels",
")",
")",
"ax",
".",
"set_yticklabels",
"(",
"ylabels",
",",
"fontsize",
"=",
"16",
")",
"# ax.set_ylim([-1, len(df)])",
"ax",
".",
"grid",
"(",
")",
"# colorbar",
"cax",
"=",
"fig",
".",
"add_axes",
"(",
"[",
"0.95",
",",
"0.20",
",",
"0.03",
",",
"0.22",
"]",
")",
"cbar",
"=",
"fig",
".",
"colorbar",
"(",
"sc",
",",
"cax",
"=",
"cax",
",",
")",
"cbar",
".",
"ax",
".",
"tick_params",
"(",
"right",
"=",
"True",
")",
"cbar",
".",
"ax",
".",
"set_title",
"(",
"'Combined\\nScore'",
",",
"loc",
"=",
"'left'",
",",
"fontsize",
"=",
"12",
")",
"# for terms less than 3",
"if",
"len",
"(",
"df",
")",
">=",
"3",
":",
"# find the index of the closest value to the median",
"idx",
"=",
"[",
"area",
".",
"argmax",
"(",
")",
",",
"np",
".",
"abs",
"(",
"area",
"-",
"area",
".",
"mean",
"(",
")",
")",
".",
"argmin",
"(",
")",
",",
"area",
".",
"argmin",
"(",
")",
"]",
"idx",
"=",
"unique",
"(",
"idx",
")",
"else",
":",
"idx",
"=",
"df",
".",
"index",
".",
"values",
"label",
"=",
"df",
".",
"iloc",
"[",
"idx",
",",
"df",
".",
"columns",
".",
"get_loc",
"(",
"'Hits'",
")",
"]",
"if",
"legend",
":",
"handles",
",",
"_",
"=",
"ax",
".",
"get_legend_handles_labels",
"(",
")",
"legend_markers",
"=",
"[",
"]",
"for",
"ix",
"in",
"idx",
":",
"legend_markers",
".",
"append",
"(",
"ax",
".",
"scatter",
"(",
"[",
"]",
",",
"[",
"]",
",",
"s",
"=",
"area",
"[",
"ix",
"]",
",",
"c",
"=",
"'b'",
")",
")",
"# artist = ax.scatter([], [], s=size_levels,) ",
"ax",
".",
"legend",
"(",
"legend_markers",
",",
"label",
",",
"title",
"=",
"'Hits'",
")",
"ax",
".",
"set_title",
"(",
"title",
",",
"fontsize",
"=",
"20",
",",
"fontweight",
"=",
"'bold'",
")",
"if",
"ofname",
"is",
"not",
"None",
":",
"# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)",
"fig",
".",
"savefig",
"(",
"ofname",
",",
"bbox_inches",
"=",
"'tight'",
",",
"dpi",
"=",
"300",
")",
"return",
"return",
"ax"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
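A minimal usage sketch for the dotplot above, assuming an Enrichr results DataFrame carrying the 'Term', 'Overlap', 'Combined Score' and 'Adjusted P-value' columns the function indexes; the gene symbols and library name below are illustrative placeholders.

    import gseapy
    from gseapy.plot import dotplot
    # run a small illustrative Enrichr query; any list of gene symbols works
    enr = gseapy.enrichr(gene_list=['TP53', 'BRCA1', 'EGFR', 'MYC'],
                         gene_sets='KEGG_2016')
    # pass the results table; with ofname set, the figure is saved to disk
    dotplot(enr.res2d, column='Adjusted P-value', top_term=10,
            ofname='enrichr.dotplot.pdf')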
|
test
|
barplot
|
Visualize enrichr results.
:param df: GSEApy DataFrame results.
:param column: which column of DataFrame to show. Default: Adjusted P-value
:param title: figure title.
:param cutoff: cut-off of the column you've chosen.
:param top_term: number of top enriched terms to show.
:param figsize: tuple, matplotlib figsize.
:param color: color for bars.
:param ofname: output file name. If None, don't save figure
|
gseapy/plot.py
|
def barplot(df, column='Adjusted P-value', title="", cutoff=0.05, top_term=10,
figsize=(6.5,6), color='salmon', ofname=None, **kwargs):
"""Visualize enrichr results.
:param df: GSEApy DataFrame results.
:param column: which column of DataFrame to show. Default: Adjusted P-value
:param title: figure title.
    :param cutoff: cut-off of the column you've chosen.
:param top_term: number of top enriched terms to show.
:param figsize: tuple, matplotlib figsize.
:param color: color for bars.
:param ofname: output file name. If None, don't save figure
"""
colname = column
if colname in ['Adjusted P-value', 'P-value']:
df = df[df[colname] <= cutoff]
if len(df) < 1:
msg = "Warning: No enrich terms using library %s when cutoff = %s"%(title, cutoff)
return msg
df = df.assign(logAP = lambda x: - x[colname].apply(np.log10))
colname = 'logAP'
dd = df.sort_values(by=colname).iloc[-top_term:,:]
# dd = d.head(top_term).sort_values('logAP')
# create bar plot
if hasattr(sys, 'ps1') and (ofname is None):
# working inside python console, show (True) figure
fig = plt.figure(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
bar = dd.plot.barh(x='Term', y=colname, color=color,
alpha=0.75, fontsize=16, ax=ax)
if column in ['Adjusted P-value', 'P-value']:
xlabel = "-log$_{10}$(%s)"%column
else:
xlabel = column
bar.set_xlabel(xlabel, fontsize=16, fontweight='bold')
bar.set_ylabel("")
bar.set_title(title, fontsize=24, fontweight='bold')
bar.xaxis.set_major_locator(MaxNLocator(integer=True))
bar.legend_.remove()
adjust_spines(ax, spines=['left','bottom'])
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
return ax
|
def barplot(df, column='Adjusted P-value', title="", cutoff=0.05, top_term=10,
figsize=(6.5,6), color='salmon', ofname=None, **kwargs):
"""Visualize enrichr results.
:param df: GSEApy DataFrame results.
:param column: which column of DataFrame to show. Default: Adjusted P-value
:param title: figure title.
    :param cutoff: cut-off of the column you've chosen.
:param top_term: number of top enriched terms to show.
:param figsize: tuple, matplotlib figsize.
:param color: color for bars.
:param ofname: output file name. If None, don't save figure
"""
colname = column
if colname in ['Adjusted P-value', 'P-value']:
df = df[df[colname] <= cutoff]
if len(df) < 1:
msg = "Warning: No enrich terms using library %s when cutoff = %s"%(title, cutoff)
return msg
df = df.assign(logAP = lambda x: - x[colname].apply(np.log10))
colname = 'logAP'
dd = df.sort_values(by=colname).iloc[-top_term:,:]
# dd = d.head(top_term).sort_values('logAP')
# create bar plot
if hasattr(sys, 'ps1') and (ofname is None):
# working inside python console, show (True) figure
fig = plt.figure(figsize=figsize)
else:
# If working on commandline, don't show figure
fig = Figure(figsize=figsize)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
bar = dd.plot.barh(x='Term', y=colname, color=color,
alpha=0.75, fontsize=16, ax=ax)
if column in ['Adjusted P-value', 'P-value']:
xlabel = "-log$_{10}$(%s)"%column
else:
xlabel = column
bar.set_xlabel(xlabel, fontsize=16, fontweight='bold')
bar.set_ylabel("")
bar.set_title(title, fontsize=24, fontweight='bold')
bar.xaxis.set_major_locator(MaxNLocator(integer=True))
bar.legend_.remove()
adjust_spines(ax, spines=['left','bottom'])
if ofname is not None:
# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)
fig.savefig(ofname, bbox_inches='tight', dpi=300)
return
return ax
|
[
"Visualize",
"enrichr",
"results",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/plot.py#L382-L434
|
[
"def",
"barplot",
"(",
"df",
",",
"column",
"=",
"'Adjusted P-value'",
",",
"title",
"=",
"\"\"",
",",
"cutoff",
"=",
"0.05",
",",
"top_term",
"=",
"10",
",",
"figsize",
"=",
"(",
"6.5",
",",
"6",
")",
",",
"color",
"=",
"'salmon'",
",",
"ofname",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"colname",
"=",
"column",
"if",
"colname",
"in",
"[",
"'Adjusted P-value'",
",",
"'P-value'",
"]",
":",
"df",
"=",
"df",
"[",
"df",
"[",
"colname",
"]",
"<=",
"cutoff",
"]",
"if",
"len",
"(",
"df",
")",
"<",
"1",
":",
"msg",
"=",
"\"Warning: No enrich terms using library %s when cutoff = %s\"",
"%",
"(",
"title",
",",
"cutoff",
")",
"return",
"msg",
"df",
"=",
"df",
".",
"assign",
"(",
"logAP",
"=",
"lambda",
"x",
":",
"-",
"x",
"[",
"colname",
"]",
".",
"apply",
"(",
"np",
".",
"log10",
")",
")",
"colname",
"=",
"'logAP'",
"dd",
"=",
"df",
".",
"sort_values",
"(",
"by",
"=",
"colname",
")",
".",
"iloc",
"[",
"-",
"top_term",
":",
",",
":",
"]",
"# dd = d.head(top_term).sort_values('logAP')",
"# create bar plot",
"if",
"hasattr",
"(",
"sys",
",",
"'ps1'",
")",
"and",
"(",
"ofname",
"is",
"None",
")",
":",
"# working inside python console, show (True) figure",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"else",
":",
"# If working on commandline, don't show figure",
"fig",
"=",
"Figure",
"(",
"figsize",
"=",
"figsize",
")",
"canvas",
"=",
"FigureCanvas",
"(",
"fig",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"bar",
"=",
"dd",
".",
"plot",
".",
"barh",
"(",
"x",
"=",
"'Term'",
",",
"y",
"=",
"colname",
",",
"color",
"=",
"color",
",",
"alpha",
"=",
"0.75",
",",
"fontsize",
"=",
"16",
",",
"ax",
"=",
"ax",
")",
"if",
"column",
"in",
"[",
"'Adjusted P-value'",
",",
"'P-value'",
"]",
":",
"xlabel",
"=",
"\"-log$_{10}$(%s)\"",
"%",
"column",
"else",
":",
"xlabel",
"=",
"column",
"bar",
".",
"set_xlabel",
"(",
"xlabel",
",",
"fontsize",
"=",
"16",
",",
"fontweight",
"=",
"'bold'",
")",
"bar",
".",
"set_ylabel",
"(",
"\"\"",
")",
"bar",
".",
"set_title",
"(",
"title",
",",
"fontsize",
"=",
"24",
",",
"fontweight",
"=",
"'bold'",
")",
"bar",
".",
"xaxis",
".",
"set_major_locator",
"(",
"MaxNLocator",
"(",
"integer",
"=",
"True",
")",
")",
"bar",
".",
"legend_",
".",
"remove",
"(",
")",
"adjust_spines",
"(",
"ax",
",",
"spines",
"=",
"[",
"'left'",
",",
"'bottom'",
"]",
")",
"if",
"ofname",
"is",
"not",
"None",
":",
"# canvas.print_figure(ofname, bbox_inches='tight', dpi=300)",
"fig",
".",
"savefig",
"(",
"ofname",
",",
"bbox_inches",
"=",
"'tight'",
",",
"dpi",
"=",
"300",
")",
"return",
"return",
"ax"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
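A matching usage sketch for barplot, assuming `res2d` is an Enrichr results table such as `enr.res2d` from gseapy.enrichr; run interactively with ofname=None it returns the Axes instead of saving.

    from gseapy.plot import barplot
    # res2d is a hypothetical results table with 'Term' and 'Adjusted P-value' columns
    ax = barplot(res2d, column='Adjusted P-value', title='KEGG_2016',
                 cutoff=0.05, top_term=10, ofname=None)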
|
test
|
adjust_spines
|
function for removing spines and ticks.
:param ax: axes object
:param spines: a list of spine names to keep, e.g. ['left', 'right', 'top', 'bottom'].
If spines == [], remove all spines and ticks.
|
gseapy/plot.py
|
def adjust_spines(ax, spines):
"""function for removing spines and ticks.
:param ax: axes object
    :param spines: a list of spine names to keep, e.g. ['left', 'right', 'top', 'bottom'].
    If spines == [], remove all spines and ticks.
"""
for loc, spine in ax.spines.items():
if loc in spines:
# spine.set_position(('outward', 10)) # outward by 10 points
# spine.set_smart_bounds(True)
continue
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
|
def adjust_spines(ax, spines):
"""function for removing spines and ticks.
:param ax: axes object
    :param spines: a list of spine names to keep, e.g. ['left', 'right', 'top', 'bottom'].
    If spines == [], remove all spines and ticks.
"""
for loc, spine in ax.spines.items():
if loc in spines:
# spine.set_position(('outward', 10)) # outward by 10 points
# spine.set_smart_bounds(True)
continue
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
|
[
"function",
"for",
"removing",
"spines",
"and",
"ticks",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/plot.py#L436-L463
|
[
"def",
"adjust_spines",
"(",
"ax",
",",
"spines",
")",
":",
"for",
"loc",
",",
"spine",
"in",
"ax",
".",
"spines",
".",
"items",
"(",
")",
":",
"if",
"loc",
"in",
"spines",
":",
"# spine.set_position(('outward', 10)) # outward by 10 points",
"# spine.set_smart_bounds(True)",
"continue",
"else",
":",
"spine",
".",
"set_color",
"(",
"'none'",
")",
"# don't draw spine",
"# turn off ticks where there is no spine",
"if",
"'left'",
"in",
"spines",
":",
"ax",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"'left'",
")",
"else",
":",
"# no yaxis ticks",
"ax",
".",
"yaxis",
".",
"set_ticks",
"(",
"[",
"]",
")",
"if",
"'bottom'",
"in",
"spines",
":",
"ax",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"'bottom'",
")",
"else",
":",
"# no xaxis ticks",
"ax",
".",
"xaxis",
".",
"set_ticks",
"(",
"[",
"]",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
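A self-contained sketch of adjust_spines on a throwaway plot; only matplotlib is needed.

    import matplotlib.pyplot as plt
    from gseapy.plot import adjust_spines
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [2, 1, 3])
    # keep only the left and bottom spines, as barplot does above
    adjust_spines(ax, spines=['left', 'bottom'])
    # adjust_spines(ax, spines=[]) would strip all spines and ticks instead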
|
test
|
main
|
The Main function/pipeline for GSEApy.
|
gseapy/__main__.py
|
def main():
"""The Main function/pipeline for GSEApy."""
# Parse options...
argparser = prepare_argparser()
args = argparser.parse_args()
subcommand = args.subcommand_name
if subcommand == "replot":
# reproduce plots using GSEAPY
from .gsea import Replot
rep = Replot(indir=args.indir, outdir=args.outdir, weighted_score_type=args.weight,
figsize=args.figsize, graph_num=args.graph,
format=args.format, verbose=args.verbose)
rep.run()
elif subcommand == "gsea":
# compute using GSEAPY
from .gsea import GSEA
gs = GSEA(args.data, args.gmt, args.cls, args.outdir,
args.mins, args.maxs, args.n, args.weight,
args.type, args.method, args.ascending, args.threads,
args.figsize, args.format, args.graph, args.noplot, args.seed, args.verbose)
gs.run()
elif subcommand == "prerank":
from .gsea import Prerank
pre = Prerank(args.rnk, args.gmt, args.outdir, args.label[0], args.label[1],
args.mins, args.maxs, args.n, args.weight, args.ascending, args.threads,
args.figsize, args.format, args.graph, args.noplot, args.seed, args.verbose)
pre.run()
elif subcommand == "ssgsea":
from .gsea import SingleSampleGSEA
ss = SingleSampleGSEA(data=args.data, gene_sets=args.gmt, outdir=args.outdir,
sample_norm_method=args.norm,
min_size=args.mins, max_size=args.maxs, permutation_num=args.n,
weighted_score_type=args.weight, scale=args.scale,
ascending=args.ascending, processes=args.threads,
figsize=args.figsize, format=args.format, graph_num=args.graph,
no_plot=args.noplot, seed=args.seed, verbose=args.verbose)
ss.run()
elif subcommand == "enrichr":
# calling enrichr API
from .enrichr import Enrichr
enr = Enrichr(gene_list=args.gene_list, descriptions=args.descrip,
gene_sets=args.library, organism=args.organism,
outdir=args.outdir, format=args.format, cutoff=args.thresh,
background=args.bg, figsize=args.figsize,
top_term=args.term, no_plot=args.noplot, verbose=args.verbose)
enr.run()
elif subcommand == "biomart":
from .parser import Biomart
        # read input from a file or from the argument value
name, value = args.filter
if os.path.isfile(value):
with open(value, 'r') as val:
lines = val.readlines()
value = [ l.strip() for l in lines]
# run query
bm = Biomart(host=args.host, verbose=args.verbose)
bm.query(dataset=args.bg, attributes=args.attrs.split(","),
filters={name : value}, filename=args.ofile)
else:
argparser.print_help()
sys.exit(0)
|
def main():
"""The Main function/pipeline for GSEApy."""
# Parse options...
argparser = prepare_argparser()
args = argparser.parse_args()
subcommand = args.subcommand_name
if subcommand == "replot":
# reproduce plots using GSEAPY
from .gsea import Replot
rep = Replot(indir=args.indir, outdir=args.outdir, weighted_score_type=args.weight,
figsize=args.figsize, graph_num=args.graph,
format=args.format, verbose=args.verbose)
rep.run()
elif subcommand == "gsea":
# compute using GSEAPY
from .gsea import GSEA
gs = GSEA(args.data, args.gmt, args.cls, args.outdir,
args.mins, args.maxs, args.n, args.weight,
args.type, args.method, args.ascending, args.threads,
args.figsize, args.format, args.graph, args.noplot, args.seed, args.verbose)
gs.run()
elif subcommand == "prerank":
from .gsea import Prerank
pre = Prerank(args.rnk, args.gmt, args.outdir, args.label[0], args.label[1],
args.mins, args.maxs, args.n, args.weight, args.ascending, args.threads,
args.figsize, args.format, args.graph, args.noplot, args.seed, args.verbose)
pre.run()
elif subcommand == "ssgsea":
from .gsea import SingleSampleGSEA
ss = SingleSampleGSEA(data=args.data, gene_sets=args.gmt, outdir=args.outdir,
sample_norm_method=args.norm,
min_size=args.mins, max_size=args.maxs, permutation_num=args.n,
weighted_score_type=args.weight, scale=args.scale,
ascending=args.ascending, processes=args.threads,
figsize=args.figsize, format=args.format, graph_num=args.graph,
no_plot=args.noplot, seed=args.seed, verbose=args.verbose)
ss.run()
elif subcommand == "enrichr":
# calling enrichr API
from .enrichr import Enrichr
enr = Enrichr(gene_list=args.gene_list, descriptions=args.descrip,
gene_sets=args.library, organism=args.organism,
outdir=args.outdir, format=args.format, cutoff=args.thresh,
background=args.bg, figsize=args.figsize,
top_term=args.term, no_plot=args.noplot, verbose=args.verbose)
enr.run()
elif subcommand == "biomart":
from .parser import Biomart
        # read input from a file or from the argument value
name, value = args.filter
if os.path.isfile(value):
with open(value, 'r') as val:
lines = val.readlines()
value = [ l.strip() for l in lines]
# run query
bm = Biomart(host=args.host, verbose=args.verbose)
bm.query(dataset=args.bg, attributes=args.attrs.split(","),
filters={name : value}, filename=args.ofile)
else:
argparser.print_help()
sys.exit(0)
|
[
"The",
"Main",
"function",
"/",
"pipeline",
"for",
"GSEApy",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L16-L84
|
[
"def",
"main",
"(",
")",
":",
"# Parse options...",
"argparser",
"=",
"prepare_argparser",
"(",
")",
"args",
"=",
"argparser",
".",
"parse_args",
"(",
")",
"subcommand",
"=",
"args",
".",
"subcommand_name",
"if",
"subcommand",
"==",
"\"replot\"",
":",
"# reproduce plots using GSEAPY",
"from",
".",
"gsea",
"import",
"Replot",
"rep",
"=",
"Replot",
"(",
"indir",
"=",
"args",
".",
"indir",
",",
"outdir",
"=",
"args",
".",
"outdir",
",",
"weighted_score_type",
"=",
"args",
".",
"weight",
",",
"figsize",
"=",
"args",
".",
"figsize",
",",
"graph_num",
"=",
"args",
".",
"graph",
",",
"format",
"=",
"args",
".",
"format",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"rep",
".",
"run",
"(",
")",
"elif",
"subcommand",
"==",
"\"gsea\"",
":",
"# compute using GSEAPY",
"from",
".",
"gsea",
"import",
"GSEA",
"gs",
"=",
"GSEA",
"(",
"args",
".",
"data",
",",
"args",
".",
"gmt",
",",
"args",
".",
"cls",
",",
"args",
".",
"outdir",
",",
"args",
".",
"mins",
",",
"args",
".",
"maxs",
",",
"args",
".",
"n",
",",
"args",
".",
"weight",
",",
"args",
".",
"type",
",",
"args",
".",
"method",
",",
"args",
".",
"ascending",
",",
"args",
".",
"threads",
",",
"args",
".",
"figsize",
",",
"args",
".",
"format",
",",
"args",
".",
"graph",
",",
"args",
".",
"noplot",
",",
"args",
".",
"seed",
",",
"args",
".",
"verbose",
")",
"gs",
".",
"run",
"(",
")",
"elif",
"subcommand",
"==",
"\"prerank\"",
":",
"from",
".",
"gsea",
"import",
"Prerank",
"pre",
"=",
"Prerank",
"(",
"args",
".",
"rnk",
",",
"args",
".",
"gmt",
",",
"args",
".",
"outdir",
",",
"args",
".",
"label",
"[",
"0",
"]",
",",
"args",
".",
"label",
"[",
"1",
"]",
",",
"args",
".",
"mins",
",",
"args",
".",
"maxs",
",",
"args",
".",
"n",
",",
"args",
".",
"weight",
",",
"args",
".",
"ascending",
",",
"args",
".",
"threads",
",",
"args",
".",
"figsize",
",",
"args",
".",
"format",
",",
"args",
".",
"graph",
",",
"args",
".",
"noplot",
",",
"args",
".",
"seed",
",",
"args",
".",
"verbose",
")",
"pre",
".",
"run",
"(",
")",
"elif",
"subcommand",
"==",
"\"ssgsea\"",
":",
"from",
".",
"gsea",
"import",
"SingleSampleGSEA",
"ss",
"=",
"SingleSampleGSEA",
"(",
"data",
"=",
"args",
".",
"data",
",",
"gene_sets",
"=",
"args",
".",
"gmt",
",",
"outdir",
"=",
"args",
".",
"outdir",
",",
"sample_norm_method",
"=",
"args",
".",
"norm",
",",
"min_size",
"=",
"args",
".",
"mins",
",",
"max_size",
"=",
"args",
".",
"maxs",
",",
"permutation_num",
"=",
"args",
".",
"n",
",",
"weighted_score_type",
"=",
"args",
".",
"weight",
",",
"scale",
"=",
"args",
".",
"scale",
",",
"ascending",
"=",
"args",
".",
"ascending",
",",
"processes",
"=",
"args",
".",
"threads",
",",
"figsize",
"=",
"args",
".",
"figsize",
",",
"format",
"=",
"args",
".",
"format",
",",
"graph_num",
"=",
"args",
".",
"graph",
",",
"no_plot",
"=",
"args",
".",
"noplot",
",",
"seed",
"=",
"args",
".",
"seed",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"ss",
".",
"run",
"(",
")",
"elif",
"subcommand",
"==",
"\"enrichr\"",
":",
"# calling enrichr API",
"from",
".",
"enrichr",
"import",
"Enrichr",
"enr",
"=",
"Enrichr",
"(",
"gene_list",
"=",
"args",
".",
"gene_list",
",",
"descriptions",
"=",
"args",
".",
"descrip",
",",
"gene_sets",
"=",
"args",
".",
"library",
",",
"organism",
"=",
"args",
".",
"organism",
",",
"outdir",
"=",
"args",
".",
"outdir",
",",
"format",
"=",
"args",
".",
"format",
",",
"cutoff",
"=",
"args",
".",
"thresh",
",",
"background",
"=",
"args",
".",
"bg",
",",
"figsize",
"=",
"args",
".",
"figsize",
",",
"top_term",
"=",
"args",
".",
"term",
",",
"no_plot",
"=",
"args",
".",
"noplot",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"enr",
".",
"run",
"(",
")",
"elif",
"subcommand",
"==",
"\"biomart\"",
":",
"from",
".",
"parser",
"import",
"Biomart",
"# read input file or a argument",
"name",
",",
"value",
"=",
"args",
".",
"filter",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"value",
")",
":",
"with",
"open",
"(",
"value",
",",
"'r'",
")",
"as",
"val",
":",
"lines",
"=",
"val",
".",
"readlines",
"(",
")",
"value",
"=",
"[",
"l",
".",
"strip",
"(",
")",
"for",
"l",
"in",
"lines",
"]",
"# run query",
"bm",
"=",
"Biomart",
"(",
"host",
"=",
"args",
".",
"host",
",",
"verbose",
"=",
"args",
".",
"verbose",
")",
"bm",
".",
"query",
"(",
"dataset",
"=",
"args",
".",
"bg",
",",
"attributes",
"=",
"args",
".",
"attrs",
".",
"split",
"(",
"\",\"",
")",
",",
"filters",
"=",
"{",
"name",
":",
"value",
"}",
",",
"filename",
"=",
"args",
".",
"ofile",
")",
"else",
":",
"argparser",
".",
"print_help",
"(",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
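main() only dispatches on the parsed subcommand name; the installed console script therefore behaves like the illustrative invocations below (all file paths are placeholders), and the same dispatch can be exercised programmatically.

    # gseapy gsea -d exp.txt -c test.cls -g sets.gmt -o gsea_out
    # gseapy prerank -r ranked.rnk -g sets.gmt -o prerank_out
    # gseapy enrichr -i genes.txt -g KEGG_2016 -o enrichr_out
    import sys
    from gseapy.__main__ import main
    sys.argv = ['gseapy', 'replot', '-i', 'GSEA_desktop_output_dir']
    main()  # runs the replot branch; the directory must actually exist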
|
test
|
prepare_argparser
|
Prepare argparser object. New options will be added in this function first.
|
gseapy/__main__.py
|
def prepare_argparser():
"""Prepare argparser object. New options will be added in this function first."""
description = "%(prog)s -- Gene Set Enrichment Analysis in Python"
epilog = "For command line options of each command, type: %(prog)s COMMAND -h"
# top-level parser
argparser = ap.ArgumentParser(description=description, epilog=epilog)
argparser.add_argument("--version", action="version", version="%(prog)s "+ __version__)
subparsers = argparser.add_subparsers(dest='subcommand_name') #help="sub-command help")
# command for 'gsea'
add_gsea_parser(subparsers)
# command for 'prerank'
add_prerank_parser(subparsers)
# command for 'ssgsea'
add_singlesample_parser(subparsers)
# command for 'plot'
add_plot_parser(subparsers)
# command for 'enrichr'
add_enrichr_parser(subparsers)
# command for 'biomart'
add_biomart_parser(subparsers)
return argparser
|
def prepare_argparser():
"""Prepare argparser object. New options will be added in this function first."""
description = "%(prog)s -- Gene Set Enrichment Analysis in Python"
epilog = "For command line options of each command, type: %(prog)s COMMAND -h"
# top-level parser
argparser = ap.ArgumentParser(description=description, epilog=epilog)
argparser.add_argument("--version", action="version", version="%(prog)s "+ __version__)
subparsers = argparser.add_subparsers(dest='subcommand_name') #help="sub-command help")
# command for 'gsea'
add_gsea_parser(subparsers)
# command for 'prerank'
add_prerank_parser(subparsers)
# command for 'ssgsea'
add_singlesample_parser(subparsers)
# command for 'plot'
add_plot_parser(subparsers)
# command for 'enrichr'
add_enrichr_parser(subparsers)
# command for 'biomart'
add_biomart_parser(subparsers)
return argparser
|
[
"Prepare",
"argparser",
"object",
".",
"New",
"options",
"will",
"be",
"added",
"in",
"this",
"function",
"first",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L87-L110
|
[
"def",
"prepare_argparser",
"(",
")",
":",
"description",
"=",
"\"%(prog)s -- Gene Set Enrichment Analysis in Python\"",
"epilog",
"=",
"\"For command line options of each command, type: %(prog)s COMMAND -h\"",
"# top-level parser",
"argparser",
"=",
"ap",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
",",
"epilog",
"=",
"epilog",
")",
"argparser",
".",
"add_argument",
"(",
"\"--version\"",
",",
"action",
"=",
"\"version\"",
",",
"version",
"=",
"\"%(prog)s \"",
"+",
"__version__",
")",
"subparsers",
"=",
"argparser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'subcommand_name'",
")",
"#help=\"sub-command help\")",
"# command for 'gsea'",
"add_gsea_parser",
"(",
"subparsers",
")",
"# command for 'prerank'",
"add_prerank_parser",
"(",
"subparsers",
")",
"# command for 'ssgsea'",
"add_singlesample_parser",
"(",
"subparsers",
")",
"# command for 'plot'",
"add_plot_parser",
"(",
"subparsers",
")",
"# command for 'enrichr'",
"add_enrichr_parser",
"(",
"subparsers",
")",
"# command for 'biomart'",
"add_biomart_parser",
"(",
"subparsers",
")",
"return",
"argparser"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
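A quick sanity check of the parser built above; the argument values are illustrative.

    argparser = prepare_argparser()
    args = argparser.parse_args(['gsea', '-d', 'exp.txt', '-c', 'test.cls',
                                 '-g', 'sets.gmt'])
    print(args.subcommand_name)          # gsea
    print(args.n, args.mins, args.maxs)  # defaults: 1000 15 500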
|
test
|
add_output_option
|
output option
|
gseapy/__main__.py
|
def add_output_option(parser):
"""output option"""
parser.add_argument("-o", "--outdir", dest="outdir", type=str, default='GSEApy_reports',
metavar='', action="store",
                        help="The GSEApy output directory. Default: 'GSEApy_reports' in the current working directory")
parser.add_argument("-f", "--format", dest="format", type=str, metavar='', action="store",
choices=("pdf", "png", "jpeg", "eps", "svg"), default="pdf",
help="File extensions supported by Matplotlib active backend,\
choose from {'pdf', 'png', 'jpeg','ps', 'eps','svg'}. Default: 'pdf'.")
parser.add_argument("--fs", "--figsize", action='store', nargs=2, dest='figsize',
                        metavar=('width', 'height'), type=float, default=(6.5, 6),
                        help="The figsize argument needs two parameters (width and height). Default: (6.5, 6)")
parser.add_argument("--graph", dest = "graph", action="store", type=int, default=20, metavar='int',
help="Numbers of top graphs produced. Default: 20")
parser.add_argument("--no-plot", action='store_true', dest='noplot', default=False,
                        help="Speed up computing by suppressing the plot output. "+\
                        "This is useful only if you are interested in the data. Default: False.")
parser.add_argument("-v", "--verbose", action="store_true", default=False, dest='verbose',
help="Increase output verbosity, print out progress of your job", )
|
def add_output_option(parser):
"""output option"""
parser.add_argument("-o", "--outdir", dest="outdir", type=str, default='GSEApy_reports',
metavar='', action="store",
                        help="The GSEApy output directory. Default: 'GSEApy_reports' in the current working directory")
parser.add_argument("-f", "--format", dest="format", type=str, metavar='', action="store",
choices=("pdf", "png", "jpeg", "eps", "svg"), default="pdf",
help="File extensions supported by Matplotlib active backend,\
choose from {'pdf', 'png', 'jpeg','ps', 'eps','svg'}. Default: 'pdf'.")
parser.add_argument("--fs", "--figsize", action='store', nargs=2, dest='figsize',
                        metavar=('width', 'height'), type=float, default=(6.5, 6),
                        help="The figsize argument needs two parameters (width and height). Default: (6.5, 6)")
parser.add_argument("--graph", dest = "graph", action="store", type=int, default=20, metavar='int',
help="Numbers of top graphs produced. Default: 20")
parser.add_argument("--no-plot", action='store_true', dest='noplot', default=False,
                        help="Speed up computing by suppressing the plot output. "+\
                        "This is useful only if you are interested in the data. Default: False.")
parser.add_argument("-v", "--verbose", action="store_true", default=False, dest='verbose',
help="Increase output verbosity, print out progress of your job", )
|
[
"output",
"option"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L112-L131
|
[
"def",
"add_output_option",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--outdir\"",
",",
"dest",
"=",
"\"outdir\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'GSEApy_reports'",
",",
"metavar",
"=",
"''",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"The GSEApy output directory. Default: the current working directory\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--format\"",
",",
"dest",
"=",
"\"format\"",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"''",
",",
"action",
"=",
"\"store\"",
",",
"choices",
"=",
"(",
"\"pdf\"",
",",
"\"png\"",
",",
"\"jpeg\"",
",",
"\"eps\"",
",",
"\"svg\"",
")",
",",
"default",
"=",
"\"pdf\"",
",",
"help",
"=",
"\"File extensions supported by Matplotlib active backend,\\\n choose from {'pdf', 'png', 'jpeg','ps', 'eps','svg'}. Default: 'pdf'.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--fs\"",
",",
"\"--figsize\"",
",",
"action",
"=",
"'store'",
",",
"nargs",
"=",
"2",
",",
"dest",
"=",
"'figsize'",
",",
"metavar",
"=",
"(",
"'width'",
",",
"'height'",
")",
",",
"type",
"=",
"float",
",",
"default",
"=",
"(",
"6.5",
",",
"6",
")",
",",
"help",
"=",
"\"The figsize keyword argument need two parameters to define. Default: (6.5, 6)\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--graph\"",
",",
"dest",
"=",
"\"graph\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"20",
",",
"metavar",
"=",
"'int'",
",",
"help",
"=",
"\"Numbers of top graphs produced. Default: 20\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--no-plot\"",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'noplot'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Speed up computing by suppressing the plot output.\"",
"+",
"\"This is useful only if data are interested. Default: False.\"",
")",
"parser",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'verbose'",
",",
"help",
"=",
"\"Increase output verbosity, print out progress of your job\"",
",",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
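Because the shared output options live in this helper, they can be inspected in isolation on a throwaway parser.

    import argparse
    p = argparse.ArgumentParser()
    add_output_option(p)
    args = p.parse_args([])
    # defaults: GSEApy_reports pdf (6.5, 6) 20
    print(args.outdir, args.format, args.figsize, args.graph)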
|
test
|
add_output_group
|
output group
|
gseapy/__main__.py
|
def add_output_group(parser, required=True):
"""output group"""
output_group = parser.add_mutually_exclusive_group(required=required)
output_group.add_argument("-o", "--ofile", dest="ofile", type=str, default='GSEApy_reports',
help="Output file name. Mutually exclusive with --o-prefix.")
output_group.add_argument("--o-prefix", dest="ofile", type=str, default='GSEApy_reports',
help="Output file prefix. Mutually exclusive with -o/--ofile.")
|
def add_output_group(parser, required=True):
"""output group"""
output_group = parser.add_mutually_exclusive_group(required=required)
output_group.add_argument("-o", "--ofile", dest="ofile", type=str, default='GSEApy_reports',
help="Output file name. Mutually exclusive with --o-prefix.")
output_group.add_argument("--o-prefix", dest="ofile", type=str, default='GSEApy_reports',
help="Output file prefix. Mutually exclusive with -o/--ofile.")
|
[
"output",
"group"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L134-L141
|
[
"def",
"add_output_group",
"(",
"parser",
",",
"required",
"=",
"True",
")",
":",
"output_group",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"required",
")",
"output_group",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--ofile\"",
",",
"dest",
"=",
"\"ofile\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'GSEApy_reports'",
",",
"help",
"=",
"\"Output file name. Mutually exclusive with --o-prefix.\"",
")",
"output_group",
".",
"add_argument",
"(",
"\"--o-prefix\"",
",",
"dest",
"=",
"\"ofile\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'GSEApy_reports'",
",",
"help",
"=",
"\"Output file prefix. Mutually exclusive with -o/--ofile.\"",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
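Both flags in the mutually exclusive group write to the same `ofile` dest, so downstream code only ever reads a single attribute.

    import argparse
    p = argparse.ArgumentParser()
    add_output_group(p, required=True)
    args = p.parse_args(['-o', 'results.txt'])
    print(args.ofile)  # results.txt
    # passing both -o and --o-prefix triggers an argparse usage error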
|
test
|
add_gsea_parser
|
Add main function 'gsea' argument parsers.
|
gseapy/__main__.py
|
def add_gsea_parser(subparsers):
"""Add main function 'gsea' argument parsers."""
argparser_gsea = subparsers.add_parser("gsea", help="Main GSEApy Function: run GSEApy instead of GSEA.")
# group for input files
group_input = argparser_gsea.add_argument_group("Input files arguments")
group_input.add_argument("-d", "--data", dest="data", action="store", type=str, required=True,
                        help="Input gene expression dataset file in txt format. Same with GSEA.")
group_input.add_argument("-c", "--cls", dest="cls", action="store", type=str, required=True,
help="Input class vector (phenotype) file in CLS format. Same with GSEA.")
group_input.add_argument("-g", "--gmt", dest="gmt", action="store", type=str, required=True,
help="Gene set database in GMT format. Same with GSEA.")
group_input.add_argument("-t", "--permu-type", action="store", dest="type", type=str, metavar='perType',
choices=("gene_set", "phenotype"), default="gene_set",
help="Permutation type. Same with GSEA, choose from {'gene_set', 'phenotype'}")
# group for output files
group_output = argparser_gsea.add_argument_group("Output arguments")
add_output_option(group_output)
# group for General options.
group_opt = argparser_gsea.add_argument_group("GSEA advanced arguments")
group_opt.add_argument("-n", "--permu-num", dest = "n", action="store", type=int, default=1000, metavar='nperm',
help="Number of random permutations. For calculating esnulls. Default: 1000")
group_opt.add_argument("--min-size", dest="mins", action="store", type=int, default=15, metavar='int',
help="Min size of input genes presented in Gene Sets. Default: 15")
group_opt.add_argument("--max-size", dest = "maxs", action="store", type=int, default=500, metavar='int',
help="Max size of input genes presented in Gene Sets. Default: 500")
group_opt.add_argument("-w", "--weight", action='store', dest='weight', default=1.0, type=float, metavar='float',
help='Weighted_score of rank_metrics. For weighting input genes. Choose from {0, 1, 1.5, 2}. Default: 1',)
group_opt.add_argument("-m", "--method", action="store", dest="method", type=str, metavar='',
choices=("signal_to_noise", "t_test", "ratio_of_classes", "diff_of_classes", "log2_ratio_of_classes"),
default="log2_ratio_of_classes",
help="Methods to calculate correlations of ranking metrics. \
Choose from {'signal_to_noise', 't_test', 'ratio_of_classes', 'diff_of_classes','log2_ratio_of_classes'}.\
Default: 'log2_ratio_of_classes'")
group_opt.add_argument("-a", "--ascending", action='store_true', dest='ascending', default=False,
help='Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.')
group_opt.add_argument("-s", "--seed", dest = "seed", action="store", type=int, default=None, metavar='',
                        help="Random seed. Default: None")
group_opt.add_argument("-p", "--threads", dest = "threads", action="store", type=int, default=1, metavar='procs',
help="Number of Processes you are going to use. Default: 1")
return
|
def add_gsea_parser(subparsers):
"""Add main function 'gsea' argument parsers."""
argparser_gsea = subparsers.add_parser("gsea", help="Main GSEApy Function: run GSEApy instead of GSEA.")
# group for input files
group_input = argparser_gsea.add_argument_group("Input files arguments")
group_input.add_argument("-d", "--data", dest="data", action="store", type=str, required=True,
                        help="Input gene expression dataset file in txt format. Same with GSEA.")
group_input.add_argument("-c", "--cls", dest="cls", action="store", type=str, required=True,
help="Input class vector (phenotype) file in CLS format. Same with GSEA.")
group_input.add_argument("-g", "--gmt", dest="gmt", action="store", type=str, required=True,
help="Gene set database in GMT format. Same with GSEA.")
group_input.add_argument("-t", "--permu-type", action="store", dest="type", type=str, metavar='perType',
choices=("gene_set", "phenotype"), default="gene_set",
help="Permutation type. Same with GSEA, choose from {'gene_set', 'phenotype'}")
# group for output files
group_output = argparser_gsea.add_argument_group("Output arguments")
add_output_option(group_output)
# group for General options.
group_opt = argparser_gsea.add_argument_group("GSEA advanced arguments")
group_opt.add_argument("-n", "--permu-num", dest = "n", action="store", type=int, default=1000, metavar='nperm',
help="Number of random permutations. For calculating esnulls. Default: 1000")
group_opt.add_argument("--min-size", dest="mins", action="store", type=int, default=15, metavar='int',
help="Min size of input genes presented in Gene Sets. Default: 15")
group_opt.add_argument("--max-size", dest = "maxs", action="store", type=int, default=500, metavar='int',
help="Max size of input genes presented in Gene Sets. Default: 500")
group_opt.add_argument("-w", "--weight", action='store', dest='weight', default=1.0, type=float, metavar='float',
help='Weighted_score of rank_metrics. For weighting input genes. Choose from {0, 1, 1.5, 2}. Default: 1',)
group_opt.add_argument("-m", "--method", action="store", dest="method", type=str, metavar='',
choices=("signal_to_noise", "t_test", "ratio_of_classes", "diff_of_classes", "log2_ratio_of_classes"),
default="log2_ratio_of_classes",
help="Methods to calculate correlations of ranking metrics. \
Choose from {'signal_to_noise', 't_test', 'ratio_of_classes', 'diff_of_classes','log2_ratio_of_classes'}.\
Default: 'log2_ratio_of_classes'")
group_opt.add_argument("-a", "--ascending", action='store_true', dest='ascending', default=False,
help='Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.')
group_opt.add_argument("-s", "--seed", dest = "seed", action="store", type=int, default=None, metavar='',
                        help="Random seed. Default: None")
group_opt.add_argument("-p", "--threads", dest = "threads", action="store", type=int, default=1, metavar='procs',
help="Number of Processes you are going to use. Default: 1")
return
|
[
"Add",
"main",
"function",
"gsea",
"argument",
"parsers",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L144-L188
|
[
"def",
"add_gsea_parser",
"(",
"subparsers",
")",
":",
"argparser_gsea",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"gsea\"",
",",
"help",
"=",
"\"Main GSEApy Function: run GSEApy instead of GSEA.\"",
")",
"# group for input files",
"group_input",
"=",
"argparser_gsea",
".",
"add_argument_group",
"(",
"\"Input files arguments\"",
")",
"group_input",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--data\"",
",",
"dest",
"=",
"\"data\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Input gene expression dataset file in txt format.Same with GSEA.\"",
")",
"group_input",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--cls\"",
",",
"dest",
"=",
"\"cls\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Input class vector (phenotype) file in CLS format. Same with GSEA.\"",
")",
"group_input",
".",
"add_argument",
"(",
"\"-g\"",
",",
"\"--gmt\"",
",",
"dest",
"=",
"\"gmt\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Gene set database in GMT format. Same with GSEA.\"",
")",
"group_input",
".",
"add_argument",
"(",
"\"-t\"",
",",
"\"--permu-type\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"type\"",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"'perType'",
",",
"choices",
"=",
"(",
"\"gene_set\"",
",",
"\"phenotype\"",
")",
",",
"default",
"=",
"\"gene_set\"",
",",
"help",
"=",
"\"Permutation type. Same with GSEA, choose from {'gene_set', 'phenotype'}\"",
")",
"# group for output files",
"group_output",
"=",
"argparser_gsea",
".",
"add_argument_group",
"(",
"\"Output arguments\"",
")",
"add_output_option",
"(",
"group_output",
")",
"# group for General options.",
"group_opt",
"=",
"argparser_gsea",
".",
"add_argument_group",
"(",
"\"GSEA advanced arguments\"",
")",
"group_opt",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--permu-num\"",
",",
"dest",
"=",
"\"n\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1000",
",",
"metavar",
"=",
"'nperm'",
",",
"help",
"=",
"\"Number of random permutations. For calculating esnulls. Default: 1000\"",
")",
"group_opt",
".",
"add_argument",
"(",
"\"--min-size\"",
",",
"dest",
"=",
"\"mins\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"15",
",",
"metavar",
"=",
"'int'",
",",
"help",
"=",
"\"Min size of input genes presented in Gene Sets. Default: 15\"",
")",
"group_opt",
".",
"add_argument",
"(",
"\"--max-size\"",
",",
"dest",
"=",
"\"maxs\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"500",
",",
"metavar",
"=",
"'int'",
",",
"help",
"=",
"\"Max size of input genes presented in Gene Sets. Default: 500\"",
")",
"group_opt",
".",
"add_argument",
"(",
"\"-w\"",
",",
"\"--weight\"",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'weight'",
",",
"default",
"=",
"1.0",
",",
"type",
"=",
"float",
",",
"metavar",
"=",
"'float'",
",",
"help",
"=",
"'Weighted_score of rank_metrics. For weighting input genes. Choose from {0, 1, 1.5, 2}. Default: 1'",
",",
")",
"group_opt",
".",
"add_argument",
"(",
"\"-m\"",
",",
"\"--method\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"method\"",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"''",
",",
"choices",
"=",
"(",
"\"signal_to_noise\"",
",",
"\"t_test\"",
",",
"\"ratio_of_classes\"",
",",
"\"diff_of_classes\"",
",",
"\"log2_ratio_of_classes\"",
")",
",",
"default",
"=",
"\"log2_ratio_of_classes\"",
",",
"help",
"=",
"\"Methods to calculate correlations of ranking metrics. \\\n Choose from {'signal_to_noise', 't_test', 'ratio_of_classes', 'diff_of_classes','log2_ratio_of_classes'}.\\\n Default: 'log2_ratio_of_classes'\"",
")",
"group_opt",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--ascending\"",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'ascending'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.'",
")",
"group_opt",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--seed\"",
",",
"dest",
"=",
"\"seed\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"None",
",",
"metavar",
"=",
"''",
",",
"help",
"=",
"\"Number of random seed. Default: None\"",
")",
"group_opt",
".",
"add_argument",
"(",
"\"-p\"",
",",
"\"--threads\"",
",",
"dest",
"=",
"\"threads\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"metavar",
"=",
"'procs'",
",",
"help",
"=",
"\"Number of Processes you are going to use. Default: 1\"",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
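With the subparser registered, a gsea command line parses as expected; the file names here are placeholders.

    argparser = prepare_argparser()
    args = argparser.parse_args(['gsea', '-d', 'expression.txt',
                                 '-c', 'phenotype.cls', '-g', 'gene_sets.gmt',
                                 '-t', 'phenotype', '-m', 'signal_to_noise'])
    print(args.type, args.method)  # phenotype signal_to_noise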
|
test
|
add_prerank_parser
|
Add function 'prerank' argument parsers.
|
gseapy/__main__.py
|
def add_prerank_parser(subparsers):
"""Add function 'prerank' argument parsers."""
argparser_prerank = subparsers.add_parser("prerank", help="Run GSEApy Prerank tool on preranked gene list.")
# group for input files
prerank_input = argparser_prerank.add_argument_group("Input files arguments")
prerank_input.add_argument("-r", "--rnk", dest="rnk", action="store", type=str, required=True,
help="Ranking metric file in .rnk format. Same with GSEA.")
prerank_input.add_argument("-g", "--gmt", dest="gmt", action="store", type=str, required=True,
help="Gene set database in GMT format. Same with GSEA.")
prerank_input.add_argument("-l", "--label", action='store', nargs=2, dest='label',
metavar=('pos', 'neg'), type=str, default=('Pos','Neg'),
                        help="The phenotype label argument needs two parameters (pos and neg). Default: ('Pos','Neg')")
# group for output files
prerank_output = argparser_prerank.add_argument_group("Output arguments")
add_output_option(prerank_output)
# group for General options.
prerank_opt = argparser_prerank.add_argument_group("GSEA advanced arguments")
prerank_opt.add_argument("-n", "--permu-num", dest = "n", action="store", type=int, default=1000, metavar='nperm',
help="Number of random permutations. For calculating esnulls. Default: 1000")
prerank_opt.add_argument("--min-size", dest="mins", action="store", type=int, default=15, metavar='int',
help="Min size of input genes presented in Gene Sets. Default: 15")
prerank_opt.add_argument("--max-size", dest = "maxs", action="store", type=int, default=500, metavar='int',
help="Max size of input genes presented in Gene Sets. Default: 500")
prerank_opt.add_argument("-w", "--weight", action='store', dest='weight', default=1.0, type=float, metavar='float',
help='Weighted_score of rank_metrics. For weighting input genes. Choose from {0, 1, 1.5, 2}. Default: 1',)
prerank_opt.add_argument("-a", "--ascending", action='store_true', dest='ascending', default=False,
help='Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.')
prerank_opt.add_argument("-s", "--seed", dest = "seed", action="store", type=int, default=None, metavar='',
                        help="Random seed. Default: None")
prerank_opt.add_argument("-p", "--threads", dest = "threads", action="store", type=int, default=1, metavar='procs',
help="Number of Processes you are going to use. Default: 1")
return
|
def add_prerank_parser(subparsers):
"""Add function 'prerank' argument parsers."""
argparser_prerank = subparsers.add_parser("prerank", help="Run GSEApy Prerank tool on preranked gene list.")
# group for input files
prerank_input = argparser_prerank.add_argument_group("Input files arguments")
prerank_input.add_argument("-r", "--rnk", dest="rnk", action="store", type=str, required=True,
help="Ranking metric file in .rnk format. Same with GSEA.")
prerank_input.add_argument("-g", "--gmt", dest="gmt", action="store", type=str, required=True,
help="Gene set database in GMT format. Same with GSEA.")
prerank_input.add_argument("-l", "--label", action='store', nargs=2, dest='label',
metavar=('pos', 'neg'), type=str, default=('Pos','Neg'),
                        help="The phenotype label argument needs two parameters (pos and neg). Default: ('Pos','Neg')")
# group for output files
prerank_output = argparser_prerank.add_argument_group("Output arguments")
add_output_option(prerank_output)
# group for General options.
prerank_opt = argparser_prerank.add_argument_group("GSEA advanced arguments")
prerank_opt.add_argument("-n", "--permu-num", dest = "n", action="store", type=int, default=1000, metavar='nperm',
help="Number of random permutations. For calculating esnulls. Default: 1000")
prerank_opt.add_argument("--min-size", dest="mins", action="store", type=int, default=15, metavar='int',
help="Min size of input genes presented in Gene Sets. Default: 15")
prerank_opt.add_argument("--max-size", dest = "maxs", action="store", type=int, default=500, metavar='int',
help="Max size of input genes presented in Gene Sets. Default: 500")
prerank_opt.add_argument("-w", "--weight", action='store', dest='weight', default=1.0, type=float, metavar='float',
help='Weighted_score of rank_metrics. For weighting input genes. Choose from {0, 1, 1.5, 2}. Default: 1',)
prerank_opt.add_argument("-a", "--ascending", action='store_true', dest='ascending', default=False,
help='Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.')
prerank_opt.add_argument("-s", "--seed", dest = "seed", action="store", type=int, default=None, metavar='',
                        help="Random seed. Default: None")
prerank_opt.add_argument("-p", "--threads", dest = "threads", action="store", type=int, default=1, metavar='procs',
help="Number of Processes you are going to use. Default: 1")
return
|
[
"Add",
"function",
"prerank",
"argument",
"parsers",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L191-L227
|
[
"def",
"add_prerank_parser",
"(",
"subparsers",
")",
":",
"argparser_prerank",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"prerank\"",
",",
"help",
"=",
"\"Run GSEApy Prerank tool on preranked gene list.\"",
")",
"# group for input files",
"prerank_input",
"=",
"argparser_prerank",
".",
"add_argument_group",
"(",
"\"Input files arguments\"",
")",
"prerank_input",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--rnk\"",
",",
"dest",
"=",
"\"rnk\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Ranking metric file in .rnk format. Same with GSEA.\"",
")",
"prerank_input",
".",
"add_argument",
"(",
"\"-g\"",
",",
"\"--gmt\"",
",",
"dest",
"=",
"\"gmt\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Gene set database in GMT format. Same with GSEA.\"",
")",
"prerank_input",
".",
"add_argument",
"(",
"\"-l\"",
",",
"\"--label\"",
",",
"action",
"=",
"'store'",
",",
"nargs",
"=",
"2",
",",
"dest",
"=",
"'label'",
",",
"metavar",
"=",
"(",
"'pos'",
",",
"'neg'",
")",
",",
"type",
"=",
"str",
",",
"default",
"=",
"(",
"'Pos'",
",",
"'Neg'",
")",
",",
"help",
"=",
"\"The phenotype label argument need two parameters to define. Default: ('Pos','Neg')\"",
")",
"# group for output files",
"prerank_output",
"=",
"argparser_prerank",
".",
"add_argument_group",
"(",
"\"Output arguments\"",
")",
"add_output_option",
"(",
"prerank_output",
")",
"# group for General options.",
"prerank_opt",
"=",
"argparser_prerank",
".",
"add_argument_group",
"(",
"\"GSEA advanced arguments\"",
")",
"prerank_opt",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--permu-num\"",
",",
"dest",
"=",
"\"n\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1000",
",",
"metavar",
"=",
"'nperm'",
",",
"help",
"=",
"\"Number of random permutations. For calculating esnulls. Default: 1000\"",
")",
"prerank_opt",
".",
"add_argument",
"(",
"\"--min-size\"",
",",
"dest",
"=",
"\"mins\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"15",
",",
"metavar",
"=",
"'int'",
",",
"help",
"=",
"\"Min size of input genes presented in Gene Sets. Default: 15\"",
")",
"prerank_opt",
".",
"add_argument",
"(",
"\"--max-size\"",
",",
"dest",
"=",
"\"maxs\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"500",
",",
"metavar",
"=",
"'int'",
",",
"help",
"=",
"\"Max size of input genes presented in Gene Sets. Default: 500\"",
")",
"prerank_opt",
".",
"add_argument",
"(",
"\"-w\"",
",",
"\"--weight\"",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'weight'",
",",
"default",
"=",
"1.0",
",",
"type",
"=",
"float",
",",
"metavar",
"=",
"'float'",
",",
"help",
"=",
"'Weighted_score of rank_metrics. For weighting input genes. Choose from {0, 1, 1.5, 2}. Default: 1'",
",",
")",
"prerank_opt",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--ascending\"",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'ascending'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Rank metric sorting order. If the -a flag was chosen, then ascending equals to True. Default: False.'",
")",
"prerank_opt",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--seed\"",
",",
"dest",
"=",
"\"seed\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"None",
",",
"metavar",
"=",
"''",
",",
"help",
"=",
"\"Number of random seed. Default: None\"",
")",
"prerank_opt",
".",
"add_argument",
"(",
"\"-p\"",
",",
"\"--threads\"",
",",
"dest",
"=",
"\"threads\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"metavar",
"=",
"'procs'",
",",
"help",
"=",
"\"Number of Processes you are going to use. Default: 1\"",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
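prerank differs from gsea mainly in taking a .rnk file and a two-value phenotype label; the values below are placeholders.

    argparser = prepare_argparser()
    args = argparser.parse_args(['prerank', '-r', 'ranked.rnk',
                                 '-g', 'gene_sets.gmt', '-l', 'High', 'Low'])
    print(args.rnk, args.label)  # ranked.rnk ['High', 'Low']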
|
test
|
add_plot_parser
|
Add function 'plot' argument parsers.
|
gseapy/__main__.py
|
def add_plot_parser(subparsers):
"""Add function 'plot' argument parsers."""
argparser_replot = subparsers.add_parser("replot", help="Reproduce GSEA desktop output figures.")
group_replot = argparser_replot.add_argument_group("Input arguments")
group_replot.add_argument("-i", "--indir", action="store", dest="indir", required=True, metavar='GSEA_dir',
                        help="The GSEA desktop results directory from which you want to reproduce the figures")
add_output_option(group_replot)
#add_output_group( argparser_plot )
group_replot.add_argument("-w", "--weight", action='store', dest='weight', default=1.0, type=float, metavar='float',
                        help='Weighted_score of rank_metrics. Please use the same value as in GSEA. Choose from {0, 1, 1.5, 2}. Default: 1',)
return
|
def add_plot_parser(subparsers):
"""Add function 'plot' argument parsers."""
argparser_replot = subparsers.add_parser("replot", help="Reproduce GSEA desktop output figures.")
group_replot = argparser_replot.add_argument_group("Input arguments")
group_replot.add_argument("-i", "--indir", action="store", dest="indir", required=True, metavar='GSEA_dir',
                        help="The GSEA desktop results directory from which you want to reproduce the figures")
add_output_option(group_replot)
#add_output_group( argparser_plot )
group_replot.add_argument("-w", "--weight", action='store', dest='weight', default=1.0, type=float, metavar='float',
                        help='Weighted_score of rank_metrics. Please use the same value as in GSEA. Choose from {0, 1, 1.5, 2}. Default: 1',)
return
|
[
"Add",
"function",
"plot",
"argument",
"parsers",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L271-L285
|
[
"def",
"add_plot_parser",
"(",
"subparsers",
")",
":",
"argparser_replot",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"replot\"",
",",
"help",
"=",
"\"Reproduce GSEA desktop output figures.\"",
")",
"group_replot",
"=",
"argparser_replot",
".",
"add_argument_group",
"(",
"\"Input arguments\"",
")",
"group_replot",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--indir\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"indir\"",
",",
"required",
"=",
"True",
",",
"metavar",
"=",
"'GSEA_dir'",
",",
"help",
"=",
"\"The GSEA desktop results directroy that you want to reproduce the figure \"",
")",
"add_output_option",
"(",
"group_replot",
")",
"#add_output_group( argparser_plot )",
"group_replot",
".",
"add_argument",
"(",
"\"-w\"",
",",
"\"--weight\"",
",",
"action",
"=",
"'store'",
",",
"dest",
"=",
"'weight'",
",",
"default",
"=",
"1.0",
",",
"type",
"=",
"float",
",",
"metavar",
"=",
"'float'",
",",
"help",
"=",
"'Weighted_score of rank_metrics. Please Use the same value in GSEA. Choose from (0, 1, 1.5, 2),default: 1'",
",",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
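The replot subcommand needs only an existing GSEA desktop output directory plus the weight used in the original run; the directory name is a placeholder.

    argparser = prepare_argparser()
    args = argparser.parse_args(['replot', '-i', 'my_gsea_dir', '-w', '1'])
    print(args.indir, args.weight)  # my_gsea_dir 1.0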
|
test
|
add_enrichr_parser
|
Add function 'enrichr' argument parsers.
|
gseapy/__main__.py
|
def add_enrichr_parser(subparsers):
"""Add function 'enrichr' argument parsers."""
argparser_enrichr = subparsers.add_parser("enrichr", help="Using Enrichr API to perform GO analysis.")
# group for required options.
enrichr_opt = argparser_enrichr.add_argument_group("Input arguments")
enrichr_opt.add_argument("-i", "--input-list", action="store", dest="gene_list", type=str, required=True, metavar='IDs',
help="Enrichr uses a list of gene names as input.")
enrichr_opt.add_argument("-g", "--gene-sets", action="store", dest="library", type=str, required=True, metavar='GMT',
help="Enrichr library name(s) required. Separate each name by comma.")
enrichr_opt.add_argument("--org", "--organism", action="store", dest="organism", type=str, default='',
help="Enrichr supported organism name. Default: human. See here: https://amp.pharm.mssm.edu/modEnrichr.")
enrichr_opt.add_argument("--ds", "--description", action="store", dest="descrip", type=str, default='enrichr', metavar='STRING',
help="It is recommended to enter a short description for your list so that multiple lists \
can be differentiated from each other if you choose to save or share your list.")
enrichr_opt.add_argument("--cut", "--cut-off", action="store", dest="thresh", metavar='float', type=float, default=0.05,
help="Adjust-Pval cutoff, used for generating plots. Default: 0.05.")
enrichr_opt.add_argument("--bg", "--background", action="store", dest="bg", default='hsapiens_gene_ensembl', metavar='BGNUM',
help="BioMart Dataset name or Background total genes number. Default: None")
enrichr_opt.add_argument("-t", "--top-term", dest="term", action="store", type=int, default=10, metavar='int',
help="Numbers of top terms shown in the plot. Default: 10")
# enrichr_opt.add_argument("--scale", dest = "scale", action="store", type=float, default=0.5, metavar='float',
# help="scatter dot scale in the dotplot. Default: 0.5")
# enrichr_opt.add_argument("--no-plot", action='store_true', dest='no_plot', default=False,
    #                          help="Suppress the plot output. This is useful only if the data themselves are of interest. Default: False.")
enrichr_output = argparser_enrichr.add_argument_group("Output figure arguments")
add_output_option(enrichr_output)
return
|
def add_enrichr_parser(subparsers):
"""Add function 'enrichr' argument parsers."""
argparser_enrichr = subparsers.add_parser("enrichr", help="Using Enrichr API to perform GO analysis.")
# group for required options.
enrichr_opt = argparser_enrichr.add_argument_group("Input arguments")
enrichr_opt.add_argument("-i", "--input-list", action="store", dest="gene_list", type=str, required=True, metavar='IDs',
help="Enrichr uses a list of gene names as input.")
enrichr_opt.add_argument("-g", "--gene-sets", action="store", dest="library", type=str, required=True, metavar='GMT',
help="Enrichr library name(s) required. Separate each name by comma.")
enrichr_opt.add_argument("--org", "--organism", action="store", dest="organism", type=str, default='',
help="Enrichr supported organism name. Default: human. See here: https://amp.pharm.mssm.edu/modEnrichr.")
enrichr_opt.add_argument("--ds", "--description", action="store", dest="descrip", type=str, default='enrichr', metavar='STRING',
help="It is recommended to enter a short description for your list so that multiple lists \
can be differentiated from each other if you choose to save or share your list.")
enrichr_opt.add_argument("--cut", "--cut-off", action="store", dest="thresh", metavar='float', type=float, default=0.05,
help="Adjust-Pval cutoff, used for generating plots. Default: 0.05.")
enrichr_opt.add_argument("--bg", "--background", action="store", dest="bg", default='hsapiens_gene_ensembl', metavar='BGNUM',
help="BioMart Dataset name or Background total genes number. Default: None")
enrichr_opt.add_argument("-t", "--top-term", dest="term", action="store", type=int, default=10, metavar='int',
help="Numbers of top terms shown in the plot. Default: 10")
# enrichr_opt.add_argument("--scale", dest = "scale", action="store", type=float, default=0.5, metavar='float',
# help="scatter dot scale in the dotplot. Default: 0.5")
# enrichr_opt.add_argument("--no-plot", action='store_true', dest='no_plot', default=False,
    #                          help="Suppress the plot output. This is useful only if the data themselves are of interest. Default: False.")
enrichr_output = argparser_enrichr.add_argument_group("Output figure arguments")
add_output_option(enrichr_output)
return
|
[
"Add",
"function",
"enrichr",
"argument",
"parsers",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L288-L317
|
[
"def",
"add_enrichr_parser",
"(",
"subparsers",
")",
":",
"argparser_enrichr",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"enrichr\"",
",",
"help",
"=",
"\"Using Enrichr API to perform GO analysis.\"",
")",
"# group for required options.",
"enrichr_opt",
"=",
"argparser_enrichr",
".",
"add_argument_group",
"(",
"\"Input arguments\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--input-list\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"gene_list\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"metavar",
"=",
"'IDs'",
",",
"help",
"=",
"\"Enrichr uses a list of gene names as input.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"-g\"",
",",
"\"--gene-sets\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"library\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"metavar",
"=",
"'GMT'",
",",
"help",
"=",
"\"Enrichr library name(s) required. Separate each name by comma.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"--org\"",
",",
"\"--organism\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"organism\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"''",
",",
"help",
"=",
"\"Enrichr supported organism name. Default: human. See here: https://amp.pharm.mssm.edu/modEnrichr.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"--ds\"",
",",
"\"--description\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"descrip\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'enrichr'",
",",
"metavar",
"=",
"'STRING'",
",",
"help",
"=",
"\"It is recommended to enter a short description for your list so that multiple lists \\\n can be differentiated from each other if you choose to save or share your list.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"--cut\"",
",",
"\"--cut-off\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"thresh\"",
",",
"metavar",
"=",
"'float'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.05",
",",
"help",
"=",
"\"Adjust-Pval cutoff, used for generating plots. Default: 0.05.\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"--bg\"",
",",
"\"--background\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"bg\"",
",",
"default",
"=",
"'hsapiens_gene_ensembl'",
",",
"metavar",
"=",
"'BGNUM'",
",",
"help",
"=",
"\"BioMart Dataset name or Background total genes number. Default: None\"",
")",
"enrichr_opt",
".",
"add_argument",
"(",
"\"-t\"",
",",
"\"--top-term\"",
",",
"dest",
"=",
"\"term\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"10",
",",
"metavar",
"=",
"'int'",
",",
"help",
"=",
"\"Numbers of top terms shown in the plot. Default: 10\"",
")",
"# enrichr_opt.add_argument(\"--scale\", dest = \"scale\", action=\"store\", type=float, default=0.5, metavar='float',",
"# help=\"scatter dot scale in the dotplot. Default: 0.5\")",
"# enrichr_opt.add_argument(\"--no-plot\", action='store_true', dest='no_plot', default=False,",
"# help=\"Suppress the plot output.This is useful only if data are interested. Default: False.\")",
"enrichr_output",
"=",
"argparser_enrichr",
".",
"add_argument_group",
"(",
"\"Output figure arguments\"",
")",
"add_output_option",
"(",
"enrichr_output",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
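A usage sketch for add_enrichr_parser above (hedged: the file name and library name are placeholders, and add_output_option may demand further output flags):

import argparse
from gseapy.__main__ import add_enrichr_parser  # module path per the record above

parser = argparse.ArgumentParser(prog="gseapy")
subparsers = parser.add_subparsers(dest="subcommand")
add_enrichr_parser(subparsers)
# "-i" and "-g" are required per the add_argument calls shown above.
args = parser.parse_args(["enrichr", "-i", "genes.txt", "-g", "KEGG_2016"])
print(args.gene_list, args.library, args.thresh)  # genes.txt KEGG_2016 0.05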
test
|
add_biomart_parser
|
Add function 'biomart' argument parsers.
|
gseapy/__main__.py
|
def add_biomart_parser(subparsers):
"""Add function 'biomart' argument parsers."""
argparser_biomart = subparsers.add_parser("biomart", help="Using BioMart API to convert gene ids.")
# group for required options.
biomart_opt = argparser_biomart.add_argument_group("Input arguments")
biomart_opt.add_argument("-f", "--filter", action='store', nargs=2, dest='filter',
required=True, metavar=('NAME', 'VALUE'),
help="""Which filter to use. Input filter name, and value.
If multi-value required, separate each value by comma.
If value is a txt file, then one ID per row, exclude header.""")
biomart_opt.add_argument("-a", "--attributes", action="store", dest="attrs", type=str, required=True, metavar='ATTR',
help="Which attribute(s) to retrieve. Separate each attr by comma.")
biomart_opt.add_argument("-o", "--ofile", dest="ofile", type=str, required=True, help="Output file name")
biomart_opt.add_argument("-d", "--dataset", action="store", dest="bg", type=str, default='hsapiens_gene_ensembl', metavar='DATA',
help="Which dataset to use. Default: hsapiens_gene_ensembl")
biomart_opt.add_argument("--host", action="store", dest="host", type=str, default='www.ensembl.org', metavar='HOST',
help="Which host to use. Select from {'www.ensembl.org', 'asia.ensembl.org', 'useast.ensembl.org'}.")
biomart_opt.add_argument("-m", "--mart", action="store", dest="mart", type=str, metavar='MART',
default="ENSEMBL_MART_ENSEMBL", help="Which mart to use. Default: ENSEMBL_MART_ENSEMBL.")
biomart_opt.add_argument("-v", "--verbose", action="store_true", default=False, dest='verbose',
help="Increase output verbosity, print out progress of your job", )
|
def add_biomart_parser(subparsers):
"""Add function 'biomart' argument parsers."""
argparser_biomart = subparsers.add_parser("biomart", help="Using BioMart API to convert gene ids.")
# group for required options.
biomart_opt = argparser_biomart.add_argument_group("Input arguments")
biomart_opt.add_argument("-f", "--filter", action='store', nargs=2, dest='filter',
required=True, metavar=('NAME', 'VALUE'),
help="""Which filter to use. Input filter name, and value.
If multi-value required, separate each value by comma.
If value is a txt file, then one ID per row, exclude header.""")
biomart_opt.add_argument("-a", "--attributes", action="store", dest="attrs", type=str, required=True, metavar='ATTR',
help="Which attribute(s) to retrieve. Separate each attr by comma.")
biomart_opt.add_argument("-o", "--ofile", dest="ofile", type=str, required=True, help="Output file name")
biomart_opt.add_argument("-d", "--dataset", action="store", dest="bg", type=str, default='hsapiens_gene_ensembl', metavar='DATA',
help="Which dataset to use. Default: hsapiens_gene_ensembl")
biomart_opt.add_argument("--host", action="store", dest="host", type=str, default='www.ensembl.org', metavar='HOST',
help="Which host to use. Select from {'www.ensembl.org', 'asia.ensembl.org', 'useast.ensembl.org'}.")
biomart_opt.add_argument("-m", "--mart", action="store", dest="mart", type=str, metavar='MART',
default="ENSEMBL_MART_ENSEMBL", help="Which mart to use. Default: ENSEMBL_MART_ENSEMBL.")
biomart_opt.add_argument("-v", "--verbose", action="store_true", default=False, dest='verbose',
help="Increase output verbosity, print out progress of your job", )
|
[
"Add",
"function",
"biomart",
"argument",
"parsers",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/__main__.py#L320-L342
|
[
"def",
"add_biomart_parser",
"(",
"subparsers",
")",
":",
"argparser_biomart",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"biomart\"",
",",
"help",
"=",
"\"Using BioMart API to convert gene ids.\"",
")",
"# group for required options.",
"biomart_opt",
"=",
"argparser_biomart",
".",
"add_argument_group",
"(",
"\"Input arguments\"",
")",
"biomart_opt",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--filter\"",
",",
"action",
"=",
"'store'",
",",
"nargs",
"=",
"2",
",",
"dest",
"=",
"'filter'",
",",
"required",
"=",
"True",
",",
"metavar",
"=",
"(",
"'NAME'",
",",
"'VALUE'",
")",
",",
"help",
"=",
"\"\"\"Which filter to use. Input filter name, and value.\n If multi-value required, separate each value by comma.\n If value is a txt file, then one ID per row, exclude header.\"\"\"",
")",
"biomart_opt",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--attributes\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"attrs\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"metavar",
"=",
"'ATTR'",
",",
"help",
"=",
"\"Which attribute(s) to retrieve. Separate each attr by comma.\"",
")",
"biomart_opt",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--ofile\"",
",",
"dest",
"=",
"\"ofile\"",
",",
"type",
"=",
"str",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"Output file name\"",
")",
"biomart_opt",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--dataset\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"bg\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'hsapiens_gene_ensembl'",
",",
"metavar",
"=",
"'DATA'",
",",
"help",
"=",
"\"Which dataset to use. Default: hsapiens_gene_ensembl\"",
")",
"biomart_opt",
".",
"add_argument",
"(",
"\"--host\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"host\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'www.ensembl.org'",
",",
"metavar",
"=",
"'HOST'",
",",
"help",
"=",
"\"Which host to use. Select from {'www.ensembl.org', 'asia.ensembl.org', 'useast.ensembl.org'}.\"",
")",
"biomart_opt",
".",
"add_argument",
"(",
"\"-m\"",
",",
"\"--mart\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"mart\"",
",",
"type",
"=",
"str",
",",
"metavar",
"=",
"'MART'",
",",
"default",
"=",
"\"ENSEMBL_MART_ENSEMBL\"",
",",
"help",
"=",
"\"Which mart to use. Default: ENSEMBL_MART_ENSEMBL.\"",
")",
"biomart_opt",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--verbose\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"dest",
"=",
"'verbose'",
",",
"help",
"=",
"\"Increase output verbosity, print out progress of your job\"",
",",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
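A usage sketch for add_biomart_parser above; all required flags (-f, -a, -o) are supplied, and the Ensembl gene id and attribute names are illustrative placeholders:

import argparse
from gseapy.__main__ import add_biomart_parser  # module path per the record above

parser = argparse.ArgumentParser(prog="gseapy")
subparsers = parser.add_subparsers(dest="subcommand")
add_biomart_parser(subparsers)
args = parser.parse_args([
    "biomart",
    "-f", "ensembl_gene_id", "ENSG00000157764",  # nargs=2: filter name, then value
    "-a", "external_gene_name,go_id",            # comma-separated attributes
    "-o", "out.txt",
])
print(args.filter, args.attrs, args.ofile)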
test
|
enrichment_score
|
This is the most important function of GSEApy. It uses the same algorithm as GSEA and ssGSEA.
:param gene_list: The ordered gene list gene_name_list, rank_metric.index.values
:param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
:param weighted_score_type: It is the same as GSEA's weighted_score method. Weighting by the correlation
is a very reasonable choice that allows significant gene sets with less than perfect coherence.
Options: 0 (classic), 1, 1.5, 2. Default: 1. If one is interested in penalizing sets for lack of
coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1
might be appropriate. On the other hand, if one uses sets with a large number of genes and only
a small subset of those is expected to be coherent, then one could consider using p > 1.
Our recommendation is to use p = 1 and use other settings only if you are very experienced
with the method and its behavior.
:param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in
the gene list. Or rankings, rank_metric.values
:param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value
equal to the permutation number.
:param rs: Random state for initializing gene list shuffling. Default: np.random.RandomState(seed=None)
:return:
ES: Enrichment score (real number between -1 and +1)
ESNULL: Enrichment score calculated from random permutations.
Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.
RES: Numerical vector containing the running enrichment score for all locations in the gene list.
|
gseapy/algorithm.py
|
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1,
nperm=1000, rs=np.random.RandomState(), single=False, scale=False):
"""This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.
:param gene_list: The ordered gene list gene_name_list, rank_metric.index.values
:param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
    :param weighted_score_type: It is the same as GSEA's weighted_score method. Weighting by the correlation
        is a very reasonable choice that allows significant gene sets with less than perfect coherence.
        Options: 0 (classic), 1, 1.5, 2. Default: 1. If one is interested in penalizing sets for lack of
        coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1
        might be appropriate. On the other hand, if one uses sets with a large number of genes and only
a small subset of those is expected to be coherent, then one could consider using p > 1.
Our recommendation is to use p = 1 and use other settings only if you are very experienced
with the method and its behavior.
:param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in
the gene list. Or rankings, rank_metric.values
:param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value
equal to the permutation number.
:param rs: Random state for initializing gene list shuffling. Default: np.random.RandomState(seed=None)
:return:
ES: Enrichment score (real number between -1 and +1)
ESNULL: Enrichment score calculated from random permutations.
Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.
        RES: Numerical vector containing the running enrichment score for all locations in the gene list.
"""
N = len(gene_list)
# Test whether each element of a 1-D array is also present in a second array
# It's more intuitive here than original enrichment_score source code.
    # use .astype to convert bool to integer
tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int) # notice that the sign is 0 (no tag) or 1 (tag)
if weighted_score_type == 0 :
correl_vector = np.repeat(1, N)
else:
correl_vector = np.abs(correl_vector)**weighted_score_type
# get indices of tag_indicator
hit_ind = np.flatnonzero(tag_indicator).tolist()
    # if used to compute esnull, set esnull equal to permutation number, e.g. 1000
# else just compute enrichment scores
# set axis to 1, because we have 2D array
axis = 1
tag_indicator = np.tile(tag_indicator, (nperm+1,1))
correl_vector = np.tile(correl_vector,(nperm+1,1))
# gene list permutation
for i in range(nperm): rs.shuffle(tag_indicator[i])
# np.apply_along_axis(rs.shuffle, 1, tag_indicator)
Nhint = tag_indicator.sum(axis=axis, keepdims=True)
sum_correl_tag = np.sum(correl_vector*tag_indicator, axis=axis, keepdims=True)
# compute ES score, the code below is identical to gsea enrichment_score method.
no_tag_indicator = 1 - tag_indicator
Nmiss = N - Nhint
norm_tag = 1.0/sum_correl_tag
norm_no_tag = 1.0/Nmiss
RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)
if scale: RES = RES / N
if single:
es_vec = RES.sum(axis=axis)
else:
max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)
es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)
# extract values
es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1,:]
return es, esnull, hit_ind, RES
|
def enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type=1,
nperm=1000, rs=np.random.RandomState(), single=False, scale=False):
"""This is the most important function of GSEApy. It has the same algorithm with GSEA and ssGSEA.
:param gene_list: The ordered gene list gene_name_list, rank_metric.index.values
:param gene_set: gene_sets in gmt file, please use gsea_gmt_parser to get gene_set.
    :param weighted_score_type: It is the same as GSEA's weighted_score method. Weighting by the correlation
        is a very reasonable choice that allows significant gene sets with less than perfect coherence.
        Options: 0 (classic), 1, 1.5, 2. Default: 1. If one is interested in penalizing sets for lack of
        coherence or to discover sets with any type of nonrandom distribution of tags, a value p < 1
        might be appropriate. On the other hand, if one uses sets with a large number of genes and only
a small subset of those is expected to be coherent, then one could consider using p > 1.
Our recommendation is to use p = 1 and use other settings only if you are very experienced
with the method and its behavior.
:param correl_vector: A vector with the correlations (e.g. signal to noise scores) corresponding to the genes in
the gene list. Or rankings, rank_metric.values
:param nperm: Only use this parameter when computing esnull for statistical testing. Set the esnull value
equal to the permutation number.
:param rs: Random state for initializing gene list shuffling. Default: np.random.RandomState(seed=None)
:return:
ES: Enrichment score (real number between -1 and +1)
ESNULL: Enrichment score calculated from random permutations.
Hits_Indices: Index of a gene in gene_list, if gene is included in gene_set.
        RES: Numerical vector containing the running enrichment score for all locations in the gene list.
"""
N = len(gene_list)
# Test whether each element of a 1-D array is also present in a second array
# It's more intuitive here than original enrichment_score source code.
    # use .astype to convert bool to integer
tag_indicator = np.in1d(gene_list, gene_set, assume_unique=True).astype(int) # notice that the sign is 0 (no tag) or 1 (tag)
if weighted_score_type == 0 :
correl_vector = np.repeat(1, N)
else:
correl_vector = np.abs(correl_vector)**weighted_score_type
# get indices of tag_indicator
hit_ind = np.flatnonzero(tag_indicator).tolist()
    # if used to compute esnull, set esnull equal to permutation number, e.g. 1000
# else just compute enrichment scores
# set axis to 1, because we have 2D array
axis = 1
tag_indicator = np.tile(tag_indicator, (nperm+1,1))
correl_vector = np.tile(correl_vector,(nperm+1,1))
# gene list permutation
for i in range(nperm): rs.shuffle(tag_indicator[i])
# np.apply_along_axis(rs.shuffle, 1, tag_indicator)
Nhint = tag_indicator.sum(axis=axis, keepdims=True)
sum_correl_tag = np.sum(correl_vector*tag_indicator, axis=axis, keepdims=True)
# compute ES score, the code below is identical to gsea enrichment_score method.
no_tag_indicator = 1 - tag_indicator
Nmiss = N - Nhint
norm_tag = 1.0/sum_correl_tag
norm_no_tag = 1.0/Nmiss
RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)
if scale: RES = RES / N
if single:
es_vec = RES.sum(axis=axis)
else:
max_ES, min_ES = RES.max(axis=axis), RES.min(axis=axis)
es_vec = np.where(np.abs(max_ES) > np.abs(min_ES), max_ES, min_ES)
# extract values
es, esnull, RES = es_vec[-1], es_vec[:-1], RES[-1,:]
return es, esnull, hit_ind, RES
|
[
"This",
"is",
"the",
"most",
"important",
"function",
"of",
"GSEApy",
".",
"It",
"has",
"the",
"same",
"algorithm",
"with",
"GSEA",
"and",
"ssGSEA",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L10-L85
|
[
"def",
"enrichment_score",
"(",
"gene_list",
",",
"correl_vector",
",",
"gene_set",
",",
"weighted_score_type",
"=",
"1",
",",
"nperm",
"=",
"1000",
",",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
")",
",",
"single",
"=",
"False",
",",
"scale",
"=",
"False",
")",
":",
"N",
"=",
"len",
"(",
"gene_list",
")",
"# Test whether each element of a 1-D array is also present in a second array",
"# It's more intuitive here than original enrichment_score source code.",
"# use .astype to covert bool to integer",
"tag_indicator",
"=",
"np",
".",
"in1d",
"(",
"gene_list",
",",
"gene_set",
",",
"assume_unique",
"=",
"True",
")",
".",
"astype",
"(",
"int",
")",
"# notice that the sign is 0 (no tag) or 1 (tag)",
"if",
"weighted_score_type",
"==",
"0",
":",
"correl_vector",
"=",
"np",
".",
"repeat",
"(",
"1",
",",
"N",
")",
"else",
":",
"correl_vector",
"=",
"np",
".",
"abs",
"(",
"correl_vector",
")",
"**",
"weighted_score_type",
"# get indices of tag_indicator",
"hit_ind",
"=",
"np",
".",
"flatnonzero",
"(",
"tag_indicator",
")",
".",
"tolist",
"(",
")",
"# if used for compute esnull, set esnull equal to permutation number, e.g. 1000",
"# else just compute enrichment scores",
"# set axis to 1, because we have 2D array",
"axis",
"=",
"1",
"tag_indicator",
"=",
"np",
".",
"tile",
"(",
"tag_indicator",
",",
"(",
"nperm",
"+",
"1",
",",
"1",
")",
")",
"correl_vector",
"=",
"np",
".",
"tile",
"(",
"correl_vector",
",",
"(",
"nperm",
"+",
"1",
",",
"1",
")",
")",
"# gene list permutation",
"for",
"i",
"in",
"range",
"(",
"nperm",
")",
":",
"rs",
".",
"shuffle",
"(",
"tag_indicator",
"[",
"i",
"]",
")",
"# np.apply_along_axis(rs.shuffle, 1, tag_indicator)",
"Nhint",
"=",
"tag_indicator",
".",
"sum",
"(",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"sum_correl_tag",
"=",
"np",
".",
"sum",
"(",
"correl_vector",
"*",
"tag_indicator",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"# compute ES score, the code below is identical to gsea enrichment_score method.",
"no_tag_indicator",
"=",
"1",
"-",
"tag_indicator",
"Nmiss",
"=",
"N",
"-",
"Nhint",
"norm_tag",
"=",
"1.0",
"/",
"sum_correl_tag",
"norm_no_tag",
"=",
"1.0",
"/",
"Nmiss",
"RES",
"=",
"np",
".",
"cumsum",
"(",
"tag_indicator",
"*",
"correl_vector",
"*",
"norm_tag",
"-",
"no_tag_indicator",
"*",
"norm_no_tag",
",",
"axis",
"=",
"axis",
")",
"if",
"scale",
":",
"RES",
"=",
"RES",
"/",
"N",
"if",
"single",
":",
"es_vec",
"=",
"RES",
".",
"sum",
"(",
"axis",
"=",
"axis",
")",
"else",
":",
"max_ES",
",",
"min_ES",
"=",
"RES",
".",
"max",
"(",
"axis",
"=",
"axis",
")",
",",
"RES",
".",
"min",
"(",
"axis",
"=",
"axis",
")",
"es_vec",
"=",
"np",
".",
"where",
"(",
"np",
".",
"abs",
"(",
"max_ES",
")",
">",
"np",
".",
"abs",
"(",
"min_ES",
")",
",",
"max_ES",
",",
"min_ES",
")",
"# extract values",
"es",
",",
"esnull",
",",
"RES",
"=",
"es_vec",
"[",
"-",
"1",
"]",
",",
"es_vec",
"[",
":",
"-",
"1",
"]",
",",
"RES",
"[",
"-",
"1",
",",
":",
"]",
"return",
"es",
",",
"esnull",
",",
"hit_ind",
",",
"RES"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
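A toy call of enrichment_score from the record above. The gene names and scores are invented purely for illustration, and the import path assumes the module layout shown in the record:

import numpy as np
from gseapy.algorithm import enrichment_score

gene_list = np.array(["TP53", "EGFR", "MYC", "BRCA1", "KRAS"])   # ranked genes
correl_vector = np.array([2.0, 1.5, 1.0, -0.5, -1.2])            # e.g. signal-to-noise
gene_set = ["TP53", "KRAS"]

es, esnull, hit_ind, res = enrichment_score(
    gene_list, correl_vector, gene_set,
    weighted_score_type=1, nperm=100, rs=np.random.RandomState(42))
print(round(float(es), 3), hit_ind, len(esnull))  # ES, hit indices [0, 4], 100 null scores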
test
|
enrichment_score_tensor
|
Next generation algorithm of GSEA and ssGSEA.
:param gene_mat: the ordered gene list(vector) with or without gene indices matrix.
:param cor_mat: correlation vector or matrix (e.g. signal to noise scores)
corresponding to the genes in the gene list or matrix.
:param dict gene_sets: gmt file dict.
:param float weighted_score_type: weighting by the correlation.
options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.
:param int nperm: permutation times.
:param bool scale: If True, normalize the scores by the number of genes in gene_mat.
:param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.
:param rs: Random state for initializing gene list shuffling.
Default: np.random.RandomState(seed=None)
:return: a tuple containing::
| ES: Enrichment score (real number between -1 and +1); for ssGSEA, set scale to True.
| ESNULL: Enrichment score calculated from random permutation.
| Hits_Indices: Indices of genes if genes are included in gene_set.
| RES: The running enrichment score for all locations in the gene list.
|
gseapy/algorithm.py
|
def enrichment_score_tensor(gene_mat, cor_mat, gene_sets, weighted_score_type, nperm=1000,
rs=np.random.RandomState(), single=False, scale=False):
"""Next generation algorithm of GSEA and ssGSEA.
:param gene_mat: the ordered gene list(vector) with or without gene indices matrix.
:param cor_mat: correlation vector or matrix (e.g. signal to noise scores)
corresponding to the genes in the gene list or matrix.
:param dict gene_sets: gmt file dict.
:param float weighted_score_type: weighting by the correlation.
options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.
:param int nperm: permutation times.
    :param bool scale: If True, normalize the scores by the number of genes in gene_mat.
    :param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.
    :param rs: Random state for initializing gene list shuffling.
               Default: np.random.RandomState(seed=None)
    :return: a tuple containing::
             | ES: Enrichment score (real number between -1 and +1); for ssGSEA, set scale to True.
| ESNULL: Enrichment score calculated from random permutation.
| Hits_Indices: Indices of genes if genes are included in gene_set.
| RES: The running enrichment score for all locations in the gene list.
"""
    # gene_mat -> 1d: prerank, ssGSEA or 2d: GSEA
    keys = sorted(gene_sets.keys())
    if weighted_score_type == 0:
        # don't bother doing calculation, just set to 1
cor_mat = np.ones(cor_mat.shape)
elif weighted_score_type > 0:
pass
else:
logging.error("Using negative values of weighted_score_type, not allowed")
sys.exit(0)
cor_mat = np.abs(cor_mat)
if cor_mat.ndim ==1:
# ssGSEA or Prerank
        # genesets->M, genes->N, perm-> axis=2
N, M = len(gene_mat), len(keys)
# generate gene hits matrix
# for 1d ndarray of gene_mat, set assume_unique=True,
# means the input arrays are both assumed to be unique,
# which can speed up the calculation.
tag_indicator = np.vstack([np.in1d(gene_mat, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
# index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in tag_indicator ]
        # generate permuted hits matrix
perm_tag_tensor = np.repeat(tag_indicator, nperm+1).reshape((M,N,nperm+1))
# shuffle matrix, last matrix is not shuffled when nperm > 0
if nperm: np.apply_along_axis(lambda x: np.apply_along_axis(rs.shuffle,0,x),1, perm_tag_tensor[:,:,:-1])
# missing hits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,np.newaxis])** weighted_score_type
elif cor_mat.ndim == 2:
# GSEA
# 2d ndarray, gene_mat and cor_mat are shuffled already
# reshape matrix
cor_mat = cor_mat.T
        # gene_mat is a tuple containing (gene_name, permuted_gene_name_indices)
        genes, genes_ind = gene_mat
        # genesets->M, genes->N, perm-> axis=2
        # don't use assume_unique=True on 2d arrays when using np.isin();
        # elements in gene_mat are not unique, or it will cause unwanted results
tag_indicator = np.vstack([np.in1d(genes, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
perm_tag_tensor = np.stack([tag.take(genes_ind).T for tag in tag_indicator], axis=0)
#index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in perm_tag_tensor[:,:,-1] ]
# nohits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,:])** weighted_score_type
else:
logging.error("Program die because of unsupported input")
sys.exit(0)
# Nhint = tag_indicator.sum(1)
# Nmiss = N - Nhint
axis=1
P_GW_denominator = np.sum(rank_alpha, axis=axis, keepdims=True)
P_NG_denominator = np.sum(no_tag_tensor, axis=axis, keepdims=True)
REStensor = np.cumsum(rank_alpha / P_GW_denominator - no_tag_tensor / P_NG_denominator, axis=axis)
    # ssGSEA: scale ES by gene number?
# https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33
if scale: REStensor = REStensor / len(gene_mat)
if single:
#ssGSEA
esmatrix = REStensor.sum(axis=axis)
else:
#GSEA
esmax, esmin = REStensor.max(axis=axis), REStensor.min(axis=axis)
esmatrix = np.where(np.abs(esmax)>np.abs(esmin), esmax, esmin)
es, esnull, RES = esmatrix[:,-1], esmatrix[:,:-1], REStensor[:,:,-1]
return es, esnull, hit_ind, RES
|
def enrichment_score_tensor(gene_mat, cor_mat, gene_sets, weighted_score_type, nperm=1000,
rs=np.random.RandomState(), single=False, scale=False):
"""Next generation algorithm of GSEA and ssGSEA.
:param gene_mat: the ordered gene list(vector) with or without gene indices matrix.
:param cor_mat: correlation vector or matrix (e.g. signal to noise scores)
corresponding to the genes in the gene list or matrix.
:param dict gene_sets: gmt file dict.
:param float weighted_score_type: weighting by the correlation.
options: 0(classic), 1, 1.5, 2. default:1 for GSEA and 0.25 for ssGSEA.
:param int nperm: permutation times.
    :param bool scale: If True, normalize the scores by the number of genes in gene_mat.
    :param bool single: If True, use ssGSEA algorithm, otherwise use GSEA.
    :param rs: Random state for initializing gene list shuffling.
               Default: np.random.RandomState(seed=None)
    :return: a tuple containing::
             | ES: Enrichment score (real number between -1 and +1); for ssGSEA, set scale to True.
| ESNULL: Enrichment score calculated from random permutation.
| Hits_Indices: Indices of genes if genes are included in gene_set.
| RES: The running enrichment score for all locations in the gene list.
"""
    # gene_mat -> 1d: prerank, ssGSEA or 2d: GSEA
    keys = sorted(gene_sets.keys())
    if weighted_score_type == 0:
        # don't bother doing calculation, just set to 1
cor_mat = np.ones(cor_mat.shape)
elif weighted_score_type > 0:
pass
else:
logging.error("Using negative values of weighted_score_type, not allowed")
sys.exit(0)
cor_mat = np.abs(cor_mat)
if cor_mat.ndim ==1:
# ssGSEA or Prerank
        # genesets->M, genes->N, perm-> axis=2
N, M = len(gene_mat), len(keys)
# generate gene hits matrix
# for 1d ndarray of gene_mat, set assume_unique=True,
# means the input arrays are both assumed to be unique,
# which can speed up the calculation.
tag_indicator = np.vstack([np.in1d(gene_mat, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
# index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in tag_indicator ]
        # generate permuted hits matrix
perm_tag_tensor = np.repeat(tag_indicator, nperm+1).reshape((M,N,nperm+1))
# shuffle matrix, last matrix is not shuffled when nperm > 0
if nperm: np.apply_along_axis(lambda x: np.apply_along_axis(rs.shuffle,0,x),1, perm_tag_tensor[:,:,:-1])
# missing hits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,np.newaxis])** weighted_score_type
elif cor_mat.ndim == 2:
# GSEA
# 2d ndarray, gene_mat and cor_mat are shuffled already
# reshape matrix
cor_mat = cor_mat.T
        # gene_mat is a tuple containing (gene_name, permuted_gene_name_indices)
        genes, genes_ind = gene_mat
        # genesets->M, genes->N, perm-> axis=2
        # don't use assume_unique=True on 2d arrays when using np.isin();
        # elements in gene_mat are not unique, or it will cause unwanted results
tag_indicator = np.vstack([np.in1d(genes, gene_sets[key], assume_unique=True) for key in keys])
tag_indicator = tag_indicator.astype(int)
perm_tag_tensor = np.stack([tag.take(genes_ind).T for tag in tag_indicator], axis=0)
#index of hits
hit_ind = [ np.flatnonzero(tag).tolist() for tag in perm_tag_tensor[:,:,-1] ]
# nohits
no_tag_tensor = 1 - perm_tag_tensor
# calculate numerator, denominator of each gene hits
rank_alpha = (perm_tag_tensor*cor_mat[np.newaxis,:,:])** weighted_score_type
else:
logging.error("Program die because of unsupported input")
sys.exit(0)
# Nhint = tag_indicator.sum(1)
# Nmiss = N - Nhint
axis=1
P_GW_denominator = np.sum(rank_alpha, axis=axis, keepdims=True)
P_NG_denominator = np.sum(no_tag_tensor, axis=axis, keepdims=True)
REStensor = np.cumsum(rank_alpha / P_GW_denominator - no_tag_tensor / P_NG_denominator, axis=axis)
    # ssGSEA: scale ES by gene number?
# https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33
if scale: REStensor = REStensor / len(gene_mat)
if single:
#ssGSEA
esmatrix = REStensor.sum(axis=axis)
else:
#GSEA
esmax, esmin = REStensor.max(axis=axis), REStensor.min(axis=axis)
esmatrix = np.where(np.abs(esmax)>np.abs(esmin), esmax, esmin)
es, esnull, RES = esmatrix[:,-1], esmatrix[:,:-1], REStensor[:,:,-1]
return es, esnull, hit_ind, RES
|
[
"Next",
"generation",
"algorithm",
"of",
"GSEA",
"and",
"ssGSEA",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L89-L188
|
[
"def",
"enrichment_score_tensor",
"(",
"gene_mat",
",",
"cor_mat",
",",
"gene_sets",
",",
"weighted_score_type",
",",
"nperm",
"=",
"1000",
",",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
")",
",",
"single",
"=",
"False",
",",
"scale",
"=",
"False",
")",
":",
"# gene_mat -> 1d: prerank, ssSSEA or 2d: GSEA",
"keys",
"=",
"sorted",
"(",
"gene_sets",
".",
"keys",
"(",
")",
")",
"if",
"weighted_score_type",
"==",
"0",
":",
"# don't bother doing calcuation, just set to 1",
"cor_mat",
"=",
"np",
".",
"ones",
"(",
"cor_mat",
".",
"shape",
")",
"elif",
"weighted_score_type",
">",
"0",
":",
"pass",
"else",
":",
"logging",
".",
"error",
"(",
"\"Using negative values of weighted_score_type, not allowed\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"cor_mat",
"=",
"np",
".",
"abs",
"(",
"cor_mat",
")",
"if",
"cor_mat",
".",
"ndim",
"==",
"1",
":",
"# ssGSEA or Prerank",
"# genestes->M, genes->N, perm-> axis=2",
"N",
",",
"M",
"=",
"len",
"(",
"gene_mat",
")",
",",
"len",
"(",
"keys",
")",
"# generate gene hits matrix",
"# for 1d ndarray of gene_mat, set assume_unique=True,",
"# means the input arrays are both assumed to be unique,",
"# which can speed up the calculation.",
"tag_indicator",
"=",
"np",
".",
"vstack",
"(",
"[",
"np",
".",
"in1d",
"(",
"gene_mat",
",",
"gene_sets",
"[",
"key",
"]",
",",
"assume_unique",
"=",
"True",
")",
"for",
"key",
"in",
"keys",
"]",
")",
"tag_indicator",
"=",
"tag_indicator",
".",
"astype",
"(",
"int",
")",
"# index of hits",
"hit_ind",
"=",
"[",
"np",
".",
"flatnonzero",
"(",
"tag",
")",
".",
"tolist",
"(",
")",
"for",
"tag",
"in",
"tag_indicator",
"]",
"# generate permutated hits matrix",
"perm_tag_tensor",
"=",
"np",
".",
"repeat",
"(",
"tag_indicator",
",",
"nperm",
"+",
"1",
")",
".",
"reshape",
"(",
"(",
"M",
",",
"N",
",",
"nperm",
"+",
"1",
")",
")",
"# shuffle matrix, last matrix is not shuffled when nperm > 0",
"if",
"nperm",
":",
"np",
".",
"apply_along_axis",
"(",
"lambda",
"x",
":",
"np",
".",
"apply_along_axis",
"(",
"rs",
".",
"shuffle",
",",
"0",
",",
"x",
")",
",",
"1",
",",
"perm_tag_tensor",
"[",
":",
",",
":",
",",
":",
"-",
"1",
"]",
")",
"# missing hits",
"no_tag_tensor",
"=",
"1",
"-",
"perm_tag_tensor",
"# calculate numerator, denominator of each gene hits",
"rank_alpha",
"=",
"(",
"perm_tag_tensor",
"*",
"cor_mat",
"[",
"np",
".",
"newaxis",
",",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"**",
"weighted_score_type",
"elif",
"cor_mat",
".",
"ndim",
"==",
"2",
":",
"# GSEA",
"# 2d ndarray, gene_mat and cor_mat are shuffled already",
"# reshape matrix",
"cor_mat",
"=",
"cor_mat",
".",
"T",
"# gene_mat is a tuple contains (gene_name, permuate_gene_name_indices)",
"genes",
",",
"genes_ind",
"=",
"gene_mat",
"# genestes->M, genes->N, perm-> axis=2",
"# don't use assume_unique=True in 2d array when use np.isin().",
"# elements in gene_mat are not unique, or will cause unwanted results",
"tag_indicator",
"=",
"np",
".",
"vstack",
"(",
"[",
"np",
".",
"in1d",
"(",
"genes",
",",
"gene_sets",
"[",
"key",
"]",
",",
"assume_unique",
"=",
"True",
")",
"for",
"key",
"in",
"keys",
"]",
")",
"tag_indicator",
"=",
"tag_indicator",
".",
"astype",
"(",
"int",
")",
"perm_tag_tensor",
"=",
"np",
".",
"stack",
"(",
"[",
"tag",
".",
"take",
"(",
"genes_ind",
")",
".",
"T",
"for",
"tag",
"in",
"tag_indicator",
"]",
",",
"axis",
"=",
"0",
")",
"#index of hits",
"hit_ind",
"=",
"[",
"np",
".",
"flatnonzero",
"(",
"tag",
")",
".",
"tolist",
"(",
")",
"for",
"tag",
"in",
"perm_tag_tensor",
"[",
":",
",",
":",
",",
"-",
"1",
"]",
"]",
"# nohits",
"no_tag_tensor",
"=",
"1",
"-",
"perm_tag_tensor",
"# calculate numerator, denominator of each gene hits",
"rank_alpha",
"=",
"(",
"perm_tag_tensor",
"*",
"cor_mat",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"]",
")",
"**",
"weighted_score_type",
"else",
":",
"logging",
".",
"error",
"(",
"\"Program die because of unsupported input\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"# Nhint = tag_indicator.sum(1)",
"# Nmiss = N - Nhint",
"axis",
"=",
"1",
"P_GW_denominator",
"=",
"np",
".",
"sum",
"(",
"rank_alpha",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"P_NG_denominator",
"=",
"np",
".",
"sum",
"(",
"no_tag_tensor",
",",
"axis",
"=",
"axis",
",",
"keepdims",
"=",
"True",
")",
"REStensor",
"=",
"np",
".",
"cumsum",
"(",
"rank_alpha",
"/",
"P_GW_denominator",
"-",
"no_tag_tensor",
"/",
"P_NG_denominator",
",",
"axis",
"=",
"axis",
")",
"# ssGSEA: scale es by gene numbers ?",
"# https://gist.github.com/gaoce/39e0907146c752c127728ad74e123b33",
"if",
"scale",
":",
"REStensor",
"=",
"REStensor",
"/",
"len",
"(",
"gene_mat",
")",
"if",
"single",
":",
"#ssGSEA",
"esmatrix",
"=",
"REStensor",
".",
"sum",
"(",
"axis",
"=",
"axis",
")",
"else",
":",
"#GSEA",
"esmax",
",",
"esmin",
"=",
"REStensor",
".",
"max",
"(",
"axis",
"=",
"axis",
")",
",",
"REStensor",
".",
"min",
"(",
"axis",
"=",
"axis",
")",
"esmatrix",
"=",
"np",
".",
"where",
"(",
"np",
".",
"abs",
"(",
"esmax",
")",
">",
"np",
".",
"abs",
"(",
"esmin",
")",
",",
"esmax",
",",
"esmin",
")",
"es",
",",
"esnull",
",",
"RES",
"=",
"esmatrix",
"[",
":",
",",
"-",
"1",
"]",
",",
"esmatrix",
"[",
":",
",",
":",
"-",
"1",
"]",
",",
"REStensor",
"[",
":",
",",
":",
",",
"-",
"1",
"]",
"return",
"es",
",",
"esnull",
",",
"hit_ind",
",",
"RES"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
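For reference, the cumulative sum stored in REStensor above is the standard GSEA running statistic. Restated in the usual notation (ranked list of N genes, gene set S with N_H members, correlations r_j, exponent p; this is a restatement of the published method, not text from this repository):

P_{\mathrm{hit}}(S, i) = \sum_{\substack{g_j \in S \\ j \le i}} \frac{|r_j|^{p}}{N_R},
\qquad
N_R = \sum_{g_j \in S} |r_j|^{p},
\qquad
P_{\mathrm{miss}}(S, i) = \sum_{\substack{g_j \notin S \\ j \le i}} \frac{1}{N - N_H}

ES(S) is the maximum deviation from zero of P_{\mathrm{hit}}(S, i) - P_{\mathrm{miss}}(S, i); with single=True (ssGSEA) the running differences are summed over i instead, and scale=True divides the running score by N.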
test
|
ranking_metric_tensor
|
Build shuffled ranking matrix when permutation_type is phenotype.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise'.
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
:param int permutation_num: how many times the classes are shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
:param bool ascending: Sort ascending vs. descending.
:return:
returns two 2d ndarrays with shape (nperm, gene_num).
| cor_mat_indices: the indices of the sorted and permuted (excluding the last row) ranking matrix.
| cor_mat: the sorted and permuted (excluding the last row) ranking matrix.
|
gseapy/algorithm.py
|
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
ascending, rs=np.random.RandomState()):
"""Build shuffled ranking matrix when permutation_type eq to phenotype.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise'.
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
    :param int permutation_num: how many times the classes are shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
    :param bool ascending: Sort ascending vs. descending.
:return:
        returns two 2d ndarrays with shape (nperm, gene_num).
        | cor_mat_indices: the indices of the sorted and permuted (excluding the last row) ranking matrix.
        | cor_mat: the sorted and permuted (excluding the last row) ranking matrix.
"""
# S: samples, G: gene number
G, S = exprs.shape
# genes = exprs.index.values
expr_mat = exprs.values.T
perm_cor_tensor = np.tile(expr_mat, (permutation_num+1,1,1))
# random shuffle on the first dim, last matrix is not shuffled
for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)
classes = np.array(classes)
pos = classes == pos
neg = classes == neg
pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1)
neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1)
pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1)
neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1)
if method == 'signal_to_noise':
cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std)
elif method == 't_test':
denom = 1.0/G
cor_mat = (pos_cor_mean - neg_cor_mean)/ np.sqrt(denom*pos_cor_std**2 + denom*neg_cor_std**2)
elif method == 'ratio_of_classes':
cor_mat = pos_cor_mean / neg_cor_mean
elif method == 'diff_of_classes':
cor_mat = pos_cor_mean - neg_cor_mean
elif method == 'log2_ratio_of_classes':
cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
else:
logging.error("Please provide correct method name!!!")
sys.exit(0)
    # return matrix [nperm+1, perm_cors]
cor_mat_ind = cor_mat.argsort()
# ndarray: sort in place
cor_mat.sort()
# genes_mat = genes.take(cor_mat_ind)
if ascending: return cor_mat_ind, cor_mat
# descending order of ranking and genes
# return genes_mat[:,::-1], cor_mat[:,::-1]
return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
|
def ranking_metric_tensor(exprs, method, permutation_num, pos, neg, classes,
ascending, rs=np.random.RandomState()):
"""Build shuffled ranking matrix when permutation_type eq to phenotype.
:param exprs: gene_expression DataFrame, gene_name indexed.
:param str method: calculate correlation or ranking. methods including:
1. 'signal_to_noise'.
2. 't_test'.
3. 'ratio_of_classes' (also referred to as fold change).
4. 'diff_of_classes'.
5. 'log2_ratio_of_classes'.
    :param int permutation_num: how many times the classes are shuffled
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of
dataframe belongs to what class of phenotype.
    :param bool ascending: Sort ascending vs. descending.
:return:
        returns two 2d ndarrays with shape (nperm, gene_num).
        | cor_mat_indices: the indices of the sorted and permuted (excluding the last row) ranking matrix.
        | cor_mat: the sorted and permuted (excluding the last row) ranking matrix.
"""
# S: samples, G: gene number
G, S = exprs.shape
# genes = exprs.index.values
expr_mat = exprs.values.T
perm_cor_tensor = np.tile(expr_mat, (permutation_num+1,1,1))
# random shuffle on the first dim, last matrix is not shuffled
for arr in perm_cor_tensor[:-1]: rs.shuffle(arr)
classes = np.array(classes)
pos = classes == pos
neg = classes == neg
pos_cor_mean = perm_cor_tensor[:,pos,:].mean(axis=1)
neg_cor_mean = perm_cor_tensor[:,neg,:].mean(axis=1)
pos_cor_std = perm_cor_tensor[:,pos,:].std(axis=1, ddof=1)
neg_cor_std = perm_cor_tensor[:,neg,:].std(axis=1, ddof=1)
if method == 'signal_to_noise':
cor_mat = (pos_cor_mean - neg_cor_mean)/(pos_cor_std + neg_cor_std)
elif method == 't_test':
denom = 1.0/G
cor_mat = (pos_cor_mean - neg_cor_mean)/ np.sqrt(denom*pos_cor_std**2 + denom*neg_cor_std**2)
elif method == 'ratio_of_classes':
cor_mat = pos_cor_mean / neg_cor_mean
elif method == 'diff_of_classes':
cor_mat = pos_cor_mean - neg_cor_mean
elif method == 'log2_ratio_of_classes':
cor_mat = np.log2(pos_cor_mean / neg_cor_mean)
else:
logging.error("Please provide correct method name!!!")
sys.exit(0)
    # return matrix [nperm+1, perm_cors]
cor_mat_ind = cor_mat.argsort()
# ndarray: sort in place
cor_mat.sort()
# genes_mat = genes.take(cor_mat_ind)
if ascending: return cor_mat_ind, cor_mat
# descending order of ranking and genes
# return genes_mat[:,::-1], cor_mat[:,::-1]
return cor_mat_ind[:, ::-1], cor_mat[:, ::-1]
|
[
"Build",
"shuffled",
"ranking",
"matrix",
"when",
"permutation_type",
"eq",
"to",
"phenotype",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L191-L253
|
[
"def",
"ranking_metric_tensor",
"(",
"exprs",
",",
"method",
",",
"permutation_num",
",",
"pos",
",",
"neg",
",",
"classes",
",",
"ascending",
",",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
")",
")",
":",
"# S: samples, G: gene number",
"G",
",",
"S",
"=",
"exprs",
".",
"shape",
"# genes = exprs.index.values",
"expr_mat",
"=",
"exprs",
".",
"values",
".",
"T",
"perm_cor_tensor",
"=",
"np",
".",
"tile",
"(",
"expr_mat",
",",
"(",
"permutation_num",
"+",
"1",
",",
"1",
",",
"1",
")",
")",
"# random shuffle on the first dim, last matrix is not shuffled",
"for",
"arr",
"in",
"perm_cor_tensor",
"[",
":",
"-",
"1",
"]",
":",
"rs",
".",
"shuffle",
"(",
"arr",
")",
"classes",
"=",
"np",
".",
"array",
"(",
"classes",
")",
"pos",
"=",
"classes",
"==",
"pos",
"neg",
"=",
"classes",
"==",
"neg",
"pos_cor_mean",
"=",
"perm_cor_tensor",
"[",
":",
",",
"pos",
",",
":",
"]",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"neg_cor_mean",
"=",
"perm_cor_tensor",
"[",
":",
",",
"neg",
",",
":",
"]",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"pos_cor_std",
"=",
"perm_cor_tensor",
"[",
":",
",",
"pos",
",",
":",
"]",
".",
"std",
"(",
"axis",
"=",
"1",
",",
"ddof",
"=",
"1",
")",
"neg_cor_std",
"=",
"perm_cor_tensor",
"[",
":",
",",
"neg",
",",
":",
"]",
".",
"std",
"(",
"axis",
"=",
"1",
",",
"ddof",
"=",
"1",
")",
"if",
"method",
"==",
"'signal_to_noise'",
":",
"cor_mat",
"=",
"(",
"pos_cor_mean",
"-",
"neg_cor_mean",
")",
"/",
"(",
"pos_cor_std",
"+",
"neg_cor_std",
")",
"elif",
"method",
"==",
"'t_test'",
":",
"denom",
"=",
"1.0",
"/",
"G",
"cor_mat",
"=",
"(",
"pos_cor_mean",
"-",
"neg_cor_mean",
")",
"/",
"np",
".",
"sqrt",
"(",
"denom",
"*",
"pos_cor_std",
"**",
"2",
"+",
"denom",
"*",
"neg_cor_std",
"**",
"2",
")",
"elif",
"method",
"==",
"'ratio_of_classes'",
":",
"cor_mat",
"=",
"pos_cor_mean",
"/",
"neg_cor_mean",
"elif",
"method",
"==",
"'diff_of_classes'",
":",
"cor_mat",
"=",
"pos_cor_mean",
"-",
"neg_cor_mean",
"elif",
"method",
"==",
"'log2_ratio_of_classes'",
":",
"cor_mat",
"=",
"np",
".",
"log2",
"(",
"pos_cor_mean",
"/",
"neg_cor_mean",
")",
"else",
":",
"logging",
".",
"error",
"(",
"\"Please provide correct method name!!!\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"# return matix[nperm+1, perm_cors]",
"cor_mat_ind",
"=",
"cor_mat",
".",
"argsort",
"(",
")",
"# ndarray: sort in place",
"cor_mat",
".",
"sort",
"(",
")",
"# genes_mat = genes.take(cor_mat_ind)",
"if",
"ascending",
":",
"return",
"cor_mat_ind",
",",
"cor_mat",
"# descending order of ranking and genes",
"# return genes_mat[:,::-1], cor_mat[:,::-1]",
"return",
"cor_mat_ind",
"[",
":",
",",
":",
":",
"-",
"1",
"]",
",",
"cor_mat",
"[",
":",
",",
":",
":",
"-",
"1",
"]"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
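A toy invocation of ranking_metric_tensor from the record above; the expression values are random placeholders and the sample/phenotype labels are invented for illustration:

import numpy as np
import pandas as pd
from gseapy.algorithm import ranking_metric_tensor

# 4 genes x 6 samples with 3 samples per phenotype.
exprs = pd.DataFrame(np.random.RandomState(0).rand(4, 6),
                     index=["g1", "g2", "g3", "g4"],
                     columns=["s%d" % i for i in range(6)])
classes = ["pos", "pos", "pos", "neg", "neg", "neg"]
ind, cor = ranking_metric_tensor(exprs, "signal_to_noise", permutation_num=10,
                                 pos="pos", neg="neg", classes=classes,
                                 ascending=False)
print(ind.shape, cor.shape)  # (11, 4) each: permutation_num + 1 rows of ranked genes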
test
|
ranking_metric
|
The main function to rank an expression table.
:param df: gene_expression DataFrame.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Other methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the t-test ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data.
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param bool ascending: bool or list of bool. Sort ascending vs. descending.
:return:
returns a pd.Series of correlation to class of each variable. Gene_name is index, and value is rankings.
Visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
|
gseapy/algorithm.py
|
def ranking_metric(df, method, pos, neg, classes, ascending):
"""The main function to rank an expression table.
:param df: gene_expression DataFrame.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
    Other methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
        The larger the t-test ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
        Uses the difference of class means to calculate fold change for natural scale data.
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param bool ascending: bool or list of bool. Sort ascending vs. descending.
:return:
returns a pd.Series of correlation to class of each variable. Gene_name is index, and value is rankings.
    Visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
"""
# exclude any zero stds.
df_mean = df.groupby(by=classes, axis=1).mean()
df_std = df.groupby(by=classes, axis=1).std()
if method == 'signal_to_noise':
ser = (df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])
elif method == 't_test':
ser = (df_mean[pos] - df_mean[neg])/ np.sqrt(df_std[pos]**2/len(df_std)+df_std[neg]**2/len(df_std) )
elif method == 'ratio_of_classes':
ser = df_mean[pos] / df_mean[neg]
elif method == 'diff_of_classes':
ser = df_mean[pos] - df_mean[neg]
elif method == 'log2_ratio_of_classes':
ser = np.log2(df_mean[pos] / df_mean[neg])
else:
logging.error("Please provide correct method name!!!")
sys.exit(0)
ser = ser.sort_values(ascending=ascending)
return ser
|
def ranking_metric(df, method, pos, neg, classes, ascending):
"""The main function to rank an expression table.
:param df: gene_expression DataFrame.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
    Other methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
        The larger the t-test ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
        Uses the difference of class means to calculate fold change for natural scale data.
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param str pos: one of labels of phenotype's names.
:param str neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param bool ascending: bool or list of bool. Sort ascending vs. descending.
:return:
returns a pd.Series of correlation to class of each variable. Gene_name is index, and value is rankings.
    Visit here for more docs: http://software.broadinstitute.org/gsea/doc/GSEAUserGuideFrame.html
"""
# exclude any zero stds.
df_mean = df.groupby(by=classes, axis=1).mean()
df_std = df.groupby(by=classes, axis=1).std()
if method == 'signal_to_noise':
ser = (df_mean[pos] - df_mean[neg])/(df_std[pos] + df_std[neg])
elif method == 't_test':
ser = (df_mean[pos] - df_mean[neg])/ np.sqrt(df_std[pos]**2/len(df_std)+df_std[neg]**2/len(df_std) )
elif method == 'ratio_of_classes':
ser = df_mean[pos] / df_mean[neg]
elif method == 'diff_of_classes':
ser = df_mean[pos] - df_mean[neg]
elif method == 'log2_ratio_of_classes':
ser = np.log2(df_mean[pos] / df_mean[neg])
else:
logging.error("Please provide correct method name!!!")
sys.exit(0)
ser = ser.sort_values(ascending=ascending)
return ser
|
[
"The",
"main",
"function",
"to",
"rank",
"an",
"expression",
"table",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L255-L320
|
[
"def",
"ranking_metric",
"(",
"df",
",",
"method",
",",
"pos",
",",
"neg",
",",
"classes",
",",
"ascending",
")",
":",
"# exclude any zero stds.",
"df_mean",
"=",
"df",
".",
"groupby",
"(",
"by",
"=",
"classes",
",",
"axis",
"=",
"1",
")",
".",
"mean",
"(",
")",
"df_std",
"=",
"df",
".",
"groupby",
"(",
"by",
"=",
"classes",
",",
"axis",
"=",
"1",
")",
".",
"std",
"(",
")",
"if",
"method",
"==",
"'signal_to_noise'",
":",
"ser",
"=",
"(",
"df_mean",
"[",
"pos",
"]",
"-",
"df_mean",
"[",
"neg",
"]",
")",
"/",
"(",
"df_std",
"[",
"pos",
"]",
"+",
"df_std",
"[",
"neg",
"]",
")",
"elif",
"method",
"==",
"'t_test'",
":",
"ser",
"=",
"(",
"df_mean",
"[",
"pos",
"]",
"-",
"df_mean",
"[",
"neg",
"]",
")",
"/",
"np",
".",
"sqrt",
"(",
"df_std",
"[",
"pos",
"]",
"**",
"2",
"/",
"len",
"(",
"df_std",
")",
"+",
"df_std",
"[",
"neg",
"]",
"**",
"2",
"/",
"len",
"(",
"df_std",
")",
")",
"elif",
"method",
"==",
"'ratio_of_classes'",
":",
"ser",
"=",
"df_mean",
"[",
"pos",
"]",
"/",
"df_mean",
"[",
"neg",
"]",
"elif",
"method",
"==",
"'diff_of_classes'",
":",
"ser",
"=",
"df_mean",
"[",
"pos",
"]",
"-",
"df_mean",
"[",
"neg",
"]",
"elif",
"method",
"==",
"'log2_ratio_of_classes'",
":",
"ser",
"=",
"np",
".",
"log2",
"(",
"df_mean",
"[",
"pos",
"]",
"/",
"df_mean",
"[",
"neg",
"]",
")",
"else",
":",
"logging",
".",
"error",
"(",
"\"Please provide correct method name!!!\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"ser",
"=",
"ser",
".",
"sort_values",
"(",
"ascending",
"=",
"ascending",
")",
"return",
"ser"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
gsea_compute_tensor
|
compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default: 1.
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
|
gseapy/algorithm.py
|
def gsea_compute_tensor(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
    :param float weighted_score_type: default: 1.
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
rs = np.random.RandomState(seed)
genes_mat, cor_mat = data.index.values, data.values
base = 5 if data.shape[0] >= 5000 else 10
block = ceil(len(subsets) / base)
if permutation_type == "phenotype":
# shuffling classes and generate random correlation rankings
logging.debug("Start to permutate classes..............................")
genes_ind = []
cor_mat = []
temp_rnk = []
pool_rnk = Pool(processes=processes)
# split large array into smaller blocks to avoid memory overflow
        i = 1
        while i <= block:
            rs = np.random.RandomState(seed)
            temp_rnk.append(pool_rnk.apply_async(ranking_metric_tensor,
                            args=(data, method, base, pheno_pos, pheno_neg, classes,
                                  ascending, rs)))
            i += 1
pool_rnk.close()
pool_rnk.join()
for k, temp in enumerate(temp_rnk):
gi, cor = temp.get()
if k+1 == block:
genes_ind.append(gi)
cor_mat.append(cor)
else:
genes_ind.append(gi[:-1])
cor_mat.append(cor[:-1])
genes_ind, cor_mat = np.vstack(genes_ind), np.vstack(cor_mat)
# convert to tuple
genes_mat = (data.index.values, genes_ind)
logging.debug("Start to compute es and esnulls........................")
# Prerank, ssGSEA, GSEA
es = []
RES = []
hit_ind = []
esnull = []
temp_esnu = []
pool_esnu = Pool(processes=processes)
# split large array into smaller blocks to avoid memory overflow
i, m = 1, 0
while i <= block:
# you have to reseed, or all your processes are sharing the same seed value
rs = np.random.RandomState(seed)
gmtrim = {k: gmt.get(k) for k in subsets[m:base * i]}
temp_esnu.append(pool_esnu.apply_async(enrichment_score_tensor,
args=(genes_mat, cor_mat,
gmtrim, w, n, rs,
single, scale)))
m = base * i
i += 1
pool_esnu.close()
pool_esnu.join()
    # fetch each block's async result; submission order is preserved
for si, temp in enumerate(temp_esnu):
e, enu, hit, rune = temp.get()
esnull.append(enu)
es.append(e)
RES.append(rune)
hit_ind += hit
    # concatenate results
es, esnull, RES = np.hstack(es), np.vstack(esnull), np.vstack(RES)
return gsea_significance(es, esnull), hit_ind, RES, subsets
|
def gsea_compute_tensor(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
    :param float weighted_score_type: default: 1.
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
rs = np.random.RandomState(seed)
genes_mat, cor_mat = data.index.values, data.values
base = 5 if data.shape[0] >= 5000 else 10
block = ceil(len(subsets) / base)
if permutation_type == "phenotype":
# shuffling classes and generate random correlation rankings
logging.debug("Start to permutate classes..............................")
genes_ind = []
cor_mat = []
temp_rnk = []
pool_rnk = Pool(processes=processes)
# split large array into smaller blocks to avoid memory overflow
        i = 1
        while i <= block:
            rs = np.random.RandomState(seed)
            temp_rnk.append(pool_rnk.apply_async(ranking_metric_tensor,
                            args=(data, method, base, pheno_pos, pheno_neg, classes,
                                  ascending, rs)))
            i += 1
pool_rnk.close()
pool_rnk.join()
for k, temp in enumerate(temp_rnk):
gi, cor = temp.get()
if k+1 == block:
genes_ind.append(gi)
cor_mat.append(cor)
else:
genes_ind.append(gi[:-1])
cor_mat.append(cor[:-1])
genes_ind, cor_mat = np.vstack(genes_ind), np.vstack(cor_mat)
# convert to tuple
genes_mat = (data.index.values, genes_ind)
logging.debug("Start to compute es and esnulls........................")
# Prerank, ssGSEA, GSEA
es = []
RES = []
hit_ind = []
esnull = []
temp_esnu = []
pool_esnu = Pool(processes=processes)
# split large array into smaller blocks to avoid memory overflow
i, m = 1, 0
while i <= block:
# you have to reseed, or all your processes are sharing the same seed value
rs = np.random.RandomState(seed)
gmtrim = {k: gmt.get(k) for k in subsets[m:base * i]}
temp_esnu.append(pool_esnu.apply_async(enrichment_score_tensor,
args=(genes_mat, cor_mat,
gmtrim, w, n, rs,
single, scale)))
m = base * i
i += 1
pool_esnu.close()
pool_esnu.join()
    # fetch each block's async result; submission order is preserved
for si, temp in enumerate(temp_esnu):
e, enu, hit, rune = temp.get()
esnull.append(enu)
es.append(e)
RES.append(rune)
hit_ind += hit
    # concatenate results
es, esnull, RES = np.hstack(es), np.vstack(esnull), np.vstack(RES)
return gsea_significance(es, esnull), hit_ind, RES, subsets
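# The block-splitting arithmetic above is easy to misread; the same
# partitioning as a stand-alone sketch (set names and counts invented):
from math import ceil
toy_subsets = ['SET_%02d' % i for i in range(23)]   # 23 hypothetical gene sets
toy_base = 10                                       # < 5000 genes -> 10 sets per block
toy_block = ceil(len(toy_subsets) / toy_base)       # 3 blocks
i, m = 1, 0
while i <= toy_block:
    chunk = toy_subsets[m:toy_base * i]             # 10, 10, then 3 sets
    m = toy_base * i
    i += 1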
|
[
"compute",
"enrichment",
"scores",
"and",
"enrichment",
"nulls",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L323-L418
|
[
"def",
"gsea_compute_tensor",
"(",
"data",
",",
"gmt",
",",
"n",
",",
"weighted_score_type",
",",
"permutation_type",
",",
"method",
",",
"pheno_pos",
",",
"pheno_neg",
",",
"classes",
",",
"ascending",
",",
"processes",
"=",
"1",
",",
"seed",
"=",
"None",
",",
"single",
"=",
"False",
",",
"scale",
"=",
"False",
")",
":",
"w",
"=",
"weighted_score_type",
"subsets",
"=",
"sorted",
"(",
"gmt",
".",
"keys",
"(",
")",
")",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")",
"genes_mat",
",",
"cor_mat",
"=",
"data",
".",
"index",
".",
"values",
",",
"data",
".",
"values",
"base",
"=",
"5",
"if",
"data",
".",
"shape",
"[",
"0",
"]",
">=",
"5000",
"else",
"10",
"block",
"=",
"ceil",
"(",
"len",
"(",
"subsets",
")",
"/",
"base",
")",
"if",
"permutation_type",
"==",
"\"phenotype\"",
":",
"# shuffling classes and generate random correlation rankings",
"logging",
".",
"debug",
"(",
"\"Start to permutate classes..............................\"",
")",
"genes_ind",
"=",
"[",
"]",
"cor_mat",
"=",
"[",
"]",
"temp_rnk",
"=",
"[",
"]",
"pool_rnk",
"=",
"Pool",
"(",
"processes",
"=",
"processes",
")",
"# split large array into smaller blocks to avoid memory overflow",
"i",
"=",
"1",
"while",
"i",
"<=",
"block",
":",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")",
"temp_rnk",
".",
"append",
"(",
"pool_rnk",
".",
"apply_async",
"(",
"ranking_metric_tensor",
",",
"args",
"=",
"(",
"data",
",",
"method",
",",
"base",
",",
"pheno_pos",
",",
"pheno_neg",
",",
"classes",
",",
"ascending",
",",
"rs",
")",
")",
")",
"i",
"+=",
"1",
"pool_rnk",
".",
"close",
"(",
")",
"pool_rnk",
".",
"join",
"(",
")",
"for",
"k",
",",
"temp",
"in",
"enumerate",
"(",
"temp_rnk",
")",
":",
"gi",
",",
"cor",
"=",
"temp",
".",
"get",
"(",
")",
"if",
"k",
"+",
"1",
"==",
"block",
":",
"genes_ind",
".",
"append",
"(",
"gi",
")",
"cor_mat",
".",
"append",
"(",
"cor",
")",
"else",
":",
"genes_ind",
".",
"append",
"(",
"gi",
"[",
":",
"-",
"1",
"]",
")",
"cor_mat",
".",
"append",
"(",
"cor",
"[",
":",
"-",
"1",
"]",
")",
"genes_ind",
",",
"cor_mat",
"=",
"np",
".",
"vstack",
"(",
"genes_ind",
")",
",",
"np",
".",
"vstack",
"(",
"cor_mat",
")",
"# convert to tuple",
"genes_mat",
"=",
"(",
"data",
".",
"index",
".",
"values",
",",
"genes_ind",
")",
"logging",
".",
"debug",
"(",
"\"Start to compute es and esnulls........................\"",
")",
"# Prerank, ssGSEA, GSEA",
"es",
"=",
"[",
"]",
"RES",
"=",
"[",
"]",
"hit_ind",
"=",
"[",
"]",
"esnull",
"=",
"[",
"]",
"temp_esnu",
"=",
"[",
"]",
"pool_esnu",
"=",
"Pool",
"(",
"processes",
"=",
"processes",
")",
"# split large array into smaller blocks to avoid memory overflow",
"i",
",",
"m",
"=",
"1",
",",
"0",
"while",
"i",
"<=",
"block",
":",
"# you have to reseed, or all your processes are sharing the same seed value",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")",
"gmtrim",
"=",
"{",
"k",
":",
"gmt",
".",
"get",
"(",
"k",
")",
"for",
"k",
"in",
"subsets",
"[",
"m",
":",
"base",
"*",
"i",
"]",
"}",
"temp_esnu",
".",
"append",
"(",
"pool_esnu",
".",
"apply_async",
"(",
"enrichment_score_tensor",
",",
"args",
"=",
"(",
"genes_mat",
",",
"cor_mat",
",",
"gmtrim",
",",
"w",
",",
"n",
",",
"rs",
",",
"single",
",",
"scale",
")",
")",
")",
"m",
"=",
"base",
"*",
"i",
"i",
"+=",
"1",
"pool_esnu",
".",
"close",
"(",
")",
"pool_esnu",
".",
"join",
"(",
")",
"# esn is a list, don't need to use append method.",
"for",
"si",
",",
"temp",
"in",
"enumerate",
"(",
"temp_esnu",
")",
":",
"e",
",",
"enu",
",",
"hit",
",",
"rune",
"=",
"temp",
".",
"get",
"(",
")",
"esnull",
".",
"append",
"(",
"enu",
")",
"es",
".",
"append",
"(",
"e",
")",
"RES",
".",
"append",
"(",
"rune",
")",
"hit_ind",
"+=",
"hit",
"# concate results",
"es",
",",
"esnull",
",",
"RES",
"=",
"np",
".",
"hstack",
"(",
"es",
")",
",",
"np",
".",
"vstack",
"(",
"esnull",
")",
",",
"np",
".",
"vstack",
"(",
"RES",
")",
"return",
"gsea_significance",
"(",
"es",
",",
"esnull",
")",
",",
"hit_ind",
",",
"RES",
",",
"subsets"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
gsea_compute
|
compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default: 1.
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
|
gseapy/algorithm.py
|
def gsea_compute(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
    :param float weighted_score_type: default: 1.
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
es = []
    RES = []
    hit_ind = []
    esnull = [[] for a in range(len(subsets))]
logging.debug("Start to compute enrichment scores......................")
if permutation_type == "phenotype":
logging.debug("Start to permutate classes..............................")
# shuffling classes and generate random correlation rankings
rs = np.random.RandomState(seed)
genes_mat, cor_mat = ranking_metric_tensor(exprs=data, method=method,
permutation_num=n,
pos=pheno_pos, neg=pheno_neg,
classes=classes,
ascending=ascending, rs=rs)
# compute es, esnulls. hits, RES
logging.debug("Start to compute enrichment nulls.......................")
es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=genes_mat,
cor_mat=cor_mat,
gene_sets=gmt,
weighted_score_type=w,
nperm=n, rs=rs,
single=False, scale=False,)
else:
# Prerank, ssGSEA, GSEA with gene_set permutation
gl, cor_vec = data.index.values, data.values
logging.debug("Start to compute es and esnulls........................")
# es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=gl,
# cor_mat=cor_mat,
# gene_sets=gmt,
# weighted_score_type=w,
# nperm=n, rs=rs
# single=single, scale=scale)
# split large array into smaller blocks to avoid memory overflow
        temp_esnu = []
pool_esnu = Pool(processes=processes)
for subset in subsets:
# you have to reseed, or all your processes are sharing the same seed value
rs = np.random.RandomState(seed)
temp_esnu.append(pool_esnu.apply_async(enrichment_score,
args=(gl, cor_vec, gmt.get(subset), w,
n, rs, single, scale)))
pool_esnu.close()
pool_esnu.join()
        # fetch each async result; submission order is preserved
for si, temp in enumerate(temp_esnu):
e, enu, hit, rune = temp.get()
esnull[si] = enu
es.append(e)
RES.append(rune)
hit_ind.append(hit)
return gsea_significance(es, esnull), hit_ind, RES, subsets
|
def gsea_compute(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
"""compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
    :param float weighted_score_type: default: 1.
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
"""
w = weighted_score_type
subsets = sorted(gmt.keys())
es = []
    RES = []
    hit_ind = []
    esnull = [[] for a in range(len(subsets))]
logging.debug("Start to compute enrichment scores......................")
if permutation_type == "phenotype":
logging.debug("Start to permutate classes..............................")
# shuffling classes and generate random correlation rankings
rs = np.random.RandomState(seed)
genes_mat, cor_mat = ranking_metric_tensor(exprs=data, method=method,
permutation_num=n,
pos=pheno_pos, neg=pheno_neg,
classes=classes,
ascending=ascending, rs=rs)
# compute es, esnulls. hits, RES
logging.debug("Start to compute enrichment nulls.......................")
es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=genes_mat,
cor_mat=cor_mat,
gene_sets=gmt,
weighted_score_type=w,
nperm=n, rs=rs,
single=False, scale=False,)
else:
# Prerank, ssGSEA, GSEA with gene_set permutation
gl, cor_vec = data.index.values, data.values
logging.debug("Start to compute es and esnulls........................")
# es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=gl,
# cor_mat=cor_mat,
# gene_sets=gmt,
# weighted_score_type=w,
# nperm=n, rs=rs
# single=single, scale=scale)
# split large array into smaller blocks to avoid memory overflow
        temp_esnu = []
pool_esnu = Pool(processes=processes)
for subset in subsets:
# you have to reseed, or all your processes are sharing the same seed value
rs = np.random.RandomState(seed)
temp_esnu.append(pool_esnu.apply_async(enrichment_score,
args=(gl, cor_vec, gmt.get(subset), w,
n, rs, single, scale)))
pool_esnu.close()
pool_esnu.join()
        # fetch each async result; submission order is preserved
for si, temp in enumerate(temp_esnu):
e, enu, hit, rune = temp.get()
esnull[si] = enu
es.append(e)
RES.append(rune)
hit_ind.append(hit)
return gsea_significance(es, esnull), hit_ind, RES, subsets
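# The gene_set branch above is a submit-then-collect worker-pool pattern; a
# self-contained sketch of just that pattern (toy_score stands in for
# enrichment_score; every name here is invented):
from multiprocessing import Pool

def toy_score(name, seed):
    # stand-in for enrichment_score(gl, cor_vec, gmt.get(subset), ...)
    return name, seed * 0.1

if __name__ == '__main__':
    async_results = []
    pool = Pool(processes=2)
    for i, name in enumerate(['SET_A', 'SET_B', 'SET_C']):
        async_results.append(pool.apply_async(toy_score, args=(name, i)))
    pool.close()
    pool.join()
    for r in async_results:
        print(r.get())      # results come back in submission order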
|
[
"compute",
"enrichment",
"scores",
"and",
"enrichment",
"nulls",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L422-L507
|
[
"def",
"gsea_compute",
"(",
"data",
",",
"gmt",
",",
"n",
",",
"weighted_score_type",
",",
"permutation_type",
",",
"method",
",",
"pheno_pos",
",",
"pheno_neg",
",",
"classes",
",",
"ascending",
",",
"processes",
"=",
"1",
",",
"seed",
"=",
"None",
",",
"single",
"=",
"False",
",",
"scale",
"=",
"False",
")",
":",
"w",
"=",
"weighted_score_type",
"subsets",
"=",
"sorted",
"(",
"gmt",
".",
"keys",
"(",
")",
")",
"es",
"=",
"[",
"]",
"RES",
"=",
"[",
"]",
"hit_ind",
"=",
"[",
"]",
"esnull",
"=",
"[",
"[",
"]",
"for",
"a",
"in",
"range",
"(",
"len",
"(",
"subsets",
")",
")",
"]",
"logging",
".",
"debug",
"(",
"\"Start to compute enrichment scores......................\"",
")",
"if",
"permutation_type",
"==",
"\"phenotype\"",
":",
"logging",
".",
"debug",
"(",
"\"Start to permutate classes..............................\"",
")",
"# shuffling classes and generate random correlation rankings",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")",
"genes_mat",
",",
"cor_mat",
"=",
"ranking_metric_tensor",
"(",
"exprs",
"=",
"data",
",",
"method",
"=",
"method",
",",
"permutation_num",
"=",
"n",
",",
"pos",
"=",
"pheno_pos",
",",
"neg",
"=",
"pheno_neg",
",",
"classes",
"=",
"classes",
",",
"ascending",
"=",
"ascending",
",",
"rs",
"=",
"rs",
")",
"# compute es, esnulls. hits, RES",
"logging",
".",
"debug",
"(",
"\"Start to compute enrichment nulls.......................\"",
")",
"es",
",",
"esnull",
",",
"hit_ind",
",",
"RES",
"=",
"enrichment_score_tensor",
"(",
"gene_mat",
"=",
"genes_mat",
",",
"cor_mat",
"=",
"cor_mat",
",",
"gene_sets",
"=",
"gmt",
",",
"weighted_score_type",
"=",
"w",
",",
"nperm",
"=",
"n",
",",
"rs",
"=",
"rs",
",",
"single",
"=",
"False",
",",
"scale",
"=",
"False",
",",
")",
"else",
":",
"# Prerank, ssGSEA, GSEA with gene_set permutation",
"gl",
",",
"cor_vec",
"=",
"data",
".",
"index",
".",
"values",
",",
"data",
".",
"values",
"logging",
".",
"debug",
"(",
"\"Start to compute es and esnulls........................\"",
")",
"# es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=gl,",
"# cor_mat=cor_mat,",
"# gene_sets=gmt,",
"# weighted_score_type=w,",
"# nperm=n, rs=rs",
"# single=single, scale=scale)",
"# split large array into smaller blocks to avoid memory overflow",
"temp_esnu",
"=",
"[",
"]",
"pool_esnu",
"=",
"Pool",
"(",
"processes",
"=",
"processes",
")",
"for",
"subset",
"in",
"subsets",
":",
"# you have to reseed, or all your processes are sharing the same seed value",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"seed",
")",
"temp_esnu",
".",
"append",
"(",
"pool_esnu",
".",
"apply_async",
"(",
"enrichment_score",
",",
"args",
"=",
"(",
"gl",
",",
"cor_vec",
",",
"gmt",
".",
"get",
"(",
"subset",
")",
",",
"w",
",",
"n",
",",
"rs",
",",
"single",
",",
"scale",
")",
")",
")",
"pool_esnu",
".",
"close",
"(",
")",
"pool_esnu",
".",
"join",
"(",
")",
"# esn is a list, don't need to use append method.",
"for",
"si",
",",
"temp",
"in",
"enumerate",
"(",
"temp_esnu",
")",
":",
"e",
",",
"enu",
",",
"hit",
",",
"rune",
"=",
"temp",
".",
"get",
"(",
")",
"esnull",
"[",
"si",
"]",
"=",
"enu",
"es",
".",
"append",
"(",
"e",
")",
"RES",
".",
"append",
"(",
"rune",
")",
"hit_ind",
".",
"append",
"(",
"hit",
")",
"return",
"gsea_significance",
"(",
"es",
",",
"esnull",
")",
",",
"hit_ind",
",",
"RES",
",",
"subsets"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
gsea_pval
|
Compute nominal p-value.
From article (PNAS):
estimate nominal p-value for S from esnull by using the positive
or negative portion of the distribution corresponding to the sign
of the observed ES(S).
|
gseapy/algorithm.py
|
def gsea_pval(es, esnull):
"""Compute nominal p-value.
From article (PNAS):
estimate nominal p-value for S from esnull by using the positive
or negative portion of the distribution corresponding to the sign
of the observed ES(S).
"""
# to speed up, using numpy function to compute pval in parallel.
condlist = [ es < 0, es >=0]
choicelist = [np.sum(esnull < es.reshape(len(es),1), axis=1)/ np.sum(esnull < 0, axis=1),
np.sum(esnull >= es.reshape(len(es),1), axis=1)/ np.sum(esnull >= 0, axis=1)]
pval = np.select(condlist, choicelist)
return pval
|
def gsea_pval(es, esnull):
"""Compute nominal p-value.
From article (PNAS):
estimate nominal p-value for S from esnull by using the positive
or negative portion of the distribution corresponding to the sign
of the observed ES(S).
"""
# to speed up, using numpy function to compute pval in parallel.
condlist = [ es < 0, es >=0]
choicelist = [np.sum(esnull < es.reshape(len(es),1), axis=1)/ np.sum(esnull < 0, axis=1),
np.sum(esnull >= es.reshape(len(es),1), axis=1)/ np.sum(esnull >= 0, axis=1)]
pval = np.select(condlist, choicelist)
return pval
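# A worked toy example of the vectorized p-value above (numbers invented;
# assumes gsea_pval is in scope):
import numpy as np
toy_es = np.array([0.5, -0.4])
toy_null = np.array([[0.6, 0.2, -0.1, -0.3],    # nulls for toy_es[0]
                     [0.1, -0.5, -0.2, 0.3]])   # nulls for toy_es[1]
# toy_es[0] >= 0: p = #(nulls >= 0.5) / #(nulls >= 0) = 1/2
# toy_es[1] <  0: p = #(nulls < -0.4) / #(nulls < 0)  = 1/2
print(gsea_pval(toy_es, toy_null))              # -> [0.5 0.5]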
|
[
"Compute",
"nominal",
"p",
"-",
"value",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L509-L524
|
[
"def",
"gsea_pval",
"(",
"es",
",",
"esnull",
")",
":",
"# to speed up, using numpy function to compute pval in parallel.",
"condlist",
"=",
"[",
"es",
"<",
"0",
",",
"es",
">=",
"0",
"]",
"choicelist",
"=",
"[",
"np",
".",
"sum",
"(",
"esnull",
"<",
"es",
".",
"reshape",
"(",
"len",
"(",
"es",
")",
",",
"1",
")",
",",
"axis",
"=",
"1",
")",
"/",
"np",
".",
"sum",
"(",
"esnull",
"<",
"0",
",",
"axis",
"=",
"1",
")",
",",
"np",
".",
"sum",
"(",
"esnull",
">=",
"es",
".",
"reshape",
"(",
"len",
"(",
"es",
")",
",",
"1",
")",
",",
"axis",
"=",
"1",
")",
"/",
"np",
".",
"sum",
"(",
"esnull",
">=",
"0",
",",
"axis",
"=",
"1",
")",
"]",
"pval",
"=",
"np",
".",
"select",
"(",
"condlist",
",",
"choicelist",
")",
"return",
"pval"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
normalize
|
normalize the ES(S,pi) and the observed ES(S), separately rescaling
the positive and negative scores by dividing the mean of the ES(S,pi).
return: NES, NESnull
|
gseapy/algorithm.py
|
def normalize(es, esnull):
"""normalize the ES(S,pi) and the observed ES(S), separately rescaling
the positive and negative scores by dividing the mean of the ES(S,pi).
return: NES, NESnull
"""
    nEnrichmentScores = np.zeros(es.shape)
    nEnrichmentNulls = np.zeros(esnull.shape)
esnull_pos = (esnull * (esnull >= 0)).mean(axis=1)
esnull_neg = (esnull * (esnull < 0)).mean(axis=1)
# calculate nESnulls
for i in range(esnull.shape[0]):
# NES
if es[i] >= 0:
nEnrichmentScores[i] = es[i] / esnull_pos[i]
else:
nEnrichmentScores[i] = - es[i] / esnull_neg[i]
# NESnull
for j in range(esnull.shape[1]):
if esnull[i,j] >= 0:
nEnrichmentNulls[i,j] = esnull[i,j] / esnull_pos[i]
else:
nEnrichmentNulls[i,j] = - esnull[i,j] / esnull_neg[i]
return nEnrichmentScores, nEnrichmentNulls
|
def normalize(es, esnull):
"""normalize the ES(S,pi) and the observed ES(S), separately rescaling
the positive and negative scores by dividing the mean of the ES(S,pi).
return: NES, NESnull
"""
    nEnrichmentScores = np.zeros(es.shape)
    nEnrichmentNulls = np.zeros(esnull.shape)
esnull_pos = (esnull * (esnull >= 0)).mean(axis=1)
esnull_neg = (esnull * (esnull < 0)).mean(axis=1)
# calculate nESnulls
for i in range(esnull.shape[0]):
# NES
if es[i] >= 0:
nEnrichmentScores[i] = es[i] / esnull_pos[i]
else:
nEnrichmentScores[i] = - es[i] / esnull_neg[i]
# NESnull
for j in range(esnull.shape[1]):
if esnull[i,j] >= 0:
nEnrichmentNulls[i,j] = esnull[i,j] / esnull_pos[i]
else:
nEnrichmentNulls[i,j] = - esnull[i,j] / esnull_neg[i]
return nEnrichmentScores, nEnrichmentNulls
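# A toy run of the loop above (values invented). Note the subtlety: the
# sign-matched nulls are averaged over *all* columns, zeros included, not
# over the matching entries only:
import numpy as np
toy_es = np.array([0.4])
toy_null = np.array([[0.2, 0.6, -0.1, -0.3]])
# esnull_pos = (0.2 + 0.6 + 0 + 0) / 4 = 0.2
nes, nesnull = normalize(toy_es, toy_null)
print(nes)    # 0.4 / 0.2 -> [2.]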
|
[
"normalize",
"the",
"ES",
"(",
"S",
"pi",
")",
"and",
"the",
"observed",
"ES",
"(",
"S",
")",
"separately",
"rescaling",
"the",
"positive",
"and",
"negative",
"scores",
"by",
"dividing",
"the",
"mean",
"of",
"the",
"ES",
"(",
"S",
"pi",
")",
".",
"return",
":",
"NES",
"NESnull"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L527-L554
|
[
"def",
"normalize",
"(",
"es",
",",
"esnull",
")",
":",
"nEnrichmentScores",
"=",
"np",
".",
"zeros",
"(",
"es",
".",
"shape",
")",
"nEnrichmentNulls",
"=",
"np",
".",
"zeros",
"(",
"esnull",
".",
"shape",
")",
"esnull_pos",
"=",
"(",
"esnull",
"*",
"(",
"esnull",
">=",
"0",
")",
")",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"esnull_neg",
"=",
"(",
"esnull",
"*",
"(",
"esnull",
"<",
"0",
")",
")",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"# calculate nESnulls",
"for",
"i",
"in",
"range",
"(",
"esnull",
".",
"shape",
"[",
"0",
"]",
")",
":",
"# NES",
"if",
"es",
"[",
"i",
"]",
">=",
"0",
":",
"nEnrichmentScores",
"[",
"i",
"]",
"=",
"es",
"[",
"i",
"]",
"/",
"esnull_pos",
"[",
"i",
"]",
"else",
":",
"nEnrichmentScores",
"[",
"i",
"]",
"=",
"-",
"es",
"[",
"i",
"]",
"/",
"esnull_neg",
"[",
"i",
"]",
"# NESnull",
"for",
"j",
"in",
"range",
"(",
"esnull",
".",
"shape",
"[",
"1",
"]",
")",
":",
"if",
"esnull",
"[",
"i",
",",
"j",
"]",
">=",
"0",
":",
"nEnrichmentNulls",
"[",
"i",
",",
"j",
"]",
"=",
"esnull",
"[",
"i",
",",
"j",
"]",
"/",
"esnull_pos",
"[",
"i",
"]",
"else",
":",
"nEnrichmentNulls",
"[",
"i",
",",
"j",
"]",
"=",
"-",
"esnull",
"[",
"i",
",",
"j",
"]",
"/",
"esnull_neg",
"[",
"i",
"]",
"return",
"nEnrichmentScores",
",",
"nEnrichmentNulls"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
gsea_significance
|
Compute nominal pvals, normalized ES, and FDR q value.
For a given NES(S) = NES* >= 0. The FDR is the ratio of the percentage of all (S,pi) with
NES(S,pi) >= 0, whose NES(S,pi) >= NES*, divided by the percentage of
observed S wih NES(S) >= 0, whose NES(S) >= NES*, and similarly if NES(S) = NES* <= 0.
|
gseapy/algorithm.py
|
def gsea_significance(enrichment_scores, enrichment_nulls):
"""Compute nominal pvals, normalized ES, and FDR q value.
For a given NES(S) = NES* >= 0. The FDR is the ratio of the percentage of all (S,pi) with
NES(S,pi) >= 0, whose NES(S,pi) >= NES*, divided by the percentage of
observed S wih NES(S) >= 0, whose NES(S) >= NES*, and similarly if NES(S) = NES* <= 0.
"""
    # a zero-by-zero division is undetermined and yields NaN; silence those warnings
np.seterr(divide='ignore', invalid='ignore')
# import warnings
# warnings.simplefilter("ignore")
es = np.array(enrichment_scores)
esnull = np.array(enrichment_nulls)
logging.debug("Start to compute pvals..................................")
# compute pvals.
enrichmentPVals = gsea_pval(es, esnull).tolist()
logging.debug("Compute nes and nesnull.................................")
# nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)
# new normalized enrichment score implementation.
# this could speed up significantly.
esnull_pos = (esnull*(esnull>=0)).mean(axis=1)
esnull_neg = (esnull*(esnull<0)).mean(axis=1)
nEnrichmentScores = np.where(es>=0, es/esnull_pos, -es/esnull_neg)
nEnrichmentNulls = np.where(esnull>=0, esnull/esnull_pos[:,np.newaxis],
-esnull/esnull_neg[:,np.newaxis])
logging.debug("start to compute fdrs..................................")
# FDR null distribution histogram
# create a histogram of all NES(S,pi) over all S and pi
# Use this null distribution to compute an FDR q value,
# vals = reduce(lambda x,y: x+y, nEnrichmentNulls, [])
# nvals = np.array(sorted(vals))
# or
nvals = np.sort(nEnrichmentNulls.flatten())
nnes = np.sort(nEnrichmentScores)
fdrs = []
# FDR computation
for i in range(len(enrichment_scores)):
nes = nEnrichmentScores[i]
# use the same pval method to calculate fdr
if nes >= 0:
allPos = int(len(nvals) - np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(len(nvals) - np.searchsorted(nvals, nes, side="left"))
nesPos = len(nnes) - int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = len(nnes) - int(np.searchsorted(nnes, nes, side="left"))
# allPos = (nvals >= 0).sum()
# allHigherAndPos = (nvals >= nes).sum()
# nesPos = (nnes >=0).sum()
# nesHigherAndPos = (nnes >= nes).sum()
else:
allPos = int(np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(np.searchsorted(nvals, nes, side="right"))
nesPos = int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = int(np.searchsorted(nnes, nes, side="right"))
# allPos = (nvals < 0).sum()
# allHigherAndPos = (nvals < nes).sum()
# nesPos = (nnes < 0).sum()
# nesHigherAndPos = (nnes < nes).sum()
try:
pi_norm = allHigherAndPos/float(allPos)
pi_obs = nesHigherAndPos/float(nesPos)
fdr = pi_norm / pi_obs
fdrs.append(fdr if fdr < 1 else 1.0)
        except ZeroDivisionError:
            fdrs.append(1000000000.0)
logging.debug("Statistical testing finished.............................")
return zip(enrichment_scores, nEnrichmentScores, enrichmentPVals, fdrs)
|
def gsea_significance(enrichment_scores, enrichment_nulls):
"""Compute nominal pvals, normalized ES, and FDR q value.
For a given NES(S) = NES* >= 0. The FDR is the ratio of the percentage of all (S,pi) with
NES(S,pi) >= 0, whose NES(S,pi) >= NES*, divided by the percentage of
observed S wih NES(S) >= 0, whose NES(S) >= NES*, and similarly if NES(S) = NES* <= 0.
"""
    # a zero-by-zero division is undetermined and yields NaN; silence those warnings
np.seterr(divide='ignore', invalid='ignore')
# import warnings
# warnings.simplefilter("ignore")
es = np.array(enrichment_scores)
esnull = np.array(enrichment_nulls)
logging.debug("Start to compute pvals..................................")
# compute pvals.
enrichmentPVals = gsea_pval(es, esnull).tolist()
logging.debug("Compute nes and nesnull.................................")
# nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)
# new normalized enrichment score implementation.
# this could speed up significantly.
esnull_pos = (esnull*(esnull>=0)).mean(axis=1)
esnull_neg = (esnull*(esnull<0)).mean(axis=1)
nEnrichmentScores = np.where(es>=0, es/esnull_pos, -es/esnull_neg)
nEnrichmentNulls = np.where(esnull>=0, esnull/esnull_pos[:,np.newaxis],
-esnull/esnull_neg[:,np.newaxis])
logging.debug("start to compute fdrs..................................")
# FDR null distribution histogram
# create a histogram of all NES(S,pi) over all S and pi
# Use this null distribution to compute an FDR q value,
# vals = reduce(lambda x,y: x+y, nEnrichmentNulls, [])
# nvals = np.array(sorted(vals))
# or
nvals = np.sort(nEnrichmentNulls.flatten())
nnes = np.sort(nEnrichmentScores)
fdrs = []
# FDR computation
for i in range(len(enrichment_scores)):
nes = nEnrichmentScores[i]
# use the same pval method to calculate fdr
if nes >= 0:
allPos = int(len(nvals) - np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(len(nvals) - np.searchsorted(nvals, nes, side="left"))
nesPos = len(nnes) - int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = len(nnes) - int(np.searchsorted(nnes, nes, side="left"))
# allPos = (nvals >= 0).sum()
# allHigherAndPos = (nvals >= nes).sum()
# nesPos = (nnes >=0).sum()
# nesHigherAndPos = (nnes >= nes).sum()
else:
allPos = int(np.searchsorted(nvals, 0, side="left"))
allHigherAndPos = int(np.searchsorted(nvals, nes, side="right"))
nesPos = int(np.searchsorted(nnes, 0, side="left"))
nesHigherAndPos = int(np.searchsorted(nnes, nes, side="right"))
# allPos = (nvals < 0).sum()
# allHigherAndPos = (nvals < nes).sum()
# nesPos = (nnes < 0).sum()
# nesHigherAndPos = (nnes < nes).sum()
try:
pi_norm = allHigherAndPos/float(allPos)
pi_obs = nesHigherAndPos/float(nesPos)
fdr = pi_norm / pi_obs
fdrs.append(fdr if fdr < 1 else 1.0)
        except ZeroDivisionError:
            fdrs.append(1000000000.0)
logging.debug("Statistical testing finished.............................")
return zip(enrichment_scores, nEnrichmentScores, enrichmentPVals, fdrs)
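# The searchsorted counting above is equivalent to boolean sums over the
# sorted arrays (the commented-out lines say the same); a quick sanity
# check with invented numbers:
import numpy as np
toy_nvals = np.sort(np.array([-0.8, -0.2, 0.1, 0.5, 0.9]))
toy_nes = 0.4
higher = len(toy_nvals) - np.searchsorted(toy_nvals, toy_nes, side="left")
assert higher == (toy_nvals >= toy_nes).sum() == 2   # the nulls 0.5 and 0.9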
|
[
"Compute",
"nominal",
"pvals",
"normalized",
"ES",
"and",
"FDR",
"q",
"value",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/algorithm.py#L557-L629
|
[
"def",
"gsea_significance",
"(",
"enrichment_scores",
",",
"enrichment_nulls",
")",
":",
"# For a zero by zero division (undetermined, results in a NaN),",
"np",
".",
"seterr",
"(",
"divide",
"=",
"'ignore'",
",",
"invalid",
"=",
"'ignore'",
")",
"# import warnings",
"# warnings.simplefilter(\"ignore\")",
"es",
"=",
"np",
".",
"array",
"(",
"enrichment_scores",
")",
"esnull",
"=",
"np",
".",
"array",
"(",
"enrichment_nulls",
")",
"logging",
".",
"debug",
"(",
"\"Start to compute pvals..................................\"",
")",
"# compute pvals.",
"enrichmentPVals",
"=",
"gsea_pval",
"(",
"es",
",",
"esnull",
")",
".",
"tolist",
"(",
")",
"logging",
".",
"debug",
"(",
"\"Compute nes and nesnull.................................\"",
")",
"# nEnrichmentScores, nEnrichmentNulls = normalize(es, esnull)",
"# new normalized enrichment score implementation.",
"# this could speed up significantly.",
"esnull_pos",
"=",
"(",
"esnull",
"*",
"(",
"esnull",
">=",
"0",
")",
")",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"esnull_neg",
"=",
"(",
"esnull",
"*",
"(",
"esnull",
"<",
"0",
")",
")",
".",
"mean",
"(",
"axis",
"=",
"1",
")",
"nEnrichmentScores",
"=",
"np",
".",
"where",
"(",
"es",
">=",
"0",
",",
"es",
"/",
"esnull_pos",
",",
"-",
"es",
"/",
"esnull_neg",
")",
"nEnrichmentNulls",
"=",
"np",
".",
"where",
"(",
"esnull",
">=",
"0",
",",
"esnull",
"/",
"esnull_pos",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"-",
"esnull",
"/",
"esnull_neg",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"logging",
".",
"debug",
"(",
"\"start to compute fdrs..................................\"",
")",
"# FDR null distribution histogram",
"# create a histogram of all NES(S,pi) over all S and pi",
"# Use this null distribution to compute an FDR q value,",
"# vals = reduce(lambda x,y: x+y, nEnrichmentNulls, [])",
"# nvals = np.array(sorted(vals))",
"# or",
"nvals",
"=",
"np",
".",
"sort",
"(",
"nEnrichmentNulls",
".",
"flatten",
"(",
")",
")",
"nnes",
"=",
"np",
".",
"sort",
"(",
"nEnrichmentScores",
")",
"fdrs",
"=",
"[",
"]",
"# FDR computation",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"enrichment_scores",
")",
")",
":",
"nes",
"=",
"nEnrichmentScores",
"[",
"i",
"]",
"# use the same pval method to calculate fdr",
"if",
"nes",
">=",
"0",
":",
"allPos",
"=",
"int",
"(",
"len",
"(",
"nvals",
")",
"-",
"np",
".",
"searchsorted",
"(",
"nvals",
",",
"0",
",",
"side",
"=",
"\"left\"",
")",
")",
"allHigherAndPos",
"=",
"int",
"(",
"len",
"(",
"nvals",
")",
"-",
"np",
".",
"searchsorted",
"(",
"nvals",
",",
"nes",
",",
"side",
"=",
"\"left\"",
")",
")",
"nesPos",
"=",
"len",
"(",
"nnes",
")",
"-",
"int",
"(",
"np",
".",
"searchsorted",
"(",
"nnes",
",",
"0",
",",
"side",
"=",
"\"left\"",
")",
")",
"nesHigherAndPos",
"=",
"len",
"(",
"nnes",
")",
"-",
"int",
"(",
"np",
".",
"searchsorted",
"(",
"nnes",
",",
"nes",
",",
"side",
"=",
"\"left\"",
")",
")",
"# allPos = (nvals >= 0).sum()",
"# allHigherAndPos = (nvals >= nes).sum()",
"# nesPos = (nnes >=0).sum()",
"# nesHigherAndPos = (nnes >= nes).sum()",
"else",
":",
"allPos",
"=",
"int",
"(",
"np",
".",
"searchsorted",
"(",
"nvals",
",",
"0",
",",
"side",
"=",
"\"left\"",
")",
")",
"allHigherAndPos",
"=",
"int",
"(",
"np",
".",
"searchsorted",
"(",
"nvals",
",",
"nes",
",",
"side",
"=",
"\"right\"",
")",
")",
"nesPos",
"=",
"int",
"(",
"np",
".",
"searchsorted",
"(",
"nnes",
",",
"0",
",",
"side",
"=",
"\"left\"",
")",
")",
"nesHigherAndPos",
"=",
"int",
"(",
"np",
".",
"searchsorted",
"(",
"nnes",
",",
"nes",
",",
"side",
"=",
"\"right\"",
")",
")",
"# allPos = (nvals < 0).sum()",
"# allHigherAndPos = (nvals < nes).sum()",
"# nesPos = (nnes < 0).sum()",
"# nesHigherAndPos = (nnes < nes).sum()",
"try",
":",
"pi_norm",
"=",
"allHigherAndPos",
"/",
"float",
"(",
"allPos",
")",
"pi_obs",
"=",
"nesHigherAndPos",
"/",
"float",
"(",
"nesPos",
")",
"fdr",
"=",
"pi_norm",
"/",
"pi_obs",
"fdrs",
".",
"append",
"(",
"fdr",
"if",
"fdr",
"<",
"1",
"else",
"1.0",
")",
"except",
":",
"fdrs",
".",
"append",
"(",
"1000000000.0",
")",
"logging",
".",
"debug",
"(",
"\"Statistical testing finished.............................\"",
")",
"return",
"zip",
"(",
"enrichment_scores",
",",
"nEnrichmentScores",
",",
"enrichmentPVals",
",",
"fdrs",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
log_init
|
logging start
|
gseapy/utils.py
|
def log_init(outlog, log_level=logging.INFO):
"""logging start"""
    # clear any old handlers on the "gseapy" logger
logging.getLogger("gseapy").handlers = []
# init a root logger
logging.basicConfig(level = logging.DEBUG,
format = 'LINE %(lineno)-4d: %(asctime)s [%(levelname)-8s] %(message)s',
filename = outlog,
filemode = 'w')
    # define a Handler which writes INFO messages or higher to sys.stderr
console = logging.StreamHandler()
console.setLevel(log_level)
# set a format which is simpler for console use
formatter = logging.Formatter('%(asctime)s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add handlers
logging.getLogger("gseapy").addHandler(console)
logger = logging.getLogger("gseapy")
# logger.setLevel(log_level)
return logger
|
def log_init(outlog, log_level=logging.INFO):
"""logging start"""
    # clear any old handlers on the "gseapy" logger
logging.getLogger("gseapy").handlers = []
# init a root logger
logging.basicConfig(level = logging.DEBUG,
format = 'LINE %(lineno)-4d: %(asctime)s [%(levelname)-8s] %(message)s',
filename = outlog,
filemode = 'w')
    # define a Handler which writes INFO messages or higher to sys.stderr
console = logging.StreamHandler()
console.setLevel(log_level)
# set a format which is simpler for console use
formatter = logging.Formatter('%(asctime)s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add handlers
logging.getLogger("gseapy").addHandler(console)
logger = logging.getLogger("gseapy")
# logger.setLevel(log_level)
return logger
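# A hedged usage sketch (the log file name is invented). DEBUG records reach
# the file through the root handler, while the console handler shows INFO
# and above:
logger = log_init("run_gsea.log")    # console level defaults to INFO
logger.info("goes to file and console")
logger.debug("file only; the console handler filters it out")
log_stop(logger)                     # companion teardown, defined below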
|
[
"logging",
"start"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/utils.py#L39-L61
|
[
"def",
"log_init",
"(",
"outlog",
",",
"log_level",
"=",
"logging",
".",
"INFO",
")",
":",
"# clear old root logger handlers",
"logging",
".",
"getLogger",
"(",
"\"gseapy\"",
")",
".",
"handlers",
"=",
"[",
"]",
"# init a root logger",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"DEBUG",
",",
"format",
"=",
"'LINE %(lineno)-4d: %(asctime)s [%(levelname)-8s] %(message)s'",
",",
"filename",
"=",
"outlog",
",",
"filemode",
"=",
"'w'",
")",
"# define a Handler which writes INFO messages or higher to the sys.stderr",
"console",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"console",
".",
"setLevel",
"(",
"log_level",
")",
"# set a format which is simpler for console use",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s %(message)s'",
")",
"# tell the handler to use this format",
"console",
".",
"setFormatter",
"(",
"formatter",
")",
"# add handlers",
"logging",
".",
"getLogger",
"(",
"\"gseapy\"",
")",
".",
"addHandler",
"(",
"console",
")",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"gseapy\"",
")",
"# logger.setLevel(log_level)",
"return",
"logger"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
log_stop
|
log stop
|
gseapy/utils.py
|
def log_stop(logger):
"""log stop"""
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
|
def log_stop(logger):
"""log stop"""
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
|
[
"log",
"stop"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/utils.py#L63-L69
|
[
"def",
"log_stop",
"(",
"logger",
")",
":",
"handlers",
"=",
"logger",
".",
"handlers",
"[",
":",
"]",
"for",
"handler",
"in",
"handlers",
":",
"handler",
".",
"close",
"(",
")",
"logger",
".",
"removeHandler",
"(",
"handler",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
retry
|
retry connection.
define max tries num
if the backoff_factor is 0.1, then sleep() will sleep for
[0.1s, 0.2s, 0.4s, ...] between retries.
It will also force a retry if the status code returned is 500, 502, 503 or 504.
|
gseapy/utils.py
|
def retry(num=5):
""""retry connection.
define max tries num
if the backoff_factor is 0.1, then sleep() will sleep for
[0.1s, 0.2s, 0.4s, ...] between retries.
It will also force a retry if the status code returned is 500, 502, 503 or 504.
"""
s = requests.Session()
retries = Retry(total=num, backoff_factor=0.1,
status_forcelist=[500, 502, 503, 504])
s.mount('http://', HTTPAdapter(max_retries=retries))
return s
|
def retry(num=5):
""""retry connection.
define max tries num
if the backoff_factor is 0.1, then sleep() will sleep for
[0.1s, 0.2s, 0.4s, ...] between retries.
It will also force a retry if the status code returned is 500, 502, 503 or 504.
"""
s = requests.Session()
retries = Retry(total=num, backoff_factor=0.1,
status_forcelist=[500, 502, 503, 504])
s.mount('http://', HTTPAdapter(max_retries=retries))
return s
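# Usage sketch (the URL is invented). Note the adapter is mounted for
# 'http://' only, so 'https://' requests through this session are not retried:
s = retry(num=3)
# retried up to 3 times, with backoff, on connection errors and on
# HTTP 500/502/503/504 responses
r = s.get('http://example.com/genesets')    # hypothetical endpoint
print(r.status_code)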
|
[
"retry",
"connection",
".",
"define",
"max",
"tries",
"num",
"if",
"the",
"backoff_factor",
"is",
"0",
".",
"1",
"then",
"sleep",
"()",
"will",
"sleep",
"for",
"[",
"0",
".",
"1s",
"0",
".",
"2s",
"0",
".",
"4s",
"...",
"]",
"between",
"retries",
".",
"It",
"will",
"also",
"force",
"a",
"retry",
"if",
"the",
"status",
"code",
"returned",
"is",
"500",
"502",
"503",
"or",
"504",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/utils.py#L72-L86
|
[
"def",
"retry",
"(",
"num",
"=",
"5",
")",
":",
"s",
"=",
"requests",
".",
"Session",
"(",
")",
"retries",
"=",
"Retry",
"(",
"total",
"=",
"num",
",",
"backoff_factor",
"=",
"0.1",
",",
"status_forcelist",
"=",
"[",
"500",
",",
"502",
",",
"503",
",",
"504",
"]",
")",
"s",
".",
"mount",
"(",
"'http://'",
",",
"HTTPAdapter",
"(",
"max_retries",
"=",
"retries",
")",
")",
"return",
"s"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
gsea_cls_parser
|
Extract class(phenotype) name from .cls file.
:param cls: a class list instance or a .cls file identical to GSEA input.
:return: the two phenotype names and the class vector.
|
gseapy/parser.py
|
def gsea_cls_parser(cls):
"""Extract class(phenotype) name from .cls file.
    :param cls: a class list instance or a .cls file identical to GSEA input.
    :return: the two phenotype names and the class vector.
"""
    if isinstance(cls, list):
        classes = cls
        sample_name = unique(classes)
    elif isinstance(cls, str):
with open(cls) as c:
file = c.readlines()
classes = file[2].strip('\n').split(" ")
sample_name = file[1].lstrip("# ").strip('\n').split(" ")
else:
raise Exception('Error parsing sample name!')
return sample_name[0], sample_name[1], classes
|
def gsea_cls_parser(cls):
"""Extract class(phenotype) name from .cls file.
    :param cls: a class list instance or a .cls file identical to GSEA input.
    :return: the two phenotype names and the class vector.
"""
    if isinstance(cls, list):
        classes = cls
        sample_name = unique(classes)
    elif isinstance(cls, str):
with open(cls) as c:
file = c.readlines()
classes = file[2].strip('\n').split(" ")
sample_name = file[1].lstrip("# ").strip('\n').split(" ")
else:
raise Exception('Error parsing sample name!')
return sample_name[0], sample_name[1], classes
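# For reference, a minimal .cls layout the parser above expects, plus the
# parse result (file name and labels invented):
#
#   six_samples.cls:
#     6 2 1
#     # tumor normal
#     tumor tumor tumor normal normal normal
pos, neg, classes = gsea_cls_parser("six_samples.cls")
# pos == 'tumor', neg == 'normal'
# classes == ['tumor', 'tumor', 'tumor', 'normal', 'normal', 'normal']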
|
[
"Extract",
"class",
"(",
"phenotype",
")",
"name",
"from",
".",
"cls",
"file",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L13-L31
|
[
"def",
"gsea_cls_parser",
"(",
"cls",
")",
":",
"if",
"isinstance",
"(",
"cls",
",",
"list",
")",
":",
"classes",
"=",
"cls",
"sample_name",
"=",
"unique",
"(",
"classes",
")",
"elif",
"isinstance",
"(",
"cls",
",",
"str",
")",
":",
"with",
"open",
"(",
"cls",
")",
"as",
"c",
":",
"file",
"=",
"c",
".",
"readlines",
"(",
")",
"classes",
"=",
"file",
"[",
"2",
"]",
".",
"strip",
"(",
"'\\n'",
")",
".",
"split",
"(",
"\" \"",
")",
"sample_name",
"=",
"file",
"[",
"1",
"]",
".",
"lstrip",
"(",
"\"# \"",
")",
".",
"strip",
"(",
"'\\n'",
")",
".",
"split",
"(",
"\" \"",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Error parsing sample name!'",
")",
"return",
"sample_name",
"[",
"0",
"]",
",",
"sample_name",
"[",
"1",
"]",
",",
"classes"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
gsea_edb_parser
|
Parse results.edb file stored under **edb** file folder.
:param results_path: the .results file located inside edb folder.
:param index: gene_set index of gmt database, used for iterating items.
:return: enrichment_term, hit_index, nes, pval, fdr.
|
gseapy/parser.py
|
def gsea_edb_parser(results_path, index=0):
"""Parse results.edb file stored under **edb** file folder.
:param results_path: the .results file located inside edb folder.
:param index: gene_set index of gmt database, used for iterating items.
    :return: enrichment_term, hit_index, nes, pval, fdr.
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(open(results_path), features='xml')
tag = soup.findAll('DTG')
term = dict(tag[index].attrs)
# dict_keys(['RANKED_LIST', 'GENESET', 'FWER', 'ES_PROFILE',
# 'HIT_INDICES', 'ES', 'NES', 'TEMPLATE', 'RND_ES', 'RANK_SCORE_AT_ES',
# 'NP', 'RANK_AT_ES', 'FDR'])
enrich_term = term.get('GENESET').split("#")[1]
es_profile = term.get('ES_PROFILE').split(" ")
# rank_es = term.get('RND_ES').split(" ")
hit_ind =term.get('HIT_INDICES').split(" ")
es_profile = [float(i) for i in es_profile ]
hit_ind = [float(i) for i in hit_ind ]
#r ank_es = [float(i) for i in rank_es ]
nes = term.get('NES')
pval = term.get('NP')
fdr = term.get('FDR')
# fwer = term.get('FWER')
# index_range = len(tag)-1
logging.debug("Enriched Gene set is: "+ enrich_term)
return enrich_term, hit_ind, nes, pval, fdr
|
def gsea_edb_parser(results_path, index=0):
"""Parse results.edb file stored under **edb** file folder.
:param results_path: the .results file located inside edb folder.
:param index: gene_set index of gmt database, used for iterating items.
    :return: enrichment_term, hit_index, nes, pval, fdr.
"""
from bs4 import BeautifulSoup
soup = BeautifulSoup(open(results_path), features='xml')
tag = soup.findAll('DTG')
term = dict(tag[index].attrs)
# dict_keys(['RANKED_LIST', 'GENESET', 'FWER', 'ES_PROFILE',
# 'HIT_INDICES', 'ES', 'NES', 'TEMPLATE', 'RND_ES', 'RANK_SCORE_AT_ES',
# 'NP', 'RANK_AT_ES', 'FDR'])
enrich_term = term.get('GENESET').split("#")[1]
es_profile = term.get('ES_PROFILE').split(" ")
# rank_es = term.get('RND_ES').split(" ")
hit_ind =term.get('HIT_INDICES').split(" ")
es_profile = [float(i) for i in es_profile ]
hit_ind = [float(i) for i in hit_ind ]
#r ank_es = [float(i) for i in rank_es ]
nes = term.get('NES')
pval = term.get('NP')
fdr = term.get('FDR')
# fwer = term.get('FWER')
# index_range = len(tag)-1
logging.debug("Enriched Gene set is: "+ enrich_term)
return enrich_term, hit_ind, nes, pval, fdr
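# Usage sketch (the path is invented); bump `index` to walk through the DTG
# records of a GSEA Desktop run:
term, hits, nes, pval, fdr = gsea_edb_parser("GSEA_out/edb/results.edb", index=0)
# note: nes, pval and fdr come back as strings (raw XML attribute values)
print(term, nes, pval, fdr)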
|
[
"Parse",
"results",
".",
"edb",
"file",
"stored",
"under",
"**",
"edb",
"**",
"file",
"folder",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L33-L62
|
[
"def",
"gsea_edb_parser",
"(",
"results_path",
",",
"index",
"=",
"0",
")",
":",
"from",
"bs4",
"import",
"BeautifulSoup",
"soup",
"=",
"BeautifulSoup",
"(",
"open",
"(",
"results_path",
")",
",",
"features",
"=",
"'xml'",
")",
"tag",
"=",
"soup",
".",
"findAll",
"(",
"'DTG'",
")",
"term",
"=",
"dict",
"(",
"tag",
"[",
"index",
"]",
".",
"attrs",
")",
"# dict_keys(['RANKED_LIST', 'GENESET', 'FWER', 'ES_PROFILE',",
"# 'HIT_INDICES', 'ES', 'NES', 'TEMPLATE', 'RND_ES', 'RANK_SCORE_AT_ES',",
"# 'NP', 'RANK_AT_ES', 'FDR'])",
"enrich_term",
"=",
"term",
".",
"get",
"(",
"'GENESET'",
")",
".",
"split",
"(",
"\"#\"",
")",
"[",
"1",
"]",
"es_profile",
"=",
"term",
".",
"get",
"(",
"'ES_PROFILE'",
")",
".",
"split",
"(",
"\" \"",
")",
"# rank_es = term.get('RND_ES').split(\" \")",
"hit_ind",
"=",
"term",
".",
"get",
"(",
"'HIT_INDICES'",
")",
".",
"split",
"(",
"\" \"",
")",
"es_profile",
"=",
"[",
"float",
"(",
"i",
")",
"for",
"i",
"in",
"es_profile",
"]",
"hit_ind",
"=",
"[",
"float",
"(",
"i",
")",
"for",
"i",
"in",
"hit_ind",
"]",
"#r ank_es = [float(i) for i in rank_es ]",
"nes",
"=",
"term",
".",
"get",
"(",
"'NES'",
")",
"pval",
"=",
"term",
".",
"get",
"(",
"'NP'",
")",
"fdr",
"=",
"term",
".",
"get",
"(",
"'FDR'",
")",
"# fwer = term.get('FWER')",
"# index_range = len(tag)-1",
"logging",
".",
"debug",
"(",
"\"Enriched Gene set is: \"",
"+",
"enrich_term",
")",
"return",
"enrich_term",
",",
"hit_ind",
",",
"nes",
",",
"pval",
",",
"fdr"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
gsea_gmt_parser
|
Parse gene_sets.gmt(gene set database) file or download from enrichr server.
:param gmt: the gene_sets.gmt file of GSEA input or an enrichr library name.
checkout full enrichr library name here: http://amp.pharm.mssm.edu/Enrichr/#stats
:param min_size: Minimum allowed number of genes present in both the gene set and the data set. Default: 3.
:param max_size: Maximum allowed number of genes present in both the gene set and the data set. Default: 1000.
:param gene_list: Used for filtering gene set. Only used this argument for :func:`call` method.
:return: Return a new filtered gene set database dictionary.
**DO NOT** filter gene sets when using :func:`replot`, because ``GSEA`` Desktop has already
done this for you.
|
gseapy/parser.py
|
def gsea_gmt_parser(gmt, min_size = 3, max_size = 1000, gene_list=None):
"""Parse gene_sets.gmt(gene set database) file or download from enrichr server.
:param gmt: the gene_sets.gmt file of GSEA input or an enrichr library name.
checkout full enrichr library name here: http://amp.pharm.mssm.edu/Enrichr/#stats
    :param min_size: Minimum allowed number of genes present in both the gene set and the data set. Default: 3.
    :param max_size: Maximum allowed number of genes present in both the gene set and the data set. Default: 1000.
:param gene_list: Used for filtering gene set. Only used this argument for :func:`call` method.
:return: Return a new filtered gene set database dictionary.
    **DO NOT** filter gene sets when using :func:`replot`, because ``GSEA`` Desktop has already
    done this for you.
"""
if gmt.lower().endswith(".gmt"):
logging.info("User Defined gene sets is given.......continue..........")
with open(gmt) as genesets:
genesets_dict = { line.strip().split("\t")[0]: line.strip().split("\t")[2:]
for line in genesets.readlines()}
else:
logging.info("Downloading and generating Enrichr library gene sets...")
if gmt in DEFAULT_LIBRARY:
names = DEFAULT_LIBRARY
else:
names = get_library_name()
if gmt in names:
"""
define max tries num
if the backoff_factor is 0.1, then sleep() will sleep for
[0.1s, 0.2s, 0.4s, ...] between retries.
It will also force a retry if the status code returned is 500, 502, 503 or 504.
"""
s = requests.Session()
retries = Retry(total=5, backoff_factor=0.1,
status_forcelist=[ 500, 502, 503, 504 ])
s.mount('http://', HTTPAdapter(max_retries=retries))
            # query string
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'
query_string = '?mode=text&libraryName=%s'
# get
response = s.get( ENRICHR_URL + query_string % gmt, timeout=None)
else:
raise Exception("gene_set files(.gmt) not found")
if not response.ok:
raise Exception('Error fetching enrichment results, check internet connection first.')
genesets_dict = { line.strip().split("\t")[0]:
list(map(lambda x: x.split(",")[0], line.strip().split("\t")[2:]))
for line in response.iter_lines(chunk_size=1024, decode_unicode='utf-8')}
# filtering dict
if sys.version_info[0] >= 3 :
genesets_filter = {k: v for k, v in genesets_dict.items() if len(v) >= min_size and len(v) <= max_size}
elif sys.version_info[0] == 2:
genesets_filter = {k: v for k, v in genesets_dict.iteritems() if len(v) >= min_size and len(v) <= max_size}
else:
logging.error("System failure. Please Provide correct input files")
sys.exit(1)
if gene_list is not None:
subsets = sorted(genesets_filter.keys())
for subset in subsets:
tag_indicator = in1d(gene_list, genesets_filter.get(subset), assume_unique=True)
tag_len = sum(tag_indicator)
if tag_len <= min_size or tag_len >= max_size:
del genesets_filter[subset]
else:
continue
# some_dict = {key: value for key, value in some_dict.items() if value != value_to_remove}
# use np.intersect1d() may be faster???
filsets_num = len(genesets_dict) - len(genesets_filter)
logging.info("%04d gene_sets have been filtered out when max_size=%s and min_size=%s"%(filsets_num, max_size, min_size))
if filsets_num == len(genesets_dict):
logging.error("No gene sets passed throught filtering condition!!!, try new paramters again!\n" +\
"Note: Gene names for gseapy is case sensitive." )
sys.exit(1)
else:
return genesets_filter
|
def gsea_gmt_parser(gmt, min_size = 3, max_size = 1000, gene_list=None):
"""Parse gene_sets.gmt(gene set database) file or download from enrichr server.
:param gmt: the gene_sets.gmt file of GSEA input or an enrichr library name.
check out the full Enrichr library names here: http://amp.pharm.mssm.edu/Enrichr/#stats
:param min_size: Minimum allowed number of genes that are in both the gene set and the data set. Default: 3.
:param max_size: Maximum allowed number of genes that are in both the gene set and the data set. Default: 1000.
:param gene_list: Used for filtering gene sets. This argument is only used by the :func:`call` method.
:return: Return a new, filtered gene set database dictionary.
**DO NOT** filter gene sets when using :func:`replot`, because ``GSEA`` Desktop has already
done this for you.
"""
if gmt.lower().endswith(".gmt"):
logging.info("User Defined gene sets is given.......continue..........")
with open(gmt) as genesets:
genesets_dict = { line.strip().split("\t")[0]: line.strip().split("\t")[2:]
for line in genesets.readlines()}
else:
logging.info("Downloading and generating Enrichr library gene sets...")
if gmt in DEFAULT_LIBRARY:
names = DEFAULT_LIBRARY
else:
names = get_library_name()
if gmt in names:
"""
define max tries num
if the backoff_factor is 0.1, then sleep() will sleep for
[0.1s, 0.2s, 0.4s, ...] between retries.
It will also force a retry if the status code returned is 500, 502, 503 or 504.
"""
s = requests.Session()
retries = Retry(total=5, backoff_factor=0.1,
status_forcelist=[ 500, 502, 503, 504 ])
s.mount('http://', HTTPAdapter(max_retries=retries))
            # query string
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'
query_string = '?mode=text&libraryName=%s'
# get
response = s.get( ENRICHR_URL + query_string % gmt, timeout=None)
else:
raise Exception("gene_set files(.gmt) not found")
if not response.ok:
raise Exception('Error fetching enrichment results, check internet connection first.')
genesets_dict = { line.strip().split("\t")[0]:
list(map(lambda x: x.split(",")[0], line.strip().split("\t")[2:]))
for line in response.iter_lines(chunk_size=1024, decode_unicode='utf-8')}
# filtering dict
if sys.version_info[0] >= 3 :
genesets_filter = {k: v for k, v in genesets_dict.items() if len(v) >= min_size and len(v) <= max_size}
elif sys.version_info[0] == 2:
genesets_filter = {k: v for k, v in genesets_dict.iteritems() if len(v) >= min_size and len(v) <= max_size}
else:
logging.error("System failure. Please Provide correct input files")
sys.exit(1)
if gene_list is not None:
subsets = sorted(genesets_filter.keys())
for subset in subsets:
tag_indicator = in1d(gene_list, genesets_filter.get(subset), assume_unique=True)
tag_len = sum(tag_indicator)
if tag_len <= min_size or tag_len >= max_size:
del genesets_filter[subset]
else:
continue
# some_dict = {key: value for key, value in some_dict.items() if value != value_to_remove}
# use np.intersect1d() may be faster???
filsets_num = len(genesets_dict) - len(genesets_filter)
logging.info("%04d gene_sets have been filtered out when max_size=%s and min_size=%s"%(filsets_num, max_size, min_size))
if filsets_num == len(genesets_dict):
logging.error("No gene sets passed throught filtering condition!!!, try new paramters again!\n" +\
"Note: Gene names for gseapy is case sensitive." )
sys.exit(1)
else:
return genesets_filter
|
[
"Parse",
"gene_sets",
".",
"gmt",
"(",
"gene",
"set",
"database",
")",
"file",
"or",
"download",
"from",
"enrichr",
"server",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L65-L146
|
[
"def",
"gsea_gmt_parser",
"(",
"gmt",
",",
"min_size",
"=",
"3",
",",
"max_size",
"=",
"1000",
",",
"gene_list",
"=",
"None",
")",
":",
"if",
"gmt",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".gmt\"",
")",
":",
"logging",
".",
"info",
"(",
"\"User Defined gene sets is given.......continue..........\"",
")",
"with",
"open",
"(",
"gmt",
")",
"as",
"genesets",
":",
"genesets_dict",
"=",
"{",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
":",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"2",
":",
"]",
"for",
"line",
"in",
"genesets",
".",
"readlines",
"(",
")",
"}",
"else",
":",
"logging",
".",
"info",
"(",
"\"Downloading and generating Enrichr library gene sets...\"",
")",
"if",
"gmt",
"in",
"DEFAULT_LIBRARY",
":",
"names",
"=",
"DEFAULT_LIBRARY",
"else",
":",
"names",
"=",
"get_library_name",
"(",
")",
"if",
"gmt",
"in",
"names",
":",
"\"\"\"\n define max tries num\n if the backoff_factor is 0.1, then sleep() will sleep for\n [0.1s, 0.2s, 0.4s, ...] between retries.\n It will also force a retry if the status code returned is 500, 502, 503 or 504.\n \"\"\"",
"s",
"=",
"requests",
".",
"Session",
"(",
")",
"retries",
"=",
"Retry",
"(",
"total",
"=",
"5",
",",
"backoff_factor",
"=",
"0.1",
",",
"status_forcelist",
"=",
"[",
"500",
",",
"502",
",",
"503",
",",
"504",
"]",
")",
"s",
".",
"mount",
"(",
"'http://'",
",",
"HTTPAdapter",
"(",
"max_retries",
"=",
"retries",
")",
")",
"# queery string",
"ENRICHR_URL",
"=",
"'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'",
"query_string",
"=",
"'?mode=text&libraryName=%s'",
"# get",
"response",
"=",
"s",
".",
"get",
"(",
"ENRICHR_URL",
"+",
"query_string",
"%",
"gmt",
",",
"timeout",
"=",
"None",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"gene_set files(.gmt) not found\"",
")",
"if",
"not",
"response",
".",
"ok",
":",
"raise",
"Exception",
"(",
"'Error fetching enrichment results, check internet connection first.'",
")",
"genesets_dict",
"=",
"{",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
":",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"split",
"(",
"\",\"",
")",
"[",
"0",
"]",
",",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"2",
":",
"]",
")",
")",
"for",
"line",
"in",
"response",
".",
"iter_lines",
"(",
"chunk_size",
"=",
"1024",
",",
"decode_unicode",
"=",
"'utf-8'",
")",
"}",
"# filtering dict",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"genesets_filter",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"genesets_dict",
".",
"items",
"(",
")",
"if",
"len",
"(",
"v",
")",
">=",
"min_size",
"and",
"len",
"(",
"v",
")",
"<=",
"max_size",
"}",
"elif",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
":",
"genesets_filter",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"genesets_dict",
".",
"iteritems",
"(",
")",
"if",
"len",
"(",
"v",
")",
">=",
"min_size",
"and",
"len",
"(",
"v",
")",
"<=",
"max_size",
"}",
"else",
":",
"logging",
".",
"error",
"(",
"\"System failure. Please Provide correct input files\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"gene_list",
"is",
"not",
"None",
":",
"subsets",
"=",
"sorted",
"(",
"genesets_filter",
".",
"keys",
"(",
")",
")",
"for",
"subset",
"in",
"subsets",
":",
"tag_indicator",
"=",
"in1d",
"(",
"gene_list",
",",
"genesets_filter",
".",
"get",
"(",
"subset",
")",
",",
"assume_unique",
"=",
"True",
")",
"tag_len",
"=",
"sum",
"(",
"tag_indicator",
")",
"if",
"tag_len",
"<=",
"min_size",
"or",
"tag_len",
">=",
"max_size",
":",
"del",
"genesets_filter",
"[",
"subset",
"]",
"else",
":",
"continue",
"# some_dict = {key: value for key, value in some_dict.items() if value != value_to_remove}",
"# use np.intersect1d() may be faster???",
"filsets_num",
"=",
"len",
"(",
"genesets_dict",
")",
"-",
"len",
"(",
"genesets_filter",
")",
"logging",
".",
"info",
"(",
"\"%04d gene_sets have been filtered out when max_size=%s and min_size=%s\"",
"%",
"(",
"filsets_num",
",",
"max_size",
",",
"min_size",
")",
")",
"if",
"filsets_num",
"==",
"len",
"(",
"genesets_dict",
")",
":",
"logging",
".",
"error",
"(",
"\"No gene sets passed throught filtering condition!!!, try new paramters again!\\n\"",
"+",
"\"Note: Gene names for gseapy is case sensitive.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"return",
"genesets_filter"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
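A minimal usage sketch for gsea_gmt_parser above. The local .gmt path is hypothetical, and 'KEGG_2016' is assumed to be an active Enrichr library:

from gseapy.parser import gsea_gmt_parser

# parse a local gene set database; keys are set names, values are gene lists
local_sets = gsea_gmt_parser("my_gene_sets.gmt", min_size=3, max_size=1000)

# or download an Enrichr library by name (network access required)
enrichr_sets = gsea_gmt_parser("KEGG_2016")
print(len(enrichr_sets), "gene sets kept after size filtering")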
|
test
|
get_library_name
|
Return active Enrichr library names.
:param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' }
|
gseapy/parser.py
|
def get_library_name(database='Human'):
"""return enrichr active enrichr library name.
:param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' }
"""
# make a get request to get the gmt names and meta data from Enrichr
# old code
# response = requests.get('http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=meta')
# gmt_data = response.json()
# # generate list of lib names
# libs = []
# # get library names
# for inst_gmt in gmt_data['libraries']:
# # only include active gmts
# if inst_gmt['isActive'] == True:
# libs.append(inst_gmt['libraryName'])
if database not in ['Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm']:
sys.stderr.write("""No supported database. Please input one of these:
'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' """)
return
if database in ['Human', 'Mouse']: database=''
lib_url='http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'%database
libs_json = json.loads(requests.get(lib_url).text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs)
|
def get_library_name(database='Human'):
"""return enrichr active enrichr library name.
:param str database: Select one from { 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' }
"""
# make a get request to get the gmt names and meta data from Enrichr
# old code
# response = requests.get('http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=meta')
# gmt_data = response.json()
# # generate list of lib names
# libs = []
# # get library names
# for inst_gmt in gmt_data['libraries']:
# # only include active gmts
# if inst_gmt['isActive'] == True:
# libs.append(inst_gmt['libraryName'])
if database not in ['Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm']:
sys.stderr.write("""No supported database. Please input one of these:
'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' """)
return
if database in ['Human', 'Mouse']: database=''
lib_url='http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'%database
libs_json = json.loads(requests.get(lib_url).text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs)
|
[
"return",
"enrichr",
"active",
"enrichr",
"library",
"name",
".",
":",
"param",
"str",
"database",
":",
"Select",
"one",
"from",
"{",
"Human",
"Mouse",
"Yeast",
"Fly",
"Fish",
"Worm",
"}"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L148-L177
|
[
"def",
"get_library_name",
"(",
"database",
"=",
"'Human'",
")",
":",
"# make a get request to get the gmt names and meta data from Enrichr",
"# old code",
"# response = requests.get('http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary?mode=meta')",
"# gmt_data = response.json()",
"# # generate list of lib names",
"# libs = []",
"# # get library names",
"# for inst_gmt in gmt_data['libraries']:",
"# # only include active gmts",
"# if inst_gmt['isActive'] == True:",
"# libs.append(inst_gmt['libraryName'])",
"if",
"database",
"not",
"in",
"[",
"'Human'",
",",
"'Mouse'",
",",
"'Yeast'",
",",
"'Fly'",
",",
"'Fish'",
",",
"'Worm'",
"]",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\"\"No supported database. Please input one of these:\n 'Human', 'Mouse', 'Yeast', 'Fly', 'Fish', 'Worm' \"\"\"",
")",
"return",
"if",
"database",
"in",
"[",
"'Human'",
",",
"'Mouse'",
"]",
":",
"database",
"=",
"''",
"lib_url",
"=",
"'http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'",
"%",
"database",
"libs_json",
"=",
"json",
".",
"loads",
"(",
"requests",
".",
"get",
"(",
"lib_url",
")",
".",
"text",
")",
"libs",
"=",
"[",
"lib",
"[",
"'libraryName'",
"]",
"for",
"lib",
"in",
"libs_json",
"[",
"'statistics'",
"]",
"]",
"return",
"sorted",
"(",
"libs",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
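A short usage sketch for get_library_name above, assuming the Enrichr server is reachable:

from gseapy.parser import get_library_name

libs = get_library_name()                    # 'Human' is the default database
print(libs[:5])                              # first few active library names
fly_libs = get_library_name(database='Fly')  # species-specific Enrichr instance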
|
test
|
Biomart.get_marts
|
Get available marts and their names.
|
gseapy/parser.py
|
def get_marts(self):
"""Get available marts and their names."""
mart_names = pd.Series(self.names, name="Name")
mart_descriptions = pd.Series(self.displayNames, name="Description")
return pd.concat([mart_names, mart_descriptions], axis=1)
|
def get_marts(self):
"""Get available marts and their names."""
mart_names = pd.Series(self.names, name="Name")
mart_descriptions = pd.Series(self.displayNames, name="Description")
return pd.concat([mart_names, mart_descriptions], axis=1)
|
[
"Get",
"available",
"marts",
"and",
"their",
"names",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L211-L217
|
[
"def",
"get_marts",
"(",
"self",
")",
":",
"mart_names",
"=",
"pd",
".",
"Series",
"(",
"self",
".",
"names",
",",
"name",
"=",
"\"Name\"",
")",
"mart_descriptions",
"=",
"pd",
".",
"Series",
"(",
"self",
".",
"displayNames",
",",
"name",
"=",
"\"Description\"",
")",
"return",
"pd",
".",
"concat",
"(",
"[",
"mart_names",
",",
"mart_descriptions",
"]",
",",
"axis",
"=",
"1",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
Biomart.get_datasets
|
Get available datasets from mart you've selected
|
gseapy/parser.py
|
def get_datasets(self, mart='ENSEMBL_MART_ENSEMBL'):
"""Get available datasets from mart you've selected"""
datasets = self.datasets(mart, raw=True)
return pd.read_csv(StringIO(datasets), header=None, usecols=[1, 2],
names = ["Name", "Description"],sep="\t")
|
def get_datasets(self, mart='ENSEMBL_MART_ENSEMBL'):
"""Get available datasets from mart you've selected"""
datasets = self.datasets(mart, raw=True)
return pd.read_csv(StringIO(datasets), header=None, usecols=[1, 2],
names = ["Name", "Description"],sep="\t")
|
[
"Get",
"available",
"datasets",
"from",
"mart",
"you",
"ve",
"selected"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L219-L223
|
[
"def",
"get_datasets",
"(",
"self",
",",
"mart",
"=",
"'ENSEMBL_MART_ENSEMBL'",
")",
":",
"datasets",
"=",
"self",
".",
"datasets",
"(",
"mart",
",",
"raw",
"=",
"True",
")",
"return",
"pd",
".",
"read_csv",
"(",
"StringIO",
"(",
"datasets",
")",
",",
"header",
"=",
"None",
",",
"usecols",
"=",
"[",
"1",
",",
"2",
"]",
",",
"names",
"=",
"[",
"\"Name\"",
",",
"\"Description\"",
"]",
",",
"sep",
"=",
"\"\\t\"",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
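A sketch of browsing BioMart metadata with get_marts and get_datasets above, assuming a Biomart instance can be constructed with its defaults and the Ensembl service is reachable:

from gseapy.parser import Biomart

bm = Biomart()
print(bm.get_marts())                                # two columns: Name, Description
print(bm.get_datasets(mart='ENSEMBL_MART_ENSEMBL'))  # datasets within one mart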
|
test
|
Biomart.get_attributes
|
Get available attributes from dataset you've selected
|
gseapy/parser.py
|
def get_attributes(self, dataset):
"""Get available attritbutes from dataset you've selected"""
attributes = self.attributes(dataset)
attr_ = [ (k, v[0]) for k, v in attributes.items()]
return pd.DataFrame(attr_, columns=["Attribute","Description"])
|
def get_attributes(self, dataset):
"""Get available attritbutes from dataset you've selected"""
attributes = self.attributes(dataset)
attr_ = [ (k, v[0]) for k, v in attributes.items()]
return pd.DataFrame(attr_, columns=["Attribute","Description"])
|
[
"Get",
"available",
"attritbutes",
"from",
"dataset",
"you",
"ve",
"selected"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L225-L229
|
[
"def",
"get_attributes",
"(",
"self",
",",
"dataset",
")",
":",
"attributes",
"=",
"self",
".",
"attributes",
"(",
"dataset",
")",
"attr_",
"=",
"[",
"(",
"k",
",",
"v",
"[",
"0",
"]",
")",
"for",
"k",
",",
"v",
"in",
"attributes",
".",
"items",
"(",
")",
"]",
"return",
"pd",
".",
"DataFrame",
"(",
"attr_",
",",
"columns",
"=",
"[",
"\"Attribute\"",
",",
"\"Description\"",
"]",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
Biomart.get_filters
|
Get available filters from dataset you've selected
|
gseapy/parser.py
|
def get_filters(self, dataset):
"""Get available filters from dataset you've selected"""
filters = self.filters(dataset)
filt_ = [ (k, v[0]) for k, v in filters.items()]
return pd.DataFrame(filt_, columns=["Filter", "Description"])
|
def get_filters(self, dataset):
"""Get available filters from dataset you've selected"""
filters = self.filters(dataset)
filt_ = [ (k, v[0]) for k, v in filters.items()]
return pd.DataFrame(filt_, columns=["Filter", "Description"])
|
[
"Get",
"available",
"filters",
"from",
"dataset",
"you",
"ve",
"selected"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L231-L235
|
[
"def",
"get_filters",
"(",
"self",
",",
"dataset",
")",
":",
"filters",
"=",
"self",
".",
"filters",
"(",
"dataset",
")",
"filt_",
"=",
"[",
"(",
"k",
",",
"v",
"[",
"0",
"]",
")",
"for",
"k",
",",
"v",
"in",
"filters",
".",
"items",
"(",
")",
"]",
"return",
"pd",
".",
"DataFrame",
"(",
"filt_",
",",
"columns",
"=",
"[",
"\"Filter\"",
",",
"\"Description\"",
"]",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
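A companion sketch for get_attributes and get_filters above, under the same assumptions; 'hsapiens_gene_ensembl' is the human Ensembl dataset name:

from gseapy.parser import Biomart

bm = Biomart()
print(bm.get_attributes('hsapiens_gene_ensembl').head())  # Attribute, Description
print(bm.get_filters('hsapiens_gene_ensembl').head())     # Filter, Description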
|
test
|
Biomart.query
|
mapping ids using BioMart.
:param dataset: str, default: 'hsapiens_gene_ensembl'
:param attributes: str, list, tuple
:param filters: dict, {'filter name': list(filter value)}
:param filename: Name of the file the results are saved to. If None, results are saved to the default cache path. Default: None.
:return: a dataframe containing all the attributes you selected.
**Note**: it will take a couple of minutes to get the results.
An XML template for querying BioMart. (see https://gist.github.com/keithshep/7776579)
exampleTaxonomy = "mmusculus_gene_ensembl"
exampleGene = "ENSMUSG00000086981,ENSMUSG00000086982,ENSMUSG00000086983"
urlTemplate = \
'''http://ensembl.org/biomart/martservice?query=''' \
'''<?xml version="1.0" encoding="UTF-8"?>''' \
'''<!DOCTYPE Query>''' \
'''<Query virtualSchemaName="default" formatter="CSV" header="0" uniqueRows="0" count="" datasetConfigVersion="0.6">''' \
'''<Dataset name="%s" interface="default"><Filter name="ensembl_gene_id" value="%s"/>''' \
'''<Attribute name="ensembl_gene_id"/><Attribute name="ensembl_transcript_id"/>''' \
'''<Attribute name="transcript_start"/><Attribute name="transcript_end"/>''' \
'''<Attribute name="exon_chrom_start"/><Attribute name="exon_chrom_end"/>''' \
'''</Dataset>''' \
'''</Query>'''
exampleURL = urlTemplate % (exampleTaxonomy, exampleGene)
req = requests.get(exampleURL, stream=True)
|
gseapy/parser.py
|
def query(self, dataset='hsapiens_gene_ensembl', attributes=[],
filters={}, filename=None):
"""mapping ids using BioMart.
:param dataset: str, default: 'hsapiens_gene_ensembl'
:param attributes: str, list, tuple
:param filters: dict, {'filter name': list(filter value)}
:param filename: Name of the file the results are saved to. If None, results are saved to the default cache path. Default: None.
:return: a dataframe containing all the attributes you selected.
**Note**: it will take a couple of minutes to get the results.
An XML template for querying BioMart. (see https://gist.github.com/keithshep/7776579)
exampleTaxonomy = "mmusculus_gene_ensembl"
exampleGene = "ENSMUSG00000086981,ENSMUSG00000086982,ENSMUSG00000086983"
urlTemplate = \
'''http://ensembl.org/biomart/martservice?query=''' \
'''<?xml version="1.0" encoding="UTF-8"?>''' \
'''<!DOCTYPE Query>''' \
'''<Query virtualSchemaName="default" formatter="CSV" header="0" uniqueRows="0" count="" datasetConfigVersion="0.6">''' \
'''<Dataset name="%s" interface="default"><Filter name="ensembl_gene_id" value="%s"/>''' \
'''<Attribute name="ensembl_gene_id"/><Attribute name="ensembl_transcript_id"/>''' \
'''<Attribute name="transcript_start"/><Attribute name="transcript_end"/>''' \
'''<Attribute name="exon_chrom_start"/><Attribute name="exon_chrom_end"/>''' \
'''</Dataset>''' \
'''</Query>'''
exampleURL = urlTemplate % (exampleTaxonomy, exampleGene)
req = requests.get(exampleURL, stream=True)
"""
if not attributes:
attributes = ['ensembl_gene_id', 'external_gene_name', 'entrezgene', 'go_id']
# i=0
# while (self.host is None) and (i < 3):
# self.host = self.ghosts[i]
# i +=1
self.new_query()
# 'mmusculus_gene_ensembl'
self.add_dataset_to_xml(dataset)
for at in attributes:
self.add_attribute_to_xml(at)
# add filters
if filters:
for k, v in filters.items():
if isinstance(v, list): v = ",".join(v)
self.add_filter_to_xml(k, v)
xml_query = self.get_xml()
results = super(Biomart, self).query(xml_query)
df = pd.read_csv(StringIO(results), header=None, sep="\t",
names=attributes, index_col=None)
# save file to cache path.
if filename is None:
mkdirs(DEFAULT_CACHE_PATH)
filename = os.path.join(DEFAULT_CACHE_PATH, "{}.background.genes.txt".format(dataset))
df.to_csv(filename, sep="\t", index=False)
return df
|
def query(self, dataset='hsapiens_gene_ensembl', attributes=[],
filters={}, filename=None):
"""mapping ids using BioMart.
:param dataset: str, default: 'hsapiens_gene_ensembl'
:param attributes: str, list, tuple
:param filters: dict, {'filter name': list(filter value)}
:param filename: Name of the file the results are saved to. If None, results are saved to the default cache path. Default: None.
:return: a dataframe containing all the attributes you selected.
**Note**: it will take a couple of minutes to get the results.
An XML template for querying BioMart. (see https://gist.github.com/keithshep/7776579)
exampleTaxonomy = "mmusculus_gene_ensembl"
exampleGene = "ENSMUSG00000086981,ENSMUSG00000086982,ENSMUSG00000086983"
urlTemplate = \
'''http://ensembl.org/biomart/martservice?query=''' \
'''<?xml version="1.0" encoding="UTF-8"?>''' \
'''<!DOCTYPE Query>''' \
'''<Query virtualSchemaName="default" formatter="CSV" header="0" uniqueRows="0" count="" datasetConfigVersion="0.6">''' \
'''<Dataset name="%s" interface="default"><Filter name="ensembl_gene_id" value="%s"/>''' \
'''<Attribute name="ensembl_gene_id"/><Attribute name="ensembl_transcript_id"/>''' \
'''<Attribute name="transcript_start"/><Attribute name="transcript_end"/>''' \
'''<Attribute name="exon_chrom_start"/><Attribute name="exon_chrom_end"/>''' \
'''</Dataset>''' \
'''</Query>'''
exampleURL = urlTemplate % (exampleTaxonomy, exampleGene)
req = requests.get(exampleURL, stream=True)
"""
if not attributes:
attributes = ['ensembl_gene_id', 'external_gene_name', 'entrezgene', 'go_id']
# i=0
# while (self.host is None) and (i < 3):
# self.host = self.ghosts[i]
# i +=1
self.new_query()
# 'mmusculus_gene_ensembl'
self.add_dataset_to_xml(dataset)
for at in attributes:
self.add_attribute_to_xml(at)
# add filters
if filters:
for k, v in filters.items():
if isinstance(v, list): v = ",".join(v)
self.add_filter_to_xml(k, v)
xml_query = self.get_xml()
results = super(Biomart, self).query(xml_query)
df = pd.read_csv(StringIO(results), header=None, sep="\t",
names=attributes, index_col=None)
# save file to cache path.
if filename is None:
mkdirs(DEFAULT_CACHE_PATH)
filename = os.path.join(DEFAULT_CACHE_PATH, "{}.background.genes.txt".format(dataset))
df.to_csv(filename, sep="\t", index=False)
return df
|
[
"mapping",
"ids",
"using",
"BioMart",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/parser.py#L237-L295
|
[
"def",
"query",
"(",
"self",
",",
"dataset",
"=",
"'hsapiens_gene_ensembl'",
",",
"attributes",
"=",
"[",
"]",
",",
"filters",
"=",
"{",
"}",
",",
"filename",
"=",
"None",
")",
":",
"if",
"not",
"attributes",
":",
"attributes",
"=",
"[",
"'ensembl_gene_id'",
",",
"'external_gene_name'",
",",
"'entrezgene'",
",",
"'go_id'",
"]",
"# i=0",
"# while (self.host is None) and (i < 3):",
"# self.host = self.ghosts[i]",
"# i +=1 ",
"self",
".",
"new_query",
"(",
")",
"# 'mmusculus_gene_ensembl'",
"self",
".",
"add_dataset_to_xml",
"(",
"dataset",
")",
"for",
"at",
"in",
"attributes",
":",
"self",
".",
"add_attribute_to_xml",
"(",
"at",
")",
"# add filters",
"if",
"filters",
":",
"for",
"k",
",",
"v",
"in",
"filters",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"v",
"=",
"\",\"",
".",
"join",
"(",
"v",
")",
"self",
".",
"add_filter_to_xml",
"(",
"k",
",",
"v",
")",
"xml_query",
"=",
"self",
".",
"get_xml",
"(",
")",
"results",
"=",
"super",
"(",
"Biomart",
",",
"self",
")",
".",
"query",
"(",
"xml_query",
")",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"StringIO",
"(",
"results",
")",
",",
"header",
"=",
"None",
",",
"sep",
"=",
"\"\\t\"",
",",
"names",
"=",
"attributes",
",",
"index_col",
"=",
"None",
")",
"# save file to cache path.",
"if",
"filename",
"is",
"None",
":",
"mkdirs",
"(",
"DEFAULT_CACHE_PATH",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DEFAULT_CACHE_PATH",
",",
"\"{}.background.genes.txt\"",
".",
"format",
"(",
"dataset",
")",
")",
"df",
".",
"to_csv",
"(",
"filename",
",",
"sep",
"=",
"\"\\t\"",
",",
"index",
"=",
"False",
")",
"return",
"df"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
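A usage sketch for Biomart.query above; the two Ensembl gene IDs are illustrative values and a network round trip is required:

from gseapy.parser import Biomart

bm = Biomart()
df = bm.query(dataset='hsapiens_gene_ensembl',
              attributes=['ensembl_gene_id', 'external_gene_name', 'entrezgene'],
              filters={'ensembl_gene_id': ['ENSG00000139618', 'ENSG00000141510']})
print(df.head())  # a copy is also written to the cache path, per the code above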
|
test
|
gsea
|
Run Gene Set Enrichment Analysis.
:param data: Gene expression data table, Pandas DataFrame, gct file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param cls: A list or a .cls file format required for GSEA.
:param str outdir: Results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param str permutation_type: Permutation type, "phenotype" for phenotypes, "gene_set" for genes.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 1.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Other methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the t-test ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data.
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return a GSEA obj. All results are stored in a dictionary, obj.results,
which contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
|
gseapy/gsea.py
|
def gsea(data, gene_sets, cls, outdir='GSEA_', min_size=15, max_size=500, permutation_num=1000,
weighted_score_type=1,permutation_type='gene_set', method='log2_ratio_of_classes',
ascending=False, processes=1, figsize=(6.5,6), format='pdf',
graph_num=20, no_plot=False, seed=None, verbose=False):
""" Run Gene Set Enrichment Analysis.
:param data: Gene expression data table, Pandas DataFrame, gct file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param cls: A list or a .cls file format required for GSEA.
:param str outdir: Results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param str permutation_type: Permutation type, "phenotype" for phenotypes, "gene_set" for genes.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 1.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Other methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the t-test ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data.
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return a GSEA obj. All results are stored in a dictionary, obj.results,
which contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
"""
gs = GSEA(data, gene_sets, cls, outdir, min_size, max_size, permutation_num,
weighted_score_type, permutation_type, method, ascending, processes,
figsize, format, graph_num, no_plot, seed, verbose)
gs.run()
return gs
|
def gsea(data, gene_sets, cls, outdir='GSEA_', min_size=15, max_size=500, permutation_num=1000,
weighted_score_type=1,permutation_type='gene_set', method='log2_ratio_of_classes',
ascending=False, processes=1, figsize=(6.5,6), format='pdf',
graph_num=20, no_plot=False, seed=None, verbose=False):
""" Run Gene Set Enrichment Analysis.
:param data: Gene expression data table, Pandas DataFrame, gct file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param cls: A list or a .cls file format required for GSEA.
:param str outdir: Results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param str permutation_type: Permutation type, "phenotype" for phenotypes, "gene_set" for genes.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 1.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Other methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the t-test ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for natural scale data.
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return a GSEA obj. All results are stored in a dictionary, obj.results,
which contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
"""
gs = GSEA(data, gene_sets, cls, outdir, min_size, max_size, permutation_num,
weighted_score_type, permutation_type, method, ascending, processes,
figsize, format, graph_num, no_plot, seed, verbose)
gs.run()
return gs
|
[
"Run",
"Gene",
"Set",
"Enrichment",
"Analysis",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L858-L933
|
[
"def",
"gsea",
"(",
"data",
",",
"gene_sets",
",",
"cls",
",",
"outdir",
"=",
"'GSEA_'",
",",
"min_size",
"=",
"15",
",",
"max_size",
"=",
"500",
",",
"permutation_num",
"=",
"1000",
",",
"weighted_score_type",
"=",
"1",
",",
"permutation_type",
"=",
"'gene_set'",
",",
"method",
"=",
"'log2_ratio_of_classes'",
",",
"ascending",
"=",
"False",
",",
"processes",
"=",
"1",
",",
"figsize",
"=",
"(",
"6.5",
",",
"6",
")",
",",
"format",
"=",
"'pdf'",
",",
"graph_num",
"=",
"20",
",",
"no_plot",
"=",
"False",
",",
"seed",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"gs",
"=",
"GSEA",
"(",
"data",
",",
"gene_sets",
",",
"cls",
",",
"outdir",
",",
"min_size",
",",
"max_size",
",",
"permutation_num",
",",
"weighted_score_type",
",",
"permutation_type",
",",
"method",
",",
"ascending",
",",
"processes",
",",
"figsize",
",",
"format",
",",
"graph_num",
",",
"no_plot",
",",
"seed",
",",
"verbose",
")",
"gs",
".",
"run",
"(",
")",
"return",
"gs"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
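A minimal call sketch for gsea above; the expression file, class labels, and library name are assumptions:

import gseapy as gp

# 'expr.gct': genes x samples table; one phenotype label per sample column
res = gp.gsea(data='expr.gct', gene_sets='KEGG_2016',
              cls=['A', 'A', 'A', 'B', 'B', 'B'],
              permutation_type='phenotype', outdir='GSEA_out', seed=7)
for term, r in list(res.results.items())[:3]:
    print(term, r['nes'], r['fdr'])  # result keys per the docstring above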
|
test
|
ssgsea
|
Run Gene Set Enrichment Analysis with single sample GSEA tool
:param data: Expression table, pd.Series, pd.DataFrame, GCT file, or .rnk file format.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param outdir: Results output directory.
:param str sample_norm_method: Sample normalization method. Choose from {'rank', 'log', 'log_rank'}. Default: rank.
1. 'rank': Rank your expression data, and transform by 10000*rank_dat/gene_numbers
2. 'log' : Do not rank, but transform data by log(data + exp(1)), after first setting data[data<1] = 1.
3. 'log_rank': Rank your expression data, and transform by log(10000*rank_dat/gene_numbers+ exp(1))
4. 'custom': Do nothing, and use your own rank value to calculate enrichment score.
see here: https://github.com/GSEA-MSigDB/ssGSEAProjection-gpmodule/blob/master/src/ssGSEAProjection.Library.R, line 86
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 2000.
:param int permutation_num: Number of permutations for significance computation. Default: 0.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 0.25.
:param bool scale: If True, normalize the scores by number of genes in the gene sets.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [7,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return an ssGSEA obj.
All results are stored in a dictionary; access enrichment scores via obj.resultsOnSamples,
and normalized enrichment scores via obj.res2d.
If permutation_num > 0, additional results contain::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes, if permutation_num >0}
|
gseapy/gsea.py
|
def ssgsea(data, gene_sets, outdir="ssGSEA_", sample_norm_method='rank', min_size=15, max_size=2000,
permutation_num=0, weighted_score_type=0.25, scale=True, ascending=False, processes=1,
figsize=(7,6), format='pdf', graph_num=20, no_plot=False, seed=None, verbose=False):
"""Run Gene Set Enrichment Analysis with single sample GSEA tool
:param data: Expression table, pd.Series, pd.DataFrame, GCT file, or .rnk file format.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param outdir: Results output directory.
:param str sample_norm_method: Sample normalization method. Choose from {'rank', 'log', 'log_rank'}. Default: rank.
1. 'rank': Rank your expression data, and transform by 10000*rank_dat/gene_numbers
2. 'log' : Do not rank, but transform data by log(data + exp(1)), after first setting data[data<1] = 1.
3. 'log_rank': Rank your expression data, and transform by log(10000*rank_dat/gene_numbers+ exp(1))
4. 'custom': Do nothing, and use your own rank value to calculate enrichment score.
see here: https://github.com/GSEA-MSigDB/ssGSEAProjection-gpmodule/blob/master/src/ssGSEAProjection.Library.R, line 86
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 2000.
:param int permutation_num: Number of permutations for significance computation. Default: 0.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 0.25.
:param bool scale: If True, normalize the scores by number of genes in the gene sets.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [7,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return an ssGSEA obj.
All results are stored in a dictionary; access enrichment scores via obj.resultsOnSamples,
and normalized enrichment scores via obj.res2d.
If permutation_num > 0, additional results contain::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes, if permutation_num >0}
"""
ss = SingleSampleGSEA(data, gene_sets, outdir, sample_norm_method, min_size, max_size,
permutation_num, weighted_score_type, scale, ascending,
processes, figsize, format, graph_num, no_plot, seed, verbose)
ss.run()
return ss
|
def ssgsea(data, gene_sets, outdir="ssGSEA_", sample_norm_method='rank', min_size=15, max_size=2000,
permutation_num=0, weighted_score_type=0.25, scale=True, ascending=False, processes=1,
figsize=(7,6), format='pdf', graph_num=20, no_plot=False, seed=None, verbose=False):
"""Run Gene Set Enrichment Analysis with single sample GSEA tool
:param data: Expression table, pd.Series, pd.DataFrame, GCT file, or .rnk file format.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param outdir: Results output directory.
:param str sample_norm_method: Sample normalization method. Choose from {'rank', 'log', 'log_rank'}. Default: rank.
1. 'rank': Rank your expression data, and transform by 10000*rank_dat/gene_numbers
2. 'log' : Do not rank, but transform data by log(data + exp(1)), after first setting data[data<1] = 1.
3. 'log_rank': Rank your expression data, and transform by log(10000*rank_dat/gene_numbers+ exp(1))
4. 'custom': Do nothing, and use your own rank value to calculate enrichment score.
see here: https://github.com/GSEA-MSigDB/ssGSEAProjection-gpmodule/blob/master/src/ssGSEAProjection.Library.R, line 86
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 2000.
:param int permutation_num: Number of permutations for significance computation. Default: 0.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 0.25.
:param bool scale: If True, normalize the scores by number of genes in the gene sets.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [7,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return an ssGSEA obj.
All results are stored in a dictionary; access enrichment scores via obj.resultsOnSamples,
and normalized enrichment scores via obj.res2d.
If permutation_num > 0, additional results contain::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes, if permutation_num >0}
"""
ss = SingleSampleGSEA(data, gene_sets, outdir, sample_norm_method, min_size, max_size,
permutation_num, weighted_score_type, scale, ascending,
processes, figsize, format, graph_num, no_plot, seed, verbose)
ss.run()
return ss
|
[
"Run",
"Gene",
"Set",
"Enrichment",
"Analysis",
"with",
"single",
"sample",
"GSEA",
"tool"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L936-L988
|
[
"def",
"ssgsea",
"(",
"data",
",",
"gene_sets",
",",
"outdir",
"=",
"\"ssGSEA_\"",
",",
"sample_norm_method",
"=",
"'rank'",
",",
"min_size",
"=",
"15",
",",
"max_size",
"=",
"2000",
",",
"permutation_num",
"=",
"0",
",",
"weighted_score_type",
"=",
"0.25",
",",
"scale",
"=",
"True",
",",
"ascending",
"=",
"False",
",",
"processes",
"=",
"1",
",",
"figsize",
"=",
"(",
"7",
",",
"6",
")",
",",
"format",
"=",
"'pdf'",
",",
"graph_num",
"=",
"20",
",",
"no_plot",
"=",
"False",
",",
"seed",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"ss",
"=",
"SingleSampleGSEA",
"(",
"data",
",",
"gene_sets",
",",
"outdir",
",",
"sample_norm_method",
",",
"min_size",
",",
"max_size",
",",
"permutation_num",
",",
"weighted_score_type",
",",
"scale",
",",
"ascending",
",",
"processes",
",",
"figsize",
",",
"format",
",",
"graph_num",
",",
"no_plot",
",",
"seed",
",",
"verbose",
")",
"ss",
".",
"run",
"(",
")",
"return",
"ss"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
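A minimal call sketch for ssgsea above, with a hypothetical input file:

import gseapy as gp

ss = gp.ssgsea(data='expr.gct', gene_sets='KEGG_2016',
               sample_norm_method='rank', permutation_num=0,
               outdir='ssGSEA_out', seed=7)
print(ss.res2d.head())  # normalized enrichment scores, per the docstring above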
|
test
|
prerank
|
Run Gene Set Enrichment Analysis with pre-ranked correlation defined by user.
:param rnk: pre-ranked correlation table or pandas DataFrame. Same input with ``GSEA`` .rnk file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param outdir: results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 1.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return a Prerank obj. All results are stored in a dictionary, obj.results,
which contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
|
gseapy/gsea.py
|
def prerank(rnk, gene_sets, outdir='GSEA_Prerank', pheno_pos='Pos', pheno_neg='Neg',
min_size=15, max_size=500, permutation_num=1000, weighted_score_type=1,
ascending=False, processes=1, figsize=(6.5,6), format='pdf',
graph_num=20, no_plot=False, seed=None, verbose=False):
""" Run Gene Set Enrichment Analysis with pre-ranked correlation defined by user.
:param rnk: pre-ranked correlation table or pandas DataFrame. Same input with ``GSEA`` .rnk file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param outdir: results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 1.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return a Prerank obj. All results are stored in a dictionary, obj.results,
which contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
"""
pre = Prerank(rnk, gene_sets, outdir, pheno_pos, pheno_neg,
min_size, max_size, permutation_num, weighted_score_type,
ascending, processes, figsize, format, graph_num, no_plot, seed, verbose)
pre.run()
return pre
|
def prerank(rnk, gene_sets, outdir='GSEA_Prerank', pheno_pos='Pos', pheno_neg='Neg',
min_size=15, max_size=500, permutation_num=1000, weighted_score_type=1,
ascending=False, processes=1, figsize=(6.5,6), format='pdf',
graph_num=20, no_plot=False, seed=None, verbose=False):
""" Run Gene Set Enrichment Analysis with pre-ranked correlation defined by user.
:param rnk: pre-ranked correlation table or pandas DataFrame. Same input with ``GSEA`` .rnk file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param outdir: results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default: 1.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param seed: Random seed. Expects an integer. Default: None.
:param bool verbose: Increase output verbosity, print out progress of your job. Default: False.
:return: Return a Prerank obj. All results are stored in a dictionary, obj.results,
which contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
"""
pre = Prerank(rnk, gene_sets, outdir, pheno_pos, pheno_neg,
min_size, max_size, permutation_num, weighted_score_type,
ascending, processes, figsize, format, graph_num, no_plot, seed, verbose)
pre.run()
return pre
|
[
"Run",
"Gene",
"Set",
"Enrichment",
"Analysis",
"with",
"pre",
"-",
"ranked",
"correlation",
"defined",
"by",
"user",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L991-L1031
|
[
"def",
"prerank",
"(",
"rnk",
",",
"gene_sets",
",",
"outdir",
"=",
"'GSEA_Prerank'",
",",
"pheno_pos",
"=",
"'Pos'",
",",
"pheno_neg",
"=",
"'Neg'",
",",
"min_size",
"=",
"15",
",",
"max_size",
"=",
"500",
",",
"permutation_num",
"=",
"1000",
",",
"weighted_score_type",
"=",
"1",
",",
"ascending",
"=",
"False",
",",
"processes",
"=",
"1",
",",
"figsize",
"=",
"(",
"6.5",
",",
"6",
")",
",",
"format",
"=",
"'pdf'",
",",
"graph_num",
"=",
"20",
",",
"no_plot",
"=",
"False",
",",
"seed",
"=",
"None",
",",
"verbose",
"=",
"False",
")",
":",
"pre",
"=",
"Prerank",
"(",
"rnk",
",",
"gene_sets",
",",
"outdir",
",",
"pheno_pos",
",",
"pheno_neg",
",",
"min_size",
",",
"max_size",
",",
"permutation_num",
",",
"weighted_score_type",
",",
"ascending",
",",
"processes",
",",
"figsize",
",",
"format",
",",
"graph_num",
",",
"no_plot",
",",
"seed",
",",
"verbose",
")",
"pre",
".",
"run",
"(",
")",
"return",
"pre"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
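A minimal call sketch for prerank above; 'ranked_genes.rnk' (two columns: gene, ranking metric) is a hypothetical file:

import gseapy as gp

pre = gp.prerank(rnk='ranked_genes.rnk', gene_sets='KEGG_2016',
                 permutation_num=1000, outdir='Prerank_out', seed=7)
for term, r in list(pre.results.items())[:3]:
    print(term, r['es'], r['fdr'])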
|
test
|
replot
|
The main function to reproduce GSEA desktop outputs.
:param indir: GSEA desktop results directory. Its subfolder must contain the edb file folder.
:param outdir: Output directory.
:param float weighted_score_type: weighted score type. choose from {0,1,1.5,2}. Default: 1.
:param list figsize: Matplotlib output figure figsize. Default: [6.5,6].
:param str format: Matplotlib output figure format. Default: 'pdf'.
:param int min_size: Min size of input genes presented in Gene Sets. Default: 3.
:param int max_size: Max size of input genes presented in Gene Sets. Default: 1000.
You are not encouraged to use the min_size or max_size arguments in the :func:`replot` function,
because the gmt file has already been filtered.
:param verbose: Bool, increase output verbosity, print out progress of your job, Default: False.
:return: Generate new figures with selected figure format. Default: 'pdf'.
|
gseapy/gsea.py
|
def replot(indir, outdir='GSEA_Replot', weighted_score_type=1,
min_size=3, max_size=1000, figsize=(6.5,6), graph_num=20, format='pdf', verbose=False):
"""The main function to reproduce GSEA desktop outputs.
:param indir: GSEA desktop results directory. Its subfolder must contain the edb file folder.
:param outdir: Output directory.
:param float weighted_score_type: weighted score type. choose from {0,1,1.5,2}. Default: 1.
:param list figsize: Matplotlib output figure figsize. Default: [6.5,6].
:param str format: Matplotlib output figure format. Default: 'pdf'.
:param int min_size: Min size of input genes presented in Gene Sets. Default: 3.
:param int max_size: Max size of input genes presented in Gene Sets. Default: 1000.
You are not encouraged to use the min_size or max_size arguments in the :func:`replot` function,
because the gmt file has already been filtered.
:param verbose: Bool, increase output verbosity, print out progress of your job, Default: False.
:return: Generate new figures with selected figure format. Default: 'pdf'.
"""
rep = Replot(indir, outdir, weighted_score_type,
min_size, max_size, figsize, graph_num, format, verbose)
rep.run()
return
|
def replot(indir, outdir='GSEA_Replot', weighted_score_type=1,
min_size=3, max_size=1000, figsize=(6.5,6), graph_num=20, format='pdf', verbose=False):
"""The main function to reproduce GSEA desktop outputs.
:param indir: GSEA desktop results directory. Its subfolder must contain the edb file folder.
:param outdir: Output directory.
:param float weighted_score_type: weighted score type. choose from {0,1,1.5,2}. Default: 1.
:param list figsize: Matplotlib output figure figsize. Default: [6.5,6].
:param str format: Matplotlib output figure format. Default: 'pdf'.
:param int min_size: Min size of input genes presented in Gene Sets. Default: 3.
:param int max_size: Max size of input genes presented in Gene Sets. Default: 1000.
You are not encouraged to use the min_size or max_size arguments in the :func:`replot` function,
because the gmt file has already been filtered.
:param verbose: Bool, increase output verbosity, print out progress of your job, Default: False.
:return: Generate new figures with selected figure format. Default: 'pdf'.
"""
rep = Replot(indir, outdir, weighted_score_type,
min_size, max_size, figsize, graph_num, format, verbose)
rep.run()
return
|
[
"The",
"main",
"function",
"to",
"reproduce",
"GSEA",
"desktop",
"outputs",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L1034-L1056
|
[
"def",
"replot",
"(",
"indir",
",",
"outdir",
"=",
"'GSEA_Replot'",
",",
"weighted_score_type",
"=",
"1",
",",
"min_size",
"=",
"3",
",",
"max_size",
"=",
"1000",
",",
"figsize",
"=",
"(",
"6.5",
",",
"6",
")",
",",
"graph_num",
"=",
"20",
",",
"format",
"=",
"'pdf'",
",",
"verbose",
"=",
"False",
")",
":",
"rep",
"=",
"Replot",
"(",
"indir",
",",
"outdir",
",",
"weighted_score_type",
",",
"min_size",
",",
"max_size",
",",
"figsize",
",",
"graph_num",
",",
"format",
",",
"verbose",
")",
"rep",
".",
"run",
"(",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
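A minimal call sketch for replot above; the input directory is hypothetical and must contain the edb folder produced by a GSEA Desktop run:

import gseapy as gp

gp.replot(indir='./my_gsea_desktop_run', outdir='GSEA_Replot', format='pdf')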
|
test
|
GSEAbase.prepare_outdir
|
create temp directory.
|
gseapy/gsea.py
|
def prepare_outdir(self):
"""create temp directory."""
self._outdir = self.outdir
if self._outdir is None:
self._tmpdir = TemporaryDirectory()
self.outdir = self._tmpdir.name
elif isinstance(self.outdir, str):
mkdirs(self.outdir)
else:
raise Exception("Error parsing outdir: %s"%type(self.outdir))
# handle gmt type
if isinstance(self.gene_sets, str):
_gset = os.path.split(self.gene_sets)[-1].lower().rstrip(".gmt")
elif isinstance(self.gene_sets, dict):
_gset = "blank_name"
else:
raise Exception("Error parsing gene_sets parameter for gene sets")
logfile = os.path.join(self.outdir, "gseapy.%s.%s.log" % (self.module, _gset))
return logfile
|
def prepare_outdir(self):
"""create temp directory."""
self._outdir = self.outdir
if self._outdir is None:
self._tmpdir = TemporaryDirectory()
self.outdir = self._tmpdir.name
elif isinstance(self.outdir, str):
mkdirs(self.outdir)
else:
raise Exception("Error parsing outdir: %s"%type(self.outdir))
# handle gmt type
if isinstance(self.gene_sets, str):
_gset = os.path.split(self.gene_sets)[-1].lower().rstrip(".gmt")
elif isinstance(self.gene_sets, dict):
_gset = "blank_name"
else:
raise Exception("Error parsing gene_sets parameter for gene sets")
logfile = os.path.join(self.outdir, "gseapy.%s.%s.log" % (self.module, _gset))
return logfile
|
[
"create",
"temp",
"directory",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L34-L54
|
[
"def",
"prepare_outdir",
"(",
"self",
")",
":",
"self",
".",
"_outdir",
"=",
"self",
".",
"outdir",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"self",
".",
"_tmpdir",
"=",
"TemporaryDirectory",
"(",
")",
"self",
".",
"outdir",
"=",
"self",
".",
"_tmpdir",
".",
"name",
"elif",
"isinstance",
"(",
"self",
".",
"outdir",
",",
"str",
")",
":",
"mkdirs",
"(",
"self",
".",
"outdir",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Error parsing outdir: %s\"",
"%",
"type",
"(",
"self",
".",
"outdir",
")",
")",
"# handle gmt type",
"if",
"isinstance",
"(",
"self",
".",
"gene_sets",
",",
"str",
")",
":",
"_gset",
"=",
"os",
".",
"path",
".",
"split",
"(",
"self",
".",
"gene_sets",
")",
"[",
"-",
"1",
"]",
".",
"lower",
"(",
")",
".",
"rstrip",
"(",
"\".gmt\"",
")",
"elif",
"isinstance",
"(",
"self",
".",
"gene_sets",
",",
"dict",
")",
":",
"_gset",
"=",
"\"blank_name\"",
"else",
":",
"raise",
"Exception",
"(",
"\"Error parsing gene_sets parameter for gene sets\"",
")",
"logfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"outdir",
",",
"\"gseapy.%s.%s.log\"",
"%",
"(",
"self",
".",
"module",
",",
"_gset",
")",
")",
"return",
"logfile"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
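The prepare_outdir record above falls back to a TemporaryDirectory when no output directory is given and keeps a handle for later cleanup. A standalone sketch of that pattern, using only the standard library (resolve_outdir is a hypothetical helper; os.makedirs stands in for gseapy's mkdirs):

import os
from tempfile import TemporaryDirectory

def resolve_outdir(outdir):
    # Mirror prepare_outdir: None -> temp dir (keep the handle for cleanup),
    # str -> create the directory, anything else -> error.
    tmpdir = None
    if outdir is None:
        tmpdir = TemporaryDirectory()
        outdir = tmpdir.name
    elif isinstance(outdir, str):
        os.makedirs(outdir, exist_ok=True)
    else:
        raise TypeError("Error parsing outdir: %s" % type(outdir))
    return outdir, tmpdir

path, handle = resolve_outdir(None)
print(path)              # e.g. /tmp/tmpabc123
if handle is not None:
    handle.cleanup()     # what GSEA.run does via self._tmpdir.cleanup()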
test
|
GSEAbase._set_cores
|
set cpu numbers to be used
|
gseapy/gsea.py
|
def _set_cores(self):
"""set cpu numbers to be used"""
cpu_num = cpu_count()-1
if self._processes > cpu_num:
cores = cpu_num
elif self._processes < 1:
cores = 1
else:
cores = self._processes
# have to be int if user input is float
self._processes = int(cores)
|
def _set_cores(self):
"""set cpu numbers to be used"""
cpu_num = cpu_count()-1
if self._processes > cpu_num:
cores = cpu_num
elif self._processes < 1:
cores = 1
else:
cores = self._processes
# have to be int if user input is float
self._processes = int(cores)
|
[
"set",
"cpu",
"numbers",
"to",
"be",
"used"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L56-L67
|
[
"def",
"_set_cores",
"(",
"self",
")",
":",
"cpu_num",
"=",
"cpu_count",
"(",
")",
"-",
"1",
"if",
"self",
".",
"_processes",
">",
"cpu_num",
":",
"cores",
"=",
"cpu_num",
"elif",
"self",
".",
"_processes",
"<",
"1",
":",
"cores",
"=",
"1",
"else",
":",
"cores",
"=",
"self",
".",
"_processes",
"# have to be int if user input is float",
"self",
".",
"_processes",
"=",
"int",
"(",
"cores",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
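_set_cores above clamps the requested process count to the range [1, cpu_count() - 1] and coerces it to int. An equivalent one-expression sketch (clamp_cores is a hypothetical helper):

from multiprocessing import cpu_count

def clamp_cores(requested):
    # Never more than cpu_count()-1, never fewer than 1, always an int.
    return int(min(max(requested, 1), cpu_count() - 1))

print(clamp_cores(0.5))   # -> 1
print(clamp_cores(999))   # -> cpu_count() - 1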
test
|
GSEAbase._load_ranking
|
Parse ranking file. This file contains ranking correlation vector( or expression values)
and gene names or ids.
:param rnk: the .rnk file of GSEA input or a Pandas DataFrame, Series instance.
:return: a Pandas Series with gene name indexed rankings
|
gseapy/gsea.py
|
def _load_ranking(self, rnk):
"""Parse ranking file. This file contains ranking correlation vector( or expression values)
and gene names or ids.
:param rnk: the .rnk file of GSEA input or a Pandas DataFrame, Series instance.
:return: a Pandas Series with gene name indexed rankings
"""
# load data
if isinstance(rnk, pd.DataFrame):
rank_metric = rnk.copy()
# handle dataframe with gene_name as index.
if rnk.shape[1] == 1: rank_metric = rnk.reset_index()
elif isinstance(rnk, pd.Series):
rank_metric = rnk.reset_index()
elif os.path.isfile(rnk):
rank_metric = pd.read_csv(rnk, header=None, comment='#', sep="\t")
else:
raise Exception('Error parsing gene ranking values!')
# sort ranking values from high to low
rank_metric.sort_values(by=rank_metric.columns[1], ascending=self.ascending, inplace=True)
# drop na values
if rank_metric.isnull().any(axis=1).sum() >0:
self._logger.warning("Input gene rankings contains NA values(gene name and ranking value), drop them all!")
# print out NAs
NAs = rank_metric[rank_metric.isnull().any(axis=1)]
self._logger.debug('NAs list:\n'+NAs.to_string())
rank_metric.dropna(how='any', inplace=True)
# drop duplicate IDs, keep the first
if rank_metric.duplicated(subset=rank_metric.columns[0]).sum() >0:
self._logger.warning("Input gene rankings contains duplicated IDs, Only use the duplicated ID with highest value!")
# print out duplicated IDs.
dups = rank_metric[rank_metric.duplicated(subset=rank_metric.columns[0])]
self._logger.debug('Dups list:\n'+dups.to_string())
rank_metric.drop_duplicates(subset=rank_metric.columns[0], inplace=True, keep='first')
# reset ranking index, because values have been sorted and duplicates dropped.
rank_metric.reset_index(drop=True, inplace=True)
rank_metric.columns = ['gene_name','rank']
rankser = rank_metric.set_index('gene_name')['rank']
self.ranking = rankser
# return series
return rankser
|
def _load_ranking(self, rnk):
"""Parse ranking file. This file contains ranking correlation vector( or expression values)
and gene names or ids.
:param rnk: the .rnk file of GSEA input or a Pandas DataFrame, Series instance.
:return: a Pandas Series with gene name indexed rankings
"""
# load data
if isinstance(rnk, pd.DataFrame):
rank_metric = rnk.copy()
# handle dataframe with gene_name as index.
if rnk.shape[1] == 1: rank_metric = rnk.reset_index()
elif isinstance(rnk, pd.Series):
rank_metric = rnk.reset_index()
elif os.path.isfile(rnk):
rank_metric = pd.read_csv(rnk, header=None, comment='#', sep="\t")
else:
raise Exception('Error parsing gene ranking values!')
# sort ranking values from high to low
rank_metric.sort_values(by=rank_metric.columns[1], ascending=self.ascending, inplace=True)
# drop na values
if rank_metric.isnull().any(axis=1).sum() >0:
self._logger.warning("Input gene rankings contains NA values(gene name and ranking value), drop them all!")
# print out NAs
NAs = rank_metric[rank_metric.isnull().any(axis=1)]
self._logger.debug('NAs list:\n'+NAs.to_string())
rank_metric.dropna(how='any', inplace=True)
# drop duplicate IDs, keep the first
if rank_metric.duplicated(subset=rank_metric.columns[0]).sum() >0:
self._logger.warning("Input gene rankings contains duplicated IDs, Only use the duplicated ID with highest value!")
# print out duplicated IDs.
dups = rank_metric[rank_metric.duplicated(subset=rank_metric.columns[0])]
self._logger.debug('Dups list:\n'+dups.to_string())
rank_metric.drop_duplicates(subset=rank_metric.columns[0], inplace=True, keep='first')
# reset ranking index, because values have been sorted and duplicates dropped.
rank_metric.reset_index(drop=True, inplace=True)
rank_metric.columns = ['gene_name','rank']
rankser = rank_metric.set_index('gene_name')['rank']
self.ranking = rankser
# return series
return rankser
|
[
"Parse",
"ranking",
"file",
".",
"This",
"file",
"contains",
"ranking",
"correlation",
"vector",
"(",
"or",
"expression",
"values",
")",
"and",
"gene",
"names",
"or",
"ids",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L69-L110
|
[
"def",
"_load_ranking",
"(",
"self",
",",
"rnk",
")",
":",
"# load data",
"if",
"isinstance",
"(",
"rnk",
",",
"pd",
".",
"DataFrame",
")",
":",
"rank_metric",
"=",
"rnk",
".",
"copy",
"(",
")",
"# handle dataframe with gene_name as index.",
"if",
"rnk",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"rank_metric",
"=",
"rnk",
".",
"reset_index",
"(",
")",
"elif",
"isinstance",
"(",
"rnk",
",",
"pd",
".",
"Series",
")",
":",
"rank_metric",
"=",
"rnk",
".",
"reset_index",
"(",
")",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"rnk",
")",
":",
"rank_metric",
"=",
"pd",
".",
"read_csv",
"(",
"rnk",
",",
"header",
"=",
"None",
",",
"comment",
"=",
"'#'",
",",
"sep",
"=",
"\"\\t\"",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Error parsing gene ranking values!'",
")",
"# sort ranking values from high to low",
"rank_metric",
".",
"sort_values",
"(",
"by",
"=",
"rank_metric",
".",
"columns",
"[",
"1",
"]",
",",
"ascending",
"=",
"self",
".",
"ascending",
",",
"inplace",
"=",
"True",
")",
"# drop na values",
"if",
"rank_metric",
".",
"isnull",
"(",
")",
".",
"any",
"(",
"axis",
"=",
"1",
")",
".",
"sum",
"(",
")",
">",
"0",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Input gene rankings contains NA values(gene name and ranking value), drop them all!\"",
")",
"# print out NAs",
"NAs",
"=",
"rank_metric",
"[",
"rank_metric",
".",
"isnull",
"(",
")",
".",
"any",
"(",
"axis",
"=",
"1",
")",
"]",
"self",
".",
"_logger",
".",
"debug",
"(",
"'NAs list:\\n'",
"+",
"NAs",
".",
"to_string",
"(",
")",
")",
"rank_metric",
".",
"dropna",
"(",
"how",
"=",
"'any'",
",",
"inplace",
"=",
"True",
")",
"# drop duplicate IDs, keep the first",
"if",
"rank_metric",
".",
"duplicated",
"(",
"subset",
"=",
"rank_metric",
".",
"columns",
"[",
"0",
"]",
")",
".",
"sum",
"(",
")",
">",
"0",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Input gene rankings contains duplicated IDs, Only use the duplicated ID with highest value!\"",
")",
"# print out duplicated IDs.",
"dups",
"=",
"rank_metric",
"[",
"rank_metric",
".",
"duplicated",
"(",
"subset",
"=",
"rank_metric",
".",
"columns",
"[",
"0",
"]",
")",
"]",
"self",
".",
"_logger",
".",
"debug",
"(",
"'Dups list:\\n'",
"+",
"dups",
".",
"to_string",
"(",
")",
")",
"rank_metric",
".",
"drop_duplicates",
"(",
"subset",
"=",
"rank_metric",
".",
"columns",
"[",
"0",
"]",
",",
"inplace",
"=",
"True",
",",
"keep",
"=",
"'first'",
")",
"# reset ranking index, because you have sort values and drop duplicates.",
"rank_metric",
".",
"reset_index",
"(",
"drop",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"rank_metric",
".",
"columns",
"=",
"[",
"'gene_name'",
",",
"'rank'",
"]",
"rankser",
"=",
"rank_metric",
".",
"set_index",
"(",
"'gene_name'",
")",
"[",
"'rank'",
"]",
"self",
".",
"ranking",
"=",
"rankser",
"# return series",
"return",
"rankser"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
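The cleaning pipeline in _load_ranking (sort high to low, drop NAs, de-duplicate by gene keeping the first/highest value, re-index) can be exercised on a toy frame; the data below are fabricated:

import pandas as pd

rnk = pd.DataFrame({'gene_name': ['TP53', 'MYC', 'TP53', 'EGFR'],
                    'rank': [2.1, 3.5, 1.0, None]})
rnk = rnk.sort_values(by='rank', ascending=False)            # high to low
rnk = rnk.dropna(how='any')                                  # drop the EGFR row
rnk = rnk.drop_duplicates(subset='gene_name', keep='first')  # keep TP53's 2.1
ranking = rnk.reset_index(drop=True).set_index('gene_name')['rank']
print(ranking)   # MYC 3.5, TP53 2.1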
test
|
GSEAbase.load_gmt
|
load gene set dict
|
gseapy/gsea.py
|
def load_gmt(self, gene_list, gmt):
"""load gene set dict"""
if isinstance(gmt, dict):
genesets_dict = gmt
elif isinstance(gmt, str):
genesets_dict = self.parse_gmt(gmt)
else:
raise Exception("Error parsing gmt parameter for gene sets")
subsets = list(genesets_dict.keys())
self.n_genesets = len(subsets)
for subset in subsets:
subset_list = genesets_dict.get(subset)
if isinstance(subset_list, set):
subset_list = list(subset_list)
genesets_dict[subset] = subset_list
tag_indicator = np.in1d(gene_list, subset_list, assume_unique=True)
tag_len = tag_indicator.sum()
if self.min_size <= tag_len <= self.max_size: continue
del genesets_dict[subset]
filsets_num = len(subsets) - len(genesets_dict)
self._logger.info("%04d gene_sets have been filtered out when max_size=%s and min_size=%s"%(filsets_num, self.max_size, self.min_size))
if filsets_num == len(subsets):
self._logger.error("No gene sets passed through filtering condition!!!, try new parameters again!\n" +\
"Note: check gene name, gmt file format, or filtering size." )
sys.exit(0)
self._gmtdct=genesets_dict
return genesets_dict
|
def load_gmt(self, gene_list, gmt):
"""load gene set dict"""
if isinstance(gmt, dict):
genesets_dict = gmt
elif isinstance(gmt, str):
genesets_dict = self.parse_gmt(gmt)
else:
raise Exception("Error parsing gmt parameter for gene sets")
subsets = list(genesets_dict.keys())
self.n_genesets = len(subsets)
for subset in subsets:
subset_list = genesets_dict.get(subset)
if isinstance(subset_list, set):
subset_list = list(subset_list)
genesets_dict[subset] = subset_list
tag_indicator = np.in1d(gene_list, subset_list, assume_unique=True)
tag_len = tag_indicator.sum()
if self.min_size <= tag_len <= self.max_size: continue
del genesets_dict[subset]
filsets_num = len(subsets) - len(genesets_dict)
self._logger.info("%04d gene_sets have been filtered out when max_size=%s and min_size=%s"%(filsets_num, self.max_size, self.min_size))
if filsets_num == len(subsets):
self._logger.error("No gene sets passed through filtering condition!!!, try new parameters again!\n" +\
"Note: check gene name, gmt file format, or filtering size." )
sys.exit(0)
self._gmtdct=genesets_dict
return genesets_dict
|
[
"load",
"gene",
"set",
"dict"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L112-L143
|
[
"def",
"load_gmt",
"(",
"self",
",",
"gene_list",
",",
"gmt",
")",
":",
"if",
"isinstance",
"(",
"gmt",
",",
"dict",
")",
":",
"genesets_dict",
"=",
"gmt",
"elif",
"isinstance",
"(",
"gmt",
",",
"str",
")",
":",
"genesets_dict",
"=",
"self",
".",
"parse_gmt",
"(",
"gmt",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Error parsing gmt parameter for gene sets\"",
")",
"subsets",
"=",
"list",
"(",
"genesets_dict",
".",
"keys",
"(",
")",
")",
"self",
".",
"n_genesets",
"=",
"len",
"(",
"subsets",
")",
"for",
"subset",
"in",
"subsets",
":",
"subset_list",
"=",
"genesets_dict",
".",
"get",
"(",
"subset",
")",
"if",
"isinstance",
"(",
"subset_list",
",",
"set",
")",
":",
"subset_list",
"=",
"list",
"(",
"subset_list",
")",
"genesets_dict",
"[",
"subset",
"]",
"=",
"subset_list",
"tag_indicator",
"=",
"np",
".",
"in1d",
"(",
"gene_list",
",",
"subset_list",
",",
"assume_unique",
"=",
"True",
")",
"tag_len",
"=",
"tag_indicator",
".",
"sum",
"(",
")",
"if",
"self",
".",
"min_size",
"<=",
"tag_len",
"<=",
"self",
".",
"max_size",
":",
"continue",
"del",
"genesets_dict",
"[",
"subset",
"]",
"filsets_num",
"=",
"len",
"(",
"subsets",
")",
"-",
"len",
"(",
"genesets_dict",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"%04d gene_sets have been filtered out when max_size=%s and min_size=%s\"",
"%",
"(",
"filsets_num",
",",
"self",
".",
"max_size",
",",
"self",
".",
"min_size",
")",
")",
"if",
"filsets_num",
"==",
"len",
"(",
"subsets",
")",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"\"No gene sets passed through filtering condition!!!, try new parameters again!\\n\"",
"+",
"\"Note: check gene name, gmt file format, or filtering size.\"",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"self",
".",
"_gmtdct",
"=",
"genesets_dict",
"return",
"genesets_dict"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
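load_gmt above keeps only gene sets whose overlap with the ranked gene list falls inside [min_size, max_size]. The size test can be sketched with np.isin (np.in1d, used in the record, is the older spelling of the same check); all names below are fabricated:

import numpy as np

gene_list = np.array(['TP53', 'MYC', 'EGFR', 'KRAS'])
gmt = {'SET_A': ['TP53', 'MYC'],
       'SET_B': ['BRCA1'],
       'SET_C': ['TP53', 'MYC', 'EGFR', 'KRAS']}
min_size, max_size = 2, 3

kept = {name: genes for name, genes in gmt.items()
        if min_size <= np.isin(gene_list, genes).sum() <= max_size}
print(kept)   # only SET_A survives: SET_B is too small, SET_C too large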
test
|
GSEAbase.parse_gmt
|
gmt parser
|
gseapy/gsea.py
|
def parse_gmt(self, gmt):
"""gmt parser"""
if gmt.lower().endswith(".gmt"):
with open(gmt) as genesets:
genesets_dict = { line.strip().split("\t")[0]: line.strip().split("\t")[2:]
for line in genesets.readlines()}
return genesets_dict
elif gmt in DEFAULT_LIBRARY:
pass
elif gmt in self.get_libraries():
pass
else:
self._logger.error("No supported gene_sets: %s"%gmt)
sys.exit(0)
tmpname = "enrichr." + gmt + ".gmt"
tempath = os.path.join(DEFAULT_CACHE_PATH, tmpname)
# if file already downloaded
if os.path.isfile(tempath):
self._logger.info("Enrichr library gene sets already downloaded in: %s, use local file"%DEFAULT_CACHE_PATH)
return self.parse_gmt(tempath)
else:
return self._download_libraries(gmt)
|
def parse_gmt(self, gmt):
"""gmt parser"""
if gmt.lower().endswith(".gmt"):
with open(gmt) as genesets:
genesets_dict = { line.strip().split("\t")[0]: line.strip().split("\t")[2:]
for line in genesets.readlines()}
return genesets_dict
elif gmt in DEFAULT_LIBRARY:
pass
elif gmt in self.get_libraries():
pass
else:
self._logger.error("No supported gene_sets: %s"%gmt)
sys.exit(0)
tmpname = "enrichr." + gmt + ".gmt"
tempath = os.path.join(DEFAULT_CACHE_PATH, tmpname)
# if file already downloaded
if os.path.isfile(tempath):
self._logger.info("Enrichr library gene sets already downloaded in: %s, use local file"%DEFAULT_CACHE_PATH)
return self.parse_gmt(tempath)
else:
return self._download_libraries(gmt)
|
[
"gmt",
"parser"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L145-L169
|
[
"def",
"parse_gmt",
"(",
"self",
",",
"gmt",
")",
":",
"if",
"gmt",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".gmt\"",
")",
":",
"with",
"open",
"(",
"gmt",
")",
"as",
"genesets",
":",
"genesets_dict",
"=",
"{",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
":",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"2",
":",
"]",
"for",
"line",
"in",
"genesets",
".",
"readlines",
"(",
")",
"}",
"return",
"genesets_dict",
"elif",
"gmt",
"in",
"DEFAULT_LIBRARY",
":",
"pass",
"elif",
"gmt",
"in",
"self",
".",
"get_libraries",
"(",
")",
":",
"pass",
"else",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"\"No supported gene_sets: %s\"",
"%",
"gmt",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"tmpname",
"=",
"\"enrichr.\"",
"+",
"gmt",
"+",
"\".gmt\"",
"tempath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DEFAULT_CACHE_PATH",
",",
"tmpname",
")",
"# if file already download",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"tempath",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Enrichr library gene sets already downloaded in: %s, use local file\"",
"%",
"DEFAULT_CACHE_PATH",
")",
"return",
"self",
".",
"parse_gmt",
"(",
"tempath",
")",
"else",
":",
"return",
"self",
".",
"_download_libraries",
"(",
"gmt",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
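A .gmt line is tab-separated: set name, description, then member genes, which is why parse_gmt slices from field index 2. The same dict comprehension on an in-memory example:

# Parse gmt-formatted lines (name <tab> description <tab> gene1 <tab> ...)
gmt_text = "SET_A\tdesc\tTP53\tMYC\nSET_B\tdesc\tBRCA1\tBRCA2\tATM\n"

genesets = {line.split("\t")[0]: line.split("\t")[2:]
            for line in gmt_text.strip().split("\n")}
print(genesets)   # {'SET_A': ['TP53', 'MYC'], 'SET_B': ['BRCA1', 'BRCA2', 'ATM']}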
test
|
GSEAbase.get_libraries
|
return active enrichr library name. Official API
|
gseapy/gsea.py
|
def get_libraries(self, database=''):
"""return active enrichr library name.Offical API """
lib_url='http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'%database
libs_json = json.loads(requests.get(lib_url).text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs)
|
def get_libraries(self, database=''):
"""return active enrichr library name.Offical API """
lib_url='http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'%database
libs_json = json.loads(requests.get(lib_url).text)
libs = [lib['libraryName'] for lib in libs_json['statistics']]
return sorted(libs)
|
[
"return",
"active",
"enrichr",
"library",
"name",
".",
"Offical",
"API"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L171-L177
|
[
"def",
"get_libraries",
"(",
"self",
",",
"database",
"=",
"''",
")",
":",
"lib_url",
"=",
"'http://amp.pharm.mssm.edu/%sEnrichr/datasetStatistics'",
"%",
"database",
"libs_json",
"=",
"json",
".",
"loads",
"(",
"requests",
".",
"get",
"(",
"lib_url",
")",
".",
"text",
")",
"libs",
"=",
"[",
"lib",
"[",
"'libraryName'",
"]",
"for",
"lib",
"in",
"libs_json",
"[",
"'statistics'",
"]",
"]",
"return",
"sorted",
"(",
"libs",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
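get_libraries above pulls the datasetStatistics endpoint and collects the libraryName fields. A standalone sketch, assuming the endpoint copied from the record is still reachable (it may have moved since this commit):

import json
import requests

resp = requests.get('http://amp.pharm.mssm.edu/Enrichr/datasetStatistics',
                    timeout=30)
stats = json.loads(resp.text)
libs = sorted(lib['libraryName'] for lib in stats['statistics'])
print(libs[:5])   # first few Enrichr library names, alphabetically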
test
|
GSEAbase._download_libraries
|
download enrichr libraries.
|
gseapy/gsea.py
|
def _download_libraries(self, libname):
""" download enrichr libraries."""
self._logger.info("Downloading and generating Enrichr library gene sets......")
s = retry(5)
# query string
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'
query_string = '?mode=text&libraryName=%s'
# get
response = s.get( ENRICHR_URL + query_string % libname, timeout=None)
if not response.ok:
raise Exception('Error fetching enrichment results, check internet connection first.')
# reformat to dict and save to disk
mkdirs(DEFAULT_CACHE_PATH)
genesets_dict = {}
outname = "enrichr.%s.gmt"%libname
gmtout = open(os.path.join(DEFAULT_CACHE_PATH, outname), "w")
for line in response.iter_lines(chunk_size=1024, decode_unicode='utf-8'):
line=line.strip()
k = line.split("\t")[0]
v = list(map(lambda x: x.split(",")[0], line.split("\t")[2:]))
genesets_dict.update({ k: v})
outline = "%s\t\t%s\n"%(k, "\t".join(v))
gmtout.write(outline)
gmtout.close()
return genesets_dict
|
def _download_libraries(self, libname):
""" download enrichr libraries."""
self._logger.info("Downloading and generating Enrichr library gene sets......")
s = retry(5)
# query string
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'
query_string = '?mode=text&libraryName=%s'
# get
response = s.get( ENRICHR_URL + query_string % libname, timeout=None)
if not response.ok:
raise Exception('Error fetching enrichment results, check internet connection first.')
# reformat to dict and save to disk
mkdirs(DEFAULT_CACHE_PATH)
genesets_dict = {}
outname = "enrichr.%s.gmt"%libname
gmtout = open(os.path.join(DEFAULT_CACHE_PATH, outname), "w")
for line in response.iter_lines(chunk_size=1024, decode_unicode='utf-8'):
line=line.strip()
k = line.split("\t")[0]
v = list(map(lambda x: x.split(",")[0], line.split("\t")[2:]))
genesets_dict.update({ k: v})
outline = "%s\t\t%s\n"%(k, "\t".join(v))
gmtout.write(outline)
gmtout.close()
return genesets_dict
|
[
"download",
"enrichr",
"libraries",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L179-L204
|
[
"def",
"_download_libraries",
"(",
"self",
",",
"libname",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Downloading and generating Enrichr library gene sets......\"",
")",
"s",
"=",
"retry",
"(",
"5",
")",
"# queery string",
"ENRICHR_URL",
"=",
"'http://amp.pharm.mssm.edu/Enrichr/geneSetLibrary'",
"query_string",
"=",
"'?mode=text&libraryName=%s'",
"# get",
"response",
"=",
"s",
".",
"get",
"(",
"ENRICHR_URL",
"+",
"query_string",
"%",
"libname",
",",
"timeout",
"=",
"None",
")",
"if",
"not",
"response",
".",
"ok",
":",
"raise",
"Exception",
"(",
"'Error fetching enrichment results, check internet connection first.'",
")",
"# reformat to dict and save to disk",
"mkdirs",
"(",
"DEFAULT_CACHE_PATH",
")",
"genesets_dict",
"=",
"{",
"}",
"outname",
"=",
"\"enrichr.%s.gmt\"",
"%",
"libname",
"gmtout",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"DEFAULT_CACHE_PATH",
",",
"outname",
")",
",",
"\"w\"",
")",
"for",
"line",
"in",
"response",
".",
"iter_lines",
"(",
"chunk_size",
"=",
"1024",
",",
"decode_unicode",
"=",
"'utf-8'",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"k",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
"v",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"split",
"(",
"\",\"",
")",
"[",
"0",
"]",
",",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"2",
":",
"]",
")",
")",
"genesets_dict",
".",
"update",
"(",
"{",
"k",
":",
"v",
"}",
")",
"outline",
"=",
"\"%s\\t\\t%s\\n\"",
"%",
"(",
"k",
",",
"\"\\t\"",
".",
"join",
"(",
"v",
")",
")",
"gmtout",
".",
"write",
"(",
"outline",
")",
"gmtout",
".",
"close",
"(",
")",
"return",
"genesets_dict"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
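Note the double tab in the outline template of _download_libraries: gmt lines reserve the second column for a description, which the cache writer leaves empty. One such line, built standalone:

name, genes = 'SET_A', ['TP53', 'MYC', 'EGFR']   # fabricated example
outline = "%s\t\t%s\n" % (name, "\t".join(genes))
print(repr(outline))   # 'SET_A\t\tTP53\tMYC\tEGFR\n'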
test
|
GSEAbase._heatmat
|
only used for gsea heatmap
|
gseapy/gsea.py
|
def _heatmat(self, df, classes, pheno_pos, pheno_neg):
"""only use for gsea heatmap"""
width = len(classes) if len(classes) >= 6 else 5
cls_booA =list(map(lambda x: True if x == pheno_pos else False, classes))
cls_booB =list(map(lambda x: True if x == pheno_neg else False, classes))
datA = df.loc[:, cls_booA]
datB = df.loc[:, cls_booB]
datAB=pd.concat([datA,datB], axis=1)
self._width = width
self.heatmat = datAB
return
|
def _heatmat(self, df, classes, pheno_pos, pheno_neg):
"""only use for gsea heatmap"""
width = len(classes) if len(classes) >= 6 else 5
cls_booA =list(map(lambda x: True if x == pheno_pos else False, classes))
cls_booB =list(map(lambda x: True if x == pheno_neg else False, classes))
datA = df.loc[:, cls_booA]
datB = df.loc[:, cls_booB]
datAB=pd.concat([datA,datB], axis=1)
self._width = width
self.heatmat = datAB
return
|
[
"only",
"use",
"for",
"gsea",
"heatmap"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L206-L216
|
[
"def",
"_heatmat",
"(",
"self",
",",
"df",
",",
"classes",
",",
"pheno_pos",
",",
"pheno_neg",
")",
":",
"width",
"=",
"len",
"(",
"classes",
")",
"if",
"len",
"(",
"classes",
")",
">=",
"6",
"else",
"5",
"cls_booA",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"True",
"if",
"x",
"==",
"pheno_pos",
"else",
"False",
",",
"classes",
")",
")",
"cls_booB",
"=",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"True",
"if",
"x",
"==",
"pheno_neg",
"else",
"False",
",",
"classes",
")",
")",
"datA",
"=",
"df",
".",
"loc",
"[",
":",
",",
"cls_booA",
"]",
"datB",
"=",
"df",
".",
"loc",
"[",
":",
",",
"cls_booB",
"]",
"datAB",
"=",
"pd",
".",
"concat",
"(",
"[",
"datA",
",",
"datB",
"]",
",",
"axis",
"=",
"1",
")",
"self",
".",
"_width",
"=",
"width",
"self",
".",
"heatmat",
"=",
"datAB",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
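_heatmat above reorders expression columns so every positive-phenotype sample precedes the negative ones, via two boolean masks and a concat. The same trick on toy data:

import pandas as pd

df = pd.DataFrame([[1, 2, 3, 4]], columns=['s1', 's2', 's3', 's4'])
classes = ['mut', 'wt', 'mut', 'wt']   # fabricated per-column phenotype labels
pos_mask = [c == 'mut' for c in classes]
neg_mask = [c == 'wt' for c in classes]
ordered = pd.concat([df.loc[:, pos_mask], df.loc[:, neg_mask]], axis=1)
print(list(ordered.columns))   # ['s1', 's3', 's2', 's4']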
test
|
GSEAbase._plotting
|
Plotting API.
:param rank_metric: sorted pd.Series with rankings values.
:param results: self.results
:param data: preprocessed expression table
|
gseapy/gsea.py
|
def _plotting(self, rank_metric, results, graph_num, outdir,
format, figsize, pheno_pos='', pheno_neg=''):
""" Plotting API.
:param rank_metric: sorted pd.Series with rankings values.
:param results: self.results
:param data: preprocessed expression table
"""
# no values need to be returned
if self._outdir is None: return
#Plotting
top_term = self.res2d.index[:graph_num]
# multi-processing
pool = Pool(self._processes)
for gs in top_term:
hit = results.get(gs)['hits_indices']
NES = 'nes' if self.module != 'ssgsea' else 'es'
term = gs.replace('/','_').replace(":","_")
outfile = '{0}/{1}.{2}.{3}'.format(self.outdir, term, self.module, self.format)
# gseaplot(rank_metric=rank_metric, term=term, hits_indices=hit,
# nes=results.get(gs)[NES], pval=results.get(gs)['pval'],
# fdr=results.get(gs)['fdr'], RES=results.get(gs)['RES'],
# pheno_pos=pheno_pos, pheno_neg=pheno_neg, figsize=figsize,
# ofname=outfile)
pool.apply_async(gseaplot, args=(rank_metric, term, hit, results.get(gs)[NES],
results.get(gs)['pval'],results.get(gs)['fdr'],
results.get(gs)['RES'],
pheno_pos, pheno_neg,
figsize, 'seismic', outfile))
if self.module == 'gsea':
outfile2 = "{0}/{1}.heatmap.{2}".format(self.outdir, term, self.format)
# heatmap(df=self.heatmat.iloc[hit, :], title=term, ofname=outfile2,
# z_score=0, figsize=(self._width, len(hit)/2))
pool.apply_async(heatmap, args=(self.heatmat.iloc[hit, :], 0, term,
(self._width, len(hit)/2+2), 'RdBu_r',
True, True, outfile2))
pool.close()
pool.join()
|
def _plotting(self, rank_metric, results, graph_num, outdir,
format, figsize, pheno_pos='', pheno_neg=''):
""" Plotting API.
:param rank_metric: sorted pd.Series with rankings values.
:param results: self.results
:param data: preprocessed expression table
"""
# no values need to be returned
if self._outdir is None: return
#Plotting
top_term = self.res2d.index[:graph_num]
# multi-processing
pool = Pool(self._processes)
for gs in top_term:
hit = results.get(gs)['hits_indices']
NES = 'nes' if self.module != 'ssgsea' else 'es'
term = gs.replace('/','_').replace(":","_")
outfile = '{0}/{1}.{2}.{3}'.format(self.outdir, term, self.module, self.format)
# gseaplot(rank_metric=rank_metric, term=term, hits_indices=hit,
# nes=results.get(gs)[NES], pval=results.get(gs)['pval'],
# fdr=results.get(gs)['fdr'], RES=results.get(gs)['RES'],
# pheno_pos=pheno_pos, pheno_neg=pheno_neg, figsize=figsize,
# ofname=outfile)
pool.apply_async(gseaplot, args=(rank_metric, term, hit, results.get(gs)[NES],
results.get(gs)['pval'],results.get(gs)['fdr'],
results.get(gs)['RES'],
pheno_pos, pheno_neg,
figsize, 'seismic', outfile))
if self.module == 'gsea':
outfile2 = "{0}/{1}.heatmap.{2}".format(self.outdir, term, self.format)
# heatmap(df=self.heatmat.iloc[hit, :], title=term, ofname=outfile2,
# z_score=0, figsize=(self._width, len(hit)/2))
pool.apply_async(heatmap, args=(self.heatmat.iloc[hit, :], 0, term,
(self._width, len(hit)/2+2), 'RdBu_r',
True, True, outfile2))
pool.close()
pool.join()
|
[
"Plotting",
"API",
".",
":",
"param",
"rank_metric",
":",
"sorted",
"pd",
".",
"Series",
"with",
"rankings",
"values",
".",
":",
"param",
"results",
":",
"self",
".",
"results",
":",
"param",
"data",
":",
"preprocessed",
"expression",
"table"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L218-L256
|
[
"def",
"_plotting",
"(",
"self",
",",
"rank_metric",
",",
"results",
",",
"graph_num",
",",
"outdir",
",",
"format",
",",
"figsize",
",",
"pheno_pos",
"=",
"''",
",",
"pheno_neg",
"=",
"''",
")",
":",
"# no values need to be returned",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"return",
"#Plotting",
"top_term",
"=",
"self",
".",
"res2d",
".",
"index",
"[",
":",
"graph_num",
"]",
"# multi-threading",
"pool",
"=",
"Pool",
"(",
"self",
".",
"_processes",
")",
"for",
"gs",
"in",
"top_term",
":",
"hit",
"=",
"results",
".",
"get",
"(",
"gs",
")",
"[",
"'hits_indices'",
"]",
"NES",
"=",
"'nes'",
"if",
"self",
".",
"module",
"!=",
"'ssgsea'",
"else",
"'es'",
"term",
"=",
"gs",
".",
"replace",
"(",
"'/'",
",",
"'_'",
")",
".",
"replace",
"(",
"\":\"",
",",
"\"_\"",
")",
"outfile",
"=",
"'{0}/{1}.{2}.{3}'",
".",
"format",
"(",
"self",
".",
"outdir",
",",
"term",
",",
"self",
".",
"module",
",",
"self",
".",
"format",
")",
"# gseaplot(rank_metric=rank_metric, term=term, hits_indices=hit,",
"# nes=results.get(gs)[NES], pval=results.get(gs)['pval'], ",
"# fdr=results.get(gs)['fdr'], RES=results.get(gs)['RES'],",
"# pheno_pos=pheno_pos, pheno_neg=pheno_neg, figsize=figsize,",
"# ofname=outfile)",
"pool",
".",
"apply_async",
"(",
"gseaplot",
",",
"args",
"=",
"(",
"rank_metric",
",",
"term",
",",
"hit",
",",
"results",
".",
"get",
"(",
"gs",
")",
"[",
"NES",
"]",
",",
"results",
".",
"get",
"(",
"gs",
")",
"[",
"'pval'",
"]",
",",
"results",
".",
"get",
"(",
"gs",
")",
"[",
"'fdr'",
"]",
",",
"results",
".",
"get",
"(",
"gs",
")",
"[",
"'RES'",
"]",
",",
"pheno_pos",
",",
"pheno_neg",
",",
"figsize",
",",
"'seismic'",
",",
"outfile",
")",
")",
"if",
"self",
".",
"module",
"==",
"'gsea'",
":",
"outfile2",
"=",
"\"{0}/{1}.heatmap.{2}\"",
".",
"format",
"(",
"self",
".",
"outdir",
",",
"term",
",",
"self",
".",
"format",
")",
"# heatmap(df=self.heatmat.iloc[hit, :], title=term, ofname=outfile2, ",
"# z_score=0, figsize=(self._width, len(hit)/2))",
"pool",
".",
"apply_async",
"(",
"heatmap",
",",
"args",
"=",
"(",
"self",
".",
"heatmat",
".",
"iloc",
"[",
"hit",
",",
":",
"]",
",",
"0",
",",
"term",
",",
"(",
"self",
".",
"_width",
",",
"len",
"(",
"hit",
")",
"/",
"2",
"+",
"2",
")",
",",
"'RdBu_r'",
",",
"True",
",",
"True",
",",
"outfile2",
")",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
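The plotting loop above fans independent figure jobs out to a pool of worker processes with apply_async, then close()/join() waits for all of them. The dispatch pattern, sketched with a trivial stand-in worker instead of gseaplot/heatmap:

from multiprocessing import Pool

def render(term, score):
    # Stand-in for gseaplot/heatmap: each job is independent.
    return "%s: %.2f" % (term, score)

if __name__ == '__main__':
    jobs = [('SET_A', 1.8), ('SET_B', -1.2)]     # fabricated (term, NES) pairs
    pool = Pool(2)
    results = [pool.apply_async(render, args=job) for job in jobs]
    pool.close()
    pool.join()                                  # wait for every job
    print([r.get() for r in results])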
test
|
GSEAbase._save_results
|
reformat gsea results, and save to txt
|
gseapy/gsea.py
|
def _save_results(self, zipdata, outdir, module, gmt, rank_metric, permutation_type):
"""reformat gsea results, and save to txt"""
res = OrderedDict()
for gs, gseale, ind, RES in zipdata:
rdict = OrderedDict()
rdict['es'] = gseale[0]
rdict['nes'] = gseale[1]
rdict['pval'] = gseale[2]
rdict['fdr'] = gseale[3]
rdict['geneset_size'] = len(gmt[gs])
rdict['matched_size'] = len(ind)
#reformat gene list.
_genes = rank_metric.index.values[ind]
rdict['genes'] = ";".join([ str(g).strip() for g in _genes ])
if self.module != 'ssgsea':
# extract leading edge genes
if rdict['es'] > 0:
# RES -> ndarray, ind -> list
idx = RES.argmax()
ldg_pos = list(filter(lambda x: x<= idx, ind))
elif rdict['es'] < 0:
idx = RES.argmin()
ldg_pos = list(filter(lambda x: x >= idx, ind))
else:
ldg_pos = ind # es == 0 ?
rdict['ledge_genes'] = ';'.join(list(map(str,rank_metric.iloc[ldg_pos].index)))
rdict['RES'] = RES
rdict['hits_indices'] = ind
# save to one odict
res[gs] = rdict
# save
self.results = res
# save to dataframe
res_df = pd.DataFrame.from_dict(res, orient='index')
res_df.index.name = 'Term'
res_df.drop(['RES','hits_indices'], axis=1, inplace=True)
res_df.sort_values(by=['fdr','pval'], inplace=True)
self.res2d = res_df
if self._outdir is None: return
out = os.path.join(outdir,'gseapy.{b}.{c}.report.csv'.format(b=module, c=permutation_type))
if self.module == 'ssgsea':
out = out.replace(".csv",".txt")
with open(out, 'a') as f:
f.write('# normalize enrichment scores by random permutation procedure (GSEA method)\n')
f.write("# might not proper for publication\n")
res_df.to_csv(f, sep='\t')
else:
res_df.to_csv(out)
return
|
def _save_results(self, zipdata, outdir, module, gmt, rank_metric, permutation_type):
"""reformat gsea results, and save to txt"""
res = OrderedDict()
for gs, gseale, ind, RES in zipdata:
rdict = OrderedDict()
rdict['es'] = gseale[0]
rdict['nes'] = gseale[1]
rdict['pval'] = gseale[2]
rdict['fdr'] = gseale[3]
rdict['geneset_size'] = len(gmt[gs])
rdict['matched_size'] = len(ind)
#reformat gene list.
_genes = rank_metric.index.values[ind]
rdict['genes'] = ";".join([ str(g).strip() for g in _genes ])
if self.module != 'ssgsea':
# extract leading edge genes
if rdict['es'] > 0:
# RES -> ndarray, ind -> list
idx = RES.argmax()
ldg_pos = list(filter(lambda x: x<= idx, ind))
elif rdict['es'] < 0:
idx = RES.argmin()
ldg_pos = list(filter(lambda x: x >= idx, ind))
else:
ldg_pos = ind # es == 0 ?
rdict['ledge_genes'] = ';'.join(list(map(str,rank_metric.iloc[ldg_pos].index)))
rdict['RES'] = RES
rdict['hits_indices'] = ind
# save to one odict
res[gs] = rdict
# save
self.results = res
# save to dataframe
res_df = pd.DataFrame.from_dict(res, orient='index')
res_df.index.name = 'Term'
res_df.drop(['RES','hits_indices'], axis=1, inplace=True)
res_df.sort_values(by=['fdr','pval'], inplace=True)
self.res2d = res_df
if self._outdir is None: return
out = os.path.join(outdir,'gseapy.{b}.{c}.report.csv'.format(b=module, c=permutation_type))
if self.module == 'ssgsea':
out = out.replace(".csv",".txt")
with open(out, 'a') as f:
f.write('# normalize enrichment scores by random permutation procedure (GSEA method)\n')
f.write("# might not proper for publication\n")
res_df.to_csv(f, sep='\t')
else:
res_df.to_csv(out)
return
|
[
"reformat",
"gsea",
"results",
"and",
"save",
"to",
"txt"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L259-L312
|
[
"def",
"_save_results",
"(",
"self",
",",
"zipdata",
",",
"outdir",
",",
"module",
",",
"gmt",
",",
"rank_metric",
",",
"permutation_type",
")",
":",
"res",
"=",
"OrderedDict",
"(",
")",
"for",
"gs",
",",
"gseale",
",",
"ind",
",",
"RES",
"in",
"zipdata",
":",
"rdict",
"=",
"OrderedDict",
"(",
")",
"rdict",
"[",
"'es'",
"]",
"=",
"gseale",
"[",
"0",
"]",
"rdict",
"[",
"'nes'",
"]",
"=",
"gseale",
"[",
"1",
"]",
"rdict",
"[",
"'pval'",
"]",
"=",
"gseale",
"[",
"2",
"]",
"rdict",
"[",
"'fdr'",
"]",
"=",
"gseale",
"[",
"3",
"]",
"rdict",
"[",
"'geneset_size'",
"]",
"=",
"len",
"(",
"gmt",
"[",
"gs",
"]",
")",
"rdict",
"[",
"'matched_size'",
"]",
"=",
"len",
"(",
"ind",
")",
"#reformat gene list.",
"_genes",
"=",
"rank_metric",
".",
"index",
".",
"values",
"[",
"ind",
"]",
"rdict",
"[",
"'genes'",
"]",
"=",
"\";\"",
".",
"join",
"(",
"[",
"str",
"(",
"g",
")",
".",
"strip",
"(",
")",
"for",
"g",
"in",
"_genes",
"]",
")",
"if",
"self",
".",
"module",
"!=",
"'ssgsea'",
":",
"# extract leading edge genes",
"if",
"rdict",
"[",
"'es'",
"]",
">",
"0",
":",
"# RES -> ndarray, ind -> list",
"idx",
"=",
"RES",
".",
"argmax",
"(",
")",
"ldg_pos",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"<=",
"idx",
",",
"ind",
")",
")",
"elif",
"rdict",
"[",
"'es'",
"]",
"<",
"0",
":",
"idx",
"=",
"RES",
".",
"argmin",
"(",
")",
"ldg_pos",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
">=",
"idx",
",",
"ind",
")",
")",
"else",
":",
"ldg_pos",
"=",
"ind",
"# es == 0 ?",
"rdict",
"[",
"'ledge_genes'",
"]",
"=",
"';'",
".",
"join",
"(",
"list",
"(",
"map",
"(",
"str",
",",
"rank_metric",
".",
"iloc",
"[",
"ldg_pos",
"]",
".",
"index",
")",
")",
")",
"rdict",
"[",
"'RES'",
"]",
"=",
"RES",
"rdict",
"[",
"'hits_indices'",
"]",
"=",
"ind",
"# save to one odict",
"res",
"[",
"gs",
"]",
"=",
"rdict",
"# save",
"self",
".",
"results",
"=",
"res",
"# save to dataframe",
"res_df",
"=",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"res",
",",
"orient",
"=",
"'index'",
")",
"res_df",
".",
"index",
".",
"name",
"=",
"'Term'",
"res_df",
".",
"drop",
"(",
"[",
"'RES'",
",",
"'hits_indices'",
"]",
",",
"axis",
"=",
"1",
",",
"inplace",
"=",
"True",
")",
"res_df",
".",
"sort_values",
"(",
"by",
"=",
"[",
"'fdr'",
",",
"'pval'",
"]",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"res2d",
"=",
"res_df",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"return",
"out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"'gseapy.{b}.{c}.report.csv'",
".",
"format",
"(",
"b",
"=",
"module",
",",
"c",
"=",
"permutation_type",
")",
")",
"if",
"self",
".",
"module",
"==",
"'ssgsea'",
":",
"out",
"=",
"out",
".",
"replace",
"(",
"\".csv\"",
",",
"\".txt\"",
")",
"with",
"open",
"(",
"out",
",",
"'a'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'# normalize enrichment scores by random permutation procedure (GSEA method)\\n'",
")",
"f",
".",
"write",
"(",
"\"# might not proper for publication\\n\"",
")",
"res_df",
".",
"to_csv",
"(",
"f",
",",
"sep",
"=",
"'\\t'",
")",
"else",
":",
"res_df",
".",
"to_csv",
"(",
"out",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
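Leading-edge extraction in _save_results depends only on the sign of ES: hits at or before the running-score peak for positive ES, at or after the trough for negative ES. A numeric sketch with fabricated values:

import numpy as np

RES = np.array([0.1, 0.4, 0.7, 0.5, 0.2])   # fabricated running enrichment score
hits = [0, 2, 4]                             # fabricated hit indices
es = RES[np.abs(RES).argmax()]               # sign of the extreme value
if es > 0:
    ledge = [i for i in hits if i <= RES.argmax()]   # up to the peak
else:
    ledge = [i for i in hits if i >= RES.argmin()]   # from the trough on
print(ledge)   # [0, 2]: the peak sits at index 2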
test
|
GSEA.load_data
|
pre-process the data frame. New filtering methods will be implemented here.
|
gseapy/gsea.py
|
def load_data(self, cls_vec):
"""pre-processed the data frame.new filtering methods will be implement here.
"""
# read data in
if isinstance(self.data, pd.DataFrame) :
exprs = self.data.copy()
# handle index is gene_names
if exprs.index.dtype == 'O':
exprs = exprs.reset_index()
elif os.path.isfile(self.data) :
# GCT input format?
if self.data.endswith("gct"):
exprs = pd.read_csv(self.data, skiprows=1, comment='#',sep="\t")
else:
exprs = pd.read_csv(self.data, comment='#',sep="\t")
else:
raise Exception('Error parsing gene expression DataFrame!')
#drop duplicated gene names
if exprs.iloc[:,0].duplicated().sum() > 0:
self._logger.warning("Warning: dropping duplicated gene names, only keep the first values")
exprs.drop_duplicates(subset=exprs.columns[0], inplace=True) #drop duplicate gene_names.
if exprs.isnull().any().sum() > 0:
self._logger.warning("Warning: Input data contains NA, filled NA with 0")
exprs.dropna(how='all', inplace=True) #drop rows with all NAs
exprs = exprs.fillna(0)
# set gene name as index
exprs.set_index(keys=exprs.columns[0], inplace=True)
# select numeric columns
df = exprs.select_dtypes(include=[np.number])
# drop any genes which std ==0
df_std = df.groupby(by=cls_vec, axis=1).std()
df = df[~df_std.isin([0]).any(axis=1)]
df = df + 0.00001 # we don't like zeros!!!
return df
|
def load_data(self, cls_vec):
"""pre-processed the data frame.new filtering methods will be implement here.
"""
# read data in
if isinstance(self.data, pd.DataFrame) :
exprs = self.data.copy()
# handle index is gene_names
if exprs.index.dtype == 'O':
exprs = exprs.reset_index()
elif os.path.isfile(self.data) :
# GCT input format?
if self.data.endswith("gct"):
exprs = pd.read_csv(self.data, skiprows=1, comment='#',sep="\t")
else:
exprs = pd.read_csv(self.data, comment='#',sep="\t")
else:
raise Exception('Error parsing gene expression DataFrame!')
#drop duplicated gene names
if exprs.iloc[:,0].duplicated().sum() > 0:
self._logger.warning("Warning: dropping duplicated gene names, only keep the first values")
exprs.drop_duplicates(subset=exprs.columns[0], inplace=True) #drop duplicate gene_names.
if exprs.isnull().any().sum() > 0:
self._logger.warning("Warning: Input data contains NA, filled NA with 0")
exprs.dropna(how='all', inplace=True) #drop rows with all NAs
exprs = exprs.fillna(0)
# set gene name as index
exprs.set_index(keys=exprs.columns[0], inplace=True)
# select numeric columns
df = exprs.select_dtypes(include=[np.number])
# drop any genes which std ==0
df_std = df.groupby(by=cls_vec, axis=1).std()
df = df[~df_std.isin([0]).any(axis=1)]
df = df + 0.00001 # we don't like zeros!!!
return df
|
[
"pre",
"-",
"processed",
"the",
"data",
"frame",
".",
"new",
"filtering",
"methods",
"will",
"be",
"implement",
"here",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L348-L382
|
[
"def",
"load_data",
"(",
"self",
",",
"cls_vec",
")",
":",
"# read data in",
"if",
"isinstance",
"(",
"self",
".",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"exprs",
"=",
"self",
".",
"data",
".",
"copy",
"(",
")",
"# handle index is gene_names",
"if",
"exprs",
".",
"index",
".",
"dtype",
"==",
"'O'",
":",
"exprs",
"=",
"exprs",
".",
"reset_index",
"(",
")",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"data",
")",
":",
"# GCT input format?",
"if",
"self",
".",
"data",
".",
"endswith",
"(",
"\"gct\"",
")",
":",
"exprs",
"=",
"pd",
".",
"read_csv",
"(",
"self",
".",
"data",
",",
"skiprows",
"=",
"1",
",",
"comment",
"=",
"'#'",
",",
"sep",
"=",
"\"\\t\"",
")",
"else",
":",
"exprs",
"=",
"pd",
".",
"read_csv",
"(",
"self",
".",
"data",
",",
"comment",
"=",
"'#'",
",",
"sep",
"=",
"\"\\t\"",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Error parsing gene expression DataFrame!'",
")",
"#drop duplicated gene names",
"if",
"exprs",
".",
"iloc",
"[",
":",
",",
"0",
"]",
".",
"duplicated",
"(",
")",
".",
"sum",
"(",
")",
">",
"0",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Warning: dropping duplicated gene names, only keep the first values\"",
")",
"exprs",
".",
"drop_duplicates",
"(",
"subset",
"=",
"exprs",
".",
"columns",
"[",
"0",
"]",
",",
"inplace",
"=",
"True",
")",
"#drop duplicate gene_names.",
"if",
"exprs",
".",
"isnull",
"(",
")",
".",
"any",
"(",
")",
".",
"sum",
"(",
")",
">",
"0",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Warning: Input data contains NA, filled NA with 0\"",
")",
"exprs",
".",
"dropna",
"(",
"how",
"=",
"'all'",
",",
"inplace",
"=",
"True",
")",
"#drop rows with all NAs",
"exprs",
"=",
"exprs",
".",
"fillna",
"(",
"0",
")",
"# set gene name as index",
"exprs",
".",
"set_index",
"(",
"keys",
"=",
"exprs",
".",
"columns",
"[",
"0",
"]",
",",
"inplace",
"=",
"True",
")",
"# select numberic columns",
"df",
"=",
"exprs",
".",
"select_dtypes",
"(",
"include",
"=",
"[",
"np",
".",
"number",
"]",
")",
"# drop any genes which std ==0",
"df_std",
"=",
"df",
".",
"groupby",
"(",
"by",
"=",
"cls_vec",
",",
"axis",
"=",
"1",
")",
".",
"std",
"(",
")",
"df",
"=",
"df",
"[",
"~",
"df_std",
".",
"isin",
"(",
"[",
"0",
"]",
")",
".",
"any",
"(",
"axis",
"=",
"1",
")",
"]",
"df",
"=",
"df",
"+",
"0.00001",
"# we don't like zeros!!!",
"return",
"df"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
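The zero-variance filter at the end of load_data (drop genes whose within-class standard deviation is 0 in any phenotype group) can be demonstrated on a tiny fabricated matrix; grouping via the transpose avoids the deprecated axis=1 groupby used in the record:

import pandas as pd

df = pd.DataFrame({'s1': [1.0, 5.0], 's2': [1.0, 6.0],
                   's3': [2.0, 7.0], 's4': [3.0, 8.0]},
                  index=['flat_in_mut', 'varies'])
classes = ['mut', 'mut', 'wt', 'wt']
std = df.T.groupby(classes).std().T        # per-class std, genes as rows
print(df[~std.isin([0]).any(axis=1)])      # 'flat_in_mut' is removed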
test
|
GSEA.run
|
GSEA main procedure
|
gseapy/gsea.py
|
def run(self):
"""GSEA main procedure"""
assert self.permutation_type in ["phenotype", "gene_set"]
assert self.min_size <= self.max_size
# Start Analysis
self._logger.info("Parsing data files for GSEA.............................")
# phenotype labels parsing
phenoPos, phenoNeg, cls_vector = gsea_cls_parser(self.classes)
# select correct expression genes and values.
dat = self.load_data(cls_vector)
# data frame must have length > 1
assert len(dat) > 1
# ranking metrics calculation.
dat2 = ranking_metric(df=dat, method=self.method, pos=phenoPos, neg=phenoNeg,
classes=cls_vector, ascending=self.ascending)
self.ranking = dat2
# filtering out gene sets and build gene sets dictionary
gmt = self.load_gmt(gene_list=dat2.index.values, gmt=self.gene_sets)
self._logger.info("%04d gene_sets used for further statistical testing....."% len(gmt))
self._logger.info("Start to run GSEA...Might take a while..................")
# cpu numbers
self._set_cores()
# compute ES, NES, pval, FDR, RES
dataset = dat if self.permutation_type =='phenotype' else dat2
gsea_results,hit_ind,rank_ES, subsets = gsea_compute_tensor(data=dataset, gmt=gmt, n=self.permutation_num,
weighted_score_type=self.weighted_score_type,
permutation_type=self.permutation_type,
method=self.method,
pheno_pos=phenoPos, pheno_neg=phenoNeg,
classes=cls_vector, ascending=self.ascending,
processes=self._processes, seed=self.seed)
self._logger.info("Start to generate GSEApy reports and figures............")
res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES)
self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module,
gmt=gmt, rank_metric=dat2, permutation_type=self.permutation_type)
# reorder dataframe for heatmap
self._heatmat(df=dat.loc[dat2.index], classes=cls_vector,
pheno_pos=phenoPos, pheno_neg=phenoNeg)
# Plotting
if not self._noplot:
self._plotting(rank_metric=dat2, results=self.results,
graph_num=self.graph_num, outdir=self.outdir,
figsize=self.figsize, format=self.format,
pheno_pos=phenoPos, pheno_neg=phenoNeg)
self._logger.info("Congratulations. GSEApy ran successfully.................\n")
if self._outdir is None:
self._tmpdir.cleanup()
return
|
def run(self):
"""GSEA main procedure"""
assert self.permutation_type in ["phenotype", "gene_set"]
assert self.min_size <= self.max_size
# Start Analysis
self._logger.info("Parsing data files for GSEA.............................")
# phenotype labels parsing
phenoPos, phenoNeg, cls_vector = gsea_cls_parser(self.classes)
# select correct expression genes and values.
dat = self.load_data(cls_vector)
# data frame must have length > 1
assert len(dat) > 1
# ranking metrics calculation.
dat2 = ranking_metric(df=dat, method=self.method, pos=phenoPos, neg=phenoNeg,
classes=cls_vector, ascending=self.ascending)
self.ranking = dat2
# filtering out gene sets and build gene sets dictionary
gmt = self.load_gmt(gene_list=dat2.index.values, gmt=self.gene_sets)
self._logger.info("%04d gene_sets used for further statistical testing....."% len(gmt))
self._logger.info("Start to run GSEA...Might take a while..................")
# cpu numbers
self._set_cores()
# compute ES, NES, pval, FDR, RES
dataset = dat if self.permutation_type =='phenotype' else dat2
gsea_results,hit_ind,rank_ES, subsets = gsea_compute_tensor(data=dataset, gmt=gmt, n=self.permutation_num,
weighted_score_type=self.weighted_score_type,
permutation_type=self.permutation_type,
method=self.method,
pheno_pos=phenoPos, pheno_neg=phenoNeg,
classes=cls_vector, ascending=self.ascending,
processes=self._processes, seed=self.seed)
self._logger.info("Start to generate GSEApy reports and figures............")
res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES)
self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module,
gmt=gmt, rank_metric=dat2, permutation_type=self.permutation_type)
# reorder dataframe for heatmap
self._heatmat(df=dat.loc[dat2.index], classes=cls_vector,
pheno_pos=phenoPos, pheno_neg=phenoNeg)
# Plotting
if not self._noplot:
self._plotting(rank_metric=dat2, results=self.results,
graph_num=self.graph_num, outdir=self.outdir,
figsize=self.figsize, format=self.format,
pheno_pos=phenoPos, pheno_neg=phenoNeg)
self._logger.info("Congratulations. GSEApy ran successfully.................\n")
if self._outdir is None:
self._tmpdir.cleanup()
return
|
[
"GSEA",
"main",
"procedure"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L384-L438
|
[
"def",
"run",
"(",
"self",
")",
":",
"assert",
"self",
".",
"permutation_type",
"in",
"[",
"\"phenotype\"",
",",
"\"gene_set\"",
"]",
"assert",
"self",
".",
"min_size",
"<=",
"self",
".",
"max_size",
"# Start Analysis",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Parsing data files for GSEA.............................\"",
")",
"# phenotype labels parsing",
"phenoPos",
",",
"phenoNeg",
",",
"cls_vector",
"=",
"gsea_cls_parser",
"(",
"self",
".",
"classes",
")",
"# select correct expression genes and values.",
"dat",
"=",
"self",
".",
"load_data",
"(",
"cls_vector",
")",
"# data frame must have length > 1",
"assert",
"len",
"(",
"dat",
")",
">",
"1",
"# ranking metrics calculation.",
"dat2",
"=",
"ranking_metric",
"(",
"df",
"=",
"dat",
",",
"method",
"=",
"self",
".",
"method",
",",
"pos",
"=",
"phenoPos",
",",
"neg",
"=",
"phenoNeg",
",",
"classes",
"=",
"cls_vector",
",",
"ascending",
"=",
"self",
".",
"ascending",
")",
"self",
".",
"ranking",
"=",
"dat2",
"# filtering out gene sets and build gene sets dictionary",
"gmt",
"=",
"self",
".",
"load_gmt",
"(",
"gene_list",
"=",
"dat2",
".",
"index",
".",
"values",
",",
"gmt",
"=",
"self",
".",
"gene_sets",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"%04d gene_sets used for further statistical testing.....\"",
"%",
"len",
"(",
"gmt",
")",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Start to run GSEA...Might take a while..................\"",
")",
"# cpu numbers",
"self",
".",
"_set_cores",
"(",
")",
"# compute ES, NES, pval, FDR, RES",
"dataset",
"=",
"dat",
"if",
"self",
".",
"permutation_type",
"==",
"'phenotype'",
"else",
"dat2",
"gsea_results",
",",
"hit_ind",
",",
"rank_ES",
",",
"subsets",
"=",
"gsea_compute_tensor",
"(",
"data",
"=",
"dataset",
",",
"gmt",
"=",
"gmt",
",",
"n",
"=",
"self",
".",
"permutation_num",
",",
"weighted_score_type",
"=",
"self",
".",
"weighted_score_type",
",",
"permutation_type",
"=",
"self",
".",
"permutation_type",
",",
"method",
"=",
"self",
".",
"method",
",",
"pheno_pos",
"=",
"phenoPos",
",",
"pheno_neg",
"=",
"phenoNeg",
",",
"classes",
"=",
"cls_vector",
",",
"ascending",
"=",
"self",
".",
"ascending",
",",
"processes",
"=",
"self",
".",
"_processes",
",",
"seed",
"=",
"self",
".",
"seed",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Start to generate GSEApy reports and figures............\"",
")",
"res_zip",
"=",
"zip",
"(",
"subsets",
",",
"list",
"(",
"gsea_results",
")",
",",
"hit_ind",
",",
"rank_ES",
")",
"self",
".",
"_save_results",
"(",
"zipdata",
"=",
"res_zip",
",",
"outdir",
"=",
"self",
".",
"outdir",
",",
"module",
"=",
"self",
".",
"module",
",",
"gmt",
"=",
"gmt",
",",
"rank_metric",
"=",
"dat2",
",",
"permutation_type",
"=",
"self",
".",
"permutation_type",
")",
"# reorder datarame for heatmap",
"self",
".",
"_heatmat",
"(",
"df",
"=",
"dat",
".",
"loc",
"[",
"dat2",
".",
"index",
"]",
",",
"classes",
"=",
"cls_vector",
",",
"pheno_pos",
"=",
"phenoPos",
",",
"pheno_neg",
"=",
"phenoNeg",
")",
"# Plotting",
"if",
"not",
"self",
".",
"_noplot",
":",
"self",
".",
"_plotting",
"(",
"rank_metric",
"=",
"dat2",
",",
"results",
"=",
"self",
".",
"results",
",",
"graph_num",
"=",
"self",
".",
"graph_num",
",",
"outdir",
"=",
"self",
".",
"outdir",
",",
"figsize",
"=",
"self",
".",
"figsize",
",",
"format",
"=",
"self",
".",
"format",
",",
"pheno_pos",
"=",
"phenoPos",
",",
"pheno_neg",
"=",
"phenoNeg",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Congratulations. GSEApy ran successfully.................\\n\"",
")",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"self",
".",
"_tmpdir",
".",
"cleanup",
"(",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
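A hedged sketch of driving the whole procedure above through the gseapy.gsea convenience wrapper; 'expr.txt' and 'pheno.cls' are hypothetical input files, and the keyword names follow the attributes consumed in the record:

import gseapy

res = gseapy.gsea(data='expr.txt',          # expression table (txt/gct/DataFrame)
                  gene_sets='KEGG_2016',    # Enrichr library name or .gmt path
                  cls='pheno.cls',          # phenotype labels
                  permutation_type='phenotype',
                  permutation_num=1000,
                  outdir='gsea_out')
print(res.res2d.head())                     # fdr/pval-sorted summary table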
test
|
Prerank.run
|
GSEA prerank workflow
|
gseapy/gsea.py
|
def run(self):
"""GSEA prerank workflow"""
assert self.min_size <= self.max_size
# parsing rankings
dat2 = self._load_ranking(self.rnk)
assert len(dat2) > 1
# cpu numbers
self._set_cores()
# Start Analysis
self._logger.info("Parsing data files for GSEA.............................")
# filtering out gene sets and build gene sets dictionary
gmt = self.load_gmt(gene_list=dat2.index.values, gmt=self.gene_sets)
self._logger.info("%04d gene_sets used for further statistical testing....."% len(gmt))
self._logger.info("Start to run GSEA...Might take a while..................")
# compute ES, NES, pval, FDR, RES
gsea_results, hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=self.permutation_num, gmt=gmt,
weighted_score_type=self.weighted_score_type,
permutation_type='gene_set', method=None,
pheno_pos=self.pheno_pos, pheno_neg=self.pheno_neg,
classes=None, ascending=self.ascending,
processes=self._processes, seed=self.seed)
self._logger.info("Start to generate gseapy reports, and produce figures...")
res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES)
self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module,
gmt=gmt, rank_metric=dat2, permutation_type="gene_sets")
# Plotting
if not self._noplot:
self._plotting(rank_metric=dat2, results=self.results,
graph_num=self.graph_num, outdir=self.outdir,
figsize=self.figsize, format=self.format,
pheno_pos=self.pheno_pos, pheno_neg=self.pheno_neg)
self._logger.info("Congratulations. GSEApy runs successfully................\n")
if self._outdir is None:
self._tmpdir.cleanup()
return
|
def run(self):
"""GSEA prerank workflow"""
assert self.min_size <= self.max_size
# parsing rankings
dat2 = self._load_ranking(self.rnk)
assert len(dat2) > 1
# cpu numbers
self._set_cores()
# Start Analysis
self._logger.info("Parsing data files for GSEA.............................")
# filtering out gene sets and build gene sets dictionary
gmt = self.load_gmt(gene_list=dat2.index.values, gmt=self.gene_sets)
self._logger.info("%04d gene_sets used for further statistical testing....."% len(gmt))
self._logger.info("Start to run GSEA...Might take a while..................")
# compute ES, NES, pval, FDR, RES
gsea_results, hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=self.permutation_num, gmt=gmt,
weighted_score_type=self.weighted_score_type,
permutation_type='gene_set', method=None,
pheno_pos=self.pheno_pos, pheno_neg=self.pheno_neg,
classes=None, ascending=self.ascending,
processes=self._processes, seed=self.seed)
self._logger.info("Start to generate gseapy reports, and produce figures...")
res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES)
self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module,
gmt=gmt, rank_metric=dat2, permutation_type="gene_sets")
# Plotting
if not self._noplot:
self._plotting(rank_metric=dat2, results=self.results,
graph_num=self.graph_num, outdir=self.outdir,
figsize=self.figsize, format=self.format,
pheno_pos=self.pheno_pos, pheno_neg=self.pheno_neg)
self._logger.info("Congratulations. GSEApy runs successfully................\n")
if self._outdir is None:
self._tmpdir.cleanup()
return
|
[
"GSEA",
"prerank",
"workflow"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L474-L515
|
[
"def",
"run",
"(",
"self",
")",
":",
"assert",
"self",
".",
"min_size",
"<=",
"self",
".",
"max_size",
"# parsing rankings",
"dat2",
"=",
"self",
".",
"_load_ranking",
"(",
"self",
".",
"rnk",
")",
"assert",
"len",
"(",
"dat2",
")",
">",
"1",
"# cpu numbers",
"self",
".",
"_set_cores",
"(",
")",
"# Start Analysis",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Parsing data files for GSEA.............................\"",
")",
"# filtering out gene sets and build gene sets dictionary",
"gmt",
"=",
"self",
".",
"load_gmt",
"(",
"gene_list",
"=",
"dat2",
".",
"index",
".",
"values",
",",
"gmt",
"=",
"self",
".",
"gene_sets",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"%04d gene_sets used for further statistical testing.....\"",
"%",
"len",
"(",
"gmt",
")",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Start to run GSEA...Might take a while..................\"",
")",
"# compute ES, NES, pval, FDR, RES",
"gsea_results",
",",
"hit_ind",
",",
"rank_ES",
",",
"subsets",
"=",
"gsea_compute",
"(",
"data",
"=",
"dat2",
",",
"n",
"=",
"self",
".",
"permutation_num",
",",
"gmt",
"=",
"gmt",
",",
"weighted_score_type",
"=",
"self",
".",
"weighted_score_type",
",",
"permutation_type",
"=",
"'gene_set'",
",",
"method",
"=",
"None",
",",
"pheno_pos",
"=",
"self",
".",
"pheno_pos",
",",
"pheno_neg",
"=",
"self",
".",
"pheno_neg",
",",
"classes",
"=",
"None",
",",
"ascending",
"=",
"self",
".",
"ascending",
",",
"processes",
"=",
"self",
".",
"_processes",
",",
"seed",
"=",
"self",
".",
"seed",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Start to generate gseapy reports, and produce figures...\"",
")",
"res_zip",
"=",
"zip",
"(",
"subsets",
",",
"list",
"(",
"gsea_results",
")",
",",
"hit_ind",
",",
"rank_ES",
")",
"self",
".",
"_save_results",
"(",
"zipdata",
"=",
"res_zip",
",",
"outdir",
"=",
"self",
".",
"outdir",
",",
"module",
"=",
"self",
".",
"module",
",",
"gmt",
"=",
"gmt",
",",
"rank_metric",
"=",
"dat2",
",",
"permutation_type",
"=",
"\"gene_sets\"",
")",
"# Plotting",
"if",
"not",
"self",
".",
"_noplot",
":",
"self",
".",
"_plotting",
"(",
"rank_metric",
"=",
"dat2",
",",
"results",
"=",
"self",
".",
"results",
",",
"graph_num",
"=",
"self",
".",
"graph_num",
",",
"outdir",
"=",
"self",
".",
"outdir",
",",
"figsize",
"=",
"self",
".",
"figsize",
",",
"format",
"=",
"self",
".",
"format",
",",
"pheno_pos",
"=",
"self",
".",
"pheno_pos",
",",
"pheno_neg",
"=",
"self",
".",
"pheno_neg",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Congratulations. GSEApy runs successfully................\\n\"",
")",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"self",
".",
"_tmpdir",
".",
"cleanup",
"(",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
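A minimal sketch of driving this prerank workflow through the top-level gseapy.prerank helper; the .rnk path and library name are placeholders, and the keyword names follow the attributes used in the code above:

import gseapy as gp

# rank file: gene<tab>score, one gene per row (hypothetical path)
pre = gp.prerank(rnk='ranking.rnk',
                 gene_sets='KEGG_2016',
                 outdir='prerank_report',
                 permutation_num=100,
                 seed=7)
print(pre.res2d.head())   # per-term ES/NES/pval/FDR table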
test
|
SingleSampleGSEA.norm_samples
|
normalization samples
see here: http://rowley.mit.edu/caw_web/ssGSEAProjection/ssGSEAProjection.Library.R
|
gseapy/gsea.py
|
def norm_samples(self, dat):
"""normalization samples
see here: http://rowley.mit.edu/caw_web/ssGSEAProjection/ssGSEAProjection.Library.R
"""
if self.sample_norm_method == 'rank':
data = dat.rank(axis=0, method='average', na_option='bottom')
data = 10000*data / data.shape[0]
elif self.sample_norm_method == 'log_rank':
data = dat.rank(axis=0, method='average', na_option='bottom')
data = log(10000*data / data.shape[0] + exp(1))
elif self.sample_norm_method == 'log':
dat[dat < 1] = 1
data = log(dat + exp(1))
elif self.sample_norm_method == 'custom':
self._logger.info("Use custom rank metric for ssGSEA")
data = dat
else:
sys.stderr.write("No supported method: %s"%self.sample_norm_method)
        sys.exit(1)
return data
|
def norm_samples(self, dat):
"""normalization samples
see here: http://rowley.mit.edu/caw_web/ssGSEAProjection/ssGSEAProjection.Library.R
"""
if self.sample_norm_method == 'rank':
data = dat.rank(axis=0, method='average', na_option='bottom')
data = 10000*data / data.shape[0]
elif self.sample_norm_method == 'log_rank':
data = dat.rank(axis=0, method='average', na_option='bottom')
data = log(10000*data / data.shape[0] + exp(1))
elif self.sample_norm_method == 'log':
dat[dat < 1] = 1
data = log(dat + exp(1))
elif self.sample_norm_method == 'custom':
self._logger.info("Use custom rank metric for ssGSEA")
data = dat
else:
sys.stderr.write("No supported method: %s"%self.sample_norm_method)
        sys.exit(1)
return data
|
[
"normalization",
"samples",
"see",
"here",
":",
"http",
":",
"//",
"rowley",
".",
"mit",
".",
"edu",
"/",
"caw_web",
"/",
"ssGSEAProjection",
"/",
"ssGSEAProjection",
".",
"Library",
".",
"R"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L602-L623
|
[
"def",
"norm_samples",
"(",
"self",
",",
"dat",
")",
":",
"if",
"self",
".",
"sample_norm_method",
"==",
"'rank'",
":",
"data",
"=",
"dat",
".",
"rank",
"(",
"axis",
"=",
"0",
",",
"method",
"=",
"'average'",
",",
"na_option",
"=",
"'bottom'",
")",
"data",
"=",
"10000",
"*",
"data",
"/",
"data",
".",
"shape",
"[",
"0",
"]",
"elif",
"self",
".",
"sample_norm_method",
"==",
"'log_rank'",
":",
"data",
"=",
"dat",
".",
"rank",
"(",
"axis",
"=",
"0",
",",
"method",
"=",
"'average'",
",",
"na_option",
"=",
"'bottom'",
")",
"data",
"=",
"log",
"(",
"10000",
"*",
"data",
"/",
"data",
".",
"shape",
"[",
"0",
"]",
"+",
"exp",
"(",
"1",
")",
")",
"elif",
"self",
".",
"sample_norm_method",
"==",
"'log'",
":",
"dat",
"[",
"dat",
"<",
"1",
"]",
"=",
"1",
"data",
"=",
"log",
"(",
"dat",
"+",
"exp",
"(",
"1",
")",
")",
"elif",
"self",
".",
"sample_norm_method",
"==",
"'custom'",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Use custom rank metric for ssGSEA\"",
")",
"data",
"=",
"dat",
"else",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"No supported method: %s\"",
"%",
"self",
".",
"sample_norm_method",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"return",
"data"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
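The 'rank' and 'log_rank' branches above reduce to a couple of pandas/numpy operations. A standalone sketch on made-up data:

import numpy as np
import pandas as pd

# genes-by-samples expression matrix (random placeholder data)
df = pd.DataFrame(np.random.lognormal(size=(1000, 3)), columns=['s1', 's2', 's3'])
ranked = df.rank(axis=0, method='average', na_option='bottom')
rank_norm = 10000 * ranked / ranked.shape[0]                     # 'rank'
log_rank_norm = np.log(10000 * ranked / ranked.shape[0] + np.e)  # 'log_rank'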
test
|
SingleSampleGSEA.run
|
run entry
|
gseapy/gsea.py
|
def run(self):
"""run entry"""
self._logger.info("Parsing data files for ssGSEA...........................")
# load data
data = self.load_data()
# normalized samples, and rank
normdat = self.norm_samples(data)
# filtering out gene sets and build gene sets dictionary
gmt = self.load_gmt(gene_list=normdat.index.values, gmt=self.gene_sets)
self._logger.info("%04d gene_sets used for further statistical testing....."% len(gmt))
# set cpu numbers
self._set_cores()
    # start analysis
self._logger.info("Start to run ssGSEA...Might take a while................")
    if self.permutation_num == 0:
# ssGSEA without permutation
self.runSamples(df=normdat, gmt=gmt)
else:
# run permutation procedure and calculate pvals, fdrs
self._logger.warning("run ssGSEA with permutation procedure, don't use these part of results for publication.")
self.runSamplesPermu(df=normdat, gmt=gmt)
# clean up all outputs if _outdir is None
if self._outdir is None:
self._tmpdir.cleanup()
|
def run(self):
"""run entry"""
self._logger.info("Parsing data files for ssGSEA...........................")
# load data
data = self.load_data()
# normalized samples, and rank
normdat = self.norm_samples(data)
# filtering out gene sets and build gene sets dictionary
gmt = self.load_gmt(gene_list=normdat.index.values, gmt=self.gene_sets)
self._logger.info("%04d gene_sets used for further statistical testing....."% len(gmt))
# set cpu numbers
self._set_cores()
    # start analysis
self._logger.info("Start to run ssGSEA...Might take a while................")
    if self.permutation_num == 0:
# ssGSEA without permutation
self.runSamples(df=normdat, gmt=gmt)
else:
# run permutation procedure and calculate pvals, fdrs
self._logger.warning("run ssGSEA with permutation procedure, don't use these part of results for publication.")
self.runSamplesPermu(df=normdat, gmt=gmt)
# clean up all outputs if _outdir is None
if self._outdir is None:
self._tmpdir.cleanup()
|
[
"run",
"entry"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L625-L648
|
[
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Parsing data files for ssGSEA...........................\"",
")",
"# load data",
"data",
"=",
"self",
".",
"load_data",
"(",
")",
"# normalized samples, and rank",
"normdat",
"=",
"self",
".",
"norm_samples",
"(",
"data",
")",
"# filtering out gene sets and build gene sets dictionary",
"gmt",
"=",
"self",
".",
"load_gmt",
"(",
"gene_list",
"=",
"normdat",
".",
"index",
".",
"values",
",",
"gmt",
"=",
"self",
".",
"gene_sets",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"%04d gene_sets used for further statistical testing.....\"",
"%",
"len",
"(",
"gmt",
")",
")",
"# set cpu numbers",
"self",
".",
"_set_cores",
"(",
")",
"# start analsis",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Start to run ssGSEA...Might take a while................\"",
")",
"if",
"self",
".",
"permutation_num",
"==",
"0",
":",
"# ssGSEA without permutation",
"self",
".",
"runSamples",
"(",
"df",
"=",
"normdat",
",",
"gmt",
"=",
"gmt",
")",
"else",
":",
"# run permutation procedure and calculate pvals, fdrs",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"run ssGSEA with permutation procedure, don't use these part of results for publication.\"",
")",
"self",
".",
"runSamplesPermu",
"(",
"df",
"=",
"normdat",
",",
"gmt",
"=",
"gmt",
")",
"# clean up all outputs if _outdir is None",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"self",
".",
"_tmpdir",
".",
"cleanup",
"(",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
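This run entry is what the top-level gseapy.ssgsea helper drives. A minimal sketch, with the expression-matrix path as a placeholder and the keyword names taken from the attributes used in this class:

import gseapy as gp

ss = gp.ssgsea(data='expression.gct',        # genes x samples matrix
               gene_sets='KEGG_2016',
               outdir='ssgsea_report',
               sample_norm_method='rank',    # one of the norm_samples branches
               permutation_num=0)            # skip the permutation procedure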
test
|
SingleSampleGSEA.runSamplesPermu
|
Single Sample GSEA workflow with permutation procedure
|
gseapy/gsea.py
|
def runSamplesPermu(self, df, gmt=None):
"""Single Sample GSEA workflow with permutation procedure"""
assert self.min_size <= self.max_size
mkdirs(self.outdir)
self.resultsOnSamples = OrderedDict()
outdir = self.outdir
    # iterate through each sample
for name, ser in df.iteritems():
self.outdir = os.path.join(outdir, str(name))
self._logger.info("Run Sample: %s " % name)
mkdirs(self.outdir)
# sort ranking values from high to low or reverse
dat2 = ser.sort_values(ascending=self.ascending)
        # reset integer index, or it may cause unwanted problems
# df.reset_index(drop=True, inplace=True)
# compute ES, NES, pval, FDR, RES
gsea_results, hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=self.permutation_num, gmt=gmt,
weighted_score_type=self.weighted_score_type,
permutation_type='gene_set', method=None,
pheno_pos='', pheno_neg='',
classes=None, ascending=self.ascending,
processes=self._processes,
seed=self.seed, single=True, scale=self.scale)
# write file
res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES)
self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module,
gmt=gmt, rank_metric=dat2, permutation_type="gene_sets")
self.resultsOnSamples[name] = self.res2d.es
# plotting
if self._noplot: continue
self._logger.info("Plotting Sample: %s \n" % name)
self._plotting(rank_metric=dat2, results=self.results,
graph_num=self.graph_num, outdir=self.outdir,
figsize=self.figsize, format=self.format)
# save es, nes to file
self._save(outdir)
return
|
def runSamplesPermu(self, df, gmt=None):
"""Single Sample GSEA workflow with permutation procedure"""
assert self.min_size <= self.max_size
mkdirs(self.outdir)
self.resultsOnSamples = OrderedDict()
outdir = self.outdir
    # iterate through each sample
for name, ser in df.iteritems():
self.outdir = os.path.join(outdir, str(name))
self._logger.info("Run Sample: %s " % name)
mkdirs(self.outdir)
# sort ranking values from high to low or reverse
dat2 = ser.sort_values(ascending=self.ascending)
        # reset integer index, or it may cause unwanted problems
# df.reset_index(drop=True, inplace=True)
# compute ES, NES, pval, FDR, RES
gsea_results, hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=self.permutation_num, gmt=gmt,
weighted_score_type=self.weighted_score_type,
permutation_type='gene_set', method=None,
pheno_pos='', pheno_neg='',
classes=None, ascending=self.ascending,
processes=self._processes,
seed=self.seed, single=True, scale=self.scale)
# write file
res_zip = zip(subsets, list(gsea_results), hit_ind, rank_ES)
self._save_results(zipdata=res_zip, outdir=self.outdir, module=self.module,
gmt=gmt, rank_metric=dat2, permutation_type="gene_sets")
self.resultsOnSamples[name] = self.res2d.es
# plotting
if self._noplot: continue
self._logger.info("Plotting Sample: %s \n" % name)
self._plotting(rank_metric=dat2, results=self.results,
graph_num=self.graph_num, outdir=self.outdir,
figsize=self.figsize, format=self.format)
# save es, nes to file
self._save(outdir)
return
|
[
"Single",
"Sample",
"GSEA",
"workflow",
"with",
"permutation",
"procedure"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L650-L691
|
[
"def",
"runSamplesPermu",
"(",
"self",
",",
"df",
",",
"gmt",
"=",
"None",
")",
":",
"assert",
"self",
".",
"min_size",
"<=",
"self",
".",
"max_size",
"mkdirs",
"(",
"self",
".",
"outdir",
")",
"self",
".",
"resultsOnSamples",
"=",
"OrderedDict",
"(",
")",
"outdir",
"=",
"self",
".",
"outdir",
"# iter throught each sample",
"for",
"name",
",",
"ser",
"in",
"df",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"outdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"str",
"(",
"name",
")",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Run Sample: %s \"",
"%",
"name",
")",
"mkdirs",
"(",
"self",
".",
"outdir",
")",
"# sort ranking values from high to low or reverse",
"dat2",
"=",
"ser",
".",
"sort_values",
"(",
"ascending",
"=",
"self",
".",
"ascending",
")",
"# reset integer index, or caused unwanted problems",
"# df.reset_index(drop=True, inplace=True)",
"# compute ES, NES, pval, FDR, RES",
"gsea_results",
",",
"hit_ind",
",",
"rank_ES",
",",
"subsets",
"=",
"gsea_compute",
"(",
"data",
"=",
"dat2",
",",
"n",
"=",
"self",
".",
"permutation_num",
",",
"gmt",
"=",
"gmt",
",",
"weighted_score_type",
"=",
"self",
".",
"weighted_score_type",
",",
"permutation_type",
"=",
"'gene_set'",
",",
"method",
"=",
"None",
",",
"pheno_pos",
"=",
"''",
",",
"pheno_neg",
"=",
"''",
",",
"classes",
"=",
"None",
",",
"ascending",
"=",
"self",
".",
"ascending",
",",
"processes",
"=",
"self",
".",
"_processes",
",",
"seed",
"=",
"self",
".",
"seed",
",",
"single",
"=",
"True",
",",
"scale",
"=",
"self",
".",
"scale",
")",
"# write file",
"res_zip",
"=",
"zip",
"(",
"subsets",
",",
"list",
"(",
"gsea_results",
")",
",",
"hit_ind",
",",
"rank_ES",
")",
"self",
".",
"_save_results",
"(",
"zipdata",
"=",
"res_zip",
",",
"outdir",
"=",
"self",
".",
"outdir",
",",
"module",
"=",
"self",
".",
"module",
",",
"gmt",
"=",
"gmt",
",",
"rank_metric",
"=",
"dat2",
",",
"permutation_type",
"=",
"\"gene_sets\"",
")",
"self",
".",
"resultsOnSamples",
"[",
"name",
"]",
"=",
"self",
".",
"res2d",
".",
"es",
"# plotting",
"if",
"self",
".",
"_noplot",
":",
"continue",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Plotting Sample: %s \\n\"",
"%",
"name",
")",
"self",
".",
"_plotting",
"(",
"rank_metric",
"=",
"dat2",
",",
"results",
"=",
"self",
".",
"results",
",",
"graph_num",
"=",
"self",
".",
"graph_num",
",",
"outdir",
"=",
"self",
".",
"outdir",
",",
"figsize",
"=",
"self",
".",
"figsize",
",",
"format",
"=",
"self",
".",
"format",
")",
"# save es, nes to file",
"self",
".",
"_save",
"(",
"outdir",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
SingleSampleGSEA.runSamples
|
Single Sample GSEA workflow.
multiprocessing utility on samples.
|
gseapy/gsea.py
|
def runSamples(self, df, gmt=None):
"""Single Sample GSEA workflow.
multiprocessing utility on samples.
"""
# df.index.values are gene_names
# Save each sample results to odict
self.resultsOnSamples = OrderedDict()
outdir = self.outdir
# run ssgsea for gct expression matrix
    # multiprocessing
subsets = sorted(gmt.keys())
    tempes = []
    names = []
    rankings = []
pool = Pool(processes=self._processes)
for name, ser in df.iteritems():
#prepare input
dat = ser.sort_values(ascending=self.ascending)
rankings.append(dat)
names.append(name)
genes_sorted, cor_vec = dat.index.values, dat.values
rs = np.random.RandomState(self.seed)
# apply_async
tempes.append(pool.apply_async(enrichment_score_tensor,
args=(genes_sorted, cor_vec, gmt,
self.weighted_score_type,
self.permutation_num, rs, True,
self.scale)))
pool.close()
pool.join()
# save results and plotting
for i, temp in enumerate(tempes):
name, rnk = names[i], rankings[i]
self._logger.info("Calculate Enrichment Score for Sample: %s "%name)
es, esnull, hit_ind, RES = temp.get()
# create results subdir
        self.outdir = os.path.join(outdir, str(name))
mkdirs(self.outdir)
# save results
self.resultsOnSamples[name] = pd.Series(data=es, index=subsets, name=name)
# plotting
if self._noplot: continue
self._logger.info("Plotting Sample: %s \n" % name)
for i, term in enumerate(subsets):
term = term.replace('/','_').replace(":","_")
outfile = '{0}/{1}.{2}.{3}'.format(self.outdir, term, self.module, self.format)
gseaplot(rank_metric=rnk, term=term,
hits_indices=hit_ind[i], nes=es[i], pval=1, fdr=1,
RES=RES[i], pheno_pos='', pheno_neg='',
figsize=self.figsize, ofname=outfile)
# save es, nes to file
self._save(outdir)
return
|
def runSamples(self, df, gmt=None):
"""Single Sample GSEA workflow.
multiprocessing utility on samples.
"""
# df.index.values are gene_names
# Save each sample results to odict
self.resultsOnSamples = OrderedDict()
outdir = self.outdir
# run ssgsea for gct expression matrix
    # multiprocessing
subsets = sorted(gmt.keys())
    tempes = []
    names = []
    rankings = []
pool = Pool(processes=self._processes)
for name, ser in df.iteritems():
#prepare input
dat = ser.sort_values(ascending=self.ascending)
rankings.append(dat)
names.append(name)
genes_sorted, cor_vec = dat.index.values, dat.values
rs = np.random.RandomState(self.seed)
# apply_async
tempes.append(pool.apply_async(enrichment_score_tensor,
args=(genes_sorted, cor_vec, gmt,
self.weighted_score_type,
self.permutation_num, rs, True,
self.scale)))
pool.close()
pool.join()
# save results and plotting
for i, temp in enumerate(tempes):
name, rnk = names[i], rankings[i]
self._logger.info("Calculate Enrichment Score for Sample: %s "%name)
es, esnull, hit_ind, RES = temp.get()
# create results subdir
        self.outdir = os.path.join(outdir, str(name))
mkdirs(self.outdir)
# save results
self.resultsOnSamples[name] = pd.Series(data=es, index=subsets, name=name)
# plotting
if self._noplot: continue
self._logger.info("Plotting Sample: %s \n" % name)
for i, term in enumerate(subsets):
term = term.replace('/','_').replace(":","_")
outfile = '{0}/{1}.{2}.{3}'.format(self.outdir, term, self.module, self.format)
gseaplot(rank_metric=rnk, term=term,
hits_indices=hit_ind[i], nes=es[i], pval=1, fdr=1,
RES=RES[i], pheno_pos='', pheno_neg='',
figsize=self.figsize, ofname=outfile)
# save es, nes to file
self._save(outdir)
return
|
[
"Single",
"Sample",
"GSEA",
"workflow",
".",
"multiprocessing",
"utility",
"on",
"samples",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L693-L747
|
[
"def",
"runSamples",
"(",
"self",
",",
"df",
",",
"gmt",
"=",
"None",
")",
":",
"# df.index.values are gene_names",
"# Save each sample results to odict",
"self",
".",
"resultsOnSamples",
"=",
"OrderedDict",
"(",
")",
"outdir",
"=",
"self",
".",
"outdir",
"# run ssgsea for gct expression matrix",
"#multi-threading",
"subsets",
"=",
"sorted",
"(",
"gmt",
".",
"keys",
"(",
")",
")",
"tempes",
"=",
"[",
"]",
"names",
"=",
"[",
"]",
"rankings",
"=",
"[",
"]",
"pool",
"=",
"Pool",
"(",
"processes",
"=",
"self",
".",
"_processes",
")",
"for",
"name",
",",
"ser",
"in",
"df",
".",
"iteritems",
"(",
")",
":",
"#prepare input",
"dat",
"=",
"ser",
".",
"sort_values",
"(",
"ascending",
"=",
"self",
".",
"ascending",
")",
"rankings",
".",
"append",
"(",
"dat",
")",
"names",
".",
"append",
"(",
"name",
")",
"genes_sorted",
",",
"cor_vec",
"=",
"dat",
".",
"index",
".",
"values",
",",
"dat",
".",
"values",
"rs",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"self",
".",
"seed",
")",
"# apply_async",
"tempes",
".",
"append",
"(",
"pool",
".",
"apply_async",
"(",
"enrichment_score_tensor",
",",
"args",
"=",
"(",
"genes_sorted",
",",
"cor_vec",
",",
"gmt",
",",
"self",
".",
"weighted_score_type",
",",
"self",
".",
"permutation_num",
",",
"rs",
",",
"True",
",",
"self",
".",
"scale",
")",
")",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"# save results and plotting",
"for",
"i",
",",
"temp",
"in",
"enumerate",
"(",
"tempes",
")",
":",
"name",
",",
"rnk",
"=",
"names",
"[",
"i",
"]",
",",
"rankings",
"[",
"i",
"]",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Calculate Enrichment Score for Sample: %s \"",
"%",
"name",
")",
"es",
",",
"esnull",
",",
"hit_ind",
",",
"RES",
"=",
"temp",
".",
"get",
"(",
")",
"# create results subdir",
"self",
".",
"outdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"str",
"(",
"name",
")",
")",
"mkdirs",
"(",
"self",
".",
"outdir",
")",
"# save results",
"self",
".",
"resultsOnSamples",
"[",
"name",
"]",
"=",
"pd",
".",
"Series",
"(",
"data",
"=",
"es",
",",
"index",
"=",
"subsets",
",",
"name",
"=",
"name",
")",
"# plotting",
"if",
"self",
".",
"_noplot",
":",
"continue",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Plotting Sample: %s \\n\"",
"%",
"name",
")",
"for",
"i",
",",
"term",
"in",
"enumerate",
"(",
"subsets",
")",
":",
"term",
"=",
"term",
".",
"replace",
"(",
"'/'",
",",
"'_'",
")",
".",
"replace",
"(",
"\":\"",
",",
"\"_\"",
")",
"outfile",
"=",
"'{0}/{1}.{2}.{3}'",
".",
"format",
"(",
"self",
".",
"outdir",
",",
"term",
",",
"self",
".",
"module",
",",
"self",
".",
"format",
")",
"gseaplot",
"(",
"rank_metric",
"=",
"rnk",
",",
"term",
"=",
"term",
",",
"hits_indices",
"=",
"hit_ind",
"[",
"i",
"]",
",",
"nes",
"=",
"es",
"[",
"i",
"]",
",",
"pval",
"=",
"1",
",",
"fdr",
"=",
"1",
",",
"RES",
"=",
"RES",
"[",
"i",
"]",
",",
"pheno_pos",
"=",
"''",
",",
"pheno_neg",
"=",
"''",
",",
"figsize",
"=",
"self",
".",
"figsize",
",",
"ofname",
"=",
"outfile",
")",
"# save es, nes to file",
"self",
".",
"_save",
"(",
"outdir",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
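The fan-out over samples above is plain multiprocessing.Pool.apply_async followed by close/join and ordered collection. A generic sketch with a stand-in worker (score is hypothetical; any picklable function works in place of enrichment_score_tensor):

from multiprocessing import Pool

def score(sample):
    # stand-in worker: returns something per sample
    return sample, len(sample)

if __name__ == '__main__':
    samples = ['s1', 's2', 's3']
    pool = Pool(processes=2)
    async_res = [pool.apply_async(score, (s,)) for s in samples]
    pool.close()   # no more tasks will be submitted
    pool.join()    # wait for all workers to finish
    for r in async_res:
        print(r.get())   # results come back in submission order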
test
|
SingleSampleGSEA._save
|
save es and stats
|
gseapy/gsea.py
|
def _save(self, outdir):
"""save es and stats"""
# save raw ES to one csv file
samplesRawES = pd.DataFrame(self.resultsOnSamples)
samplesRawES.index.name = 'Term|ES'
# normalize enrichment scores by using the entire data set, as indicated
# by Barbie et al., 2009, online methods, pg. 2
samplesNES = samplesRawES / (samplesRawES.values.max() - samplesRawES.values.min())
samplesNES = samplesNES.copy()
samplesNES.index.rename('Term|NES', inplace=True)
self.res2d = samplesNES
self._logger.info("Congratulations. GSEApy runs successfully................\n")
if self._outdir is None: return
# write es
outESfile = os.path.join(outdir, "gseapy.samples.raw.es.txt")
with open(outESfile, 'a') as f:
if self.scale:
f.write('# scale the enrichment scores by number of genes in the gene sets\n')
            f.write('# this normalization has no effect on the final NES, ' + \
'as indicated by Barbie et al., 2009, online methods, pg. 2\n')
else:
f.write('# raw enrichment scores of all data\n')
            f.write('# ES not scaled by the number of genes in the gene sets\n')
samplesRawES.to_csv(f, sep='\t')
outNESfile = os.path.join(outdir, "gseapy.samples.normalized.es.txt")
with open(outNESfile, 'a') as f:
f.write('# normalize enrichment scores by using the entire data set\n')
f.write('# as indicated by Barbie et al., 2009, online methods, pg. 2\n')
samplesNES.to_csv(f, sep='\t')
return
|
def _save(self, outdir):
"""save es and stats"""
# save raw ES to one csv file
samplesRawES = pd.DataFrame(self.resultsOnSamples)
samplesRawES.index.name = 'Term|ES'
# normalize enrichment scores by using the entire data set, as indicated
# by Barbie et al., 2009, online methods, pg. 2
samplesNES = samplesRawES / (samplesRawES.values.max() - samplesRawES.values.min())
samplesNES = samplesNES.copy()
samplesNES.index.rename('Term|NES', inplace=True)
self.res2d = samplesNES
self._logger.info("Congratulations. GSEApy runs successfully................\n")
if self._outdir is None: return
# write es
outESfile = os.path.join(outdir, "gseapy.samples.raw.es.txt")
with open(outESfile, 'a') as f:
if self.scale:
f.write('# scale the enrichment scores by number of genes in the gene sets\n')
            f.write('# this normalization has no effect on the final NES, ' + \
'as indicated by Barbie et al., 2009, online methods, pg. 2\n')
else:
f.write('# raw enrichment scores of all data\n')
            f.write('# ES not scaled by the number of genes in the gene sets\n')
samplesRawES.to_csv(f, sep='\t')
outNESfile = os.path.join(outdir, "gseapy.samples.normalized.es.txt")
with open(outNESfile, 'a') as f:
f.write('# normalize enrichment scores by using the entire data set\n')
f.write('# as indicated by Barbie et al., 2009, online methods, pg. 2\n')
samplesNES.to_csv(f, sep='\t')
return
|
[
"save",
"es",
"and",
"stats"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L749-L779
|
[
"def",
"_save",
"(",
"self",
",",
"outdir",
")",
":",
"# save raw ES to one csv file",
"samplesRawES",
"=",
"pd",
".",
"DataFrame",
"(",
"self",
".",
"resultsOnSamples",
")",
"samplesRawES",
".",
"index",
".",
"name",
"=",
"'Term|ES'",
"# normalize enrichment scores by using the entire data set, as indicated",
"# by Barbie et al., 2009, online methods, pg. 2",
"samplesNES",
"=",
"samplesRawES",
"/",
"(",
"samplesRawES",
".",
"values",
".",
"max",
"(",
")",
"-",
"samplesRawES",
".",
"values",
".",
"min",
"(",
")",
")",
"samplesNES",
"=",
"samplesNES",
".",
"copy",
"(",
")",
"samplesNES",
".",
"index",
".",
"rename",
"(",
"'Term|NES'",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"res2d",
"=",
"samplesNES",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Congratulations. GSEApy runs successfully................\\n\"",
")",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"return",
"# write es",
"outESfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"\"gseapy.samples.raw.es.txt\"",
")",
"with",
"open",
"(",
"outESfile",
",",
"'a'",
")",
"as",
"f",
":",
"if",
"self",
".",
"scale",
":",
"f",
".",
"write",
"(",
"'# scale the enrichment scores by number of genes in the gene sets\\n'",
")",
"f",
".",
"write",
"(",
"'# this normalization has not effects on the final NES, '",
"+",
"'as indicated by Barbie et al., 2009, online methods, pg. 2\\n'",
")",
"else",
":",
"f",
".",
"write",
"(",
"'# raw enrichment scores of all data\\n'",
")",
"f",
".",
"write",
"(",
"'# no scale es by numbers of genes in the gene sets\\n'",
")",
"samplesRawES",
".",
"to_csv",
"(",
"f",
",",
"sep",
"=",
"'\\t'",
")",
"outNESfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"\"gseapy.samples.normalized.es.txt\"",
")",
"with",
"open",
"(",
"outNESfile",
",",
"'a'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'# normalize enrichment scores by using the entire data set\\n'",
")",
"f",
".",
"write",
"(",
"'# as indicated by Barbie et al., 2009, online methods, pg. 2\\n'",
")",
"samplesNES",
".",
"to_csv",
"(",
"f",
",",
"sep",
"=",
"'\\t'",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
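The normalization in _save divides every raw ES by the range of all scores in the data set (Barbie et al., 2009). A sketch on made-up numbers:

import pandas as pd

raw_es = pd.DataFrame({'s1': [0.62, -0.11], 's2': [0.40, 0.05]},
                      index=['SET_A', 'SET_B'])
nes = raw_es / (raw_es.values.max() - raw_es.values.min())
print(nes)   # every sample shares one global scaling factor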
test
|
Replot.run
|
main replot function
|
gseapy/gsea.py
|
def run(self):
"""main replot function"""
assert self.min_size <= self.max_size
assert self.fignum > 0
import glob
from bs4 import BeautifulSoup
# parsing files.......
try:
results_path = glob.glob(self.indir+'*/edb/results.edb')[0]
rank_path = glob.glob(self.indir+'*/edb/*.rnk')[0]
gene_set_path = glob.glob(self.indir+'*/edb/gene_sets.gmt')[0]
except IndexError as e:
sys.stderr.write("Could not locate GSEA files in the given directory!")
sys.exit(1)
# extract sample names from .cls file
cls_path = glob.glob(self.indir+'*/edb/*.cls')
if cls_path:
pos, neg, classes = gsea_cls_parser(cls_path[0])
else:
# logic for prerank results
pos, neg = '',''
    # start replotting
self.gene_sets=gene_set_path
# obtain gene sets
gene_set_dict = self.parse_gmt(gmt=gene_set_path)
# obtain rank_metrics
rank_metric = self._load_ranking(rank_path)
correl_vector = rank_metric.values
gene_list = rank_metric.index.values
    # extract each enrichment term from the results.edb file and plot.
database = BeautifulSoup(open(results_path), features='xml')
length = len(database.findAll('DTG'))
fig_num = self.fignum if self.fignum <= length else length
for idx in range(fig_num):
        # extract statistical results from the results.edb file
        enrich_term, hit_ind, nes, pval, fdr = gsea_edb_parser(results_path, index=idx)
gene_set = gene_set_dict.get(enrich_term)
# calculate enrichment score
RES = enrichment_score(gene_list=gene_list,
correl_vector=correl_vector,
gene_set=gene_set,
weighted_score_type=self.weighted_score_type,
nperm=0)[-1]
# plotting
term = enrich_term.replace('/','_').replace(":","_")
outfile = '{0}/{1}.{2}.{3}'.format(self.outdir, term, self.module, self.format)
gseaplot(rank_metric=rank_metric, term=enrich_term,
hits_indices=hit_ind, nes=nes, pval=pval, fdr=fdr,
RES=RES, pheno_pos=pos, pheno_neg=neg,
figsize=self.figsize, ofname=outfile)
self._logger.info("Congratulations! Your plots have been reproduced successfully!\n")
|
def run(self):
"""main replot function"""
assert self.min_size <= self.max_size
assert self.fignum > 0
import glob
from bs4 import BeautifulSoup
# parsing files.......
try:
results_path = glob.glob(self.indir+'*/edb/results.edb')[0]
rank_path = glob.glob(self.indir+'*/edb/*.rnk')[0]
gene_set_path = glob.glob(self.indir+'*/edb/gene_sets.gmt')[0]
except IndexError as e:
sys.stderr.write("Could not locate GSEA files in the given directory!")
sys.exit(1)
# extract sample names from .cls file
cls_path = glob.glob(self.indir+'*/edb/*.cls')
if cls_path:
pos, neg, classes = gsea_cls_parser(cls_path[0])
else:
# logic for prerank results
pos, neg = '',''
    # start replotting
self.gene_sets=gene_set_path
# obtain gene sets
gene_set_dict = self.parse_gmt(gmt=gene_set_path)
# obtain rank_metrics
rank_metric = self._load_ranking(rank_path)
correl_vector = rank_metric.values
gene_list = rank_metric.index.values
    # extract each enrichment term from the results.edb file and plot.
database = BeautifulSoup(open(results_path), features='xml')
length = len(database.findAll('DTG'))
fig_num = self.fignum if self.fignum <= length else length
for idx in range(fig_num):
        # extract statistical results from the results.edb file
        enrich_term, hit_ind, nes, pval, fdr = gsea_edb_parser(results_path, index=idx)
gene_set = gene_set_dict.get(enrich_term)
# calculate enrichment score
RES = enrichment_score(gene_list=gene_list,
correl_vector=correl_vector,
gene_set=gene_set,
weighted_score_type=self.weighted_score_type,
nperm=0)[-1]
# plotting
term = enrich_term.replace('/','_').replace(":","_")
outfile = '{0}/{1}.{2}.{3}'.format(self.outdir, term, self.module, self.format)
gseaplot(rank_metric=rank_metric, term=enrich_term,
hits_indices=hit_ind, nes=nes, pval=pval, fdr=fdr,
RES=RES, pheno_pos=pos, pheno_neg=neg,
figsize=self.figsize, ofname=outfile)
self._logger.info("Congratulations! Your plots have been reproduced successfully!\n")
|
[
"main",
"replot",
"function"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/gsea.py#L802-L854
|
[
"def",
"run",
"(",
"self",
")",
":",
"assert",
"self",
".",
"min_size",
"<=",
"self",
".",
"max_size",
"assert",
"self",
".",
"fignum",
">",
"0",
"import",
"glob",
"from",
"bs4",
"import",
"BeautifulSoup",
"# parsing files.......",
"try",
":",
"results_path",
"=",
"glob",
".",
"glob",
"(",
"self",
".",
"indir",
"+",
"'*/edb/results.edb'",
")",
"[",
"0",
"]",
"rank_path",
"=",
"glob",
".",
"glob",
"(",
"self",
".",
"indir",
"+",
"'*/edb/*.rnk'",
")",
"[",
"0",
"]",
"gene_set_path",
"=",
"glob",
".",
"glob",
"(",
"self",
".",
"indir",
"+",
"'*/edb/gene_sets.gmt'",
")",
"[",
"0",
"]",
"except",
"IndexError",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Could not locate GSEA files in the given directory!\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# extract sample names from .cls file",
"cls_path",
"=",
"glob",
".",
"glob",
"(",
"self",
".",
"indir",
"+",
"'*/edb/*.cls'",
")",
"if",
"cls_path",
":",
"pos",
",",
"neg",
",",
"classes",
"=",
"gsea_cls_parser",
"(",
"cls_path",
"[",
"0",
"]",
")",
"else",
":",
"# logic for prerank results",
"pos",
",",
"neg",
"=",
"''",
",",
"''",
"# start reploting",
"self",
".",
"gene_sets",
"=",
"gene_set_path",
"# obtain gene sets",
"gene_set_dict",
"=",
"self",
".",
"parse_gmt",
"(",
"gmt",
"=",
"gene_set_path",
")",
"# obtain rank_metrics",
"rank_metric",
"=",
"self",
".",
"_load_ranking",
"(",
"rank_path",
")",
"correl_vector",
"=",
"rank_metric",
".",
"values",
"gene_list",
"=",
"rank_metric",
".",
"index",
".",
"values",
"# extract each enriment term in the results.edb files and plot.",
"database",
"=",
"BeautifulSoup",
"(",
"open",
"(",
"results_path",
")",
",",
"features",
"=",
"'xml'",
")",
"length",
"=",
"len",
"(",
"database",
".",
"findAll",
"(",
"'DTG'",
")",
")",
"fig_num",
"=",
"self",
".",
"fignum",
"if",
"self",
".",
"fignum",
"<=",
"length",
"else",
"length",
"for",
"idx",
"in",
"range",
"(",
"fig_num",
")",
":",
"# extract statistical resutls from results.edb file",
"enrich_term",
",",
"hit_ind",
",",
"nes",
",",
"pval",
",",
"fdr",
"=",
"gsea_edb_parser",
"(",
"results_path",
",",
"index",
"=",
"idx",
")",
"gene_set",
"=",
"gene_set_dict",
".",
"get",
"(",
"enrich_term",
")",
"# calculate enrichment score",
"RES",
"=",
"enrichment_score",
"(",
"gene_list",
"=",
"gene_list",
",",
"correl_vector",
"=",
"correl_vector",
",",
"gene_set",
"=",
"gene_set",
",",
"weighted_score_type",
"=",
"self",
".",
"weighted_score_type",
",",
"nperm",
"=",
"0",
")",
"[",
"-",
"1",
"]",
"# plotting",
"term",
"=",
"enrich_term",
".",
"replace",
"(",
"'/'",
",",
"'_'",
")",
".",
"replace",
"(",
"\":\"",
",",
"\"_\"",
")",
"outfile",
"=",
"'{0}/{1}.{2}.{3}'",
".",
"format",
"(",
"self",
".",
"outdir",
",",
"term",
",",
"self",
".",
"module",
",",
"self",
".",
"format",
")",
"gseaplot",
"(",
"rank_metric",
"=",
"rank_metric",
",",
"term",
"=",
"enrich_term",
",",
"hits_indices",
"=",
"hit_ind",
",",
"nes",
"=",
"nes",
",",
"pval",
"=",
"pval",
",",
"fdr",
"=",
"fdr",
",",
"RES",
"=",
"RES",
",",
"pheno_pos",
"=",
"pos",
",",
"pheno_neg",
"=",
"neg",
",",
"figsize",
"=",
"self",
".",
"figsize",
",",
"ofname",
"=",
"outfile",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Congratulations! Your plots have been reproduced successfully!\\n\"",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
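Replot.run is exposed as the top-level gseapy.replot helper, pointed at a Java-GSEA output directory containing the edb/ files globbed above. A minimal sketch with a hypothetical directory name:

import gseapy as gp

# reproduce enrichment figures from an existing GSEA desktop run
gp.replot(indir='./gsea_java_output', outdir='replot_figs')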
test
|
enrichr
|
Enrichr API.
:param gene_list: Flat file with list of genes, one gene id per row, or a python list object
:param gene_sets: Enrichr Library to query. Required enrichr library name(s). Separate each name by comma.
:param organism: Enrichr supported organism. Select from (human, mouse, yeast, fly, fish, worm).
see here for details: https://amp.pharm.mssm.edu/modEnrichr
:param description: name of analysis. optional.
:param outdir: Output file directory
:param float cutoff: Adjusted P-value (Benjamini-Hochberg correction) cutoff. Default: 0.05
:param background: BioMart dataset name for retrieving background gene information.
    This argument only works when the gene_sets input is a gmt file or a python dict.
    You could also specify a number yourself, e.g. the total number of expressed genes.
    In this case, retrieving background info from BioMart is skipped.
    Use the code below to see valid background dataset names from BioMart.
    Here is example code:
>>> from gseapy.parser import Biomart
>>> bm = Biomart(verbose=False, host="asia.ensembl.org")
>>> ## view validated marts
>>> marts = bm.get_marts()
>>> ## view validated dataset
>>> datasets = bm.get_datasets(mart='ENSEMBL_MART_ENSEMBL')
:param str format: Output figure format supported by matplotlib, e.g. 'pdf', 'png', 'eps'. Default: 'pdf'.
:param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. (width, height). Default: (8,6).
:param bool no_plot: If True, no figure will be drawn. Default: False.
:param bool verbose: Increase output verbosity and print out the progress of your job. Default: False.
:return: An Enrichr object, which obj.res2d stores your last query, obj.results stores your all queries.
|
gseapy/enrichr.py
|
def enrichr(gene_list, gene_sets, organism='human', description='',
outdir='Enrichr', background='hsapiens_gene_ensembl', cutoff=0.05,
format='pdf', figsize=(8,6), top_term=10, no_plot=False, verbose=False):
"""Enrichr API.
:param gene_list: Flat file with list of genes, one gene id per row, or a python list object
:param gene_sets: Enrichr Library to query. Required enrichr library name(s). Separate each name by comma.
:param organism: Enrichr supported organism. Select from (human, mouse, yeast, fly, fish, worm).
see here for details: https://amp.pharm.mssm.edu/modEnrichr
:param description: name of analysis. optional.
:param outdir: Output file directory
    :param float cutoff: Adjusted P-value (Benjamini-Hochberg correction) cutoff. Default: 0.05
    :param background: BioMart dataset name for retrieving background gene information.
        This argument only works when the gene_sets input is a gmt file or a python dict.
        You could also specify a number yourself, e.g. the total number of expressed genes.
        In this case, retrieving background info from BioMart is skipped.
        Use the code below to see valid background dataset names from BioMart.
        Here is example code:
>>> from gseapy.parser import Biomart
>>> bm = Biomart(verbose=False, host="asia.ensembl.org")
>>> ## view validated marts
>>> marts = bm.get_marts()
>>> ## view validated dataset
>>> datasets = bm.get_datasets(mart='ENSEMBL_MART_ENSEMBL')
    :param str format: Output figure format supported by matplotlib, e.g. 'pdf', 'png', 'eps'. Default: 'pdf'.
    :param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. (width, height). Default: (8,6).
    :param bool no_plot: If True, no figure will be drawn. Default: False.
    :param bool verbose: Increase output verbosity and print out the progress of your job. Default: False.
:return: An Enrichr object, which obj.res2d stores your last query, obj.results stores your all queries.
"""
enr = Enrichr(gene_list, gene_sets, organism, description, outdir,
cutoff, background, format, figsize, top_term, no_plot, verbose)
enr.run()
return enr
|
def enrichr(gene_list, gene_sets, organism='human', description='',
outdir='Enrichr', background='hsapiens_gene_ensembl', cutoff=0.05,
format='pdf', figsize=(8,6), top_term=10, no_plot=False, verbose=False):
"""Enrichr API.
:param gene_list: Flat file with list of genes, one gene id per row, or a python list object
:param gene_sets: Enrichr Library to query. Required enrichr library name(s). Separate each name by comma.
:param organism: Enrichr supported organism. Select from (human, mouse, yeast, fly, fish, worm).
see here for details: https://amp.pharm.mssm.edu/modEnrichr
:param description: name of analysis. optional.
:param outdir: Output file directory
    :param float cutoff: Adjusted P-value (Benjamini-Hochberg correction) cutoff. Default: 0.05
    :param background: BioMart dataset name for retrieving background gene information.
        This argument only works when the gene_sets input is a gmt file or a python dict.
        You could also specify a number yourself, e.g. the total number of expressed genes.
        In this case, retrieving background info from BioMart is skipped.
        Use the code below to see valid background dataset names from BioMart.
        Here is example code:
>>> from gseapy.parser import Biomart
>>> bm = Biomart(verbose=False, host="asia.ensembl.org")
>>> ## view validated marts
>>> marts = bm.get_marts()
>>> ## view validated dataset
>>> datasets = bm.get_datasets(mart='ENSEMBL_MART_ENSEMBL')
    :param str format: Output figure format supported by matplotlib, e.g. 'pdf', 'png', 'eps'. Default: 'pdf'.
    :param list figsize: Matplotlib figsize, accepts a tuple or list, e.g. (width, height). Default: (8,6).
    :param bool no_plot: If True, no figure will be drawn. Default: False.
    :param bool verbose: Increase output verbosity and print out the progress of your job. Default: False.
:return: An Enrichr object, which obj.res2d stores your last query, obj.results stores your all queries.
"""
enr = Enrichr(gene_list, gene_sets, organism, description, outdir,
cutoff, background, format, figsize, top_term, no_plot, verbose)
enr.run()
return enr
|
[
"Enrichr",
"API",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L355-L393
|
[
"def",
"enrichr",
"(",
"gene_list",
",",
"gene_sets",
",",
"organism",
"=",
"'human'",
",",
"description",
"=",
"''",
",",
"outdir",
"=",
"'Enrichr'",
",",
"background",
"=",
"'hsapiens_gene_ensembl'",
",",
"cutoff",
"=",
"0.05",
",",
"format",
"=",
"'pdf'",
",",
"figsize",
"=",
"(",
"8",
",",
"6",
")",
",",
"top_term",
"=",
"10",
",",
"no_plot",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
":",
"enr",
"=",
"Enrichr",
"(",
"gene_list",
",",
"gene_sets",
",",
"organism",
",",
"description",
",",
"outdir",
",",
"cutoff",
",",
"background",
",",
"format",
",",
"figsize",
",",
"top_term",
",",
"no_plot",
",",
"verbose",
")",
"enr",
".",
"run",
"(",
")",
"return",
"enr"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
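A minimal call matching the signature above; the gene symbols and library name are placeholders:

import gseapy as gp

enr = gp.enrichr(gene_list=['TP53', 'BRCA1', 'EGFR', 'MYC'],
                 gene_sets='KEGG_2016',
                 organism='human',
                 outdir='Enrichr',
                 cutoff=0.05)
print(enr.res2d.head())   # res2d stores the last query, per the docstring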
test
|
Enrichr.prepare_outdir
|
create temp directory.
|
gseapy/enrichr.py
|
def prepare_outdir(self):
"""create temp directory."""
self._outdir = self.outdir
if self._outdir is None:
self._tmpdir = TemporaryDirectory()
self.outdir = self._tmpdir.name
elif isinstance(self.outdir, str):
mkdirs(self.outdir)
else:
raise Exception("Error parsing outdir: %s"%type(self.outdir))
# handle gene_sets
logfile = os.path.join(self.outdir, "gseapy.%s.%s.log" % (self.module, self.descriptions))
return logfile
|
def prepare_outdir(self):
"""create temp directory."""
self._outdir = self.outdir
if self._outdir is None:
self._tmpdir = TemporaryDirectory()
self.outdir = self._tmpdir.name
elif isinstance(self.outdir, str):
mkdirs(self.outdir)
else:
raise Exception("Error parsing outdir: %s"%type(self.outdir))
# handle gene_sets
logfile = os.path.join(self.outdir, "gseapy.%s.%s.log" % (self.module, self.descriptions))
return logfile
|
[
"create",
"temp",
"directory",
"."
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L50-L63
|
[
"def",
"prepare_outdir",
"(",
"self",
")",
":",
"self",
".",
"_outdir",
"=",
"self",
".",
"outdir",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"self",
".",
"_tmpdir",
"=",
"TemporaryDirectory",
"(",
")",
"self",
".",
"outdir",
"=",
"self",
".",
"_tmpdir",
".",
"name",
"elif",
"isinstance",
"(",
"self",
".",
"outdir",
",",
"str",
")",
":",
"mkdirs",
"(",
"self",
".",
"outdir",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Error parsing outdir: %s\"",
"%",
"type",
"(",
"self",
".",
"outdir",
")",
")",
"# handle gene_sets",
"logfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"outdir",
",",
"\"gseapy.%s.%s.log\"",
"%",
"(",
"self",
".",
"module",
",",
"self",
".",
"descriptions",
")",
")",
"return",
"logfile"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
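The outdir handling above is a small reusable pattern: write to the user's directory when one is given, otherwise to a TemporaryDirectory that can be cleaned up afterwards. A generic sketch:

import os
from tempfile import TemporaryDirectory

def resolve_outdir(outdir=None):
    tmp = None
    if outdir is None:
        tmp = TemporaryDirectory()   # keep a handle so cleanup() can run later
        outdir = tmp.name
    else:
        os.makedirs(outdir, exist_ok=True)
    return outdir, tmp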
test
|
Enrichr.parse_genesets
|
parse gene_sets input file type
|
gseapy/enrichr.py
|
def parse_genesets(self):
"""parse gene_sets input file type"""
enrichr_library = self.get_libraries()
if isinstance(self.gene_sets, list):
gss = self.gene_sets
elif isinstance(self.gene_sets, str):
gss = [ g.strip() for g in self.gene_sets.strip().split(",") ]
elif isinstance(self.gene_sets, dict):
gss = [self.gene_sets]
else:
raise Exception("Error parsing enrichr libraries, please provided corrected one")
    # gss: a list containing .gmt files, dicts, or enrichr library names.
# now, convert .gmt to dict
gss_exist = []
for g in gss:
if isinstance(g, dict):
gss_exist.append(g)
continue
if isinstance(g, str):
if g in enrichr_library:
gss_exist.append(g)
continue
if g.lower().endswith(".gmt") and os.path.exists(g):
self._logger.info("User Defined gene sets is given: %s"%g)
with open(g) as genesets:
g_dict = { line.strip().split("\t")[0]: line.strip().split("\t")[2:]
for line in genesets.readlines() }
gss_exist.append(g_dict)
return gss_exist
|
def parse_genesets(self):
"""parse gene_sets input file type"""
enrichr_library = self.get_libraries()
if isinstance(self.gene_sets, list):
gss = self.gene_sets
elif isinstance(self.gene_sets, str):
gss = [ g.strip() for g in self.gene_sets.strip().split(",") ]
elif isinstance(self.gene_sets, dict):
gss = [self.gene_sets]
else:
raise Exception("Error parsing enrichr libraries, please provided corrected one")
    # gss: a list containing .gmt files, dicts, or enrichr library names.
# now, convert .gmt to dict
gss_exist = []
for g in gss:
if isinstance(g, dict):
gss_exist.append(g)
continue
if isinstance(g, str):
if g in enrichr_library:
gss_exist.append(g)
continue
if g.lower().endswith(".gmt") and os.path.exists(g):
self._logger.info("User Defined gene sets is given: %s"%g)
with open(g) as genesets:
g_dict = { line.strip().split("\t")[0]: line.strip().split("\t")[2:]
for line in genesets.readlines() }
gss_exist.append(g_dict)
return gss_exist
|
[
"parse",
"gene_sets",
"input",
"file",
"type"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L65-L96
|
[
"def",
"parse_genesets",
"(",
"self",
")",
":",
"enrichr_library",
"=",
"self",
".",
"get_libraries",
"(",
")",
"if",
"isinstance",
"(",
"self",
".",
"gene_sets",
",",
"list",
")",
":",
"gss",
"=",
"self",
".",
"gene_sets",
"elif",
"isinstance",
"(",
"self",
".",
"gene_sets",
",",
"str",
")",
":",
"gss",
"=",
"[",
"g",
".",
"strip",
"(",
")",
"for",
"g",
"in",
"self",
".",
"gene_sets",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\",\"",
")",
"]",
"elif",
"isinstance",
"(",
"self",
".",
"gene_sets",
",",
"dict",
")",
":",
"gss",
"=",
"[",
"self",
".",
"gene_sets",
"]",
"else",
":",
"raise",
"Exception",
"(",
"\"Error parsing enrichr libraries, please provided corrected one\"",
")",
"# gss: a list contain .gmt, dict, enrichr_liraries.",
"# now, convert .gmt to dict",
"gss_exist",
"=",
"[",
"]",
"for",
"g",
"in",
"gss",
":",
"if",
"isinstance",
"(",
"g",
",",
"dict",
")",
":",
"gss_exist",
".",
"append",
"(",
"g",
")",
"continue",
"if",
"isinstance",
"(",
"g",
",",
"str",
")",
":",
"if",
"g",
"in",
"enrichr_library",
":",
"gss_exist",
".",
"append",
"(",
"g",
")",
"continue",
"if",
"g",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".gmt\"",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"g",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"User Defined gene sets is given: %s\"",
"%",
"g",
")",
"with",
"open",
"(",
"g",
")",
"as",
"genesets",
":",
"g_dict",
"=",
"{",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"0",
"]",
":",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
"2",
":",
"]",
"for",
"line",
"in",
"genesets",
".",
"readlines",
"(",
")",
"}",
"gss_exist",
".",
"append",
"(",
"g_dict",
")",
"return",
"gss_exist"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
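The .gmt branch above boils down to a dict comprehension: term in column one, description in column two, genes afterwards. A standalone sketch:

def read_gmt(path):
    # {term: [gene, gene, ...]}; column 2 (the description) is skipped
    with open(path) as fh:
        return {line.strip().split("\t")[0]: line.strip().split("\t")[2:]
                for line in fh if line.strip()}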
test
|
Enrichr.parse_genelists
|
parse gene list
|
gseapy/enrichr.py
|
def parse_genelists(self):
"""parse gene list"""
if isinstance(self.gene_list, list):
genes = self.gene_list
elif isinstance(self.gene_list, pd.DataFrame):
# input type is bed file
if self.gene_list.shape[1] >=3:
genes= self.gene_list.iloc[:,:3].apply(lambda x: "\t".join([str(i) for i in x]), axis=1).tolist()
# input type with weight values
elif self.gene_list.shape[1] == 2:
genes= self.gene_list.apply(lambda x: ",".join([str(i) for i in x]), axis=1).tolist()
else:
genes = self.gene_list.squeeze().tolist()
elif isinstance(self.gene_list, pd.Series):
genes = self.gene_list.squeeze().tolist()
else:
# get gene lists or bed file, or gene list with weighted values.
genes=[]
with open(self.gene_list) as f:
for gene in f:
genes.append(gene.strip())
self._isezid = all(map(self._is_entrez_id, genes))
if self._isezid:
        self._gls = set(map(int, genes))
else:
self._gls = genes
return '\n'.join(genes)
|
def parse_genelists(self):
"""parse gene list"""
if isinstance(self.gene_list, list):
genes = self.gene_list
elif isinstance(self.gene_list, pd.DataFrame):
# input type is bed file
if self.gene_list.shape[1] >=3:
genes= self.gene_list.iloc[:,:3].apply(lambda x: "\t".join([str(i) for i in x]), axis=1).tolist()
# input type with weight values
elif self.gene_list.shape[1] == 2:
genes= self.gene_list.apply(lambda x: ",".join([str(i) for i in x]), axis=1).tolist()
else:
genes = self.gene_list.squeeze().tolist()
elif isinstance(self.gene_list, pd.Series):
genes = self.gene_list.squeeze().tolist()
else:
# get gene lists or bed file, or gene list with weighted values.
genes=[]
with open(self.gene_list) as f:
for gene in f:
genes.append(gene.strip())
self._isezid = all(map(self._is_entrez_id, genes))
if self._isezid:
        self._gls = set(map(int, genes))
else:
self._gls = genes
return '\n'.join(genes)
|
[
"parse",
"gene",
"list"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L98-L126
|
[
"def",
"parse_genelists",
"(",
"self",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"gene_list",
",",
"list",
")",
":",
"genes",
"=",
"self",
".",
"gene_list",
"elif",
"isinstance",
"(",
"self",
".",
"gene_list",
",",
"pd",
".",
"DataFrame",
")",
":",
"# input type is bed file",
"if",
"self",
".",
"gene_list",
".",
"shape",
"[",
"1",
"]",
">=",
"3",
":",
"genes",
"=",
"self",
".",
"gene_list",
".",
"iloc",
"[",
":",
",",
":",
"3",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"\"\\t\"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"x",
"]",
")",
",",
"axis",
"=",
"1",
")",
".",
"tolist",
"(",
")",
"# input type with weight values",
"elif",
"self",
".",
"gene_list",
".",
"shape",
"[",
"1",
"]",
"==",
"2",
":",
"genes",
"=",
"self",
".",
"gene_list",
".",
"apply",
"(",
"lambda",
"x",
":",
"\",\"",
".",
"join",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"x",
"]",
")",
",",
"axis",
"=",
"1",
")",
".",
"tolist",
"(",
")",
"else",
":",
"genes",
"=",
"self",
".",
"gene_list",
".",
"squeeze",
"(",
")",
".",
"tolist",
"(",
")",
"elif",
"isinstance",
"(",
"self",
".",
"gene_list",
",",
"pd",
".",
"Series",
")",
":",
"genes",
"=",
"self",
".",
"gene_list",
".",
"squeeze",
"(",
")",
".",
"tolist",
"(",
")",
"else",
":",
"# get gene lists or bed file, or gene list with weighted values.",
"genes",
"=",
"[",
"]",
"with",
"open",
"(",
"self",
".",
"gene_list",
")",
"as",
"f",
":",
"for",
"gene",
"in",
"f",
":",
"genes",
".",
"append",
"(",
"gene",
".",
"strip",
"(",
")",
")",
"self",
".",
"_isezid",
"=",
"all",
"(",
"map",
"(",
"self",
".",
"_is_entrez_id",
",",
"genes",
")",
")",
"if",
"self",
".",
"_isezid",
":",
"self",
".",
"_gls",
"=",
"set",
"(",
"map",
"(",
"int",
",",
"self",
".",
"_gls",
")",
")",
"else",
":",
"self",
".",
"_gls",
"=",
"genes",
"return",
"'\\n'",
".",
"join",
"(",
"genes",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
Enrichr.send_genes
|
send gene list to enrichr server
|
gseapy/enrichr.py
|
def send_genes(self, gene_list, url):
""" send gene list to enrichr server"""
payload = {
'list': (None, gene_list),
'description': (None, self.descriptions)
}
# response
response = requests.post(url, files=payload)
if not response.ok:
raise Exception('Error analyzing gene list')
sleep(1)
job_id = json.loads(response.text)
return job_id
|
def send_genes(self, gene_list, url):
""" send gene list to enrichr server"""
payload = {
'list': (None, gene_list),
'description': (None, self.descriptions)
}
# response
response = requests.post(url, files=payload)
if not response.ok:
raise Exception('Error analyzing gene list')
sleep(1)
job_id = json.loads(response.text)
return job_id
|
[
"send",
"gene",
"list",
"to",
"enrichr",
"server"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L128-L141
|
[
"def",
"send_genes",
"(",
"self",
",",
"gene_list",
",",
"url",
")",
":",
"payload",
"=",
"{",
"'list'",
":",
"(",
"None",
",",
"gene_list",
")",
",",
"'description'",
":",
"(",
"None",
",",
"self",
".",
"descriptions",
")",
"}",
"# response",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"files",
"=",
"payload",
")",
"if",
"not",
"response",
".",
"ok",
":",
"raise",
"Exception",
"(",
"'Error analyzing gene list'",
")",
"sleep",
"(",
"1",
")",
"job_id",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")",
"return",
"job_id"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
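The payload format matters here: Enrichr's addList endpoint expects a multipart form, which requests produces when the fields are passed through files=. A sketch of the same POST send_genes performs, against the default (human) endpoint used above; the gene list is a placeholder:

import json
import requests

payload = {'list': (None, 'TP53\nBRCA1\nEGFR'),     # newline-separated genes
           'description': (None, 'example query')}
resp = requests.post('http://amp.pharm.mssm.edu/Enrichr/addList', files=payload)
job = json.loads(resp.text)   # contains 'userListId' and 'shortId'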
test
|
Enrichr.check_genes
|
Compare the genes sent and received to get successfully recognized genes
|
gseapy/enrichr.py
|
def check_genes(self, gene_list, usr_list_id):
'''
Compare the genes sent and received to get successfully recognized genes
'''
response = requests.get('http://amp.pharm.mssm.edu/Enrichr/view?userListId=%s' % usr_list_id)
if not response.ok:
raise Exception('Error getting gene list back')
returnedL = json.loads(response.text)["genes"]
returnedN = sum([1 for gene in gene_list if gene in returnedL])
self._logger.info('{} genes successfully recognized by Enrichr'.format(returnedN))
|
def check_genes(self, gene_list, usr_list_id):
'''
Compare the genes sent and received to get successfully recognized genes
'''
response = requests.get('http://amp.pharm.mssm.edu/Enrichr/view?userListId=%s' % usr_list_id)
if not response.ok:
raise Exception('Error getting gene list back')
returnedL = json.loads(response.text)["genes"]
returnedN = sum([1 for gene in gene_list if gene in returnedL])
self._logger.info('{} genes successfully recognized by Enrichr'.format(returnedN))
|
[
"Compare",
"the",
"genes",
"sent",
"and",
"received",
"to",
"get",
"successfully",
"recognized",
"genes"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L143-L152
|
[
"def",
"check_genes",
"(",
"self",
",",
"gene_list",
",",
"usr_list_id",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"'http://amp.pharm.mssm.edu/Enrichr/view?userListId=%s'",
"%",
"usr_list_id",
")",
"if",
"not",
"response",
".",
"ok",
":",
"raise",
"Exception",
"(",
"'Error getting gene list back'",
")",
"returnedL",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"text",
")",
"[",
"\"genes\"",
"]",
"returnedN",
"=",
"sum",
"(",
"[",
"1",
"for",
"gene",
"in",
"gene_list",
"if",
"gene",
"in",
"returnedL",
"]",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"'{} genes successfully recognized by Enrichr'",
".",
"format",
"(",
"returnedN",
")",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
test
|
Enrichr.get_results
|
Enrichr API
|
gseapy/enrichr.py
|
def get_results(self, gene_list):
"""Enrichr API"""
ADDLIST_URL = 'http://amp.pharm.mssm.edu/%sEnrichr/addList'%self._organism
job_id = self.send_genes(gene_list, ADDLIST_URL)
user_list_id = job_id['userListId']
RESULTS_URL = 'http://amp.pharm.mssm.edu/%sEnrichr/export'%self._organism
query_string = '?userListId=%s&filename=%s&backgroundType=%s'
# set max retries num =5
s = retry(num=5)
filename = "%s.%s.reports" % (self._gs, self.descriptions)
url = RESULTS_URL + query_string % (user_list_id, filename, self._gs)
response = s.get(url, stream=True, timeout=None)
# response = requests.get(RESULTS_URL + query_string % (user_list_id, gene_set))
sleep(1)
res = pd.read_csv(StringIO(response.content.decode('utf-8')),sep="\t")
return [job_id['shortId'], res]
|
def get_results(self, gene_list):
"""Enrichr API"""
ADDLIST_URL = 'http://amp.pharm.mssm.edu/%sEnrichr/addList'%self._organism
job_id = self.send_genes(gene_list, ADDLIST_URL)
user_list_id = job_id['userListId']
RESULTS_URL = 'http://amp.pharm.mssm.edu/%sEnrichr/export'%self._organism
query_string = '?userListId=%s&filename=%s&backgroundType=%s'
# set max retries num =5
s = retry(num=5)
filename = "%s.%s.reports" % (self._gs, self.descriptions)
url = RESULTS_URL + query_string % (user_list_id, filename, self._gs)
response = s.get(url, stream=True, timeout=None)
# response = requests.get(RESULTS_URL + query_string % (user_list_id, gene_set))
sleep(1)
res = pd.read_csv(StringIO(response.content.decode('utf-8')),sep="\t")
return [job_id['shortId'], res]
|
[
"Enrichr",
"API"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L154-L170
|
[
"def",
"get_results",
"(",
"self",
",",
"gene_list",
")",
":",
"ADDLIST_URL",
"=",
"'http://amp.pharm.mssm.edu/%sEnrichr/addList'",
"%",
"self",
".",
"_organism",
"job_id",
"=",
"self",
".",
"send_genes",
"(",
"gene_list",
",",
"ADDLIST_URL",
")",
"user_list_id",
"=",
"job_id",
"[",
"'userListId'",
"]",
"RESULTS_URL",
"=",
"'http://amp.pharm.mssm.edu/%sEnrichr/export'",
"%",
"self",
".",
"_organism",
"query_string",
"=",
"'?userListId=%s&filename=%s&backgroundType=%s'",
"# set max retries num =5",
"s",
"=",
"retry",
"(",
"num",
"=",
"5",
")",
"filename",
"=",
"\"%s.%s.reports\"",
"%",
"(",
"self",
".",
"_gs",
",",
"self",
".",
"descriptions",
")",
"url",
"=",
"RESULTS_URL",
"+",
"query_string",
"%",
"(",
"user_list_id",
",",
"filename",
",",
"self",
".",
"_gs",
")",
"response",
"=",
"s",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
",",
"timeout",
"=",
"None",
")",
"# response = requests.get(RESULTS_URL + query_string % (user_list_id, gene_set))",
"sleep",
"(",
"1",
")",
"res",
"=",
"pd",
".",
"read_csv",
"(",
"StringIO",
"(",
"response",
".",
"content",
".",
"decode",
"(",
"'utf-8'",
")",
")",
",",
"sep",
"=",
"\"\\t\"",
")",
"return",
"[",
"job_id",
"[",
"'shortId'",
"]",
",",
"res",
"]"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
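retry(num=5) is a gseapy-internal helper; a rough generic equivalent, assuming its job is simply to re-issue failed requests, can be assembled from requests plus urllib3's Retry:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=Retry(total=5, backoff_factor=1)))
# s.get(url, stream=True) now retries up to 5 times before raising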
test
|
Enrichr.get_background
|
get background gene
|
gseapy/enrichr.py
|
def get_background(self):
"""get background gene"""
# input is a file
if os.path.isfile(self.background):
with open(self.background) as b:
bg2 = b.readlines()
bg = [g.strip() for g in bg2]
return set(bg)
# package included data
DB_FILE = resource_filename("gseapy", "data/{}.background.genes.txt".format(self.background))
filename = os.path.join(DEFAULT_CACHE_PATH, "{}.background.genes.txt".format(self.background))
if os.path.exists(filename):
df = pd.read_csv(filename,sep="\t")
elif os.path.exists(DB_FILE):
df = pd.read_csv(DB_FILE,sep="\t")
else:
# background is a biomart database name
self._logger.warning("Downloading %s for the first time. It might take a couple of miniutes."%self.background)
bm = Biomart()
df = bm.query(dataset=self.background)
df.dropna(subset=['go_id'], inplace=True)
self._logger.info("using all annotated genes with GO_ID as background genes")
df.dropna(subset=['entrezgene'], inplace=True)
# input id type: entrez or gene_name
if self._isezid:
bg = df['entrezgene'].astype(int)
else:
bg = df['external_gene_name']
return set(bg)
|
def get_background(self):
"""get background gene"""
# input is a file
if os.path.isfile(self.background):
with open(self.background) as b:
bg2 = b.readlines()
bg = [g.strip() for g in bg2]
return set(bg)
# package included data
DB_FILE = resource_filename("gseapy", "data/{}.background.genes.txt".format(self.background))
filename = os.path.join(DEFAULT_CACHE_PATH, "{}.background.genes.txt".format(self.background))
if os.path.exists(filename):
df = pd.read_csv(filename,sep="\t")
elif os.path.exists(DB_FILE):
df = pd.read_csv(DB_FILE,sep="\t")
else:
# background is a biomart database name
self._logger.warning("Downloading %s for the first time. It might take a couple of miniutes."%self.background)
bm = Biomart()
df = bm.query(dataset=self.background)
df.dropna(subset=['go_id'], inplace=True)
self._logger.info("using all annotated genes with GO_ID as background genes")
df.dropna(subset=['entrezgene'], inplace=True)
# input id type: entrez or gene_name
if self._isezid:
bg = df['entrezgene'].astype(int)
else:
bg = df['external_gene_name']
return set(bg)
|
[
"get",
"background",
"gene"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L186-L217
|
[
"def",
"get_background",
"(",
"self",
")",
":",
"# input is a file",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"background",
")",
":",
"with",
"open",
"(",
"self",
".",
"background",
")",
"as",
"b",
":",
"bg2",
"=",
"b",
".",
"readlines",
"(",
")",
"bg",
"=",
"[",
"g",
".",
"strip",
"(",
")",
"for",
"g",
"in",
"bg2",
"]",
"return",
"set",
"(",
"bg",
")",
"# package included data",
"DB_FILE",
"=",
"resource_filename",
"(",
"\"gseapy\"",
",",
"\"data/{}.background.genes.txt\"",
".",
"format",
"(",
"self",
".",
"background",
")",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"DEFAULT_CACHE_PATH",
",",
"\"{}.background.genes.txt\"",
".",
"format",
"(",
"self",
".",
"background",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"filename",
",",
"sep",
"=",
"\"\\t\"",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"DB_FILE",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"DB_FILE",
",",
"sep",
"=",
"\"\\t\"",
")",
"else",
":",
"# background is a biomart database name",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Downloading %s for the first time. It might take a couple of miniutes.\"",
"%",
"self",
".",
"background",
")",
"bm",
"=",
"Biomart",
"(",
")",
"df",
"=",
"bm",
".",
"query",
"(",
"dataset",
"=",
"self",
".",
"background",
")",
"df",
".",
"dropna",
"(",
"subset",
"=",
"[",
"'go_id'",
"]",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"using all annotated genes with GO_ID as background genes\"",
")",
"df",
".",
"dropna",
"(",
"subset",
"=",
"[",
"'entrezgene'",
"]",
",",
"inplace",
"=",
"True",
")",
"# input id type: entrez or gene_name",
"if",
"self",
".",
"_isezid",
":",
"bg",
"=",
"df",
"[",
"'entrezgene'",
"]",
".",
"astype",
"(",
"int",
")",
"else",
":",
"bg",
"=",
"df",
"[",
"'external_gene_name'",
"]",
"return",
"set",
"(",
"bg",
")"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
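The lookup order in get_background() (local file, then cached table, then packaged table, then a Biomart download) can be sketched independently of the class; the cache directory and the final fallback below are illustrative, not gseapy's actual paths.

import os

import pandas as pd

def resolve_background(background, cache_dir="~/.cache/gseapy"):
    # 1. a readable file wins: one gene symbol per line
    if os.path.isfile(background):
        with open(background) as handle:
            return {line.strip() for line in handle if line.strip()}
    # 2. otherwise look for a previously downloaded annotation table
    cached = os.path.join(os.path.expanduser(cache_dir),
                          "%s.background.genes.txt" % background)
    if os.path.exists(cached):
        return set(pd.read_csv(cached, sep="\t").iloc[:, 0])
    # 3. the real code would query Biomart here; out of scope for this sketch
    raise FileNotFoundError("no local source for %r" % background)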
test
|
Enrichr.get_organism
|
Select Enrichr organism from below:
Human & Mouse: H. sapiens & M. musculus
Fly: D. melanogaster
Yeast: S. cerevisiae
Worm: C. elegans
Fish: D. rerio
|
gseapy/enrichr.py
|
def get_organism(self):
"""Select Enrichr organism from below:
Human & Mouse: H. sapiens & M. musculus
Fly: D. melanogaster
Yeast: S. cerevisiae
Worm: C. elegans
Fish: D. rerio
"""
organism = {'default': ['', 'hs', 'mm', 'human','mouse',
'homo sapiens', 'mus musculus',
'h. sapiens', 'm. musculus'],
'Fly': ['fly', 'd. melanogaster', 'drosophila melanogaster'],
'Yeast': ['yeast', 's. cerevisiae', 'saccharomyces cerevisiae'],
'Worm': ['worm', 'c. elegans', 'caenorhabditis elegans', 'nematode'],
'Fish': ['fish', 'd. rerio', 'danio rerio', 'zebrafish']
}
for k, v in organism.items():
if self.organism.lower() in v :
self._organism = k
if self._organism is None:
raise Exception("No supported organism found !!!")
if self._organism == 'default':
self._organism = ''
return
|
def get_organism(self):
"""Select Enrichr organism from below:
Human & Mouse: H. sapiens & M. musculus
Fly: D. melanogaster
Yeast: S. cerevisiae
Worm: C. elegans
Fish: D. rerio
"""
organism = {'default': ['', 'hs', 'mm', 'human','mouse',
'homo sapiens', 'mus musculus',
'h. sapiens', 'm. musculus'],
'Fly': ['fly', 'd. melanogaster', 'drosophila melanogaster'],
'Yeast': ['yeast', 's. cerevisiae', 'saccharomyces cerevisiae'],
'Worm': ['worm', 'c. elegans', 'caenorhabditis elegans', 'nematode'],
'Fish': ['fish', 'd. rerio', 'danio rerio', 'zebrafish']
}
for k, v in organism.items():
if self.organism.lower() in v :
self._organism = k
if self._organism is None:
raise Exception("No supported organism found !!!")
if self._organism == 'default':
self._organism = ''
return
|
[
"Select",
"Enrichr",
"organism",
"from",
"below",
":"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L219-L248
|
[
"def",
"get_organism",
"(",
"self",
")",
":",
"organism",
"=",
"{",
"'default'",
":",
"[",
"''",
",",
"'hs'",
",",
"'mm'",
",",
"'human'",
",",
"'mouse'",
",",
"'homo sapiens'",
",",
"'mus musculus'",
",",
"'h. sapiens'",
",",
"'m. musculus'",
"]",
",",
"'Fly'",
":",
"[",
"'fly'",
",",
"'d. melanogaster'",
",",
"'drosophila melanogaster'",
"]",
",",
"'Yeast'",
":",
"[",
"'yeast'",
",",
"'s. cerevisiae'",
",",
"'saccharomyces cerevisiae'",
"]",
",",
"'Worm'",
":",
"[",
"'worm'",
",",
"'c. elegans'",
",",
"'caenorhabditis elegans'",
",",
"'nematode'",
"]",
",",
"'Fish'",
":",
"[",
"'fish'",
",",
"'d. rerio'",
",",
"'danio rerio'",
",",
"'zebrafish'",
"]",
"}",
"for",
"k",
",",
"v",
"in",
"organism",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"organism",
".",
"lower",
"(",
")",
"in",
"v",
":",
"self",
".",
"_organism",
"=",
"k",
"if",
"self",
".",
"_organism",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"No supported organism found !!!\"",
")",
"if",
"self",
".",
"_organism",
"==",
"'default'",
":",
"self",
".",
"_organism",
"=",
"''",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
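At heart, the alias matching in get_organism() is a reverse-dictionary lookup; a compact equivalent (alias table abbreviated) also sidesteps the source's reliance on _organism being pre-initialised to None.

ALIASES = {
    "default": ["", "hs", "mm", "human", "mouse"],
    "Fly": ["fly", "d. melanogaster", "drosophila melanogaster"],
    "Fish": ["fish", "d. rerio", "danio rerio", "zebrafish"],
}
LOOKUP = {alias: key for key, names in ALIASES.items() for alias in names}

def organism_key(name):
    try:
        key = LOOKUP[name.lower()]
    except KeyError:
        raise ValueError("No supported organism found: %r" % name)
    return "" if key == "default" else key

assert organism_key("Zebrafish") == "Fish"
assert organism_key("human") == ""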
test
|
Enrichr.enrich
|
use local mode
p = p-value computed using the Fisher exact test (Hypergeometric test)
Not implemented here:
combined score = log(p)·z
see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4
columns contain:
Term Overlap P-value Adjusted_P-value Genes
|
gseapy/enrichr.py
|
def enrich(self, gmt):
"""use local mode
p = p-value computed using the Fisher exact test (Hypergeometric test)
Not implemented here:
    combined score = log(p)·z
see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4
columns contain:
Term Overlap P-value Adjusted_P-value Genes
"""
if isscalar(self.background):
if isinstance(self.background, int) or self.background.isdigit():
self._bg = int(self.background)
elif isinstance(self.background, str):
# self.background = set(reduce(lambda x,y: x+y, gmt.values(),[]))
self._bg = self.get_background()
self._logger.info("Background: found %s genes"%(len(self._bg)))
else:
raise Exception("Unsupported background data type")
else:
# handle array object: nd.array, list, tuple, set, Series
try:
it = iter(self.background)
self._bg = set(self.background)
except TypeError:
self._logger.error("Unsupported background data type")
# statistical testing
hgtest = list(calc_pvalues(query=self._gls, gene_sets=gmt,
background=self._bg))
if len(hgtest) > 0:
terms, pvals, olsz, gsetsz, genes = hgtest
fdrs, rej = multiple_testing_correction(ps = pvals,
alpha=self.cutoff,
method='benjamini-hochberg')
# save to a dataframe
odict = OrderedDict()
odict['Term'] = terms
odict['Overlap'] = list(map(lambda h,g: "%s/%s"%(h, g), olsz, gsetsz))
odict['P-value'] = pvals
odict['Adjusted P-value'] = fdrs
# odict['Reject (FDR< %s)'%self.cutoff ] = rej
odict['Genes'] = [";".join(g) for g in genes]
res = pd.DataFrame(odict)
return res
return
|
def enrich(self, gmt):
"""use local mode
p = p-value computed using the Fisher exact test (Hypergeometric test)
Not implemented here:
    combined score = log(p)·z
see here: http://amp.pharm.mssm.edu/Enrichr/help#background&q=4
columns contain:
Term Overlap P-value Adjusted_P-value Genes
"""
if isscalar(self.background):
if isinstance(self.background, int) or self.background.isdigit():
self._bg = int(self.background)
elif isinstance(self.background, str):
# self.background = set(reduce(lambda x,y: x+y, gmt.values(),[]))
self._bg = self.get_background()
self._logger.info("Background: found %s genes"%(len(self._bg)))
else:
raise Exception("Unsupported background data type")
else:
# handle array object: nd.array, list, tuple, set, Series
try:
it = iter(self.background)
self._bg = set(self.background)
except TypeError:
self._logger.error("Unsupported background data type")
# statistical testing
hgtest = list(calc_pvalues(query=self._gls, gene_sets=gmt,
background=self._bg))
if len(hgtest) > 0:
terms, pvals, olsz, gsetsz, genes = hgtest
fdrs, rej = multiple_testing_correction(ps = pvals,
alpha=self.cutoff,
method='benjamini-hochberg')
# save to a dataframe
odict = OrderedDict()
odict['Term'] = terms
odict['Overlap'] = list(map(lambda h,g: "%s/%s"%(h, g), olsz, gsetsz))
odict['P-value'] = pvals
odict['Adjusted P-value'] = fdrs
# odict['Reject (FDR< %s)'%self.cutoff ] = rej
odict['Genes'] = [";".join(g) for g in genes]
res = pd.DataFrame(odict)
return res
return
|
[
"use",
"local",
"mode",
"p",
"=",
"p",
"-",
"value",
"computed",
"using",
"the",
"Fisher",
"exact",
"test",
"(",
"Hypergeometric",
"test",
")"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L250-L300
|
[
"def",
"enrich",
"(",
"self",
",",
"gmt",
")",
":",
"if",
"isscalar",
"(",
"self",
".",
"background",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"background",
",",
"int",
")",
"or",
"self",
".",
"background",
".",
"isdigit",
"(",
")",
":",
"self",
".",
"_bg",
"=",
"int",
"(",
"self",
".",
"background",
")",
"elif",
"isinstance",
"(",
"self",
".",
"background",
",",
"str",
")",
":",
"# self.background = set(reduce(lambda x,y: x+y, gmt.values(),[]))",
"self",
".",
"_bg",
"=",
"self",
".",
"get_background",
"(",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Background: found %s genes\"",
"%",
"(",
"len",
"(",
"self",
".",
"_bg",
")",
")",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"Unsupported background data type\"",
")",
"else",
":",
"# handle array object: nd.array, list, tuple, set, Series",
"try",
":",
"it",
"=",
"iter",
"(",
"self",
".",
"background",
")",
"self",
".",
"_bg",
"=",
"set",
"(",
"self",
".",
"background",
")",
"except",
"TypeError",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"\"Unsupported background data type\"",
")",
"# statistical testing",
"hgtest",
"=",
"list",
"(",
"calc_pvalues",
"(",
"query",
"=",
"self",
".",
"_gls",
",",
"gene_sets",
"=",
"gmt",
",",
"background",
"=",
"self",
".",
"_bg",
")",
")",
"if",
"len",
"(",
"hgtest",
")",
">",
"0",
":",
"terms",
",",
"pvals",
",",
"olsz",
",",
"gsetsz",
",",
"genes",
"=",
"hgtest",
"fdrs",
",",
"rej",
"=",
"multiple_testing_correction",
"(",
"ps",
"=",
"pvals",
",",
"alpha",
"=",
"self",
".",
"cutoff",
",",
"method",
"=",
"'benjamini-hochberg'",
")",
"# save to a dataframe",
"odict",
"=",
"OrderedDict",
"(",
")",
"odict",
"[",
"'Term'",
"]",
"=",
"terms",
"odict",
"[",
"'Overlap'",
"]",
"=",
"list",
"(",
"map",
"(",
"lambda",
"h",
",",
"g",
":",
"\"%s/%s\"",
"%",
"(",
"h",
",",
"g",
")",
",",
"olsz",
",",
"gsetsz",
")",
")",
"odict",
"[",
"'P-value'",
"]",
"=",
"pvals",
"odict",
"[",
"'Adjusted P-value'",
"]",
"=",
"fdrs",
"# odict['Reject (FDR< %s)'%self.cutoff ] = rej",
"odict",
"[",
"'Genes'",
"]",
"=",
"[",
"\";\"",
".",
"join",
"(",
"g",
")",
"for",
"g",
"in",
"genes",
"]",
"res",
"=",
"pd",
".",
"DataFrame",
"(",
"odict",
")",
"return",
"res",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
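The local-mode statistics reduce to a hypergeometric tail probability per term followed by Benjamini-Hochberg adjustment. A self-contained sketch (scipy and numpy assumed; the counts are toy values, and calc_pvalues/multiple_testing_correction in the source may differ in detail):

import numpy as np
from scipy.stats import hypergeom

# background size, term size, query size, observed overlap
M, n, N, k = 20000, 300, 150, 12
pval = hypergeom.sf(k - 1, M, n, N)  # P(overlap >= k)

def bh_adjust(pvals):
    # Benjamini-Hochberg adjusted p-values, input order preserved
    pvals = np.asarray(pvals, dtype=float)
    order = pvals.argsort()
    scaled = pvals[order] * len(pvals) / (np.arange(len(pvals)) + 1)
    adjusted = np.minimum.accumulate(scaled[::-1])[::-1]  # enforce monotonicity
    out = np.empty_like(adjusted)
    out[order] = np.clip(adjusted, 0, 1)
    return out

print(pval, bh_adjust([pval, 0.04, 0.5]))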
test
|
Enrichr.run
|
run enrichr for one sample gene list but multi-libraries
|
gseapy/enrichr.py
|
def run(self):
"""run enrichr for one sample gene list but multi-libraries"""
# set organism
self.get_organism()
# read input file
genes_list = self.parse_genelists()
gss = self.parse_genesets()
# if gmt
self._logger.info("Connecting to Enrichr Server to get latest library names")
if len(gss) < 1:
sys.stderr.write("Not validated Enrichr library name provided\n")
sys.stdout.write("Hint: use get_library_name() to view full list of supported names")
sys.exit(1)
self.results = pd.DataFrame()
for g in gss:
if isinstance(g, dict):
## local mode
res = self.enrich(g)
shortID, self._gs = str(id(g)), "CUSTOM%s"%id(g)
if res is None:
self._logger.info("No hits return, for gene set: Custom%s"%shortID)
continue
else:
## online mode
self._gs = str(g)
self._logger.debug("Start Enrichr using library: %s" % (self._gs))
self._logger.info('Analysis name: %s, Enrichr Library: %s' % (self.descriptions, self._gs))
shortID, res = self.get_results(genes_list)
# Remember gene set library used
res.insert(0, "Gene_set", self._gs)
# Append to master dataframe
self.results = self.results.append(res, ignore_index=True, sort=True)
self.res2d = res
if self._outdir is None: continue
self._logger.info('Save file of enrichment results: Job Id:' + str(shortID))
outfile = "%s/%s.%s.%s.reports.txt" % (self.outdir, self._gs, self.descriptions, self.module)
self.res2d.to_csv(outfile, index=False, encoding='utf-8', sep="\t")
# plotting
if not self.__no_plot:
msg = barplot(df=res, cutoff=self.cutoff, figsize=self.figsize,
top_term=self.__top_term, color='salmon',
title=self._gs,
ofname=outfile.replace("txt", self.format))
if msg is not None : self._logger.warning(msg)
self._logger.info('Done.\n')
# clean up tmpdir
if self._outdir is None: self._tmpdir.cleanup()
return
|
def run(self):
"""run enrichr for one sample gene list but multi-libraries"""
# set organism
self.get_organism()
# read input file
genes_list = self.parse_genelists()
gss = self.parse_genesets()
# if gmt
self._logger.info("Connecting to Enrichr Server to get latest library names")
if len(gss) < 1:
sys.stderr.write("Not validated Enrichr library name provided\n")
sys.stdout.write("Hint: use get_library_name() to view full list of supported names")
sys.exit(1)
self.results = pd.DataFrame()
for g in gss:
if isinstance(g, dict):
## local mode
res = self.enrich(g)
shortID, self._gs = str(id(g)), "CUSTOM%s"%id(g)
if res is None:
self._logger.info("No hits return, for gene set: Custom%s"%shortID)
continue
else:
## online mode
self._gs = str(g)
self._logger.debug("Start Enrichr using library: %s" % (self._gs))
self._logger.info('Analysis name: %s, Enrichr Library: %s' % (self.descriptions, self._gs))
shortID, res = self.get_results(genes_list)
# Remember gene set library used
res.insert(0, "Gene_set", self._gs)
# Append to master dataframe
self.results = self.results.append(res, ignore_index=True, sort=True)
self.res2d = res
if self._outdir is None: continue
self._logger.info('Save file of enrichment results: Job Id:' + str(shortID))
outfile = "%s/%s.%s.%s.reports.txt" % (self.outdir, self._gs, self.descriptions, self.module)
self.res2d.to_csv(outfile, index=False, encoding='utf-8', sep="\t")
# plotting
if not self.__no_plot:
msg = barplot(df=res, cutoff=self.cutoff, figsize=self.figsize,
top_term=self.__top_term, color='salmon',
title=self._gs,
ofname=outfile.replace("txt", self.format))
if msg is not None : self._logger.warning(msg)
self._logger.info('Done.\n')
# clean up tmpdir
if self._outdir is None: self._tmpdir.cleanup()
return
|
[
"run",
"enrichr",
"for",
"one",
"sample",
"gene",
"list",
"but",
"multi",
"-",
"libraries"
] |
zqfang/GSEApy
|
python
|
https://github.com/zqfang/GSEApy/blob/673e9ec1391e3b14d3e8a4353117151fd2cb9345/gseapy/enrichr.py#L302-L352
|
[
"def",
"run",
"(",
"self",
")",
":",
"# set organism",
"self",
".",
"get_organism",
"(",
")",
"# read input file",
"genes_list",
"=",
"self",
".",
"parse_genelists",
"(",
")",
"gss",
"=",
"self",
".",
"parse_genesets",
"(",
")",
"# if gmt",
"self",
".",
"_logger",
".",
"info",
"(",
"\"Connecting to Enrichr Server to get latest library names\"",
")",
"if",
"len",
"(",
"gss",
")",
"<",
"1",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Not validated Enrichr library name provided\\n\"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"Hint: use get_library_name() to view full list of supported names\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"self",
".",
"results",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"for",
"g",
"in",
"gss",
":",
"if",
"isinstance",
"(",
"g",
",",
"dict",
")",
":",
"## local mode",
"res",
"=",
"self",
".",
"enrich",
"(",
"g",
")",
"shortID",
",",
"self",
".",
"_gs",
"=",
"str",
"(",
"id",
"(",
"g",
")",
")",
",",
"\"CUSTOM%s\"",
"%",
"id",
"(",
"g",
")",
"if",
"res",
"is",
"None",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"No hits return, for gene set: Custom%s\"",
"%",
"shortID",
")",
"continue",
"else",
":",
"## online mode",
"self",
".",
"_gs",
"=",
"str",
"(",
"g",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Start Enrichr using library: %s\"",
"%",
"(",
"self",
".",
"_gs",
")",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"'Analysis name: %s, Enrichr Library: %s'",
"%",
"(",
"self",
".",
"descriptions",
",",
"self",
".",
"_gs",
")",
")",
"shortID",
",",
"res",
"=",
"self",
".",
"get_results",
"(",
"genes_list",
")",
"# Remember gene set library used",
"res",
".",
"insert",
"(",
"0",
",",
"\"Gene_set\"",
",",
"self",
".",
"_gs",
")",
"# Append to master dataframe",
"self",
".",
"results",
"=",
"self",
".",
"results",
".",
"append",
"(",
"res",
",",
"ignore_index",
"=",
"True",
",",
"sort",
"=",
"True",
")",
"self",
".",
"res2d",
"=",
"res",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"continue",
"self",
".",
"_logger",
".",
"info",
"(",
"'Save file of enrichment results: Job Id:'",
"+",
"str",
"(",
"shortID",
")",
")",
"outfile",
"=",
"\"%s/%s.%s.%s.reports.txt\"",
"%",
"(",
"self",
".",
"outdir",
",",
"self",
".",
"_gs",
",",
"self",
".",
"descriptions",
",",
"self",
".",
"module",
")",
"self",
".",
"res2d",
".",
"to_csv",
"(",
"outfile",
",",
"index",
"=",
"False",
",",
"encoding",
"=",
"'utf-8'",
",",
"sep",
"=",
"\"\\t\"",
")",
"# plotting",
"if",
"not",
"self",
".",
"__no_plot",
":",
"msg",
"=",
"barplot",
"(",
"df",
"=",
"res",
",",
"cutoff",
"=",
"self",
".",
"cutoff",
",",
"figsize",
"=",
"self",
".",
"figsize",
",",
"top_term",
"=",
"self",
".",
"__top_term",
",",
"color",
"=",
"'salmon'",
",",
"title",
"=",
"self",
".",
"_gs",
",",
"ofname",
"=",
"outfile",
".",
"replace",
"(",
"\"txt\"",
",",
"self",
".",
"format",
")",
")",
"if",
"msg",
"is",
"not",
"None",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"msg",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"'Done.\\n'",
")",
"# clean up tmpdir",
"if",
"self",
".",
"_outdir",
"is",
"None",
":",
"self",
".",
"_tmpdir",
".",
"cleanup",
"(",
")",
"return"
] |
673e9ec1391e3b14d3e8a4353117151fd2cb9345
|
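End to end, the flow above is typically driven through gseapy's module-level wrapper; the call below is a hypothetical usage sketch whose keyword names mirror the attributes used in run() (descriptions, organism, cutoff, outdir) and may not match the released API exactly.

import gseapy

enr = gseapy.enrichr(gene_list=["TP53", "BRCA1", "EGFR"],
                     gene_sets=["KEGG_2016"],
                     organism="human",
                     description="demo",
                     outdir=None,   # None skips file output and triggers the tmpdir cleanup path
                     cutoff=0.05)
print(enr.results.head())  # master dataframe accumulated across libraries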
test
|
cube
|
Create a cube primitive
Note that this is made of 6 quads, not triangles
|
meshlabxml/create.py
|
def cube(script, size=1.0, center=False, color=None):
"""Create a cube primitive
Note that this is made of 6 quads, not triangles
"""
"""# Convert size to list if it isn't already
if not isinstance(size, list):
size = list(size)
# If a single value was supplied use it for all 3 axes
if len(size) == 1:
size = [size[0], size[0], size[0]]"""
size = util.make_list(size, 3)
if script.ml_version == '1.3.4BETA':
filter_name = 'Box'
else:
filter_name = 'Box/Cube'
filter_xml = ''.join([
' <filter name="{}">\n'.format(filter_name),
' <Param name="size" ',
'value="1.0" ',
'description="Scale factor" ',
'type="RichFloat" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Cube', change_layer=True)
transform.scale(script, value=size)
# Box is centered on origin at creation
if not center:
transform.translate(script, value=[size[0]/2, size[1]/2, size[2]/2])
if color is not None:
vert_color.function(script, color=color)
return None
|
def cube(script, size=1.0, center=False, color=None):
"""Create a cube primitive
Note that this is made of 6 quads, not triangles
"""
"""# Convert size to list if it isn't already
if not isinstance(size, list):
size = list(size)
# If a single value was supplied use it for all 3 axes
if len(size) == 1:
size = [size[0], size[0], size[0]]"""
size = util.make_list(size, 3)
if script.ml_version == '1.3.4BETA':
filter_name = 'Box'
else:
filter_name = 'Box/Cube'
filter_xml = ''.join([
' <filter name="{}">\n'.format(filter_name),
' <Param name="size" ',
'value="1.0" ',
'description="Scale factor" ',
'type="RichFloat" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Cube', change_layer=True)
transform.scale(script, value=size)
# Box is centered on origin at creation
if not center:
transform.translate(script, value=[size[0]/2, size[1]/2, size[2]/2])
if color is not None:
vert_color.function(script, color=color)
return None
|
[
"Create",
"a",
"cube",
"primitive"
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L13-L47
|
[
"def",
"cube",
"(",
"script",
",",
"size",
"=",
"1.0",
",",
"center",
"=",
"False",
",",
"color",
"=",
"None",
")",
":",
"\"\"\"# Convert size to list if it isn't already\n if not isinstance(size, list):\n size = list(size)\n # If a single value was supplied use it for all 3 axes\n if len(size) == 1:\n size = [size[0], size[0], size[0]]\"\"\"",
"size",
"=",
"util",
".",
"make_list",
"(",
"size",
",",
"3",
")",
"if",
"script",
".",
"ml_version",
"==",
"'1.3.4BETA'",
":",
"filter_name",
"=",
"'Box'",
"else",
":",
"filter_name",
"=",
"'Box/Cube'",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"{}\">\\n'",
".",
"format",
"(",
"filter_name",
")",
",",
"' <Param name=\"size\" '",
",",
"'value=\"1.0\" '",
",",
"'description=\"Scale factor\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' </filter>\\n'",
"]",
")",
"util",
".",
"write_filter",
"(",
"script",
",",
"filter_xml",
")",
"if",
"isinstance",
"(",
"script",
",",
"FilterScript",
")",
":",
"script",
".",
"add_layer",
"(",
"'Cube'",
",",
"change_layer",
"=",
"True",
")",
"transform",
".",
"scale",
"(",
"script",
",",
"value",
"=",
"size",
")",
"# Box is centered on origin at creation",
"if",
"not",
"center",
":",
"transform",
".",
"translate",
"(",
"script",
",",
"value",
"=",
"[",
"size",
"[",
"0",
"]",
"/",
"2",
",",
"size",
"[",
"1",
"]",
"/",
"2",
",",
"size",
"[",
"2",
"]",
"/",
"2",
"]",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
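All the create functions in this module build their <Param .../> lines by string concatenation; a tiny helper (hypothetical, not part of meshlabxml) makes the repeated shape explicit.

def rich_param(name, value, description, ptype):
    # one XML parameter line in the style passed to util.write_filter
    return ('    <Param name="%s" value="%s" description="%s" type="%s" />\n'
            % (name, value, description, ptype))

filter_xml = ('  <filter name="Box/Cube">\n'
              + rich_param('size', 1.0, 'Scale factor', 'RichFloat')
              + '  </filter>\n')
print(filter_xml)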
test
|
cylinder
|
Create a cylinder or cone primitive. Usage is based on OpenSCAD.
# height = height of the cylinder
# radius1 = radius of the cone on bottom end
# radius2 = radius of the cone on top end
# center = If true will center the height of the cone/cylinder around
# the origin. Default is false, placing the base of the cylinder or radius1
# radius of cone at the origin.
#
cir_segments Number of sides of the polygonal approximation of the cone
color = specify a color name to apply vertex colors to the newly
# created mesh
|
meshlabxml/create.py
|
def cylinder(script, up='z', height=1.0, radius=None, radius1=None,
radius2=None, diameter=None, diameter1=None, diameter2=None,
center=False, cir_segments=32, color=None):
"""Create a cylinder or cone primitive. Usage is based on OpenSCAD.
# height = height of the cylinder
# radius1 = radius of the cone on bottom end
# radius2 = radius of the cone on top end
# center = If true will center the height of the cone/cylinder around
# the origin. Default is false, placing the base of the cylinder or radius1
# radius of cone at the origin.
#
cir_segments Number of sides of the polygonal approximation of the cone
color = specify a color name to apply vertex colors to the newly
# created mesh
"""
if radius is not None and diameter is None:
if radius1 is None and diameter1 is None:
radius1 = radius
if radius2 is None and diameter2 is None:
radius2 = radius
if diameter is not None:
if radius1 is None and diameter1 is None:
radius1 = diameter / 2
if radius2 is None and diameter2 is None:
radius2 = diameter / 2
if diameter1 is not None:
radius1 = diameter1 / 2
if diameter2 is not None:
radius2 = diameter2 / 2
if radius1 is None:
radius1 = 1.0
if radius2 is None:
radius2 = radius1
# Cylinder is created centered with Y up
filter_xml = ''.join([
' <filter name="Cone">\n',
' <Param name="h" ',
'value="%s" ' % height,
'description="Height" ',
'type="RichFloat" ',
'/>\n',
' <Param name="r0" ',
'value="%s" ' % radius1,
'description="Radius 1" ',
'type="RichFloat" ',
'/>\n',
' <Param name="r1" ',
'value="%s" ' % radius2,
'description="Radius 2" ',
'type="RichFloat" ',
'/>\n',
' <Param name="subdiv" ',
'value="%d" ' % cir_segments,
'description="Side" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Cone', change_layer=True)
if not center:
transform.translate(script, [0, height / 2, 0])
if up.lower() == 'z':
transform.rotate(script, axis='x', angle=90) # rotate to Z up
if color is not None:
vert_color.function(script, color=color)
return None
|
def cylinder(script, up='z', height=1.0, radius=None, radius1=None,
radius2=None, diameter=None, diameter1=None, diameter2=None,
center=False, cir_segments=32, color=None):
"""Create a cylinder or cone primitive. Usage is based on OpenSCAD.
# height = height of the cylinder
# radius1 = radius of the cone on bottom end
# radius2 = radius of the cone on top end
# center = If true will center the height of the cone/cylinder around
# the origin. Default is false, placing the base of the cylinder or radius1
# radius of cone at the origin.
#
cir_segments Number of sides of the polygonal approximation of the cone
color = specify a color name to apply vertex colors to the newly
# created mesh
"""
if radius is not None and diameter is None:
if radius1 is None and diameter1 is None:
radius1 = radius
if radius2 is None and diameter2 is None:
radius2 = radius
if diameter is not None:
if radius1 is None and diameter1 is None:
radius1 = diameter / 2
if radius2 is None and diameter2 is None:
radius2 = diameter / 2
if diameter1 is not None:
radius1 = diameter1 / 2
if diameter2 is not None:
radius2 = diameter2 / 2
if radius1 is None:
radius1 = 1.0
if radius2 is None:
radius2 = radius1
# Cylinder is created centered with Y up
filter_xml = ''.join([
' <filter name="Cone">\n',
' <Param name="h" ',
'value="%s" ' % height,
'description="Height" ',
'type="RichFloat" ',
'/>\n',
' <Param name="r0" ',
'value="%s" ' % radius1,
'description="Radius 1" ',
'type="RichFloat" ',
'/>\n',
' <Param name="r1" ',
'value="%s" ' % radius2,
'description="Radius 2" ',
'type="RichFloat" ',
'/>\n',
' <Param name="subdiv" ',
'value="%d" ' % cir_segments,
'description="Side" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Cone', change_layer=True)
if not center:
transform.translate(script, [0, height / 2, 0])
if up.lower() == 'z':
transform.rotate(script, axis='x', angle=90) # rotate to Z up
if color is not None:
vert_color.function(script, color=color)
return None
|
[
"Create",
"a",
"cylinder",
"or",
"cone",
"primitive",
".",
"Usage",
"is",
"based",
"on",
"OpenSCAD",
".",
"#",
"height",
"=",
"height",
"of",
"the",
"cylinder",
"#",
"radius1",
"=",
"radius",
"of",
"the",
"cone",
"on",
"bottom",
"end",
"#",
"radius2",
"=",
"radius",
"of",
"the",
"cone",
"on",
"top",
"end",
"#",
"center",
"=",
"If",
"true",
"will",
"center",
"the",
"height",
"of",
"the",
"cone",
"/",
"cylinder",
"around",
"#",
"the",
"origin",
".",
"Default",
"is",
"false",
"placing",
"the",
"base",
"of",
"the",
"cylinder",
"or",
"radius1",
"#",
"radius",
"of",
"cone",
"at",
"the",
"origin",
".",
"#",
"cir_segments",
"Number",
"of",
"sides",
"of",
"the",
"polygonal",
"approximation",
"of",
"the",
"cone"
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L63-L131
|
[
"def",
"cylinder",
"(",
"script",
",",
"up",
"=",
"'z'",
",",
"height",
"=",
"1.0",
",",
"radius",
"=",
"None",
",",
"radius1",
"=",
"None",
",",
"radius2",
"=",
"None",
",",
"diameter",
"=",
"None",
",",
"diameter1",
"=",
"None",
",",
"diameter2",
"=",
"None",
",",
"center",
"=",
"False",
",",
"cir_segments",
"=",
"32",
",",
"color",
"=",
"None",
")",
":",
"if",
"radius",
"is",
"not",
"None",
"and",
"diameter",
"is",
"None",
":",
"if",
"radius1",
"is",
"None",
"and",
"diameter1",
"is",
"None",
":",
"radius1",
"=",
"radius",
"if",
"radius2",
"is",
"None",
"and",
"diameter2",
"is",
"None",
":",
"radius2",
"=",
"radius",
"if",
"diameter",
"is",
"not",
"None",
":",
"if",
"radius1",
"is",
"None",
"and",
"diameter1",
"is",
"None",
":",
"radius1",
"=",
"diameter",
"/",
"2",
"if",
"radius2",
"is",
"None",
"and",
"diameter2",
"is",
"None",
":",
"radius2",
"=",
"diameter",
"/",
"2",
"if",
"diameter1",
"is",
"not",
"None",
":",
"radius1",
"=",
"diameter1",
"/",
"2",
"if",
"diameter2",
"is",
"not",
"None",
":",
"radius2",
"=",
"diameter2",
"/",
"2",
"if",
"radius1",
"is",
"None",
":",
"radius1",
"=",
"1.0",
"if",
"radius2",
"is",
"None",
":",
"radius2",
"=",
"radius1",
"# Cylinder is created centered with Y up",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"Cone\">\\n'",
",",
"' <Param name=\"h\" '",
",",
"'value=\"%s\" '",
"%",
"height",
",",
"'description=\"Height\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"r0\" '",
",",
"'value=\"%s\" '",
"%",
"radius1",
",",
"'description=\"Radius 1\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"r1\" '",
",",
"'value=\"%s\" '",
"%",
"radius2",
",",
"'description=\"Radius 2\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"subdiv\" '",
",",
"'value=\"%d\" '",
"%",
"cir_segments",
",",
"'description=\"Side\" '",
",",
"'type=\"RichInt\" '",
",",
"'/>\\n'",
",",
"' </filter>\\n'",
"]",
")",
"util",
".",
"write_filter",
"(",
"script",
",",
"filter_xml",
")",
"if",
"isinstance",
"(",
"script",
",",
"FilterScript",
")",
":",
"script",
".",
"add_layer",
"(",
"'Cone'",
",",
"change_layer",
"=",
"True",
")",
"if",
"not",
"center",
":",
"transform",
".",
"translate",
"(",
"script",
",",
"[",
"0",
",",
"height",
"/",
"2",
",",
"0",
"]",
")",
"if",
"up",
".",
"lower",
"(",
")",
"==",
"'z'",
":",
"transform",
".",
"rotate",
"(",
"script",
",",
"axis",
"=",
"'x'",
",",
"angle",
"=",
"90",
")",
"# rotate to Z up",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
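The OpenSCAD-style argument precedence above (a per-end diameter beats a per-end radius, which beats the shared diameter, which beats the shared radius) condenses to one helper per end; this restates the source logic and is not part of the library. One asymmetry to note: in the source, an unresolved radius2 falls back to radius1 rather than to 1.0.

def resolve_radius(r=None, d=None, r_end=None, d_end=None, default=1.0):
    if d_end is not None:   # diameter1/diameter2 win unconditionally
        return d_end / 2
    if r_end is not None:   # then an explicit radius1/radius2
        return r_end
    if d is not None:       # then the shared diameter
        return d / 2
    if r is not None:       # then the shared radius
        return r
    return default

assert resolve_radius(r=2, d_end=3) == 1.5
assert resolve_radius(d=4) == 2
assert resolve_radius() == 1.0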
test
|
icosphere
|
create an icosphere mesh
radius Radius of the sphere
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdiv
# color = specify a color name to apply vertex colors to the newly
# created mesh
|
meshlabxml/create.py
|
def icosphere(script, radius=1.0, diameter=None, subdivisions=3, color=None):
"""create an icosphere mesh
radius Radius of the sphere
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdiv
# color = specify a color name to apply vertex colors to the newly
# created mesh"""
if diameter is not None:
radius = diameter / 2
filter_xml = ''.join([
' <filter name="Sphere">\n',
' <Param name="radius" ',
'value="%s" ' % radius,
'description="Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="subdiv" ',
'value="%d" ' % subdivisions,
'description="Subdiv. Level" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Sphere', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
def icosphere(script, radius=1.0, diameter=None, subdivisions=3, color=None):
"""create an icosphere mesh
radius Radius of the sphere
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdiv
# color = specify a color name to apply vertex colors to the newly
# created mesh"""
if diameter is not None:
radius = diameter / 2
filter_xml = ''.join([
' <filter name="Sphere">\n',
' <Param name="radius" ',
'value="%s" ' % radius,
'description="Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="subdiv" ',
'value="%d" ' % subdivisions,
'description="Subdiv. Level" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Sphere', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
[
"create",
"an",
"icosphere",
"mesh"
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L134-L164
|
[
"def",
"icosphere",
"(",
"script",
",",
"radius",
"=",
"1.0",
",",
"diameter",
"=",
"None",
",",
"subdivisions",
"=",
"3",
",",
"color",
"=",
"None",
")",
":",
"if",
"diameter",
"is",
"not",
"None",
":",
"radius",
"=",
"diameter",
"/",
"2",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"Sphere\">\\n'",
",",
"' <Param name=\"radius\" '",
",",
"'value=\"%s\" '",
"%",
"radius",
",",
"'description=\"Radius\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"subdiv\" '",
",",
"'value=\"%d\" '",
"%",
"subdivisions",
",",
"'description=\"Subdiv. Level\" '",
",",
"'type=\"RichInt\" '",
",",
"'/>\\n'",
",",
"' </filter>\\n'",
"]",
")",
"util",
".",
"write_filter",
"(",
"script",
",",
"filter_xml",
")",
"if",
"isinstance",
"(",
"script",
",",
"FilterScript",
")",
":",
"script",
".",
"add_layer",
"(",
"'Sphere'",
",",
"change_layer",
"=",
"True",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
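The face counts quoted in the icosphere docstring follow directly from F = 20 * 4**subdivisions; a quick check:

for level in range(9):            # admitted range is 0..8
    print(level, 20 * 4 ** level)
# level 0 -> 20 (icosahedron), level 3 -> 1280 (the default),
# level 8 -> 1310720 (~1.3 MTris)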
test
|
sphere_cap
|
# angle = Angle of the cone subtending the cap. It must be less than 180
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdivisions
# color = specify a color name to apply vertex colors to the newly
# created mesh
|
meshlabxml/create.py
|
def sphere_cap(script, angle=1.0, subdivisions=3, color=None):
"""# angle = Angle of the cone subtending the cap. It must be <180 less than 180
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdivisions
# color = specify a color name to apply vertex colors to the newly
# created mesh"""
filter_xml = ''.join([
' <filter name="Sphere Cap">\n',
' <Param name="angle" ',
'value="%s" ' % angle,
'description="Angle" ',
'type="RichFloat" ',
'/>\n',
' <Param name="subdiv" ',
'value="%d" ' % subdivisions,
'description="Subdiv. Level" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Sphere Cap', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
def sphere_cap(script, angle=1.0, subdivisions=3, color=None):
"""# angle = Angle of the cone subtending the cap. It must be <180 less than 180
# subdivisions = Subdivision level; Number of the recursive subdivision of the
# surface. Default is 3 (a sphere approximation composed by 1280 faces).
# Admitted values are in the range 0 (an icosahedron) to 8 (a 1.3 MegaTris
# approximation of a sphere). Formula for number of faces: F=20*4^subdivisions
# color = specify a color name to apply vertex colors to the newly
# created mesh"""
filter_xml = ''.join([
' <filter name="Sphere Cap">\n',
' <Param name="angle" ',
'value="%s" ' % angle,
'description="Angle" ',
'type="RichFloat" ',
'/>\n',
' <Param name="subdiv" ',
'value="%d" ' % subdivisions,
'description="Subdiv. Level" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Sphere Cap', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
[
"#",
"angle",
"=",
"Angle",
"of",
"the",
"cone",
"subtending",
"the",
"cap",
".",
"It",
"must",
"be",
"<180",
"less",
"than",
"180",
"#",
"subdivisions",
"=",
"Subdivision",
"level",
";",
"Number",
"of",
"the",
"recursive",
"subdivision",
"of",
"the",
"#",
"surface",
".",
"Default",
"is",
"3",
"(",
"a",
"sphere",
"approximation",
"composed",
"by",
"1280",
"faces",
")",
".",
"#",
"Admitted",
"values",
"are",
"in",
"the",
"range",
"0",
"(",
"an",
"icosahedron",
")",
"to",
"8",
"(",
"a",
"1",
".",
"3",
"MegaTris",
"#",
"approximation",
"of",
"a",
"sphere",
")",
".",
"Formula",
"for",
"number",
"of",
"faces",
":",
"F",
"=",
"20",
"*",
"4^subdivisions",
"#",
"color",
"=",
"specify",
"a",
"color",
"name",
"to",
"apply",
"vertex",
"colors",
"to",
"the",
"newly",
"#",
"created",
"mesh"
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L167-L193
|
[
"def",
"sphere_cap",
"(",
"script",
",",
"angle",
"=",
"1.0",
",",
"subdivisions",
"=",
"3",
",",
"color",
"=",
"None",
")",
":",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"Sphere Cap\">\\n'",
",",
"' <Param name=\"angle\" '",
",",
"'value=\"%s\" '",
"%",
"angle",
",",
"'description=\"Angle\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"subdiv\" '",
",",
"'value=\"%d\" '",
"%",
"subdivisions",
",",
"'description=\"Subdiv. Level\" '",
",",
"'type=\"RichInt\" '",
",",
"'/>\\n'",
",",
"' </filter>\\n'",
"]",
")",
"util",
".",
"write_filter",
"(",
"script",
",",
"filter_xml",
")",
"if",
"isinstance",
"(",
"script",
",",
"FilterScript",
")",
":",
"script",
".",
"add_layer",
"(",
"'Sphere Cap'",
",",
"change_layer",
"=",
"True",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
test
|
torus
|
Create a torus mesh
Args:
major_radius (float, (optional)): radius from the origin to the
center of the cross sections
minor_radius (float, (optional)): radius of the torus cross
section
inner_diameter (float, (optional)): inner diameter of torus. If
both inner_diameter and outer_diameter are provided then
        these will override major_radius and minor_radius.
outer_diameter (float, (optional)): outer diameter of torus. If
both inner_diameter and outer_diameter are provided then
these will override major_radius and minor_radius.
major_segments (int (optional)): number of segments for the main
ring of the torus
minor_segments (int (optional)): number of segments for the minor
ring of the torus
color (str (optional)): color name to apply vertex colors to the
newly created mesh
Returns:
None
|
meshlabxml/create.py
|
def torus(script, major_radius=3.0, minor_radius=1.0, inner_diameter=None,
outer_diameter=None, major_segments=48, minor_segments=12,
color=None):
"""Create a torus mesh
Args:
major_radius (float, (optional)): radius from the origin to the
center of the cross sections
minor_radius (float, (optional)): radius of the torus cross
section
inner_diameter (float, (optional)): inner diameter of torus. If
both inner_diameter and outer_diameter are provided then
            these will override major_radius and minor_radius.
outer_diameter (float, (optional)): outer diameter of torus. If
both inner_diameter and outer_diameter are provided then
these will override major_radius and minor_radius.
major_segments (int (optional)): number of segments for the main
ring of the torus
minor_segments (int (optional)): number of segments for the minor
ring of the torus
color (str (optional)): color name to apply vertex colors to the
newly created mesh
Returns:
None
"""
if inner_diameter is not None and outer_diameter is not None:
major_radius = (inner_diameter + outer_diameter) / 4
minor_radius = major_radius - inner_diameter / 2
# Ref: inner_diameter = 2 * (major_radius - minor_radius)
# Ref: outer_diameter = 2 * (major_radius + minor_radius)
filter_xml = ''.join([
' <filter name="Torus">\n',
' <Param name="hRadius" ',
'value="%s" ' % major_radius,
'description="Horizontal Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="vRadius" ',
'value="%s" ' % minor_radius,
'description="Vertical Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="hSubdiv" ',
'value="%d" ' % major_segments,
'description="Horizontal Subdivision" ',
'type="RichInt" ',
'/>\n',
' <Param name="vSubdiv" ',
'value="%d" ' % minor_segments,
'description="Vertical Subdivision" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Torus', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
def torus(script, major_radius=3.0, minor_radius=1.0, inner_diameter=None,
outer_diameter=None, major_segments=48, minor_segments=12,
color=None):
"""Create a torus mesh
Args:
major_radius (float, (optional)): radius from the origin to the
center of the cross sections
minor_radius (float, (optional)): radius of the torus cross
section
inner_diameter (float, (optional)): inner diameter of torus. If
both inner_diameter and outer_diameter are provided then
            these will override major_radius and minor_radius.
outer_diameter (float, (optional)): outer diameter of torus. If
both inner_diameter and outer_diameter are provided then
these will override major_radius and minor_radius.
major_segments (int (optional)): number of segments for the main
ring of the torus
minor_segments (int (optional)): number of segments for the minor
ring of the torus
color (str (optional)): color name to apply vertex colors to the
newly created mesh
Returns:
None
"""
if inner_diameter is not None and outer_diameter is not None:
major_radius = (inner_diameter + outer_diameter) / 4
minor_radius = major_radius - inner_diameter / 2
# Ref: inner_diameter = 2 * (major_radius - minor_radius)
# Ref: outer_diameter = 2 * (major_radius + minor_radius)
filter_xml = ''.join([
' <filter name="Torus">\n',
' <Param name="hRadius" ',
'value="%s" ' % major_radius,
'description="Horizontal Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="vRadius" ',
'value="%s" ' % minor_radius,
'description="Vertical Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="hSubdiv" ',
'value="%d" ' % major_segments,
'description="Horizontal Subdivision" ',
'type="RichInt" ',
'/>\n',
' <Param name="vSubdiv" ',
'value="%d" ' % minor_segments,
'description="Vertical Subdivision" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Torus', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
[
"Create",
"a",
"torus",
"mesh"
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L196-L256
|
[
"def",
"torus",
"(",
"script",
",",
"major_radius",
"=",
"3.0",
",",
"minor_radius",
"=",
"1.0",
",",
"inner_diameter",
"=",
"None",
",",
"outer_diameter",
"=",
"None",
",",
"major_segments",
"=",
"48",
",",
"minor_segments",
"=",
"12",
",",
"color",
"=",
"None",
")",
":",
"if",
"inner_diameter",
"is",
"not",
"None",
"and",
"outer_diameter",
"is",
"not",
"None",
":",
"major_radius",
"=",
"(",
"inner_diameter",
"+",
"outer_diameter",
")",
"/",
"4",
"minor_radius",
"=",
"major_radius",
"-",
"inner_diameter",
"/",
"2",
"# Ref: inner_diameter = 2 * (major_radius - minor_radius)",
"# Ref: outer_diameter = 2 * (major_radius + minor_radius)",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"Torus\">\\n'",
",",
"' <Param name=\"hRadius\" '",
",",
"'value=\"%s\" '",
"%",
"major_radius",
",",
"'description=\"Horizontal Radius\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"vRadius\" '",
",",
"'value=\"%s\" '",
"%",
"minor_radius",
",",
"'description=\"Vertical Radius\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"hSubdiv\" '",
",",
"'value=\"%d\" '",
"%",
"major_segments",
",",
"'description=\"Horizontal Subdivision\" '",
",",
"'type=\"RichInt\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"vSubdiv\" '",
",",
"'value=\"%d\" '",
"%",
"minor_segments",
",",
"'description=\"Vertical Subdivision\" '",
",",
"'type=\"RichInt\" '",
",",
"'/>\\n'",
",",
"' </filter>\\n'",
"]",
")",
"util",
".",
"write_filter",
"(",
"script",
",",
"filter_xml",
")",
"if",
"isinstance",
"(",
"script",
",",
"FilterScript",
")",
":",
"script",
".",
"add_layer",
"(",
"'Torus'",
",",
"change_layer",
"=",
"True",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
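The diameter-to-radius conversion in torus() and its Ref comments are mutually consistent; a round-trip check with arbitrary values:

inner_d, outer_d = 4.0, 8.0
major = (inner_d + outer_d) / 4   # -> 3.0
minor = major - inner_d / 2       # -> 1.0
assert 2 * (major - minor) == inner_d
assert 2 * (major + minor) == outer_d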
test
|
grid
|
2D square/plane/grid created on XY plane
x_segments # Number of segments in the X direction.
y_segments # Number of segments in the Y direction.
center="false" # If true square will be centered on origin;
    otherwise it is placed in the positive XY quadrant.
|
meshlabxml/create.py
|
def grid(script, size=1.0, x_segments=1, y_segments=1, center=False,
color=None):
"""2D square/plane/grid created on XY plane
x_segments # Number of segments in the X direction.
y_segments # Number of segments in the Y direction.
center="false" # If true square will be centered on origin;
        otherwise it is placed in the positive XY quadrant.
"""
size = util.make_list(size, 2)
filter_xml = ''.join([
' <filter name="Grid Generator">\n',
' <Param name="absScaleX" ',
'value="{}" '.format(size[0]),
'description="x scale" ',
'type="RichFloat" ',
'/>\n',
' <Param name="absScaleY" ',
'value="{}" '.format(size[1]),
'description="y scale" ',
'type="RichFloat" ',
'/>\n',
' <Param name="numVertX" ',
'value="{:d}" '.format(x_segments + 1),
'description="num vertices on x" ',
'type="RichInt" ',
'/>\n',
' <Param name="numVertY" ',
'value="{:d}" '.format(y_segments + 1),
'description="num vertices on y" ',
'type="RichInt" ',
'/>\n',
' <Param name="center" ',
'value="false" ',
'description="centered on origin" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Grid Generator', change_layer=True)
"""This is to work around a bug in MeshLab whereby the Grid Generator does not
create zero values for z. Ref bug #458: https://sourceforge.net/p/meshlab/bugs/458/"""
transform.vert_function(script, z_func='rint(z)')
"""Note that the "center" parameter in the mlx script does not actually
center the square, not sure what it is doing. Instead this is set to false,
which places the plane in the -X,+Y quadrant, and it is translated to the
appropriate position after creation.
"""
if center:
transform.translate(script, value=[size[0]/2, -size[1]/2, 0])
else:
transform.translate(script, value=[size[0], 0, 0])
if color is not None:
vert_color.function(script, color=color)
return None
|
def grid(script, size=1.0, x_segments=1, y_segments=1, center=False,
color=None):
"""2D square/plane/grid created on XY plane
x_segments # Number of segments in the X direction.
y_segments # Number of segments in the Y direction.
center="false" # If true square will be centered on origin;
        otherwise it is placed in the positive XY quadrant.
"""
size = util.make_list(size, 2)
filter_xml = ''.join([
' <filter name="Grid Generator">\n',
' <Param name="absScaleX" ',
'value="{}" '.format(size[0]),
'description="x scale" ',
'type="RichFloat" ',
'/>\n',
' <Param name="absScaleY" ',
'value="{}" '.format(size[1]),
'description="y scale" ',
'type="RichFloat" ',
'/>\n',
' <Param name="numVertX" ',
'value="{:d}" '.format(x_segments + 1),
'description="num vertices on x" ',
'type="RichInt" ',
'/>\n',
' <Param name="numVertY" ',
'value="{:d}" '.format(y_segments + 1),
'description="num vertices on y" ',
'type="RichInt" ',
'/>\n',
' <Param name="center" ',
'value="false" ',
'description="centered on origin" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Grid Generator', change_layer=True)
"""This is to work around a bug in MeshLab whereby the Grid Generator does not
create zero values for z. Ref bug #458: https://sourceforge.net/p/meshlab/bugs/458/"""
transform.vert_function(script, z_func='rint(z)')
"""Note that the "center" parameter in the mlx script does not actually
center the square, not sure what it is doing. Instead this is set to false,
which places the plane in the -X,+Y quadrant, and it is translated to the
appropriate position after creation.
"""
if center:
transform.translate(script, value=[size[0]/2, -size[1]/2, 0])
else:
transform.translate(script, value=[size[0], 0, 0])
if color is not None:
vert_color.function(script, color=color)
return None
|
[
"2D",
"square",
"/",
"plane",
"/",
"grid",
"created",
"on",
"XY",
"plane"
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L259-L318
|
[
"def",
"grid",
"(",
"script",
",",
"size",
"=",
"1.0",
",",
"x_segments",
"=",
"1",
",",
"y_segments",
"=",
"1",
",",
"center",
"=",
"False",
",",
"color",
"=",
"None",
")",
":",
"size",
"=",
"util",
".",
"make_list",
"(",
"size",
",",
"2",
")",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"Grid Generator\">\\n'",
",",
"' <Param name=\"absScaleX\" '",
",",
"'value=\"{}\" '",
".",
"format",
"(",
"size",
"[",
"0",
"]",
")",
",",
"'description=\"x scale\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"absScaleY\" '",
",",
"'value=\"{}\" '",
".",
"format",
"(",
"size",
"[",
"1",
"]",
")",
",",
"'description=\"y scale\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"numVertX\" '",
",",
"'value=\"{:d}\" '",
".",
"format",
"(",
"x_segments",
"+",
"1",
")",
",",
"'description=\"num vertices on x\" '",
",",
"'type=\"RichInt\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"numVertY\" '",
",",
"'value=\"{:d}\" '",
".",
"format",
"(",
"y_segments",
"+",
"1",
")",
",",
"'description=\"num vertices on y\" '",
",",
"'type=\"RichInt\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"center\" '",
",",
"'value=\"false\" '",
",",
"'description=\"centered on origin\" '",
",",
"'type=\"RichBool\" '",
",",
"'/>\\n'",
",",
"' </filter>\\n'",
"]",
")",
"util",
".",
"write_filter",
"(",
"script",
",",
"filter_xml",
")",
"if",
"isinstance",
"(",
"script",
",",
"FilterScript",
")",
":",
"script",
".",
"add_layer",
"(",
"'Grid Generator'",
",",
"change_layer",
"=",
"True",
")",
"\"\"\"This is to work around a bug in MeshLab whereby the Grid Generator does not\n create zero values for z. Ref bug #458: https://sourceforge.net/p/meshlab/bugs/458/\"\"\"",
"transform",
".",
"vert_function",
"(",
"script",
",",
"z_func",
"=",
"'rint(z)'",
")",
"\"\"\"Note that the \"center\" parameter in the mlx script does not actually\n center the square, not sure what it is doing. Instead this is set to false,\n which places the plane in the -X,+Y quadrant, and it is translated to the\n appropriate position after creation.\n \"\"\"",
"if",
"center",
":",
"transform",
".",
"translate",
"(",
"script",
",",
"value",
"=",
"[",
"size",
"[",
"0",
"]",
"/",
"2",
",",
"-",
"size",
"[",
"1",
"]",
"/",
"2",
",",
"0",
"]",
")",
"else",
":",
"transform",
".",
"translate",
"(",
"script",
",",
"value",
"=",
"[",
"size",
"[",
"0",
"]",
",",
"0",
",",
"0",
"]",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
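Because the Grid Generator ignores its own center flag (see the comments embedded in grid() above), the post-creation translate is what actually positions the plane. Taking the source comment at face value (the raw grid occupies the -X,+Y quadrant), both offsets check out:

size = [2.0, 3.0]                          # [x, y] extent after util.make_list
# raw grid spans x in [-size[0], 0] and y in [0, size[1]] per the comment
centered = [size[0] / 2, -size[1] / 2, 0]  # moves the midpoint to the origin
corner = [size[0], 0, 0]                   # moves the grid into +X,+Y
print(centered, corner)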
test
|
annulus
|
Create a 2D (surface) circle or annulus
radius1=1 # Outer radius of the circle
radius2=0 # Inner radius of the circle (if non-zero it creates an annulus)
color="" # specify a color name to apply vertex colors to the newly created mesh
OpenSCAD: parameters: diameter overrides radius, radius1 & radius2 override radius
|
meshlabxml/create.py
|
def annulus(script, radius=None, radius1=None, radius2=None, diameter=None,
diameter1=None, diameter2=None, cir_segments=32, color=None):
"""Create a 2D (surface) circle or annulus
radius1=1 # Outer radius of the circle
radius2=0 # Inner radius of the circle (if non-zero it creates an annulus)
color="" # specify a color name to apply vertex colors to the newly created mesh
OpenSCAD: parameters: diameter overrides radius, radius1 & radius2 override radius
"""
if radius is not None and diameter is None:
if radius1 is None and diameter1 is None:
radius1 = radius
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter is not None:
if radius1 is None and diameter1 is None:
radius1 = diameter / 2
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter1 is not None:
radius1 = diameter1 / 2
if diameter2 is not None:
radius2 = diameter2 / 2
if radius1 is None:
radius1 = 1
if radius2 is None:
radius2 = 0
# Circle is created centered on the XY plane
filter_xml = ''.join([
' <filter name="Annulus">\n',
' <Param name="externalRadius" ',
'value="%s" ' % radius1,
'description="External Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="internalRadius" ',
'value="%s" ' % radius2,
'description="Internal Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="sides" ',
'value="%d" ' % cir_segments,
'description="Sides" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Annulus', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
def annulus(script, radius=None, radius1=None, radius2=None, diameter=None,
diameter1=None, diameter2=None, cir_segments=32, color=None):
"""Create a 2D (surface) circle or annulus
radius1=1 # Outer radius of the circle
radius2=0 # Inner radius of the circle (if non-zero it creates an annulus)
color="" # specify a color name to apply vertex colors to the newly created mesh
OpenSCAD: parameters: diameter overrides radius, radius1 & radius2 override radius
"""
if radius is not None and diameter is None:
if radius1 is None and diameter1 is None:
radius1 = radius
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter is not None:
if radius1 is None and diameter1 is None:
radius1 = diameter / 2
if radius2 is None and diameter2 is None:
radius2 = 0
if diameter1 is not None:
radius1 = diameter1 / 2
if diameter2 is not None:
radius2 = diameter2 / 2
if radius1 is None:
radius1 = 1
if radius2 is None:
radius2 = 0
# Circle is created centered on the XY plane
filter_xml = ''.join([
' <filter name="Annulus">\n',
' <Param name="externalRadius" ',
'value="%s" ' % radius1,
'description="External Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="internalRadius" ',
'value="%s" ' % radius2,
'description="Internal Radius" ',
'type="RichFloat" ',
'/>\n',
' <Param name="sides" ',
'value="%d" ' % cir_segments,
'description="Sides" ',
'type="RichInt" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
if isinstance(script, FilterScript):
script.add_layer('Annulus', change_layer=True)
if color is not None:
vert_color.function(script, color=color)
return None
|
[
"Create",
"a",
"2D",
"(",
"surface",
")",
"circle",
"or",
"annulus",
"radius1",
"=",
"1",
"#",
"Outer",
"radius",
"of",
"the",
"circle",
"radius2",
"=",
"0",
"#",
"Inner",
"radius",
"of",
"the",
"circle",
"(",
"if",
"non",
"-",
"zero",
"it",
"creates",
"an",
"annulus",
")",
"color",
"=",
"#",
"specify",
"a",
"color",
"name",
"to",
"apply",
"vertex",
"colors",
"to",
"the",
"newly",
"created",
"mesh"
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L321-L373
|
[
"def",
"annulus",
"(",
"script",
",",
"radius",
"=",
"None",
",",
"radius1",
"=",
"None",
",",
"radius2",
"=",
"None",
",",
"diameter",
"=",
"None",
",",
"diameter1",
"=",
"None",
",",
"diameter2",
"=",
"None",
",",
"cir_segments",
"=",
"32",
",",
"color",
"=",
"None",
")",
":",
"if",
"radius",
"is",
"not",
"None",
"and",
"diameter",
"is",
"None",
":",
"if",
"radius1",
"is",
"None",
"and",
"diameter1",
"is",
"None",
":",
"radius1",
"=",
"radius",
"if",
"radius2",
"is",
"None",
"and",
"diameter2",
"is",
"None",
":",
"radius2",
"=",
"0",
"if",
"diameter",
"is",
"not",
"None",
":",
"if",
"radius1",
"is",
"None",
"and",
"diameter1",
"is",
"None",
":",
"radius1",
"=",
"diameter",
"/",
"2",
"if",
"radius2",
"is",
"None",
"and",
"diameter2",
"is",
"None",
":",
"radius2",
"=",
"0",
"if",
"diameter1",
"is",
"not",
"None",
":",
"radius1",
"=",
"diameter1",
"/",
"2",
"if",
"diameter2",
"is",
"not",
"None",
":",
"radius2",
"=",
"diameter2",
"/",
"2",
"if",
"radius1",
"is",
"None",
":",
"radius1",
"=",
"1",
"if",
"radius2",
"is",
"None",
":",
"radius2",
"=",
"0",
"# Circle is created centered on the XY plane",
"filter_xml",
"=",
"''",
".",
"join",
"(",
"[",
"' <filter name=\"Annulus\">\\n'",
",",
"' <Param name=\"externalRadius\" '",
",",
"'value=\"%s\" '",
"%",
"radius1",
",",
"'description=\"External Radius\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"internalRadius\" '",
",",
"'value=\"%s\" '",
"%",
"radius2",
",",
"'description=\"Internal Radius\" '",
",",
"'type=\"RichFloat\" '",
",",
"'/>\\n'",
",",
"' <Param name=\"sides\" '",
",",
"'value=\"%d\" '",
"%",
"cir_segments",
",",
"'description=\"Sides\" '",
",",
"'type=\"RichInt\" '",
",",
"'/>\\n'",
",",
"' </filter>\\n'",
"]",
")",
"util",
".",
"write_filter",
"(",
"script",
",",
"filter_xml",
")",
"if",
"isinstance",
"(",
"script",
",",
"FilterScript",
")",
":",
"script",
".",
"add_layer",
"(",
"'Annulus'",
",",
"change_layer",
"=",
"True",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
test
|
cylinder_open_hires
|
Creates a round open tube, e.g. a cylinder with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
invert_normals (bool (optional)): if True, normals point outward; if False, normals point inward.
|
meshlabxml/create.py
|
def cylinder_open_hires(script, height=1.0, radius=1, diameter=None,
cir_segments=48, height_segments=1,
invert_normals=False, center=False, color=None):
""" Creates a round open tube, e.g. a cylinder with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
    invert_normals (bool (optional)): if True, normals point outward; if False, normals point inward.
"""
if diameter is not None:
radius = diameter / 2
if center:
z_translate = -height / 2
else:
z_translate = 0.0
grid(script,
[2 * math.pi * radius, height],
x_segments=cir_segments,
y_segments=height_segments)
transform.rotate(script, 'x', 90)
transform.translate(script, [math.pi * radius / 2, 0, z_translate])
if not invert_normals:
transform.rotate(script, 'z', 180)
transform.wrap2cylinder(script, radius)
clean.merge_vert(script, threshold=0.00002)
if color is not None:
vert_color.function(script, color=color)
return None
|
def cylinder_open_hires(script, height=1.0, radius=1, diameter=None,
cir_segments=48, height_segments=1,
invert_normals=False, center=False, color=None):
""" Creates a round open tube, e.g. a cylinder with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
    invert_normals (bool (optional)): if True, normals point outward; if False, normals point inward.
"""
if diameter is not None:
radius = diameter / 2
if center:
z_translate = -height / 2
else:
z_translate = 0.0
grid(script,
[2 * math.pi * radius, height],
x_segments=cir_segments,
y_segments=height_segments)
transform.rotate(script, 'x', 90)
transform.translate(script, [math.pi * radius / 2, 0, z_translate])
if not invert_normals:
transform.rotate(script, 'z', 180)
transform.wrap2cylinder(script, radius)
clean.merge_vert(script, threshold=0.00002)
if color is not None:
vert_color.function(script, color=color)
return None
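
A minimal sketch of the same workflow for cylinder_open_hires(); the dimensions and output path are illustrative. The tube is left open so it can later be bent and joined into a torus, as the docstring suggests.

import meshlabxml as mlx

tube = mlx.FilterScript(file_out='tube.ply', ml_version='2016.12')  # hypothetical output
# height_segments > 1 gives the wall enough rows to bend smoothly later.
mlx.create.cylinder_open_hires(tube, height=10.0, radius=2.0,
                               cir_segments=96, height_segments=24,
                               center=True)
tube.run_script()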
|
[
"Creates",
"a",
"round",
"open",
"tube",
"e",
".",
"g",
".",
"a",
"cylinder",
"with",
"no",
"top",
"or",
"bottom",
"."
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L376-L405
|
[
"def",
"cylinder_open_hires",
"(",
"script",
",",
"height",
"=",
"1.0",
",",
"radius",
"=",
"1",
",",
"diameter",
"=",
"None",
",",
"cir_segments",
"=",
"48",
",",
"height_segments",
"=",
"1",
",",
"invert_normals",
"=",
"False",
",",
"center",
"=",
"False",
",",
"color",
"=",
"None",
")",
":",
"if",
"diameter",
"is",
"not",
"None",
":",
"radius",
"=",
"diameter",
"/",
"2",
"if",
"center",
":",
"z_translate",
"=",
"-",
"height",
"/",
"2",
"else",
":",
"z_translate",
"=",
"0.0",
"grid",
"(",
"script",
",",
"[",
"2",
"*",
"math",
".",
"pi",
"*",
"radius",
",",
"height",
"]",
",",
"x_segments",
"=",
"cir_segments",
",",
"y_segments",
"=",
"height_segments",
")",
"transform",
".",
"rotate",
"(",
"script",
",",
"'x'",
",",
"90",
")",
"transform",
".",
"translate",
"(",
"script",
",",
"[",
"math",
".",
"pi",
"*",
"radius",
"/",
"2",
",",
"0",
",",
"z_translate",
"]",
")",
"if",
"not",
"invert_normals",
":",
"transform",
".",
"rotate",
"(",
"script",
",",
"'z'",
",",
"180",
")",
"transform",
".",
"wrap2cylinder",
"(",
"script",
",",
"radius",
")",
"clean",
".",
"merge_vert",
"(",
"script",
",",
"threshold",
"=",
"0.00002",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
test
|
cube_open_hires_old
|
Creates a square open tube, e.g. a box with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
|
meshlabxml/create.py
|
def cube_open_hires_old(script, size=1.0, x_segments=1, y_segments=1, z_segments=1,
center=False, color=None):
""" Creates a square open tube, e.g. a box with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
"""
"""# Convert size to list if it isn't already
if not isinstance(size, list):
size = list(size)
# If a single value was supplied use it for all 3 axes
if len(size) == 1:
size = [size[0], size[0], size[0]]"""
size = util.make_list(size, 3)
# X sides
grid(script, [size[0], size[2]],
x_segments=x_segments,
y_segments=z_segments)
transform.rotate(script, 'x', 90)
#transform.translate(script, [0, 0, -size[2]])
layers.duplicate(script)
# Rotate to correct normals
transform.rotate(script, 'z', 180)
transform.translate(script, [size[0], size[1], 0])
# Y sides
grid(script, [size[2], size[1]],
x_segments=z_segments,
y_segments=y_segments)
transform.rotate(script, 'y', -90)
#transform.rotate(script, 'z', 90)
#transform.translate(script, [0, 0, -size[2]])
layers.duplicate(script)
# Rotate to correct normals
transform.rotate(script, 'z', 180)
transform.translate(script, [size[0], size[1], 0])
layers.join(script)
clean.merge_vert(script, threshold=0.00002)
# normals.fix(script)
if center:
transform.translate(script, [-size[0] / 2, -size[1] / 2, -size[2] / 2])
if color is not None:
vert_color.function(script, color=color)
return None
|
def cube_open_hires_old(script, size=1.0, x_segments=1, y_segments=1, z_segments=1,
center=False, color=None):
""" Creates a square open tube, e.g. a box with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
"""
"""# Convert size to list if it isn't already
if not isinstance(size, list):
size = list(size)
# If a single value was supplied use it for all 3 axes
if len(size) == 1:
size = [size[0], size[0], size[0]]"""
size = util.make_list(size, 3)
# X sides
grid(script, [size[0], size[2]],
x_segments=x_segments,
y_segments=z_segments)
transform.rotate(script, 'x', 90)
#transform.translate(script, [0, 0, -size[2]])
layers.duplicate(script)
# Rotate to correct normals
transform.rotate(script, 'z', 180)
transform.translate(script, [size[0], size[1], 0])
# Y sides
grid(script, [size[2], size[1]],
x_segments=z_segments,
y_segments=y_segments)
transform.rotate(script, 'y', -90)
#transform.rotate(script, 'z', 90)
#transform.translate(script, [0, 0, -size[2]])
layers.duplicate(script)
# Rotate to correct normals
transform.rotate(script, 'z', 180)
transform.translate(script, [size[0], size[1], 0])
layers.join(script)
clean.merge_vert(script, threshold=0.00002)
# normals.fix(script)
if center:
transform.translate(script, [-size[0] / 2, -size[1] / 2, -size[2] / 2])
if color is not None:
vert_color.function(script, color=color)
return None
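
The commented-out block above was superseded by util.make_list; a rough stand-in (an assumption about its behavior, not the repo's actual implementation) looks like this:

def make_list(value, length):
    """Return value as a list of the given length, replicating a scalar."""
    if not isinstance(value, (list, tuple)):  # scalar -> single-element list
        value = [value]
    if len(value) == 1:  # replicate a single value to the requested length
        value = list(value) * length
    return list(value)

# make_list(1.0, 3) -> [1.0, 1.0, 1.0]; make_list([2, 3, 4], 3) -> [2, 3, 4]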
|
[
"Creates",
"a",
"square",
"open",
"tube",
"e",
".",
"g",
".",
"a",
"box",
"with",
"no",
"top",
"or",
"bottom",
"."
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L408-L452
|
[
"def",
"cube_open_hires_old",
"(",
"script",
",",
"size",
"=",
"1.0",
",",
"x_segments",
"=",
"1",
",",
"y_segments",
"=",
"1",
",",
"z_segments",
"=",
"1",
",",
"center",
"=",
"False",
",",
"color",
"=",
"None",
")",
":",
"\"\"\"# Convert size to list if it isn't already\n if not isinstance(size, list):\n size = list(size)\n # If a single value was supplied use it for all 3 axes\n if len(size) == 1:\n size = [size[0], size[0], size[0]]\"\"\"",
"size",
"=",
"util",
".",
"make_list",
"(",
"size",
",",
"3",
")",
"# X sides",
"grid",
"(",
"script",
",",
"[",
"size",
"[",
"0",
"]",
",",
"size",
"[",
"2",
"]",
"]",
",",
"x_segments",
"=",
"x_segments",
",",
"y_segments",
"=",
"z_segments",
")",
"transform",
".",
"rotate",
"(",
"script",
",",
"'x'",
",",
"90",
")",
"#transform.translate(script, [0, 0, -size[2]])",
"layers",
".",
"duplicate",
"(",
"script",
")",
"# Rotate to correct normals",
"transform",
".",
"rotate",
"(",
"script",
",",
"'z'",
",",
"180",
")",
"transform",
".",
"translate",
"(",
"script",
",",
"[",
"size",
"[",
"0",
"]",
",",
"size",
"[",
"1",
"]",
",",
"0",
"]",
")",
"# Y sides",
"grid",
"(",
"script",
",",
"[",
"size",
"[",
"2",
"]",
",",
"size",
"[",
"1",
"]",
"]",
",",
"x_segments",
"=",
"z_segments",
",",
"y_segments",
"=",
"y_segments",
")",
"transform",
".",
"rotate",
"(",
"script",
",",
"'y'",
",",
"-",
"90",
")",
"#transform.rotate(script, 'z', 90)",
"#transform.translate(script, [0, 0, -size[2]])",
"layers",
".",
"duplicate",
"(",
"script",
")",
"# Rotate to correct normals",
"transform",
".",
"rotate",
"(",
"script",
",",
"'z'",
",",
"180",
")",
"transform",
".",
"translate",
"(",
"script",
",",
"[",
"size",
"[",
"0",
"]",
",",
"size",
"[",
"1",
"]",
",",
"0",
"]",
")",
"layers",
".",
"join",
"(",
"script",
")",
"clean",
".",
"merge_vert",
"(",
"script",
",",
"threshold",
"=",
"0.00002",
")",
"# normals.fix(script)",
"if",
"center",
":",
"transform",
".",
"translate",
"(",
"script",
",",
"[",
"-",
"size",
"[",
"0",
"]",
"/",
"2",
",",
"-",
"size",
"[",
"1",
"]",
"/",
"2",
",",
"-",
"size",
"[",
"2",
"]",
"/",
"2",
"]",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|
test
|
cube_open_hires
|
Creates a square open tube, e.g. a box with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
|
meshlabxml/create.py
|
def cube_open_hires(script, size=1.0, x_segments=1, y_segments=1, z_segments=1,
center=False, color=None):
""" Creates a square open tube, e.g. a box with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
"""
"""# Convert size to list if it isn't already
if not isinstance(size, list):
size = list(size)
# If a single value was supplied use it for all 3 axes
if len(size) == 1:
size = [size[0], size[0], size[0]]"""
size = util.make_list(size, 3)
# Make big grid and bend
grid(script, [2*(x_segments + y_segments), z_segments],
x_segments=2*(x_segments + y_segments),
y_segments=z_segments)
transform.rotate(script, 'x', 90)
# Bend 3 times into a rectangular tube
if script.ml_version == '1.3.4BETA': # muparser version: 1.3.2
transform.vert_function(script,
x_func='if(x>{x_size}, {x_size}, x)'.format(x_size=x_segments),
y_func='if(x>{x_size}, (x-{x_size}), y)'.format(x_size=x_segments),
z_func='z')
transform.vert_function(script,
x_func='if(y>{y_size}, ({y_size}-y+{x_size}), x)'.format(x_size=x_segments, y_size=y_segments),
y_func='if(y>{y_size}, {y_size}, y)'.format(y_size=y_segments),
z_func='z')
transform.vert_function(script,
x_func='if(x<0, 0, x)',
y_func='if(x<0, ({y_size}+x), y)'.format(y_size=y_segments),
z_func='z')
else: # muparser version: 2.2.5
transform.vert_function(script,
x_func='(x>{x_size} ? {x_size} : x)'.format(x_size=x_segments),
y_func='(x>{x_size} ? (x-{x_size}) : y)'.format(x_size=x_segments),
z_func='z')
transform.vert_function(script,
x_func='(y>{y_size} ? ({y_size}-y+{x_size}) : x)'.format(x_size=x_segments, y_size=y_segments),
y_func='(y>{y_size} ? {y_size} : y)'.format(y_size=y_segments),
z_func='z')
transform.vert_function(script,
x_func='(x<0 ? 0 : x)',
y_func='(x<0 ? ({y_size}+x) : y)'.format(y_size=y_segments),
z_func='z')
clean.merge_vert(script, threshold=0.00002)
transform.scale(script, [size[0]/x_segments, size[1]/y_segments, size[2]/z_segments])
if center:
transform.translate(script, [-size[0] / 2, -size[1] / 2, -size[2] / 2])
if color is not None:
vert_color.function(script, color=color)
return None
|
def cube_open_hires(script, size=1.0, x_segments=1, y_segments=1, z_segments=1,
center=False, color=None):
""" Creates a square open tube, e.g. a box with no top or bottom.
Useful if you want to wrap it around and join the open ends together, forming a torus.
"""
"""# Convert size to list if it isn't already
if not isinstance(size, list):
size = list(size)
# If a single value was supplied use it for all 3 axes
if len(size) == 1:
size = [size[0], size[0], size[0]]"""
size = util.make_list(size, 3)
# Make big grid and bend
grid(script, [2*(x_segments + y_segments), z_segments],
x_segments=2*(x_segments + y_segments),
y_segments=z_segments)
transform.rotate(script, 'x', 90)
# Bend 3 times into a rectangular tube
if script.ml_version == '1.3.4BETA': # muparser version: 1.3.2
transform.vert_function(script,
x_func='if(x>{x_size}, {x_size}, x)'.format(x_size=x_segments),
y_func='if(x>{x_size}, (x-{x_size}), y)'.format(x_size=x_segments),
z_func='z')
transform.vert_function(script,
x_func='if(y>{y_size}, ({y_size}-y+{x_size}), x)'.format(x_size=x_segments, y_size=y_segments),
y_func='if(y>{y_size}, {y_size}, y)'.format(y_size=y_segments),
z_func='z')
transform.vert_function(script,
x_func='if(x<0, 0, x)',
y_func='if(x<0, ({y_size}+x), y)'.format(y_size=y_segments),
z_func='z')
else: # muparser version: 2.2.5
transform.vert_function(script,
x_func='(x>{x_size} ? {x_size} : x)'.format(x_size=x_segments),
y_func='(x>{x_size} ? (x-{x_size}) : y)'.format(x_size=x_segments),
z_func='z')
transform.vert_function(script,
x_func='(y>{y_size} ? ({y_size}-y+{x_size}) : x)'.format(x_size=x_segments, y_size=y_segments),
y_func='(y>{y_size} ? {y_size} : y)'.format(y_size=y_segments),
z_func='z')
transform.vert_function(script,
x_func='(x<0 ? 0 : x)',
y_func='(x<0 ? ({y_size}+x) : y)'.format(y_size=y_segments),
z_func='z')
clean.merge_vert(script, threshold=0.00002)
transform.scale(script, [size[0]/x_segments, size[1]/y_segments, size[2]/z_segments])
if center:
transform.translate(script, [-size[0] / 2, -size[1] / 2, -size[2] / 2])
if color is not None:
vert_color.function(script, color=color)
return None
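
A minimal sketch showing the version branch in use: the muparser syntax for the bend expressions is chosen from the script's ml_version attribute, so the same call works on both old and new MeshLab builds (version string and path are illustrative).

import meshlabxml as mlx

box = mlx.FilterScript(file_out='open_box.ply', ml_version='2016.12')  # hypothetical output
# Segment counts roughly proportional to size keep the final scale step uniform.
mlx.create.cube_open_hires(box, size=[3.0, 4.0, 5.0],
                           x_segments=3, y_segments=4, z_segments=5,
                           center=True, color='orange')
box.run_script()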
|
[
"Creates",
"a",
"square",
"open",
"tube",
"e",
".",
"g",
".",
"a",
"box",
"with",
"no",
"top",
"or",
"bottom",
"."
] |
3DLIRIOUS/MeshLabXML
|
python
|
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/create.py#L455-L507
|
[
"def",
"cube_open_hires",
"(",
"script",
",",
"size",
"=",
"1.0",
",",
"x_segments",
"=",
"1",
",",
"y_segments",
"=",
"1",
",",
"z_segments",
"=",
"1",
",",
"center",
"=",
"False",
",",
"color",
"=",
"None",
")",
":",
"\"\"\"# Convert size to list if it isn't already\n if not isinstance(size, list):\n size = list(size)\n # If a single value was supplied use it for all 3 axes\n if len(size) == 1:\n size = [size[0], size[0], size[0]]\"\"\"",
"size",
"=",
"util",
".",
"make_list",
"(",
"size",
",",
"3",
")",
"# Make big grid and bend",
"grid",
"(",
"script",
",",
"[",
"2",
"*",
"(",
"x_segments",
"+",
"y_segments",
")",
",",
"z_segments",
"]",
",",
"x_segments",
"=",
"2",
"*",
"(",
"x_segments",
"+",
"y_segments",
")",
",",
"y_segments",
"=",
"z_segments",
")",
"transform",
".",
"rotate",
"(",
"script",
",",
"'x'",
",",
"90",
")",
"# Bend 3 times into a rectangular tube",
"if",
"script",
".",
"ml_version",
"==",
"'1.3.4BETA'",
":",
"# muparser version: 1.3.2",
"transform",
".",
"vert_function",
"(",
"script",
",",
"x_func",
"=",
"'if(x>{x_size}, {x_size}, x)'",
".",
"format",
"(",
"x_size",
"=",
"x_segments",
")",
",",
"y_func",
"=",
"'if(x>{x_size}, (x-{x_size}), y)'",
".",
"format",
"(",
"x_size",
"=",
"x_segments",
")",
",",
"z_func",
"=",
"'z'",
")",
"transform",
".",
"vert_function",
"(",
"script",
",",
"x_func",
"=",
"'if(y>{y_size}, ({y_size}-y+{x_size}), x)'",
".",
"format",
"(",
"x_size",
"=",
"x_segments",
",",
"y_size",
"=",
"y_segments",
")",
",",
"y_func",
"=",
"'if(y>{y_size}, {y_size}, y)'",
".",
"format",
"(",
"y_size",
"=",
"y_segments",
")",
",",
"z_func",
"=",
"'z'",
")",
"transform",
".",
"vert_function",
"(",
"script",
",",
"x_func",
"=",
"'if(x<0, 0, x)'",
",",
"y_func",
"=",
"'if(x<0, ({y_size}+x), y)'",
".",
"format",
"(",
"y_size",
"=",
"y_segments",
")",
",",
"z_func",
"=",
"'z'",
")",
"else",
":",
"# muparser version: 2.2.5",
"transform",
".",
"vert_function",
"(",
"script",
",",
"x_func",
"=",
"'(x>{x_size} ? {x_size} : x)'",
".",
"format",
"(",
"x_size",
"=",
"x_segments",
")",
",",
"y_func",
"=",
"'(x>{x_size} ? (x-{x_size}) : y)'",
".",
"format",
"(",
"x_size",
"=",
"x_segments",
")",
",",
"z_func",
"=",
"'z'",
")",
"transform",
".",
"vert_function",
"(",
"script",
",",
"x_func",
"=",
"'(y>{y_size} ? ({y_size}-y+{x_size}) : x)'",
".",
"format",
"(",
"x_size",
"=",
"x_segments",
",",
"y_size",
"=",
"y_segments",
")",
",",
"y_func",
"=",
"'(y>{y_size} ? {y_size} : y)'",
".",
"format",
"(",
"y_size",
"=",
"y_segments",
")",
",",
"z_func",
"=",
"'z'",
")",
"transform",
".",
"vert_function",
"(",
"script",
",",
"x_func",
"=",
"'(x<0 ? 0 : x)'",
",",
"y_func",
"=",
"'(x<0 ? ({y_size}+x) : y)'",
".",
"format",
"(",
"y_size",
"=",
"y_segments",
")",
",",
"z_func",
"=",
"'z'",
")",
"clean",
".",
"merge_vert",
"(",
"script",
",",
"threshold",
"=",
"0.00002",
")",
"transform",
".",
"scale",
"(",
"script",
",",
"[",
"size",
"[",
"0",
"]",
"/",
"x_segments",
",",
"size",
"[",
"1",
"]",
"/",
"y_segments",
",",
"size",
"[",
"2",
"]",
"/",
"z_segments",
"]",
")",
"if",
"center",
":",
"transform",
".",
"translate",
"(",
"script",
",",
"[",
"-",
"size",
"[",
"0",
"]",
"/",
"2",
",",
"-",
"size",
"[",
"1",
"]",
"/",
"2",
",",
"-",
"size",
"[",
"2",
"]",
"/",
"2",
"]",
")",
"if",
"color",
"is",
"not",
"None",
":",
"vert_color",
".",
"function",
"(",
"script",
",",
"color",
"=",
"color",
")",
"return",
"None"
] |
177cce21e92baca500f56a932d66bd9a33257af8
|