| partition (stringclasses, 3 values) | func_name (stringlengths 1-134) | docstring (stringlengths 1-46.9k) | path (stringlengths 4-223) | original_string (stringlengths 75-104k) | code (stringlengths 75-104k) | docstring_tokens (listlengths 1-1.97k) | repo (stringlengths 7-55) | language (stringclasses, 1 value) | url (stringlengths 87-315) | code_tokens (listlengths 19-28.4k) | sha (stringlengths 40-40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
SRegistryMessage.useColor
|
useColor will determine if color should be added
to a print. Will check if being run in a terminal, and
if it has support for ansi
|
sregistry/logger/message.py
|
def useColor(self):
'''useColor will determine if color should be added
to a print. Will check if being run in a terminal, and
if it has support for ansi'''
COLORIZE = get_user_color_preference()
if COLORIZE is not None:
return COLORIZE
streams = [self.errorStream, self.outputStream]
for stream in streams:
if not hasattr(stream, 'isatty'):
return False
if not stream.isatty():
return False
return True
|
def useColor(self):
'''useColor will determine if color should be added
to a print. Will check if being run in a terminal, and
if it has support for ansi'''
COLORIZE = get_user_color_preference()
if COLORIZE is not None:
return COLORIZE
streams = [self.errorStream, self.outputStream]
for stream in streams:
if not hasattr(stream, 'isatty'):
return False
if not stream.isatty():
return False
return True
|
[
"useColor",
"will",
"determine",
"if",
"color",
"should",
"be",
"added",
"to",
"a",
"print",
".",
"Will",
"check",
"if",
"being",
"run",
"in",
"a",
"terminal",
"and",
"if",
"has",
"support",
"for",
"asci"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L59-L72
|
[
"def",
"useColor",
"(",
"self",
")",
":",
"COLORIZE",
"=",
"get_user_color_preference",
"(",
")",
"if",
"COLORIZE",
"is",
"not",
"None",
":",
"return",
"COLORIZE",
"streams",
"=",
"[",
"self",
".",
"errorStream",
",",
"self",
".",
"outputStream",
"]",
"for",
"stream",
"in",
"streams",
":",
"if",
"not",
"hasattr",
"(",
"stream",
",",
"'isatty'",
")",
":",
"return",
"False",
"if",
"not",
"stream",
".",
"isatty",
"(",
")",
":",
"return",
"False",
"return",
"True"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
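A minimal usage sketch for useColor, assuming an SRegistryMessage instance `bot` and no exported color preference (an explicit get_user_color_preference value wins otherwise):

import io

# Streams that are not TTYs disable color: StringIO.isatty() returns False
bot.outputStream = io.StringIO()
bot.errorStream = io.StringIO()
assert bot.useColor() is False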
test
|
SRegistryMessage.addColor
|
addColor to the prompt (usually prefix) if terminal
supports, and specified to do so
|
sregistry/logger/message.py
|
def addColor(self, level, text):
'''addColor to the prompt (usually prefix) if terminal
supports, and specified to do so'''
if self.colorize:
if level in self.colors:
text = "%s%s%s" % (self.colors[level],
text,
self.colors["OFF"])
return text
|
def addColor(self, level, text):
'''addColor to the prompt (usually prefix) if terminal
supports, and specified to do so'''
if self.colorize:
if level in self.colors:
text = "%s%s%s" % (self.colors[level],
text,
self.colors["OFF"])
return text
|
[
"addColor",
"to",
"the",
"prompt",
"(",
"usually",
"prefix",
")",
"if",
"terminal",
"supports",
"and",
"specified",
"to",
"do",
"so"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L74-L82
|
[
"def",
"addColor",
"(",
"self",
",",
"level",
",",
"text",
")",
":",
"if",
"self",
".",
"colorize",
":",
"if",
"level",
"in",
"self",
".",
"colors",
":",
"text",
"=",
"\"%s%s%s\"",
"%",
"(",
"self",
".",
"colors",
"[",
"level",
"]",
",",
"text",
",",
"self",
".",
"colors",
"[",
"\"OFF\"",
"]",
")",
"return",
"text"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
test
|
SRegistryMessage.emitError
|
determine if a level should print to
stderr, includes all levels but INFO and QUIET
|
sregistry/logger/message.py
|
def emitError(self, level):
'''determine if a level should print to
stderr, includes all levels but INFO and QUIET'''
if level in [ABORT,
ERROR,
WARNING,
VERBOSE,
VERBOSE1,
VERBOSE2,
VERBOSE3,
DEBUG]:
return True
return False
|
def emitError(self, level):
'''determine if a level should print to
stderr, includes all levels but INFO and QUIET'''
if level in [ABORT,
ERROR,
WARNING,
VERBOSE,
VERBOSE1,
VERBOSE2,
VERBOSE3,
DEBUG]:
return True
return False
|
[
"determine",
"if",
"a",
"level",
"should",
"print",
"to",
"stderr",
"includes",
"all",
"levels",
"but",
"INFO",
"and",
"QUIET"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L84-L96
|
[
"def",
"emitError",
"(",
"self",
",",
"level",
")",
":",
"if",
"level",
"in",
"[",
"ABORT",
",",
"ERROR",
",",
"WARNING",
",",
"VERBOSE",
",",
"VERBOSE1",
",",
"VERBOSE2",
",",
"VERBOSE3",
",",
"DEBUG",
"]",
":",
"return",
"True",
"return",
"False"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
test
|
SRegistryMessage.emit
|
emit is the main function to print the message
optionally with a prefix
:param level: the level of the message
:param message: the message to print
:param prefix: a prefix for the message
|
sregistry/logger/message.py
|
def emit(self, level, message, prefix=None, color=None):
'''emit is the main function to print the message
optionally with a prefix
:param level: the level of the message
:param message: the message to print
:param prefix: a prefix for the message
'''
if color is None:
color = level
if prefix is not None:
prefix = self.addColor(color, "%s " % (prefix))
else:
prefix = ""
message = self.addColor(color, message)
# Add the prefix
message = "%s%s" % (prefix, message)
if not message.endswith('\n'):
message = "%s\n" % message
# If the level is quiet, only print to error
if self.level == QUIET:
pass
# Otherwise if in range, print to stdout or stderr
elif self.isEnabledFor(level):
if self.emitError(level):
self.write(self.errorStream, message)
else:
self.write(self.outputStream, message)
# Add all log messages to history
self.history.append(message)
|
def emit(self, level, message, prefix=None, color=None):
'''emit is the main function to print the message
optionally with a prefix
:param level: the level of the message
:param message: the message to print
:param prefix: a prefix for the message
'''
if color is None:
color = level
if prefix is not None:
prefix = self.addColor(color, "%s " % (prefix))
else:
prefix = ""
message = self.addColor(color, message)
# Add the prefix
message = "%s%s" % (prefix, message)
if not message.endswith('\n'):
message = "%s\n" % message
# If the level is quiet, only print to error
if self.level == QUIET:
pass
# Otherwise if in range, print to stdout or stderr
elif self.isEnabledFor(level):
if self.emitError(level):
self.write(self.errorStream, message)
else:
self.write(self.outputStream, message)
# Add all log messages to history
self.history.append(message)
|
[
"emit",
"is",
"the",
"main",
"function",
"to",
"print",
"the",
"message",
"optionally",
"with",
"a",
"prefix",
":",
"param",
"level",
":",
"the",
"level",
"of",
"the",
"message",
":",
"param",
"message",
":",
"the",
"message",
"to",
"print",
":",
"param",
"prefix",
":",
"a",
"prefix",
"for",
"the",
"message"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L113-L147
|
[
"def",
"emit",
"(",
"self",
",",
"level",
",",
"message",
",",
"prefix",
"=",
"None",
",",
"color",
"=",
"None",
")",
":",
"if",
"color",
"is",
"None",
":",
"color",
"=",
"level",
"if",
"prefix",
"is",
"not",
"None",
":",
"prefix",
"=",
"self",
".",
"addColor",
"(",
"color",
",",
"\"%s \"",
"%",
"(",
"prefix",
")",
")",
"else",
":",
"prefix",
"=",
"\"\"",
"message",
"=",
"self",
".",
"addColor",
"(",
"color",
",",
"message",
")",
"# Add the prefix",
"message",
"=",
"\"%s%s\"",
"%",
"(",
"prefix",
",",
"message",
")",
"if",
"not",
"message",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"message",
"=",
"\"%s\\n\"",
"%",
"message",
"# If the level is quiet, only print to error",
"if",
"self",
".",
"level",
"==",
"QUIET",
":",
"pass",
"# Otherwise if in range print to stdout and stderr",
"elif",
"self",
".",
"isEnabledFor",
"(",
"level",
")",
":",
"if",
"self",
".",
"emitError",
"(",
"level",
")",
":",
"self",
".",
"write",
"(",
"self",
".",
"errorStream",
",",
"message",
")",
"else",
":",
"self",
".",
"write",
"(",
"self",
".",
"outputStream",
",",
"message",
")",
"# Add all log messages to history",
"self",
".",
"history",
".",
"append",
"(",
"message",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
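To illustrate the routing in emit, a sketch assuming the module-level WARNING and INFO level constants and a configured `bot`:

bot.emit(WARNING, "space is low", prefix="LOW")  # emitError(WARNING) is True -> errorStream
bot.emit(INFO, "pull complete")                  # INFO -> outputStream
# Both messages land in bot.history, even when self.level == QUIET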
test
|
SRegistryMessage.write
|
write will write a message to a stream,
first checking the encoding
|
sregistry/logger/message.py
|
def write(self, stream, message):
'''write will write a message to a stream,
first checking the encoding
'''
if isinstance(message, bytes):
message = message.decode('utf-8')
stream.write(message)
|
def write(self, stream, message):
'''write will write a message to a stream,
first checking the encoding
'''
if isinstance(message, bytes):
message = message.decode('utf-8')
stream.write(message)
|
[
"write",
"will",
"write",
"a",
"message",
"to",
"a",
"stream",
"first",
"checking",
"the",
"encoding"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L149-L155
|
[
"def",
"write",
"(",
"self",
",",
"stream",
",",
"message",
")",
":",
"if",
"isinstance",
"(",
"message",
",",
"bytes",
")",
":",
"message",
"=",
"message",
".",
"decode",
"(",
"'utf-8'",
")",
"stream",
".",
"write",
"(",
"message",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
test
|
SRegistryMessage.get_logs
|
get_logs will return the complete history, joined by newline
(default) or as is.
|
sregistry/logger/message.py
|
def get_logs(self, join_newline=True):
'''get_logs will return the complete history, joined by newline
(default) or as is.
'''
if join_newline:
return '\n'.join(self.history)
return self.history
|
def get_logs(self, join_newline=True):
'''get_logs will return the complete history, joined by newline
(default) or as is.
'''
if join_newline:
return '\n'.join(self.history)
return self.history
|
[
"get_logs",
"will",
"return",
"the",
"complete",
"history",
"joined",
"by",
"newline",
"(",
"default",
")",
"or",
"as",
"is",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L157-L163
|
[
"def",
"get_logs",
"(",
"self",
",",
"join_newline",
"=",
"True",
")",
":",
"if",
"join_newline",
":",
"return",
"'\\n'",
".",
"join",
"(",
"self",
".",
"history",
")",
"return",
"self",
".",
"history"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
test
|
SRegistryMessage.show_progress
|
create a terminal progress bar, default bar shows for verbose+
Parameters
==========
iteration: current iteration (Int)
total: total iterations (Int)
length: character length of bar (Int)
|
sregistry/logger/message.py
|
def show_progress(self,
iteration,
total,
length=40,
min_level=0,
prefix=None,
carriage_return=True,
suffix=None,
symbol=None):
'''create a terminal progress bar, default bar shows for verbose+
Parameters
==========
iteration: current iteration (Int)
total: total iterations (Int)
length: character length of bar (Int)
'''
if not self.level == QUIET:
percent = 100 * (iteration / float(total))
progress = int(length * iteration // total)
if suffix is None:
suffix = ''
if prefix is None:
prefix = 'Progress'
# Download sizes can be imperfect, setting carriage_return to False
# and writing newline with caller cleans up the UI
if percent >= 100:
percent = 100
progress = length
if symbol is None:
symbol = "="
if progress < length:
bar = symbol * progress + '|' + '-' * (length - progress - 1)
else:
bar = symbol * progress + '-' * (length - progress)
# Only show progress bar for level > min_level
if self.level > min_level:
percent = "%5s" % ("{0:.1f}").format(percent)
output = '\r' + prefix + \
" |%s| %s%s %s" % (bar, percent, '%', suffix)
sys.stdout.write(output)
if iteration == total and carriage_return:
sys.stdout.write('\n')
sys.stdout.flush()
|
def show_progress(self,
iteration,
total,
length=40,
min_level=0,
prefix=None,
carriage_return=True,
suffix=None,
symbol=None):
'''create a terminal progress bar, default bar shows for verbose+
Parameters
==========
iteration: current iteration (Int)
total: total iterations (Int)
length: character length of bar (Int)
'''
if not self.level == QUIET:
percent = 100 * (iteration / float(total))
progress = int(length * iteration // total)
if suffix is None:
suffix = ''
if prefix is None:
prefix = 'Progress'
# Download sizes can be imperfect, setting carriage_return to False
# and writing newline with caller cleans up the UI
if percent >= 100:
percent = 100
progress = length
if symbol is None:
symbol = "="
if progress < length:
bar = symbol * progress + '|' + '-' * (length - progress - 1)
else:
bar = symbol * progress + '-' * (length - progress)
# Only show progress bar for level > min_level
if self.level > min_level:
percent = "%5s" % ("{0:.1f}").format(percent)
output = '\r' + prefix + \
" |%s| %s%s %s" % (bar, percent, '%', suffix)
sys.stdout.write(output)
if iteration == total and carriage_return:
sys.stdout.write('\n')
sys.stdout.flush()
|
[
"create",
"a",
"terminal",
"progress",
"bar",
"default",
"bar",
"shows",
"for",
"verbose",
"+",
"Parameters",
"==========",
"iteration",
":",
"current",
"iteration",
"(",
"Int",
")",
"total",
":",
"total",
"iterations",
"(",
"Int",
")",
"length",
":",
"character",
"length",
"of",
"bar",
"(",
"Int",
")"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L166-L216
|
[
"def",
"show_progress",
"(",
"self",
",",
"iteration",
",",
"total",
",",
"length",
"=",
"40",
",",
"min_level",
"=",
"0",
",",
"prefix",
"=",
"None",
",",
"carriage_return",
"=",
"True",
",",
"suffix",
"=",
"None",
",",
"symbol",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"level",
"==",
"QUIET",
":",
"percent",
"=",
"100",
"*",
"(",
"iteration",
"/",
"float",
"(",
"total",
")",
")",
"progress",
"=",
"int",
"(",
"length",
"*",
"iteration",
"//",
"total",
")",
"if",
"suffix",
"is",
"None",
":",
"suffix",
"=",
"''",
"if",
"prefix",
"is",
"None",
":",
"prefix",
"=",
"'Progress'",
"# Download sizes can be imperfect, setting carriage_return to False",
"# and writing newline with caller cleans up the UI",
"if",
"percent",
">=",
"100",
":",
"percent",
"=",
"100",
"progress",
"=",
"length",
"if",
"symbol",
"is",
"None",
":",
"symbol",
"=",
"\"=\"",
"if",
"progress",
"<",
"length",
":",
"bar",
"=",
"symbol",
"*",
"progress",
"+",
"'|'",
"+",
"'-'",
"*",
"(",
"length",
"-",
"progress",
"-",
"1",
")",
"else",
":",
"bar",
"=",
"symbol",
"*",
"progress",
"+",
"'-'",
"*",
"(",
"length",
"-",
"progress",
")",
"# Only show progress bar for level > min_level",
"if",
"self",
".",
"level",
">",
"min_level",
":",
"percent",
"=",
"\"%5s\"",
"%",
"(",
"\"{0:.1f}\"",
")",
".",
"format",
"(",
"percent",
")",
"output",
"=",
"'\\r'",
"+",
"prefix",
"+",
"\" |%s| %s%s %s\"",
"%",
"(",
"bar",
",",
"percent",
",",
"'%'",
",",
"suffix",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"output",
")",
",",
"if",
"iteration",
"==",
"total",
"and",
"carriage_return",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n'",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
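A driving loop for show_progress, as a sketch (assumes `bot` is verbose enough that self.level > min_level):

import time

total = 50
for i in range(1, total + 1):
    bot.show_progress(i, total, length=40, prefix="Download")
    time.sleep(0.05)
# carriage_return=True (the default) emits the trailing newline once i == total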
test
|
SRegistryMessage.table
|
table will print a table of entries. If rows is
a dictionary, the keys are interpreted as column names. If
not, a numbered list is used.
|
sregistry/logger/message.py
|
def table(self, rows, col_width=2):
'''table will print a table of entries. If rows is
a dictionary, the keys are interpreted as column names. If
not, a numbered list is used.
'''
labels = [str(x) for x in range(1,len(rows)+1)]
if isinstance(rows, dict):
labels = list(rows.keys())
rows = list(rows.values())
for row in rows:
label = labels.pop(0)
label = label.ljust(col_width)
message = "\t".join(row)
self.custom(prefix=label,
message=message)
|
def table(self, rows, col_width=2):
'''table will print a table of entries. If rows is
a dictionary, the keys are interpreted as column names. If
not, a numbered list is used.
'''
labels = [str(x) for x in range(1,len(rows)+1)]
if isinstance(rows, dict):
labels = list(rows.keys())
rows = list(rows.values())
for row in rows:
label = labels.pop(0)
label = label.ljust(col_width)
message = "\t".join(row)
self.custom(prefix=label,
message=message)
|
[
"table",
"will",
"print",
"a",
"table",
"of",
"entries",
".",
"If",
"the",
"rows",
"is",
"a",
"dictionary",
"the",
"keys",
"are",
"interpreted",
"as",
"column",
"names",
".",
"if",
"not",
"a",
"numbered",
"list",
"is",
"used",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/logger/message.py#L274-L290
|
[
"def",
"table",
"(",
"self",
",",
"rows",
",",
"col_width",
"=",
"2",
")",
":",
"labels",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"rows",
")",
"+",
"1",
")",
"]",
"if",
"isinstance",
"(",
"rows",
",",
"dict",
")",
":",
"labels",
"=",
"list",
"(",
"rows",
".",
"keys",
"(",
")",
")",
"rows",
"=",
"list",
"(",
"rows",
".",
"values",
"(",
")",
")",
"for",
"row",
"in",
"rows",
":",
"label",
"=",
"labels",
".",
"pop",
"(",
"0",
")",
"label",
"=",
"label",
".",
"ljust",
"(",
"col_width",
")",
"message",
"=",
"\"\\t\"",
".",
"join",
"(",
"row",
")",
"self",
".",
"custom",
"(",
"prefix",
"=",
"label",
",",
"message",
"=",
"message",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
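Both row shapes that table accepts, sketched; each row must be an iterable of strings because rows are tab-joined:

bot.table([["ubuntu", "latest"], ["busybox", "1.30"]])    # list input: labels 1..N
bot.table({"first": ["ubuntu", "latest"],                 # dict input: keys become labels
           "second": ["busybox", "1.30"]}, col_width=8)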
test
|
push
|
push an image to Singularity Registry
path: should correspond to an absolute image path (or derive it)
name: should be the complete uri that the user has requested to push.
tag: should correspond with an image tag. This is provided to mirror Docker
|
sregistry/main/__template__/push.py
|
def push(self, path, name, tag=None):
'''push an image to Singularity Registry
path: should correspond to an absolute image path (or derive it)
name: should be the complete uri that the user has requested to push.
tag: should correspond with an image tag. This is provided to mirror Docker
'''
path = os.path.abspath(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# This returns a data structure with collection, container, based on uri
names = parse_image_name(remove_uri(name),tag=tag)
# use Singularity client, if it exists, to inspect and extract metadata
metadata = self.get_metadata(path, names=names)
# If you want a spinner
bot.spinner.start()
# do your push request here. Generally you want to except a KeyboardInterrupt
# and give the user a status from the response
bot.spinner.stop()
|
def push(self, path, name, tag=None):
'''push an image to Singularity Registry
path: should correspond to an absolute image path (or derive it)
name: should be the complete uri that the user has requested to push.
tag: should correspond with an image tag. This is provided to mirror Docker
'''
path = os.path.abspath(path)
bot.debug("PUSH %s" % path)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# This returns a data structure with collection, container, based on uri
names = parse_image_name(remove_uri(name),tag=tag)
# use Singularity client, if it exists, to inspect and extract metadata
metadata = self.get_metadata(path, names=names)
# If you want a spinner
bot.spinner.start()
# do your push request here. Generally you want to except a KeyboardInterrupt
# and give the user a status from the response
bot.spinner.stop()
|
[
"push",
"an",
"image",
"to",
"Singularity",
"Registry",
"path",
":",
"should",
"correspond",
"to",
"an",
"absolte",
"image",
"path",
"(",
"or",
"derive",
"it",
")",
"name",
":",
"should",
"be",
"the",
"complete",
"uri",
"that",
"the",
"user",
"has",
"requested",
"to",
"push",
".",
"tag",
":",
"should",
"correspond",
"with",
"an",
"image",
"tag",
".",
"This",
"is",
"provided",
"to",
"mirror",
"Docker"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/__template__/push.py#L28-L52
|
[
"def",
"push",
"(",
"self",
",",
"path",
",",
"name",
",",
"tag",
"=",
"None",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"bot",
".",
"debug",
"(",
"\"PUSH %s\"",
"%",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"bot",
".",
"error",
"(",
"'%s does not exist.'",
"%",
"path",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# This returns a data structure with collection, container, based on uri",
"names",
"=",
"parse_image_name",
"(",
"remove_uri",
"(",
"name",
")",
",",
"tag",
"=",
"tag",
")",
"# use Singularity client, if exists, to inspect to extract metadata",
"metadata",
"=",
"self",
".",
"get_metadata",
"(",
"path",
",",
"names",
"=",
"names",
")",
"# If you want a spinner",
"bot",
".",
"spinner",
".",
"start",
"(",
")",
"# do your push request here. Generally you want to except a KeyboardInterrupt",
"# and give the user a status from the response",
"bot",
".",
"spinner",
".",
"stop",
"(",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
test
|
push
|
push an image to Globus endpoint. In this case, the name is the
globus endpoint id and path.
--name <endpointid>:/path/for/image
|
sregistry/main/globus/push.py
|
def push(self, path, name, tag=None):
'''push an image to Globus endpoint. In this case, the name is the
globus endpoint id and path.
--name <endpointid>:/path/for/image
'''
# Split the name into endpoint and rest
endpoint, remote = self._parse_endpoint_name(name)
path = os.path.abspath(path)
image = os.path.basename(path)
bot.debug("PUSH %s" % path)
# Flatten image uri into image name
q = parse_image_name(image)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# Ensure we have a transfer client
if not hasattr(self, 'transfer_client'):
self._init_transfer_client()
# The user must have a personal endpoint
endpoints = self._get_endpoints()
if len(endpoints['my-endpoints']) == 0:
bot.error('You must have a personal endpoint to transfer the container')
sys.exit(1)
# Take the first endpoint that is active
source_endpoint = None
for eid,contender in endpoints['my-endpoints'].items():
if contender['gcp_connected'] is True:
source_endpoint = contender
break
# Exit if none are active, required!
if source_endpoint is None:
bot.error('No activated local endpoints online! Go online to transfer')
sys.exit(1)
# The destination endpoint should have a .singularity/shub folder set
self._create_endpoint_cache(endpoint)
# SREGISTRY_STORAGE must be an endpoint
# if the image isn't already there, add it first
added = self.add(image_path=path,
image_uri=q['uri'],
copy=True)
label = "Singularity Registry Transfer for %s" %added.name
tdata = globus_sdk.TransferData(self.transfer_client,
source_endpoint['id'],
endpoint,
label=label,
sync_level="checksum")
image = ".singularity/shub/%s" %image
tdata.add_item(added.image, image)
bot.info('Requesting transfer from local %s to %s:%s' %(SREGISTRY_STORAGE,
endpoint, image))
transfer_result = self.transfer_client.submit_transfer(tdata)
bot.info(transfer_result['message'])
return transfer_result
|
def push(self, path, name, tag=None):
'''push an image to Globus endpoint. In this case, the name is the
globus endpoint id and path.
--name <endpointid>:/path/for/image
'''
# Split the name into endpoint and rest
endpoint, remote = self._parse_endpoint_name(name)
path = os.path.abspath(path)
image = os.path.basename(path)
bot.debug("PUSH %s" % path)
# Flatten image uri into image name
q = parse_image_name(image)
if not os.path.exists(path):
bot.error('%s does not exist.' %path)
sys.exit(1)
# Ensure we have a transfer client
if not hasattr(self, 'transfer_client'):
self._init_transfer_client()
# The user must have a personal endpoint
endpoints = self._get_endpoints()
if len(endpoints['my-endpoints']) == 0:
bot.error('You must have a personal endpoint to transfer the container')
sys.exit(1)
# Take the first endpoint that is active
source_endpoint = None
for eid,contender in endpoints['my-endpoints'].items():
if contender['gcp_connected'] is True:
source_endpoint = contender
break
# Exit if none are active, required!
if source_endpoint is None:
bot.error('No activated local endpoints online! Go online to transfer')
sys.exit(1)
# The destination endpoint should have a .singularity/shub folder set
self._create_endpoint_cache(endpoint)
# SREGISTRY_STORAGE must be an endpoint
# if the image isn't already there, add it first
added = self.add(image_path=path,
image_uri=q['uri'],
copy=True)
label = "Singularity Registry Transfer for %s" %added.name
tdata = globus_sdk.TransferData(self.transfer_client,
source_endpoint['id'],
endpoint,
label=label,
sync_level="checksum")
image = ".singularity/shub/%s" %image
tdata.add_item(added.image, image)
bot.info('Requesting transfer from local %s to %s:%s' %(SREGISTRY_STORAGE,
endpoint, image))
transfer_result = self.transfer_client.submit_transfer(tdata)
bot.info(transfer_result['message'])
return transfer_result
|
[
"push",
"an",
"image",
"to",
"Globus",
"endpoint",
".",
"In",
"this",
"case",
"the",
"name",
"is",
"the",
"globus",
"endpoint",
"id",
"and",
"path",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/globus/push.py#L24-L97
|
[
"def",
"push",
"(",
"self",
",",
"path",
",",
"name",
",",
"tag",
"=",
"None",
")",
":",
"# Split the name into endpoint and rest",
"endpoint",
",",
"remote",
"=",
"self",
".",
"_parse_endpoint_name",
"(",
"name",
")",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"image",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"bot",
".",
"debug",
"(",
"\"PUSH %s\"",
"%",
"path",
")",
"# Flatten image uri into image name",
"q",
"=",
"parse_image_name",
"(",
"image",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"bot",
".",
"error",
"(",
"'%s does not exist.'",
"%",
"path",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Ensure we have a transfer client",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'transfer_client'",
")",
":",
"self",
".",
"_init_transfer_client",
"(",
")",
"# The user must have a personal endpoint",
"endpoints",
"=",
"self",
".",
"_get_endpoints",
"(",
")",
"if",
"len",
"(",
"endpoints",
"[",
"'my-endpoints'",
"]",
")",
"==",
"0",
":",
"bot",
".",
"error",
"(",
"'You must have a personal endpoint to transfer the container'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Take the first endpoint that is active",
"source_endpoint",
"=",
"None",
"for",
"eid",
",",
"contender",
"in",
"endpoints",
"[",
"'my-endpoints'",
"]",
".",
"items",
"(",
")",
":",
"if",
"contender",
"[",
"'gcp_connected'",
"]",
"is",
"True",
":",
"source_endpoint",
"=",
"contender",
"break",
"# Exit if none are active, required!",
"if",
"source_endpoint",
"is",
"None",
":",
"bot",
".",
"error",
"(",
"'No activated local endpoints online! Go online to transfer'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# The destination endpoint should have an .singularity/shub folder set",
"self",
".",
"_create_endpoint_cache",
"(",
"endpoint",
")",
"# SREGISTRY_STORAGE must be an endpoint",
"# if the image isn't already there, add it first",
"added",
"=",
"self",
".",
"add",
"(",
"image_path",
"=",
"path",
",",
"image_uri",
"=",
"q",
"[",
"'uri'",
"]",
",",
"copy",
"=",
"True",
")",
"label",
"=",
"\"Singularity Registry Transfer for %s\"",
"%",
"added",
".",
"name",
"tdata",
"=",
"globus_sdk",
".",
"TransferData",
"(",
"self",
".",
"transfer_client",
",",
"source_endpoint",
"[",
"'id'",
"]",
",",
"endpoint",
",",
"label",
"=",
"label",
",",
"sync_level",
"=",
"\"checksum\"",
")",
"image",
"=",
"\".singularity/shub/%s\"",
"%",
"image",
"tdata",
".",
"add_item",
"(",
"added",
".",
"image",
",",
"image",
")",
"bot",
".",
"info",
"(",
"'Requesting transfer from local %s to %s:%s'",
"%",
"(",
"SREGISTRY_STORAGE",
",",
"endpoint",
",",
"image",
")",
")",
"transfer_result",
"=",
"self",
".",
"transfer_client",
".",
"submit_transfer",
"(",
"tdata",
")",
"bot",
".",
"info",
"(",
"transfer_result",
"[",
"'message'",
"]",
")",
"return",
"transfer_result"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
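The name argument packs the Globus endpoint id and remote path into one string; a sketch with a hypothetical endpoint id:

client.push(path='/tmp/ubuntu.sif',
            name='dabdceba-6d04-11e5-ba46-22000b92c6ec:/containers')  # <endpointid>:/path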
test
|
get_template
|
return a default template for some function in sregistry.
If there is no template, None is returned.
Parameters
==========
name: the name of the template to retrieve
|
sregistry/utils/templates.py
|
def get_template(name):
'''return a default template for some function in sregistry.
If there is no template, None is returned.
Parameters
==========
name: the name of the template to retrieve
'''
name = name.lower()
templates = dict()
templates['tarinfo'] = {"gid": 0,
"uid": 0,
"uname": "root",
"gname": "root",
"mode": 493}
if name in templates:
bot.debug("Found template for %s" % (name))
return templates[name]
else:
bot.warning("Cannot find template %s" % (name))
|
def get_template(name):
'''return a default template for some function in sregistry.
If there is no template, None is returned.
Parameters
==========
name: the name of the template to retrieve
'''
name = name.lower()
templates = dict()
templates['tarinfo'] = {"gid": 0,
"uid": 0,
"uname": "root",
"gname": "root",
"mode": 493}
if name in templates:
bot.debug("Found template for %s" % (name))
return templates[name]
else:
bot.warning("Cannot find template %s" % (name))
|
[
"return",
"a",
"default",
"template",
"for",
"some",
"function",
"in",
"sregistry",
"If",
"there",
"is",
"no",
"template",
"None",
"is",
"returned",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/utils/templates.py#L13-L35
|
[
"def",
"get_template",
"(",
"name",
")",
":",
"name",
"=",
"name",
".",
"lower",
"(",
")",
"templates",
"=",
"dict",
"(",
")",
"templates",
"[",
"'tarinfo'",
"]",
"=",
"{",
"\"gid\"",
":",
"0",
",",
"\"uid\"",
":",
"0",
",",
"\"uname\"",
":",
"\"root\"",
",",
"\"gname\"",
":",
"\"root\"",
",",
"\"mode\"",
":",
"493",
"}",
"if",
"name",
"in",
"templates",
":",
"bot",
".",
"debug",
"(",
"\"Found template for %s\"",
"%",
"(",
"name",
")",
")",
"return",
"templates",
"[",
"name",
"]",
"else",
":",
"bot",
".",
"warning",
"(",
"\"Cannot find template %s\"",
"%",
"(",
"name",
")",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
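A sketch of the lookup, which lowercases the name and warns (returning None) when no template exists:

info = get_template('TarInfo')
# info == {'gid': 0, 'uid': 0, 'uname': 'root', 'gname': 'root', 'mode': 493}  (493 == 0o755)
missing = get_template('nonexistent')  # logs a warning, returns None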
test
|
update_token
|
update_token uses HTTP basic authentication to get a token for
Docker registry API V2 operations. We get here if a 401 is
returned for a request.
Parameters
==========
response: the http request response to parse for the challenge.
https://docs.docker.com/registry/spec/auth/token/
|
sregistry/main/aws/api.py
|
def update_token(self):
'''update_token uses HTTP basic authentication to get a token for
Docker registry API V2 operations. We get here if a 401 is
returned for a request.
Parameters
==========
response: the http request response to parse for the challenge.
https://docs.docker.com/registry/spec/auth/token/
'''
# Add Amazon headers
tokens = self.aws.get_authorization_token()
token = tokens['authorizationData'][0]['authorizationToken']
try:
token = {"Authorization": "Basic %s" % token}
self.headers.update(token)
except Exception:
bot.error("Error getting token.")
sys.exit(1)
|
def update_token(self):
'''update_token uses HTTP basic authentication to get a token for
Docker registry API V2 operations. We get here if a 401 is
returned for a request.
Parameters
==========
response: the http request response to parse for the challenge.
https://docs.docker.com/registry/spec/auth/token/
'''
# Add Amazon headers
tokens = self.aws.get_authorization_token()
token = tokens['authorizationData'][0]['authorizationToken']
try:
token = {"Authorization": "Basic %s" % token}
self.headers.update(token)
except Exception:
bot.error("Error getting token.")
sys.exit(1)
|
[
"update_token",
"uses",
"HTTP",
"basic",
"authentication",
"to",
"get",
"a",
"token",
"for",
"Docker",
"registry",
"API",
"V2",
"operations",
".",
"We",
"get",
"here",
"if",
"a",
"401",
"is",
"returned",
"for",
"a",
"request",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/aws/api.py#L26-L48
|
[
"def",
"update_token",
"(",
"self",
")",
":",
"# Add Amazon headers",
"tokens",
"=",
"self",
".",
"aws",
".",
"get_authorization_token",
"(",
")",
"token",
"=",
"tokens",
"[",
"'authorizationData'",
"]",
"[",
"0",
"]",
"[",
"'authorizationToken'",
"]",
"try",
":",
"token",
"=",
"{",
"\"Authorization\"",
":",
"\"Basic %s\"",
"%",
"token",
"}",
"self",
".",
"headers",
".",
"update",
"(",
"token",
")",
"except",
"Exception",
":",
"bot",
".",
"error",
"(",
"\"Error getting token.\"",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
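The ECR authorizationToken is already base64("AWS:password"), so it is dropped straight into a Basic authorization header; a sketch assuming client.aws is a boto3 ECR client:

client.update_token()
# client.headers now includes {'Authorization': 'Basic <token>'} for
# subsequent Docker Registry API v2 requests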
test
|
download_layers
|
download layers is a wrapper to do the following for a client loaded
with a manifest for an image:
1. use the manifests to retrieve list of digests (get_digests)
2. atomically download the list to destination (get_layers)
This function uses the MultiProcess client to download layers
at the same time.
|
sregistry/main/aws/api.py
|
def download_layers(self, repo_name, digest=None, destination=None):
''' download layers is a wrapper to do the following for a client loaded
with a manifest for an image:
1. use the manifests to retrieve list of digests (get_digests)
2. atomically download the list to destination (get_layers)
This function uses the MultiProcess client to download layers
at the same time.
'''
from sregistry.main.workers import Workers
from sregistry.main.workers.aws import download_task
# Obtain list of digests, and destination for download
self._get_manifest(repo_name, digest)
digests = self._get_digests(repo_name, digest)
destination = self._get_download_cache(destination)
# Create multiprocess download client
workers = Workers()
# Download each layer atomically
tasks = []
layers = []
# Start with a fresh token
self._update_token()
for digest in digests:
targz = "%s/%s.tar.gz" % (destination, digest['digest'])
url = '%s/%s/blobs/%s' % (self.base, repo_name, digest['digest'])
# Only download if not in cache already
if not os.path.exists(targz):
tasks.append((url, self.headers, targz))
layers.append(targz)
# Download layers with multiprocess workers
if len(tasks) > 0:
download_layers = workers.run(func=download_task,
tasks=tasks)
return layers, url
|
def download_layers(self, repo_name, digest=None, destination=None):
''' download layers is a wrapper to do the following for a client loaded
with a manifest for an image:
1. use the manifests to retrieve list of digests (get_digests)
2. atomically download the list to destination (get_layers)
This function uses the MultiProcess client to download layers
at the same time.
'''
from sregistry.main.workers import Workers
from sregistry.main.workers.aws import download_task
# Obtain list of digests, and destination for download
self._get_manifest(repo_name, digest)
digests = self._get_digests(repo_name, digest)
destination = self._get_download_cache(destination)
# Create multiprocess download client
workers = Workers()
# Download each layer atomically
tasks = []
layers = []
# Start with a fresh token
self._update_token()
for digest in digests:
targz = "%s/%s.tar.gz" % (destination, digest['digest'])
url = '%s/%s/blobs/%s' % (self.base, repo_name, digest['digest'])
# Only download if not in cache already
if not os.path.exists(targz):
tasks.append((url, self.headers, targz))
layers.append(targz)
# Download layers with multiprocess workers
if len(tasks) > 0:
download_layers = workers.run(func=download_task,
tasks=tasks)
return layers, url
|
[
"download",
"layers",
"is",
"a",
"wrapper",
"to",
"do",
"the",
"following",
"for",
"a",
"client",
"loaded",
"with",
"a",
"manifest",
"for",
"an",
"image",
":",
"1",
".",
"use",
"the",
"manifests",
"to",
"retrieve",
"list",
"of",
"digests",
"(",
"get_digests",
")",
"2",
".",
"atomically",
"download",
"the",
"list",
"to",
"destination",
"(",
"get_layers",
")"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/aws/api.py#L51-L95
|
[
"def",
"download_layers",
"(",
"self",
",",
"repo_name",
",",
"digest",
"=",
"None",
",",
"destination",
"=",
"None",
")",
":",
"from",
"sregistry",
".",
"main",
".",
"workers",
"import",
"Workers",
"from",
"sregistry",
".",
"main",
".",
"workers",
".",
"aws",
"import",
"download_task",
"# Obtain list of digets, and destination for download",
"self",
".",
"_get_manifest",
"(",
"repo_name",
",",
"digest",
")",
"digests",
"=",
"self",
".",
"_get_digests",
"(",
"repo_name",
",",
"digest",
")",
"destination",
"=",
"self",
".",
"_get_download_cache",
"(",
"destination",
")",
"# Create multiprocess download client",
"workers",
"=",
"Workers",
"(",
")",
"# Download each layer atomically",
"tasks",
"=",
"[",
"]",
"layers",
"=",
"[",
"]",
"# Start with a fresh token",
"self",
".",
"_update_token",
"(",
")",
"for",
"digest",
"in",
"digests",
":",
"targz",
"=",
"\"%s/%s.tar.gz\"",
"%",
"(",
"destination",
",",
"digest",
"[",
"'digest'",
"]",
")",
"url",
"=",
"'%s/%s/blobs/%s'",
"%",
"(",
"self",
".",
"base",
",",
"repo_name",
",",
"digest",
"[",
"'digest'",
"]",
")",
"# Only download if not in cache already",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"targz",
")",
":",
"tasks",
".",
"append",
"(",
"(",
"url",
",",
"self",
".",
"headers",
",",
"targz",
")",
")",
"layers",
".",
"append",
"(",
"targz",
")",
"# Download layers with multiprocess workers",
"if",
"len",
"(",
"tasks",
")",
">",
"0",
":",
"download_layers",
"=",
"workers",
".",
"run",
"(",
"func",
"=",
"download_task",
",",
"tasks",
"=",
"tasks",
")",
"return",
"layers",
",",
"url"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
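A sketch of the wrapper's contract; note the digest argument here is the tag handed down to the manifest lookups:

layers, url = client.download_layers('myorg/container', digest='latest')
# layers: paths to the cached <digest>.tar.gz files (already-cached layers are not re-downloaded)
# url: the blob url of the last layer considered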
test
|
get_manifest
|
return the image manifest via the aws client, saved in self.manifest
|
sregistry/main/aws/api.py
|
def get_manifest(self, repo_name, tag):
'''return the image manifest via the aws client, saved in self.manifest
'''
image = None
repo = self.aws.describe_images(repositoryName=repo_name)
if 'imageDetails' in repo:
for contender in repo.get('imageDetails'):
if tag in contender['imageTags']:
image = contender
break
# if the image isn't found, we need to exit
if image is None:
bot.exit('Cannot find %s:%s, is the uri correct?' %(repo_name, tag))
digest = image['imageDigest']
digests = self.aws.batch_get_image(repositoryName=repo_name,
imageIds=[{"imageDigest": digest,
"imageTag": tag}])
self.manifest = json.loads(digests['images'][0]['imageManifest'])
return self.manifest
|
def get_manifest(self, repo_name, tag):
'''return the image manifest via the aws client, saved in self.manifest
'''
image = None
repo = self.aws.describe_images(repositoryName=repo_name)
if 'imageDetails' in repo:
for contender in repo.get('imageDetails'):
if tag in contender['imageTags']:
image = contender
break
# if the image isn't found, we need to exit
if image is None:
bot.exit('Cannot find %s:%s, is the uri correct?' %(repo_name, tag))
digest = image['imageDigest']
digests = self.aws.batch_get_image(repositoryName=repo_name,
imageIds=[{"imageDigest": digest,
"imageTag": tag}])
self.manifest = json.loads(digests['images'][0]['imageManifest'])
return self.manifest
|
[
"return",
"the",
"image",
"manifest",
"via",
"the",
"aws",
"client",
"saved",
"in",
"self",
".",
"manifest"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/aws/api.py#L98-L120
|
[
"def",
"get_manifest",
"(",
"self",
",",
"repo_name",
",",
"tag",
")",
":",
"image",
"=",
"None",
"repo",
"=",
"self",
".",
"aws",
".",
"describe_images",
"(",
"repositoryName",
"=",
"repo_name",
")",
"if",
"'imageDetails'",
"in",
"repo",
":",
"for",
"contender",
"in",
"repo",
".",
"get",
"(",
"'imageDetails'",
")",
":",
"if",
"tag",
"in",
"contender",
"[",
"'imageTags'",
"]",
":",
"image",
"=",
"contender",
"break",
"# if the image isn't found, we need to exit",
"if",
"image",
"is",
"None",
":",
"bot",
".",
"exit",
"(",
"'Cannot find %s:%s, is the uri correct?'",
"%",
"(",
"repo_name",
",",
"digest",
")",
")",
"digest",
"=",
"image",
"[",
"'imageDigest'",
"]",
"digests",
"=",
"self",
".",
"aws",
".",
"batch_get_image",
"(",
"repositoryName",
"=",
"repo_name",
",",
"imageIds",
"=",
"[",
"{",
"\"imageDigest\"",
":",
"digest",
",",
"\"imageTag\"",
":",
"tag",
"}",
"]",
")",
"self",
".",
"manifest",
"=",
"json",
".",
"loads",
"(",
"digests",
"[",
"'images'",
"]",
"[",
"0",
"]",
"[",
"'imageManifest'",
"]",
")",
"return",
"self",
".",
"manifest"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
test
|
get_digests
|
return a list of layers from a manifest.
The function is intended to work with both version
1 and 2 of the schema. All layers (including redundant)
are returned. By default, we try version 2 first,
then fall back to version 1.
For version 1 manifests: extraction is reversed
Parameters
==========
manifest: the manifest to read_layers from
|
sregistry/main/aws/api.py
|
def get_digests(self, repo_name, tag):
'''
return a list of layers from a manifest.
The function is intended to work with both version
1 and 2 of the schema. All layers (including redundant)
are returned. By default, we try version 2 first,
then fall back to version 1.
For version 1 manifests: extraction is reversed
Parameters
==========
manifest: the manifest to read_layers from
'''
if not hasattr(self, 'manifest'):
bot.error('Please retrieve manifest for the image first.')
sys.exit(1)
# version 2 manifest here!
return self.manifest['layers']
|
def get_digests(self, repo_name, tag):
'''
return a list of layers from a manifest.
The function is intended to work with both version
1 and 2 of the schema. All layers (including redundant)
are returned. By default, we try version 2 first,
then fall back to version 1.
For version 1 manifests: extraction is reversed
Parameters
==========
manifest: the manifest to read_layers from
'''
if not hasattr(self, 'manifest'):
bot.error('Please retrieve manifest for the image first.')
sys.exit(1)
# version 2 manifest here!
return self.manifest['layers']
|
[
"return",
"a",
"list",
"of",
"layers",
"from",
"a",
"manifest",
".",
"The",
"function",
"is",
"intended",
"to",
"work",
"with",
"both",
"version",
"1",
"and",
"2",
"of",
"the",
"schema",
".",
"All",
"layers",
"(",
"including",
"redundant",
")",
"are",
"returned",
".",
"By",
"default",
"we",
"try",
"version",
"2",
"first",
"then",
"fall",
"back",
"to",
"version",
"1",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/aws/api.py#L123-L143
|
[
"def",
"get_digests",
"(",
"self",
",",
"repo_name",
",",
"tag",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'manifest'",
")",
":",
"bot",
".",
"error",
"(",
"'Please retrieve manifest for the image first.'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# version 2 manifest here!",
"return",
"self",
".",
"manifest",
"[",
"'layers'",
"]"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
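A sketch showing the dependency on a prior manifest request; each returned entry is a schema version 2 layer record:

client.get_manifest('myorg/container', 'latest')   # populates self.manifest
for layer in client.get_digests('myorg/container', 'latest'):
    print(layer['digest'])                         # e.g. sha256:...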
test
|
prepare_metadata
|
prepare a key/value list of metadata for the request. The metadata
object that comes in is only parsed one level.
|
sregistry/main/google_storage/utils.py
|
def prepare_metadata(metadata):
'''prepare a key/value list of metadata for the request. The metadata
object that comes in is only parsed one level.
'''
pairs = {
'metadata': {
'items': [{
'key': 'client',
'value': 'sregistry'
}
]
}
}
for key,val in metadata.items():
if not isinstance(val,dict) and not isinstance(val,list):
pairs['metadata']['items'].append({'key':key,'value':val})
elif isinstance(val,dict):
for k,v in val.items():
if not isinstance(v,dict) and not isinstance(v,list):
pairs['metadata']['items'].append({'key':k,'value':v})
return pairs
|
def prepare_metadata(metadata):
'''prepare a key/value list of metadata for the request. The metadata
object that comes in is only parsed one level.
'''
pairs = {
'metadata': {
'items': [{
'key': 'client',
'value': 'sregistry'
}
]
}
}
for key,val in metadata.items():
if not isinstance(val,dict) and not isinstance(val,list):
pairs['metadata']['items'].append({'key':key,'value':val})
elif isinstance(val,dict):
for k,v in val.items():
if not isinstance(v,dict) and not isinstance(v,list):
pairs['metadata']['items'].append({'key':k,'value':v})
return pairs
|
[
"prepare",
"a",
"key",
"/",
"value",
"list",
"of",
"metadata",
"for",
"the",
"request",
".",
"The",
"metadata",
"object",
"that",
"comes",
"in",
"is",
"only",
"parsed",
"one",
"level",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/google_storage/utils.py#L16-L37
|
[
"def",
"prepare_metadata",
"(",
"metadata",
")",
":",
"pairs",
"=",
"{",
"'metadata'",
":",
"{",
"'items'",
":",
"[",
"{",
"'key'",
":",
"'client'",
",",
"'value'",
":",
"'sregistry'",
"}",
"]",
"}",
"}",
"for",
"key",
",",
"val",
"in",
"metadata",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"val",
",",
"dict",
")",
"and",
"not",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"pairs",
"[",
"'metadata'",
"]",
"[",
"'items'",
"]",
".",
"append",
"(",
"{",
"'key'",
":",
"key",
",",
"'value'",
":",
"val",
"}",
")",
"elif",
"isinstance",
"(",
"val",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"val",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"v",
",",
"dict",
")",
"and",
"not",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"pairs",
"[",
"'metadata'",
"]",
"[",
"'items'",
"]",
".",
"append",
"(",
"{",
"'key'",
":",
"k",
",",
"'value'",
":",
"v",
"}",
")",
"return",
"pairs"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
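A worked sketch of the one-level flattening: scalars are kept, one level of dict is unrolled, and lists or deeper nesting are dropped:

pairs = prepare_metadata({'size': '62MB',
                          'labels': {'maintainer': 'vsoch'},
                          'tags': ['latest'],                   # list: dropped
                          'deep': {'nested': {'too': 'far'}}})  # two levels: dropped
# pairs['metadata']['items'] ==
# [{'key': 'client', 'value': 'sregistry'},
#  {'key': 'size', 'value': '62MB'},
#  {'key': 'maintainer', 'value': 'vsoch'}]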
test
|
get_build_template
|
get a particular build template, by default we return templates
that are based on package managers.
Parameters
==========
name: the full path of the template file to use.
manager: the package manager to use in the template (yum or apt)
|
sregistry/main/google_storage/utils.py
|
def get_build_template(name=None, manager='apt'):
'''get a particular build template, by default we return templates
that are based on package managers.
Parameters
==========
name: the full path of the template file to use.
manager: the package manager to use in the template (yum or apt)
'''
base = get_installdir()
if name is None:
name = "%s/main/templates/build/singularity-builder-%s.sh" %(base,
manager)
if os.path.exists(name):
bot.debug("Found template %s" %name)
return ''.join(read_file(name))
bot.warning("Template %s not found." %name)
|
def get_build_template(name=None, manager='apt'):
'''get a particular build template, by default we return templates
that are based on package managers.
Parameters
==========
name: the full path of the template file to use.
manager: the package manager to use in the template (yum or apt)
'''
base = get_installdir()
if name is None:
name = "%s/main/templates/build/singularity-builder-%s.sh" %(base,
manager)
if os.path.exists(name):
bot.debug("Found template %s" %name)
return ''.join(read_file(name))
bot.warning("Template %s not found." %name)
|
[
"get",
"a",
"particular",
"build",
"template",
"by",
"default",
"we",
"return",
"templates",
"that",
"are",
"based",
"on",
"package",
"managers",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/google_storage/utils.py#L40-L59
|
[
"def",
"get_build_template",
"(",
"name",
"=",
"None",
",",
"manager",
"=",
"'apt'",
")",
":",
"base",
"=",
"get_installdir",
"(",
")",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"%s/main/templates/build/singularity-builder-%s.sh\"",
"%",
"(",
"base",
",",
"manager",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"name",
")",
":",
"bot",
".",
"debug",
"(",
"\"Found template %s\"",
"%",
"name",
")",
"return",
"''",
".",
"join",
"(",
"read_file",
"(",
"name",
")",
")",
"bot",
".",
"warning",
"(",
"\"Template %s not found.\"",
"%",
"name",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
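A sketch of the default path resolution, with the install directory shown symbolically:

script = get_build_template(manager='yum')
# reads <installdir>/main/templates/build/singularity-builder-yum.sh into one
# string, or warns and returns None if the file does not exist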
test
|
get_metadata
|
extract metadata using Singularity inspect, if the executable is found.
If not, return a reasonable default (the parsed image name)
Parameters
==========
image_file: the full path to a Singularity image
names: optional, an extracted or otherwise created dictionary of
variables for the image, likely from utils.parse_image_name
|
sregistry/main/base/inspect.py
|
def get_metadata(self, image_file, names={}):
'''extract metadata using Singularity inspect, if the executable is found.
If not, return a reasonable default (the parsed image name)
Parameters
==========
image_file: the full path to a Singularity image
names: optional, an extracted or otherwise created dictionary of
variables for the image, likely from utils.parse_image_name
'''
metadata = dict()
# We can't return anything without image_file or names
if image_file is not None:
if not os.path.exists(image_file):
bot.error('Cannot find %s.' %image_file)
return names or metadata
# The user provided a file, but no names
if not names:
names = parse_image_name(remove_uri(image_file))
# Look for the Singularity Executable
singularity = which('singularity')['message']
# Inspect the image, or return names only
if os.path.exists(singularity) and image_file is not None:
from spython.main import Client as Singularity
# We try and inspect, but not required (won't work within Docker)
try:
Singularity.quiet = True
updates = Singularity.inspect(image=image_file)
except:
bot.warning('Inspect command not supported, metadata not included.')
updates = None
# Try loading the metadata
if updates is not None:
try:
updates = json.loads(updates)
metadata.update(updates)
except:
pass
metadata.update(names)
return metadata
|
def get_metadata(self, image_file, names={}):
'''extract metadata using Singularity inspect, if the executable is found.
If not, return a reasonable default (the parsed image name)
Parameters
==========
image_file: the full path to a Singularity image
names: optional, an extracted or otherwise created dictionary of
variables for the image, likely from utils.parse_image_name
'''
metadata = dict()
# We can't return anything without image_file or names
if image_file is not None:
if not os.path.exists(image_file):
bot.error('Cannot find %s.' %image_file)
return names or metadata
# The user provided a file, but no names
if not names:
names = parse_image_name(remove_uri(image_file))
# Look for the Singularity Executable
singularity = which('singularity')['message']
# Inspect the image, or return names only
if os.path.exists(singularity) and image_file is not None:
from spython.main import Client as Singularity
# We try and inspect, but not required (won't work within Docker)
try:
Singularity.quiet = True
updates = Singularity.inspect(image=image_file)
except:
bot.warning('Inspect command not supported, metadata not included.')
updates = None
# Try loading the metadata
if updates is not None:
try:
updates = json.loads(updates)
metadata.update(updates)
except:
pass
metadata.update(names)
return metadata
|
[
"extract",
"metadata",
"using",
"Singularity",
"inspect",
"if",
"the",
"executable",
"is",
"found",
".",
"If",
"not",
"return",
"a",
"reasonable",
"default",
"(",
"the",
"parsed",
"image",
"name",
")"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/base/inspect.py#L27-L74
|
[
"def",
"get_metadata",
"(",
"self",
",",
"image_file",
",",
"names",
"=",
"{",
"}",
")",
":",
"metadata",
"=",
"dict",
"(",
")",
"# We can't return anything without image_file or names",
"if",
"image_file",
"is",
"not",
"None",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"image_file",
")",
":",
"bot",
".",
"error",
"(",
"'Cannot find %s.'",
"%",
"image_file",
")",
"return",
"names",
"or",
"metadata",
"# The user provided a file, but no names",
"if",
"not",
"names",
":",
"names",
"=",
"parse_image_name",
"(",
"remove_uri",
"(",
"image_file",
")",
")",
"# Look for the Singularity Executable",
"singularity",
"=",
"which",
"(",
"'singularity'",
")",
"[",
"'message'",
"]",
"# Inspect the image, or return names only",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"singularity",
")",
"and",
"image_file",
"is",
"not",
"None",
":",
"from",
"spython",
".",
"main",
"import",
"Client",
"as",
"Singularity",
"# We try and inspect, but not required (wont work within Docker)",
"try",
":",
"Singularity",
".",
"quiet",
"=",
"True",
"updates",
"=",
"Singularity",
".",
"inspect",
"(",
"image",
"=",
"image_file",
")",
"except",
":",
"bot",
".",
"warning",
"(",
"'Inspect command not supported, metadata not included.'",
")",
"updates",
"=",
"None",
"# Try loading the metadata",
"if",
"updates",
"is",
"not",
"None",
":",
"try",
":",
"updates",
"=",
"json",
".",
"loads",
"(",
"updates",
")",
"metadata",
".",
"update",
"(",
"updates",
")",
"except",
":",
"pass",
"metadata",
".",
"update",
"(",
"names",
")",
"return",
"metadata"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
|
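A sketch of the merge order: fields parsed from the name overwrite anything returned by Singularity inspect, because metadata.update(names) runs last:

names = parse_image_name(remove_uri('docker://ubuntu:18.04'))
meta = client.get_metadata('/tmp/ubuntu.sif', names=names)
# meta holds the inspect output (when the singularity binary is found)
# plus the parsed name fields, with names taking precedence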
test
|
_pull
|
pull an image from a docker hub. This is a (less than ideal) workaround
that actually does the following:
- creates a sandbox folder
- adds docker layers, metadata folder, and custom metadata to it
- converts to a squashfs image with build
the docker manifests are stored with registry metadata.
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
|
sregistry/main/docker/pull.py
|
def _pull(self,
file_name,
names,
save=True,
force=False,
uri="docker://",
**kwargs):
'''pull an image from a docker hub. This is a (less than ideal) workaround
that actually does the following:
- creates a sandbox folder
- adds docker layers, metadata folder, and custom metadata to it
- converts to a squashfs image with build
the docker manifests are stored with registry metadata.
Parameters
==========
images: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
'''
# Use Singularity to build the image, based on user preference
if file_name is None:
file_name = self._get_storage_name(names)
# Determine if the user already has the image
if os.path.exists(file_name) and force is False:
bot.error('Image exists! Remove first, or use --force to overwrite')
sys.exit(1)
digest = names['version'] or names['tag']
# Build from sandbox, prefix with sandbox
sandbox = get_tmpdir(prefix="sregistry-sandbox")
# First effort, get image via Sregistry
layers = self._download_layers(names['url'], digest)
# This is the url where the manifests were obtained
url = self._get_manifest_selfLink(names['url'], digest)
# Add environment to the layers
envtar = self._get_environment_tar()
layers = [envtar] + layers
# Create singularity image from an empty folder
for layer in layers:
bot.info('Exploding %s' %layer)
result = extract_tar(layer, sandbox, handle_whiteout=True)
if result['return_code'] != 0:
bot.error(result['message'])
sys.exit(1)
sudo = kwargs.get('sudo', False)
# Build from a sandbox (recipe) into the image_file (squashfs)
image_file = Singularity.build(image=file_name,
recipe=sandbox,
sudo=sudo)
# Fall back to using Singularity
if image_file is None:
bot.info('Downloading with native Singularity, please wait...')
image = "%s%s" % (uri, names['uri'])  # assumption: rebuild the uri from names (image was otherwise undefined)
image_file = Singularity.pull(image, pull_folder=sandbox)
# Save to local storage
if save is True:
# Did we get the manifests?
manifests = {}
if hasattr(self, 'manifests'):
manifests = self.manifests
container = self.add(image_path = image_file,
image_uri = names['uri'],
metadata = manifests,
url = url)
# When the container is created, this is the path to the image
image_file = container.image
if os.path.exists(image_file):
bot.debug('Retrieved image file %s' %image_file)
bot.custom(prefix="Success!", message=image_file)
# Clean up sandbox
shutil.rmtree(sandbox)
return image_file
|
def _pull(self,
file_name,
names,
save=True,
force=False,
uri="docker://",
**kwargs):
'''pull an image from a docker hub. This is a (less than ideal) workaround
that actually does the following:
- creates a sandbox folder
- adds docker layers, metadata folder, and custom metadata to it
- converts to a squashfs image with build
the docker manifests are stored with registry metadata.
Parameters
==========
names: refers to the uri given by the user to pull in the format
<collection>/<namespace>. You should have an API that is able to
retrieve a container based on parsing this uri.
file_name: the user's requested name for the file. It can
optionally be None if the user wants a default.
save: if True, you should save the container to the database
using self.add()
Returns
=======
finished: a single container path, or list of paths
'''
# Use Singularity to build the image, based on user preference
if file_name is None:
file_name = self._get_storage_name(names)
# Determine if the user already has the image
if os.path.exists(file_name) and force is False:
bot.error('Image exists! Remove first, or use --force to overwrite')
sys.exit(1)
digest = names['version'] or names['tag']
# Build from sandbox, prefix with sandbox
sandbox = get_tmpdir(prefix="sregistry-sandbox")
# First effort, get image via Sregistry
layers = self._download_layers(names['url'], digest)
# This is the url where the manifests were obtained
url = self._get_manifest_selfLink(names['url'], digest)
# Add environment to the layers
envtar = self._get_environment_tar()
layers = [envtar] + layers
# Create singularity image from an empty folder
for layer in layers:
bot.info('Exploding %s' %layer)
result = extract_tar(layer, sandbox, handle_whiteout=True)
if result['return_code'] != 0:
bot.error(result['message'])
sys.exit(1)
sudo = kwargs.get('sudo', False)
# Build from a sandbox (recipe) into the image_file (squashfs)
image_file = Singularity.build(image=file_name,
recipe=sandbox,
sudo=sudo)
# Fall back to using Singularity
if image_file is None:
bot.info('Downloading with native Singularity, please wait...')
# names['uri'] is presumably the intended source here; `image` was otherwise undefined
image = names['uri'].replace('docker://', uri)
image_file = Singularity.pull(image, pull_folder=sandbox)
# Save to local storage
if save is True:
# Did we get the manifests?
manifests = {}
if hasattr(self, 'manifests'):
manifests = self.manifests
container = self.add(image_path = image_file,
image_uri = names['uri'],
metadata = manifests,
url = url)
# When the container is created, this is the path to the image
image_file = container.image
if os.path.exists(image_file):
bot.debug('Retrieved image file %s' %image_file)
bot.custom(prefix="Success!", message=image_file)
# Clean up sandbox
shutil.rmtree(sandbox)
return image_file
|
[
"pull",
"an",
"image",
"from",
"a",
"docker",
"hub",
".",
"This",
"is",
"a",
"(",
"less",
"than",
"ideal",
")",
"workaround",
"that",
"actually",
"does",
"the",
"following",
":"
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/docker/pull.py#L84-L185
|
[
"def",
"_pull",
"(",
"self",
",",
"file_name",
",",
"names",
",",
"save",
"=",
"True",
",",
"force",
"=",
"False",
",",
"uri",
"=",
"\"docker://\"",
",",
"*",
"*",
"kwargs",
")",
":",
"# Use Singularity to build the image, based on user preference",
"if",
"file_name",
"is",
"None",
":",
"file_name",
"=",
"self",
".",
"_get_storage_name",
"(",
"names",
")",
"# Determine if the user already has the image",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_name",
")",
"and",
"force",
"is",
"False",
":",
"bot",
".",
"error",
"(",
"'Image exists! Remove first, or use --force to overwrite'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"digest",
"=",
"names",
"[",
"'version'",
"]",
"or",
"names",
"[",
"'tag'",
"]",
"# Build from sandbox, prefix with sandbox",
"sandbox",
"=",
"get_tmpdir",
"(",
"prefix",
"=",
"\"sregistry-sandbox\"",
")",
"# First effort, get image via Sregistry",
"layers",
"=",
"self",
".",
"_download_layers",
"(",
"names",
"[",
"'url'",
"]",
",",
"digest",
")",
"# This is the url where the manifests were obtained",
"url",
"=",
"self",
".",
"_get_manifest_selfLink",
"(",
"names",
"[",
"'url'",
"]",
",",
"digest",
")",
"# Add environment to the layers",
"envtar",
"=",
"self",
".",
"_get_environment_tar",
"(",
")",
"layers",
"=",
"[",
"envtar",
"]",
"+",
"layers",
"# Create singularity image from an empty folder",
"for",
"layer",
"in",
"layers",
":",
"bot",
".",
"info",
"(",
"'Exploding %s'",
"%",
"layer",
")",
"result",
"=",
"extract_tar",
"(",
"layer",
",",
"sandbox",
",",
"handle_whiteout",
"=",
"True",
")",
"if",
"result",
"[",
"'return_code'",
"]",
"!=",
"0",
":",
"bot",
".",
"error",
"(",
"result",
"[",
"'message'",
"]",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"sudo",
"=",
"kwargs",
".",
"get",
"(",
"'sudo'",
",",
"False",
")",
"# Build from a sandbox (recipe) into the image_file (squashfs)",
"image_file",
"=",
"Singularity",
".",
"build",
"(",
"image",
"=",
"file_name",
",",
"recipe",
"=",
"sandbox",
",",
"sudo",
"=",
"sudo",
")",
"# Fall back to using Singularity",
"if",
"image_file",
"is",
"None",
":",
"bot",
".",
"info",
"(",
"'Downloading with native Singularity, please wait...'",
")",
"image",
"=",
"image",
".",
"replace",
"(",
"'docker://'",
",",
"uri",
")",
"image_file",
"=",
"Singularity",
".",
"pull",
"(",
"image",
",",
"pull_folder",
"=",
"sandbox",
")",
"# Save to local storage",
"if",
"save",
"is",
"True",
":",
"# Did we get the manifests?",
"manifests",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'manifests'",
")",
":",
"manifests",
"=",
"self",
".",
"manifests",
"container",
"=",
"self",
".",
"add",
"(",
"image_path",
"=",
"image_file",
",",
"image_uri",
"=",
"names",
"[",
"'uri'",
"]",
",",
"metadata",
"=",
"manifests",
",",
"url",
"=",
"url",
")",
"# When the container is created, this is the path to the image",
"image_file",
"=",
"container",
".",
"image",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"image_file",
")",
":",
"bot",
".",
"debug",
"(",
"'Retrieved image file %s'",
"%",
"image_file",
")",
"bot",
".",
"custom",
"(",
"prefix",
"=",
"\"Success!\"",
",",
"message",
"=",
"image_file",
")",
"# Clean up sandbox",
"shutil",
".",
"rmtree",
"(",
"sandbox",
")",
"return",
"image_file"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
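A minimal usage sketch for the pull workflow above, assuming sregistry-cli is installed; `get_client` is the documented client factory, the image uri is hypothetical, and `pull` presumably parses it into `names` before delegating to `_pull`:

from sregistry.main import get_client

# resolve the docker backend from the uri prefix
client = get_client('docker://library/ubuntu:latest')

# pull() handles uri parsing and calls into the backend's _pull
image = client.pull('docker://library/ubuntu:latest', force=True)
print(image)  # path to the resulting squashfs image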
|
test
|
Client._update_secrets
|
update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
secrets as well as the associated API base. This is where you
should do any customization of the secrets file, or use
it to update your client, if needed.
|
sregistry/main/__template__/__init__.py
|
def _update_secrets(self):
'''update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
secrets as well as the associated API base. This is where you
should do any customization of the secrets file, or use
it to update your client, if needed.
'''
# Get a setting for client myclient and some variable name VAR.
# returns None if not set
setting = self._get_setting('SREGISTRY_MYCLIENT_VAR')
# Get (and if found in environment (1) settings (2) update the variable
# It will still return None if not set
setting = self._get_and_update_setting('SREGISTRY_MYCLIENT_VAR')
# If you have a setting that is required and not found, you should exit.
# Here is how to read all client secrets
self.secrets = read_client_secrets()
# If you don't want to use the shared settings file, you have your own.
# Here is how to get if the user has a cache for you enabled, this
# returns a path (enabled) or None (disabled) that you should honor
# You can use this as a file path or folder and for both cases, you
# need to create the file or folder
if self._credential_cache is not None:
bot.info("credential cache set to %s" %self._credential_cache)
|
def _update_secrets(self):
'''update secrets will take a secrets credential file
either located at .sregistry or the environment variable
SREGISTRY_CLIENT_SECRETS and update the current client
secrets as well as the associated API base. This is where you
should do any customization of the secrets file, or use
it to update your client, if needed.
'''
# Get a setting for client myclient and some variable name VAR.
# returns None if not set
setting = self._get_setting('SREGISTRY_MYCLIENT_VAR')
# Get (and if found in environment (1) settings (2) update the variable
# It will still return None if not set
setting = self._get_and_update_setting('SREGISTRY_MYCLIENT_VAR')
# If you have a setting that is required and not found, you should exit.
# Here is how to read all client secrets
self.secrets = read_client_secrets()
# If you don't want to use the shared settings file, you have your own.
# Here is how to get if the user has a cache for you enabled, this
# returns a path (enabled) or None (disabled) that you should honor
# You can use this as a file path or folder and for both cases, you
# need to create the file or folder
if self._credential_cache is not None:
bot.info("credential cache set to %s" %self._credential_cache)
|
[
"update",
"secrets",
"will",
"take",
"a",
"secrets",
"credential",
"file",
"either",
"located",
"at",
".",
"sregistry",
"or",
"the",
"environment",
"variable",
"SREGISTRY_CLIENT_SECRETS",
"and",
"update",
"the",
"current",
"client",
"secrets",
"as",
"well",
"as",
"the",
"associated",
"API",
"base",
".",
"This",
"is",
"where",
"you",
"should",
"do",
"any",
"customization",
"of",
"the",
"secrets",
"flie",
"or",
"using",
"it",
"to",
"update",
"your",
"client",
"if",
"needed",
"."
] |
singularityhub/sregistry-cli
|
python
|
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/__template__/__init__.py#L38-L65
|
[
"def",
"_update_secrets",
"(",
"self",
")",
":",
"# Get a setting for client myclient and some variable name VAR. ",
"# returns None if not set",
"setting",
"=",
"self",
".",
"_get_setting",
"(",
"'SREGISTRY_MYCLIENT_VAR'",
")",
"# Get (and if found in environment (1) settings (2) update the variable",
"# It will still return None if not set",
"setting",
"=",
"self",
".",
"_get_and_update_setting",
"(",
"'SREGISTRY_MYCLIENT_VAR'",
")",
"# If you have a setting that is required and not found, you should exit.",
"# Here is how to read all client secrets",
"self",
".",
"secrets",
"=",
"read_client_secrets",
"(",
")",
"# If you don't want to use the shared settings file, you have your own.",
"# Here is how to get if the user has a cache for you enabled, this",
"# returns a path (enabled) or None (disabled) that you should honor",
"# You can use this as a file path or folder and for both cases, you",
"# need to create the file or folder",
"if",
"self",
".",
"_credential_cache",
"is",
"not",
"None",
":",
"bot",
".",
"info",
"(",
"\"credential cache set to %s\"",
"%",
"self",
".",
"_credential_cache",
")"
] |
abc96140a1d15b5e96d83432e1e0e1f4f8f36331
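The lookup order the template describes (environment variable first, then the shared secrets file) can be sketched standalone; the file path and flat-JSON layout here are assumptions, not the library's actual format:

import json
import os

def get_setting(name, secrets_file='~/.sregistry'):
    '''Sketch of the env-then-settings lookup described above.'''
    # 1. an exported environment variable always wins
    value = os.environ.get(name)
    # 2. otherwise fall back to the shared secrets file
    path = os.path.expanduser(secrets_file)
    if value is None and os.path.exists(path):
        with open(path) as fh:
            value = json.load(fh).get(name)
    return value  # still None if unset everywhere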
|
test
|
_make_repr
|
Generate a repr string.
Positional arguments should be the positional arguments used to
construct the class. Keyword arguments should consist of tuples of
the attribute value and default. If the value is the default, then
it won't be rendered in the output.
Here's an example::
def __repr__(self):
return make_repr('MyClass', 'foo', name=(self.name, None))
The output of this would be something like ``MyClass('foo',
name='Will')``.
|
fs_s3fs/_s3fs.py
|
def _make_repr(class_name, *args, **kwargs):
"""
Generate a repr string.
Positional arguments should be the positional arguments used to
construct the class. Keyword arguments should consist of tuples of
the attribute value and default. If the value is the default, then
it won't be rendered in the output.
Here's an example::
def __repr__(self):
return make_repr('MyClass', 'foo', name=(self.name, None))
The output of this would be something like ``MyClass('foo',
name='Will')``.
"""
arguments = [repr(arg) for arg in args]
arguments.extend(
"{}={!r}".format(name, value)
for name, (value, default) in sorted(kwargs.items())
if value != default
)
return "{}({})".format(class_name, ", ".join(arguments))
|
def _make_repr(class_name, *args, **kwargs):
"""
Generate a repr string.
Positional arguments should be the positional arguments used to
construct the class. Keyword arguments should consist of tuples of
the attribute value and default. If the value is the default, then
it won't be rendered in the output.
Here's an example::
def __repr__(self):
return make_repr('MyClass', 'foo', name=(self.name, None))
The output of this would be something like ``MyClass('foo',
name='Will')``.
"""
arguments = [repr(arg) for arg in args]
arguments.extend(
"{}={!r}".format(name, value)
for name, (value, default) in sorted(kwargs.items())
if value != default
)
return "{}({})".format(class_name, ", ".join(arguments))
|
[
"Generate",
"a",
"repr",
"string",
"."
] |
PyFilesystem/s3fs
|
python
|
https://github.com/PyFilesystem/s3fs/blob/1c5e3a1b6abbb9dff91ea7fc4cec7353798cd536/fs_s3fs/_s3fs.py#L34-L58
|
[
"def",
"_make_repr",
"(",
"class_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"arguments",
"=",
"[",
"repr",
"(",
"arg",
")",
"for",
"arg",
"in",
"args",
"]",
"arguments",
".",
"extend",
"(",
"\"{}={!r}\"",
".",
"format",
"(",
"name",
",",
"value",
")",
"for",
"name",
",",
"(",
"value",
",",
"default",
")",
"in",
"sorted",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
"if",
"value",
"!=",
"default",
")",
"return",
"\"{}({})\"",
".",
"format",
"(",
"class_name",
",",
"\", \"",
".",
"join",
"(",
"arguments",
")",
")"
] |
1c5e3a1b6abbb9dff91ea7fc4cec7353798cd536
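Using `_make_repr` exactly as defined above, a `__repr__` that hides defaulted keyword arguments looks like this; `MyClass` is the docstring's own example, fleshed out:

class MyClass(object):
    def __init__(self, tag, name=None):
        self.tag = tag
        self.name = name

    def __repr__(self):
        # keyword arguments are (value, default) pairs; defaults are omitted
        return _make_repr('MyClass', self.tag, name=(self.name, None))

print(repr(MyClass('foo')))          # MyClass('foo')
print(repr(MyClass('foo', 'Will')))  # MyClass('foo', name='Will')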
|
test
|
s3errors
|
Translate S3 errors to FSErrors.
|
fs_s3fs/_s3fs.py
|
def s3errors(path):
"""Translate S3 errors to FSErrors."""
try:
yield
except ClientError as error:
_error = error.response.get("Error", {})
error_code = _error.get("Code", None)
response_meta = error.response.get("ResponseMetadata", {})
http_status = response_meta.get("HTTPStatusCode", 200)
error_msg = _error.get("Message", None)
if error_code == "NoSuchBucket":
raise errors.ResourceError(path, exc=error, msg=error_msg)
if http_status == 404:
raise errors.ResourceNotFound(path)
elif http_status == 403:
raise errors.PermissionDenied(path=path, msg=error_msg)
else:
raise errors.OperationFailed(path=path, exc=error)
except SSLError as error:
raise errors.OperationFailed(path, exc=error)
except EndpointConnectionError as error:
raise errors.RemoteConnectionError(path, exc=error, msg="{}".format(error))
|
def s3errors(path):
"""Translate S3 errors to FSErrors."""
try:
yield
except ClientError as error:
_error = error.response.get("Error", {})
error_code = _error.get("Code", None)
response_meta = error.response.get("ResponseMetadata", {})
http_status = response_meta.get("HTTPStatusCode", 200)
error_msg = _error.get("Message", None)
if error_code == "NoSuchBucket":
raise errors.ResourceError(path, exc=error, msg=error_msg)
if http_status == 404:
raise errors.ResourceNotFound(path)
elif http_status == 403:
raise errors.PermissionDenied(path=path, msg=error_msg)
else:
raise errors.OperationFailed(path=path, exc=error)
except SSLError as error:
raise errors.OperationFailed(path, exc=error)
except EndpointConnectionError as error:
raise errors.RemoteConnectionError(path, exc=error, msg="{}".format(error))
|
[
"Translate",
"S3",
"errors",
"to",
"FSErrors",
"."
] |
PyFilesystem/s3fs
|
python
|
https://github.com/PyFilesystem/s3fs/blob/1c5e3a1b6abbb9dff91ea7fc4cec7353798cd536/fs_s3fs/_s3fs.py#L171-L192
|
[
"def",
"s3errors",
"(",
"path",
")",
":",
"try",
":",
"yield",
"except",
"ClientError",
"as",
"error",
":",
"_error",
"=",
"error",
".",
"response",
".",
"get",
"(",
"\"Error\"",
",",
"{",
"}",
")",
"error_code",
"=",
"_error",
".",
"get",
"(",
"\"Code\"",
",",
"None",
")",
"response_meta",
"=",
"error",
".",
"response",
".",
"get",
"(",
"\"ResponseMetadata\"",
",",
"{",
"}",
")",
"http_status",
"=",
"response_meta",
".",
"get",
"(",
"\"HTTPStatusCode\"",
",",
"200",
")",
"error_msg",
"=",
"_error",
".",
"get",
"(",
"\"Message\"",
",",
"None",
")",
"if",
"error_code",
"==",
"\"NoSuchBucket\"",
":",
"raise",
"errors",
".",
"ResourceError",
"(",
"path",
",",
"exc",
"=",
"error",
",",
"msg",
"=",
"error_msg",
")",
"if",
"http_status",
"==",
"404",
":",
"raise",
"errors",
".",
"ResourceNotFound",
"(",
"path",
")",
"elif",
"http_status",
"==",
"403",
":",
"raise",
"errors",
".",
"PermissionDenied",
"(",
"path",
"=",
"path",
",",
"msg",
"=",
"error_msg",
")",
"else",
":",
"raise",
"errors",
".",
"OperationFailed",
"(",
"path",
"=",
"path",
",",
"exc",
"=",
"error",
")",
"except",
"SSLError",
"as",
"error",
":",
"raise",
"errors",
".",
"OperationFailed",
"(",
"path",
",",
"exc",
"=",
"error",
")",
"except",
"EndpointConnectionError",
"as",
"error",
":",
"raise",
"errors",
".",
"RemoteConnectionError",
"(",
"path",
",",
"exc",
"=",
"error",
",",
"msg",
"=",
"\"{}\"",
".",
"format",
"(",
"error",
")",
")"
] |
1c5e3a1b6abbb9dff91ea7fc4cec7353798cd536
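Because the body yields, `s3errors` is presumably decorated with `@contextmanager` in the full source (the decorator is not captured above). A hedged usage sketch with hypothetical bucket and key names:

import boto3
from fs import errors

s3 = boto3.client('s3')
try:
    # any ClientError raised inside the block surfaces as an FSError subclass
    with s3errors('/example.txt'):
        s3.head_object(Bucket='my-bucket', Key='example.txt')
except errors.ResourceNotFound:
    print('no such key')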
|
test
|
S3File.factory
|
Create a S3File backed with a temporary file.
|
fs_s3fs/_s3fs.py
|
def factory(cls, filename, mode, on_close):
"""Create a S3File backed with a temporary file."""
_temp_file = tempfile.TemporaryFile()
proxy = cls(_temp_file, filename, mode, on_close=on_close)
return proxy
|
def factory(cls, filename, mode, on_close):
"""Create a S3File backed with a temporary file."""
_temp_file = tempfile.TemporaryFile()
proxy = cls(_temp_file, filename, mode, on_close=on_close)
return proxy
|
[
"Create",
"a",
"S3File",
"backed",
"with",
"a",
"temporary",
"file",
"."
] |
PyFilesystem/s3fs
|
python
|
https://github.com/PyFilesystem/s3fs/blob/1c5e3a1b6abbb9dff91ea7fc4cec7353798cd536/fs_s3fs/_s3fs.py#L65-L69
|
[
"def",
"factory",
"(",
"cls",
",",
"filename",
",",
"mode",
",",
"on_close",
")",
":",
"_temp_file",
"=",
"tempfile",
".",
"TemporaryFile",
"(",
")",
"proxy",
"=",
"cls",
"(",
"_temp_file",
",",
"filename",
",",
"mode",
",",
"on_close",
"=",
"on_close",
")",
"return",
"proxy"
] |
1c5e3a1b6abbb9dff91ea7fc4cec7353798cd536
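`factory` takes `cls` first, so it is presumably a classmethod; a minimal sketch, with a no-op `on_close` standing in for the real upload callback:

# writes land in the backing temporary file; on_close fires when the
# proxy is closed (in s3fs that is where the upload would happen)
proxy = S3File.factory('example.txt', 'wb', on_close=lambda s3file: None)
proxy.write(b'hello')
proxy.close()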
|
test
|
gravatar_url
|
Builds a gravatar url from a user or email
|
django_gravatar/templatetags/gravatar.py
|
def gravatar_url(user_or_email, size=GRAVATAR_DEFAULT_SIZE):
""" Builds a gravatar url from an user or email """
if hasattr(user_or_email, 'email'):
email = user_or_email.email
else:
email = user_or_email
try:
return escape(get_gravatar_url(email=email, size=size))
except:
return ''
|
def gravatar_url(user_or_email, size=GRAVATAR_DEFAULT_SIZE):
""" Builds a gravatar url from an user or email """
if hasattr(user_or_email, 'email'):
email = user_or_email.email
else:
email = user_or_email
try:
return escape(get_gravatar_url(email=email, size=size))
except:
return ''
|
[
"Builds",
"a",
"gravatar",
"url",
"from",
"an",
"user",
"or",
"email"
] |
twaddington/django-gravatar
|
python
|
https://github.com/twaddington/django-gravatar/blob/c4849d93ed43b419eceff0ff2de83d4265597629/django_gravatar/templatetags/gravatar.py#L11-L21
|
[
"def",
"gravatar_url",
"(",
"user_or_email",
",",
"size",
"=",
"GRAVATAR_DEFAULT_SIZE",
")",
":",
"if",
"hasattr",
"(",
"user_or_email",
",",
"'email'",
")",
":",
"email",
"=",
"user_or_email",
".",
"email",
"else",
":",
"email",
"=",
"user_or_email",
"try",
":",
"return",
"escape",
"(",
"get_gravatar_url",
"(",
"email",
"=",
"email",
",",
"size",
"=",
"size",
")",
")",
"except",
":",
"return",
"''"
] |
c4849d93ed43b419eceff0ff2de83d4265597629
|
test
|
gravatar
|
Builds a gravatar <img> tag from a user or email
|
django_gravatar/templatetags/gravatar.py
|
def gravatar(user_or_email, size=GRAVATAR_DEFAULT_SIZE, alt_text='', css_class='gravatar'):
""" Builds an gravatar <img> tag from an user or email """
if hasattr(user_or_email, 'email'):
email = user_or_email.email
else:
email = user_or_email
try:
url = escape(get_gravatar_url(email=email, size=size))
except:
return ''
return mark_safe(
'<img class="{css_class}" src="{src}" width="{width}"'
' height="{height}" alt="{alt}" />'.format(
css_class=css_class, src=url, width=size, height=size, alt=alt_text
)
)
|
def gravatar(user_or_email, size=GRAVATAR_DEFAULT_SIZE, alt_text='', css_class='gravatar'):
""" Builds an gravatar <img> tag from an user or email """
if hasattr(user_or_email, 'email'):
email = user_or_email.email
else:
email = user_or_email
try:
url = escape(get_gravatar_url(email=email, size=size))
except:
return ''
return mark_safe(
'<img class="{css_class}" src="{src}" width="{width}"'
' height="{height}" alt="{alt}" />'.format(
css_class=css_class, src=url, width=size, height=size, alt=alt_text
)
)
|
[
"Builds",
"an",
"gravatar",
"<img",
">",
"tag",
"from",
"an",
"user",
"or",
"email"
] |
twaddington/django-gravatar
|
python
|
https://github.com/twaddington/django-gravatar/blob/c4849d93ed43b419eceff0ff2de83d4265597629/django_gravatar/templatetags/gravatar.py#L24-L41
|
[
"def",
"gravatar",
"(",
"user_or_email",
",",
"size",
"=",
"GRAVATAR_DEFAULT_SIZE",
",",
"alt_text",
"=",
"''",
",",
"css_class",
"=",
"'gravatar'",
")",
":",
"if",
"hasattr",
"(",
"user_or_email",
",",
"'email'",
")",
":",
"email",
"=",
"user_or_email",
".",
"email",
"else",
":",
"email",
"=",
"user_or_email",
"try",
":",
"url",
"=",
"escape",
"(",
"get_gravatar_url",
"(",
"email",
"=",
"email",
",",
"size",
"=",
"size",
")",
")",
"except",
":",
"return",
"''",
"return",
"mark_safe",
"(",
"'<img class=\"{css_class}\" src=\"{src}\" width=\"{width}\"'",
"' height=\"{height}\" alt=\"{alt}\" />'",
".",
"format",
"(",
"css_class",
"=",
"css_class",
",",
"src",
"=",
"url",
",",
"width",
"=",
"size",
",",
"height",
"=",
"size",
",",
"alt",
"=",
"alt_text",
")",
")"
] |
c4849d93ed43b419eceff0ff2de83d4265597629
|
test
|
get_gravatar_url
|
Builds a url to a gravatar from an email address.
:param email: The email to fetch the gravatar for
:param size: The size (in pixels) of the gravatar to fetch
:param default: What type of default image to use if the gravatar does not exist
:param rating: Used to filter the allowed gravatar ratings
:param secure: If True use https, otherwise plain http
|
django_gravatar/helpers.py
|
def get_gravatar_url(email, size=GRAVATAR_DEFAULT_SIZE, default=GRAVATAR_DEFAULT_IMAGE,
rating=GRAVATAR_DEFAULT_RATING, secure=GRAVATAR_DEFAULT_SECURE):
"""
Builds a url to a gravatar from an email address.
:param email: The email to fetch the gravatar for
:param size: The size (in pixels) of the gravatar to fetch
:param default: What type of default image to use if the gravatar does not exist
:param rating: Used to filter the allowed gravatar ratings
:param secure: If True use https, otherwise plain http
"""
if secure:
url_base = GRAVATAR_SECURE_URL
else:
url_base = GRAVATAR_URL
# Calculate the email hash
email_hash = calculate_gravatar_hash(email)
# Build querystring
query_string = urlencode({
's': str(size),
'd': default,
'r': rating,
})
# Build url
url = '{base}avatar/{hash}.jpg?{qs}'.format(base=url_base,
hash=email_hash, qs=query_string)
return url
|
def get_gravatar_url(email, size=GRAVATAR_DEFAULT_SIZE, default=GRAVATAR_DEFAULT_IMAGE,
rating=GRAVATAR_DEFAULT_RATING, secure=GRAVATAR_DEFAULT_SECURE):
"""
Builds a url to a gravatar from an email address.
:param email: The email to fetch the gravatar for
:param size: The size (in pixels) of the gravatar to fetch
:param default: What type of default image to use if the gravatar does not exist
:param rating: Used to filter the allowed gravatar ratings
:param secure: If True use https, otherwise plain http
"""
if secure:
url_base = GRAVATAR_SECURE_URL
else:
url_base = GRAVATAR_URL
# Calculate the email hash
email_hash = calculate_gravatar_hash(email)
# Build querystring
query_string = urlencode({
's': str(size),
'd': default,
'r': rating,
})
# Build url
url = '{base}avatar/{hash}.jpg?{qs}'.format(base=url_base,
hash=email_hash, qs=query_string)
return url
|
[
"Builds",
"a",
"url",
"to",
"a",
"gravatar",
"from",
"an",
"email",
"address",
"."
] |
twaddington/django-gravatar
|
python
|
https://github.com/twaddington/django-gravatar/blob/c4849d93ed43b419eceff0ff2de83d4265597629/django_gravatar/helpers.py#L41-L71
|
[
"def",
"get_gravatar_url",
"(",
"email",
",",
"size",
"=",
"GRAVATAR_DEFAULT_SIZE",
",",
"default",
"=",
"GRAVATAR_DEFAULT_IMAGE",
",",
"rating",
"=",
"GRAVATAR_DEFAULT_RATING",
",",
"secure",
"=",
"GRAVATAR_DEFAULT_SECURE",
")",
":",
"if",
"secure",
":",
"url_base",
"=",
"GRAVATAR_SECURE_URL",
"else",
":",
"url_base",
"=",
"GRAVATAR_URL",
"# Calculate the email hash",
"email_hash",
"=",
"calculate_gravatar_hash",
"(",
"email",
")",
"# Build querystring",
"query_string",
"=",
"urlencode",
"(",
"{",
"'s'",
":",
"str",
"(",
"size",
")",
",",
"'d'",
":",
"default",
",",
"'r'",
":",
"rating",
",",
"}",
")",
"# Build url",
"url",
"=",
"'{base}avatar/{hash}.jpg?{qs}'",
".",
"format",
"(",
"base",
"=",
"url_base",
",",
"hash",
"=",
"email_hash",
",",
"qs",
"=",
"query_string",
")",
"return",
"url"
] |
c4849d93ed43b419eceff0ff2de83d4265597629
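`calculate_gravatar_hash` is not shown above; Gravatar documents the hash as MD5 of the trimmed, lowercased email, so a minimal stand-in plus the same querystring construction looks like this (the secure base URL is an assumption matching gravatar.com's endpoint):

import hashlib
from urllib.parse import urlencode

def calculate_gravatar_hash(email):
    # Gravatar hashes the trimmed, lowercased email with MD5
    return hashlib.md5(email.strip().lower().encode('utf-8')).hexdigest()

query_string = urlencode({'s': '80', 'd': 'mm', 'r': 'g'})
url = 'https://secure.gravatar.com/avatar/{hash}.jpg?{qs}'.format(
    hash=calculate_gravatar_hash('alice@example.com'), qs=query_string)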
|
test
|
has_gravatar
|
Returns True if the user has a gravatar, False otherwise
|
django_gravatar/helpers.py
|
def has_gravatar(email):
"""
Returns True if the user has a gravatar, False otherwise
"""
# Request a 404 response if the gravatar does not exist
url = get_gravatar_url(email, default=GRAVATAR_DEFAULT_IMAGE_404)
# Verify an OK response was received
try:
request = Request(url)
request.get_method = lambda: 'HEAD'
return 200 == urlopen(request).code
except (HTTPError, URLError):
return False
|
def has_gravatar(email):
"""
Returns True if the user has a gravatar, False otherwise
"""
# Request a 404 response if the gravatar does not exist
url = get_gravatar_url(email, default=GRAVATAR_DEFAULT_IMAGE_404)
# Verify an OK response was received
try:
request = Request(url)
request.get_method = lambda: 'HEAD'
return 200 == urlopen(request).code
except (HTTPError, URLError):
return False
|
[
"Returns",
"True",
"if",
"the",
"user",
"has",
"a",
"gravatar",
"False",
"if",
"otherwise"
] |
twaddington/django-gravatar
|
python
|
https://github.com/twaddington/django-gravatar/blob/c4849d93ed43b419eceff0ff2de83d4265597629/django_gravatar/helpers.py#L74-L87
|
[
"def",
"has_gravatar",
"(",
"email",
")",
":",
"# Request a 404 response if the gravatar does not exist",
"url",
"=",
"get_gravatar_url",
"(",
"email",
",",
"default",
"=",
"GRAVATAR_DEFAULT_IMAGE_404",
")",
"# Verify an OK response was received",
"try",
":",
"request",
"=",
"Request",
"(",
"url",
")",
"request",
".",
"get_method",
"=",
"lambda",
":",
"'HEAD'",
"return",
"200",
"==",
"urlopen",
"(",
"request",
")",
".",
"code",
"except",
"(",
"HTTPError",
",",
"URLError",
")",
":",
"return",
"False"
] |
c4849d93ed43b419eceff0ff2de83d4265597629
|
test
|
get_gravatar_profile_url
|
Builds a url to a gravatar profile from an email address.
:param email: The email to fetch the gravatar for
:param secure: If True use https, otherwise plain http
|
django_gravatar/helpers.py
|
def get_gravatar_profile_url(email, secure=GRAVATAR_DEFAULT_SECURE):
"""
Builds a url to a gravatar profile from an email address.
:param email: The email to fetch the gravatar for
:param secure: If True use https, otherwise plain http
"""
if secure:
url_base = GRAVATAR_SECURE_URL
else:
url_base = GRAVATAR_URL
# Calculate the email hash
email_hash = calculate_gravatar_hash(email)
# Build url
url = '{base}{hash}'.format(base=url_base, hash=email_hash)
return url
|
def get_gravatar_profile_url(email, secure=GRAVATAR_DEFAULT_SECURE):
"""
Builds a url to a gravatar profile from an email address.
:param email: The email to fetch the gravatar for
:param secure: If True use https, otherwise plain http
"""
if secure:
url_base = GRAVATAR_SECURE_URL
else:
url_base = GRAVATAR_URL
# Calculate the email hash
email_hash = calculate_gravatar_hash(email)
# Build url
url = '{base}{hash}'.format(base=url_base, hash=email_hash)
return url
|
[
"Builds",
"a",
"url",
"to",
"a",
"gravatar",
"profile",
"from",
"an",
"email",
"address",
"."
] |
twaddington/django-gravatar
|
python
|
https://github.com/twaddington/django-gravatar/blob/c4849d93ed43b419eceff0ff2de83d4265597629/django_gravatar/helpers.py#L90-L108
|
[
"def",
"get_gravatar_profile_url",
"(",
"email",
",",
"secure",
"=",
"GRAVATAR_DEFAULT_SECURE",
")",
":",
"if",
"secure",
":",
"url_base",
"=",
"GRAVATAR_SECURE_URL",
"else",
":",
"url_base",
"=",
"GRAVATAR_URL",
"# Calculate the email hash",
"email_hash",
"=",
"calculate_gravatar_hash",
"(",
"email",
")",
"# Build url",
"url",
"=",
"'{base}{hash}'",
".",
"format",
"(",
"base",
"=",
"url_base",
",",
"hash",
"=",
"email_hash",
")",
"return",
"url"
] |
c4849d93ed43b419eceff0ff2de83d4265597629
|
test
|
graph_coloring_qubo
|
the QUBO for k-coloring a graph A is as follows:
variables:
x_{v,c} = 1 if vertex v of A gets color c; x_{v,c} = 0 otherwise
constraints:
1) each v in A gets exactly one color.
This constraint is enforced by including the term (\sum_c x_{v,c} - 1)^2 in the QUBO,
which is minimized when \sum_c x_{v,c} = 1.
2) If u and v in A are adjacent, then they get different colors.
This constraint is enforced by including terms x_{v,c} x_{u,c} in the QUBO,
which is minimized when at most one of u and v gets color c.
Total QUBO:
Q(x) = \sum_v (\sum_c x_{v,c} - 1)^2 + \sum_{u ~ v} \sum_c x_{v,c} x_{u,c}
The graph of interactions for this QUBO consists of cliques of size k (with vertices {x_{v,c} for c = 0,...,k-1})
plus k disjoint copies of the graph A (one for each color).
|
examples/fourcolor.py
|
def graph_coloring_qubo(graph, k):
"""
the QUBO for k-coloring a graph A is as follows:
variables:
x_{v,c} = 1 if vertex v of A gets color c; x_{v,c} = 0 otherwise
constraints:
1) each v in A gets exactly one color.
This constraint is enforced by including the term (\sum_c x_{v,c} - 1)^2 in the QUBO,
which is minimized when \sum_c x_{v,c} = 1.
2) If u and v in A are adjacent, then they get different colors.
This constraint is enforced by including terms x_{v,c} x_{u,c} in the QUBO,
which is minimized when at most one of u and v gets color c.
Total QUBO:
Q(x) = \sum_v (\sum_c x_{v,c} - 1)^2 + \sum_{u ~ v} \sum_c x_{v,c} x_{u,c}
The graph of interactions for this QUBO consists of cliques of size k (with vertices {x_{v,c} for c = 0,...,k-1})
plus k disjoint copies of the graph A (one for each color).
"""
K = nx.complete_graph(k)
g1 = nx.cartesian_product(nx.create_empty_copy(graph), K)
g2 = nx.cartesian_product(graph, nx.create_empty_copy(K))
return nx.compose(g1, g2)
|
def graph_coloring_qubo(graph, k):
"""
the QUBO for k-coloring a graph A is as follows:
variables:
x_{v,c} = 1 if vertex v of A gets color c; x_{v,c} = 0 otherwise
constraints:
1) each v in A gets exactly one color.
This constraint is enforced by including the term (\sum_c x_{v,c} - 1)^2 in the QUBO,
which is minimized when \sum_c x_{v,c} = 1.
2) If u and v in A are adjacent, then they get different colors.
This constraint is enforced by including terms x_{v,c} x_{u,c} in the QUBO,
which is minimized when at most one of u and v gets color c.
Total QUBO:
Q(x) = \sum_v (\sum_c x_{v,c} - 1)^2 + \sum_{u ~ v} \sum_c x_{v,c} x_{u,c}
The graph of interactions for this QUBO consists of cliques of size k (with vertices {x_{v,c} for c = 0,...,k-1})
plus k disjoint copies of the graph A (one for each color).
"""
K = nx.complete_graph(k)
g1 = nx.cartesian_product(nx.create_empty_copy(graph), K)
g2 = nx.cartesian_product(graph, nx.create_empty_copy(K))
return nx.compose(g1, g2)
|
[
"the",
"QUBO",
"for",
"k",
"-",
"coloring",
"a",
"graph",
"A",
"is",
"as",
"follows",
":"
] |
dwavesystems/minorminer
|
python
|
https://github.com/dwavesystems/minorminer/blob/05cac6db180adf8223a613dff808248e3048b07d/examples/fourcolor.py#L43-L70
|
[
"def",
"graph_coloring_qubo",
"(",
"graph",
",",
"k",
")",
":",
"K",
"=",
"nx",
".",
"complete_graph",
"(",
"k",
")",
"g1",
"=",
"nx",
".",
"cartesian_product",
"(",
"nx",
".",
"create_empty_copy",
"(",
"graph",
")",
",",
"K",
")",
"g2",
"=",
"nx",
".",
"cartesian_product",
"(",
"graph",
",",
"nx",
".",
"create_empty_copy",
"(",
"K",
")",
")",
"return",
"nx",
".",
"compose",
"(",
"g1",
",",
"g2",
")"
] |
05cac6db180adf8223a613dff808248e3048b07d
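A quick sanity check of the interaction graph's shape, using `graph_coloring_qubo` as defined above on a 5-cycle with k=4: one variable per (vertex, color) pair, one K4 per vertex, and four disjoint copies of the cycle:

import networkx as nx

A = nx.cycle_graph(5)
Q = graph_coloring_qubo(A, 4)
print(Q.number_of_nodes())  # 20 = 5 vertices * 4 colors
print(Q.number_of_edges())  # 50 = 5 * 6 clique edges + 4 * 5 cycle edges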
|
test
|
chimera_blocks
|
Generator for blocks for a chimera block quotient
|
examples/fourcolor.py
|
def chimera_blocks(M=16, N=16, L=4):
"""
Generator for blocks for a chimera block quotient
"""
for x in xrange(M):
for y in xrange(N):
for u in (0, 1):
yield tuple((x, y, u, k) for k in xrange(L))
|
def chimera_blocks(M=16, N=16, L=4):
"""
Generator for blocks for a chimera block quotient
"""
for x in xrange(M):
for y in xrange(N):
for u in (0, 1):
yield tuple((x, y, u, k) for k in xrange(L))
|
[
"Generator",
"for",
"blocks",
"for",
"a",
"chimera",
"block",
"quotient"
] |
dwavesystems/minorminer
|
python
|
https://github.com/dwavesystems/minorminer/blob/05cac6db180adf8223a613dff808248e3048b07d/examples/fourcolor.py#L73-L80
|
[
"def",
"chimera_blocks",
"(",
"M",
"=",
"16",
",",
"N",
"=",
"16",
",",
"L",
"=",
"4",
")",
":",
"for",
"x",
"in",
"xrange",
"(",
"M",
")",
":",
"for",
"y",
"in",
"xrange",
"(",
"N",
")",
":",
"for",
"u",
"in",
"(",
"0",
",",
"1",
")",
":",
"yield",
"tuple",
"(",
"(",
"x",
",",
"y",
",",
"u",
",",
"k",
")",
"for",
"k",
"in",
"xrange",
"(",
"L",
")",
")"
] |
05cac6db180adf8223a613dff808248e3048b07d
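`chimera_blocks` calls `xrange`, so the example module targets Python 2; under Python 3, alias it before use. For a single unit cell the generator yields one block per orientation:

xrange = range  # Python 3 shim for this Python 2 example

print(list(chimera_blocks(M=1, N=1, L=2)))
# [((0, 0, 0, 0), (0, 0, 0, 1)), ((0, 0, 1, 0), (0, 0, 1, 1))]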
|
test
|
chimera_block_quotient
|
Extract the blocks from a graph, and return a
block-quotient graph according to the acceptability
functions block_good and eblock_good
Inputs:
G: a networkx graph
blocks: a tuple of tuples
|
examples/fourcolor.py
|
def chimera_block_quotient(G, blocks):
"""
Extract the blocks from a graph, and return a
block-quotient graph according to the acceptability
functions block_good and eblock_good
Inputs:
G: a networkx graph
blocks: a tuple of tuples
"""
from networkx import Graph
from itertools import product
BG = Graph()
blockid = {}
for i, b in enumerate(blocks):
BG.add_node(i)
if not b or not all(G.has_node(x) for x in b):
continue
for q in b:
if q in blockid:
raise RuntimeError("two blocks overlap")
blockid[q] = i
for q, u in blockid.items():
ublock = blocks[u]
for p in G[q]:
if p not in blockid:
continue
v = blockid[p]
if BG.has_edge(u, v) or u == v:
continue
vblock = blocks[v]
if ublock[0][2] == vblock[0][2]:
block_edges = zip(ublock, vblock)
else:
block_edges = product(ublock, vblock)
if all(G.has_edge(x, y) for x, y in block_edges):
BG.add_edge(u, v)
return BG
|
def chimera_block_quotient(G, blocks):
"""
Extract the blocks from a graph, and return a
block-quotient graph according to the acceptability
functions block_good and eblock_good
Inputs:
G: a networkx graph
blocks: a tuple of tuples
"""
from networkx import Graph
from itertools import product
BG = Graph()
blockid = {}
for i, b in enumerate(blocks):
BG.add_node(i)
if not b or not all(G.has_node(x) for x in b):
continue
for q in b:
if q in blockid:
raise RuntimeError("two blocks overlap")
blockid[q] = i
for q, u in blockid.items():
ublock = blocks[u]
for p in G[q]:
if p not in blockid:
continue
v = blockid[p]
if BG.has_edge(u, v) or u == v:
continue
vblock = blocks[v]
if ublock[0][2] == vblock[0][2]:
block_edges = zip(ublock, vblock)
else:
block_edges = product(ublock, vblock)
if all(G.has_edge(x, y) for x, y in block_edges):
BG.add_edge(u, v)
return BG
|
[
"Extract",
"the",
"blocks",
"from",
"a",
"graph",
"and",
"returns",
"a",
"block",
"-",
"quotient",
"graph",
"according",
"to",
"the",
"acceptability",
"functions",
"block_good",
"and",
"eblock_good"
] |
dwavesystems/minorminer
|
python
|
https://github.com/dwavesystems/minorminer/blob/05cac6db180adf8223a613dff808248e3048b07d/examples/fourcolor.py#L83-L126
|
[
"def",
"chimera_block_quotient",
"(",
"G",
",",
"blocks",
")",
":",
"from",
"networkx",
"import",
"Graph",
"from",
"itertools",
"import",
"product",
"BG",
"=",
"Graph",
"(",
")",
"blockid",
"=",
"{",
"}",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"blocks",
")",
":",
"BG",
".",
"add_node",
"(",
"i",
")",
"if",
"not",
"b",
"or",
"not",
"all",
"(",
"G",
".",
"has_node",
"(",
"x",
")",
"for",
"x",
"in",
"b",
")",
":",
"continue",
"for",
"q",
"in",
"b",
":",
"if",
"q",
"in",
"blockid",
":",
"raise",
"(",
"RuntimeError",
",",
"\"two blocks overlap\"",
")",
"blockid",
"[",
"q",
"]",
"=",
"i",
"for",
"q",
",",
"u",
"in",
"blockid",
".",
"items",
"(",
")",
":",
"ublock",
"=",
"blocks",
"[",
"u",
"]",
"for",
"p",
"in",
"G",
"[",
"q",
"]",
":",
"if",
"p",
"not",
"in",
"blockid",
":",
"continue",
"v",
"=",
"blockid",
"[",
"p",
"]",
"if",
"BG",
".",
"has_edge",
"(",
"u",
",",
"v",
")",
"or",
"u",
"==",
"v",
":",
"continue",
"vblock",
"=",
"blocks",
"[",
"v",
"]",
"if",
"ublock",
"[",
"0",
"]",
"[",
"2",
"]",
"==",
"vblock",
"[",
"0",
"]",
"[",
"2",
"]",
":",
"block_edges",
"=",
"zip",
"(",
"ublock",
",",
"vblock",
")",
"else",
":",
"block_edges",
"=",
"product",
"(",
"ublock",
",",
"vblock",
")",
"if",
"all",
"(",
"G",
".",
"has_edge",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"block_edges",
")",
":",
"BG",
".",
"add_edge",
"(",
"u",
",",
"v",
")",
"return",
"BG"
] |
05cac6db180adf8223a613dff808248e3048b07d
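A sketch of building the quotient over a coordinate-labelled Chimera target; `dwave_networkx.chimera_graph` with `coordinates=True` yields the (x, y, u, k) node labels the blocks expect (assumptions: dwave_networkx is installed, and `xrange` is aliased to `range` as above when on Python 3):

import dwave_networkx as dnx

target = dnx.chimera_graph(2, 2, 4, coordinates=True)  # nodes are (x, y, u, k)
blocks = list(chimera_blocks(M=2, N=2, L=4))
BG = chimera_block_quotient(target, blocks)
print(BG.number_of_nodes())  # 8 = 2 * 2 cells * 2 orientations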
|
test
|
embed_with_quotient
|
Produce an embedding in target_graph suitable to
check if source_graph is 4-colorable. More generally,
if target_graph is a (M,N,L) Chimera subgraph, the
test is for L-colorability. This depends heavily upon
the Chimera structure
Inputs:
source_graph, target_graph: networkx graphs
M,N,L: integers defining the base chimera topology
Outputs:
emb: a dictionary mapping (v,i)
|
examples/fourcolor.py
|
def embed_with_quotient(source_graph, target_graph, M=16, N=16, L=4, **args):
"""
Produce an embedding in target_graph suitable to
check if source_graph is 4-colorable. More generally,
if target_graph is a (M,N,L) Chimera subgraph, the
test is for L-colorability. This depends heavily upon
the Chimera structure
Inputs:
source_graph, target_graph: networkx graphs
M,N,L: integers defining the base chimera topology
Outputs:
emb: a dictionary mapping (v,i)
"""
from random import sample
blocks = list(chimera_blocks(M, N, L))
BG = chimera_block_quotient(target_graph, blocks)
ublocks = {block: (block[0][2], i)
for (i, block) in enumerate(blocks) if BG.has_node(i)}
source_e = list(source_graph.edges())
source_n = {x for e in source_e for x in e}
fabric_e = list(BG.edges())
# Construct the hints:
# Goal: each source node must be connected to one horizontal block and one
# vertical block (by Chimera structure, each source node will
# contain a full (horizontal and vertical) unit cell
# Construction:
# 0. for each source node `z`, construct two dummy nodes (z,0) and (z,1)
# in both the source and target graphs used by the embedder
# 1. fix the embedding `(z,u) -> (z,u)` for all dummy nodes
# 2. for each target block `i` with orientation `u`, and for each source
# node `z`, add the target edge `((z,u), i)`
# 3. for each source node `z` and each orientation `u`, add the source
# edge `((z,u), z)`
fix_chains = {}
for z in source_n:
for u in (0, 1):
source_e.append(((z, u), z))
fix_chains[z, u] = [(z, u)]
for u, i in ublocks.values():
fabric_e.append(((z, u), i))
# first, grab a few embeddings in the quotient graph. this is super fast
embs = filter(None, [find_embedding(source_e, fabric_e,
fixed_chains=fix_chains,
chainlength_patience=0,
**args) for _ in range(10)])
# select the best-looking candidate so far
emb = min(embs, key=lambda e: sorted((len(c)
for c in e.values()), reverse=True))
# work down the chainlengths in our embedding
for _ in range(10):
emb = find_embedding(source_e, fabric_e,
fixed_chains=fix_chains,
initial_chains=emb,
chainlength_patience=3,
skip_initialization=True,
**args)
# next, translate the block-embedding to a qubit-embedding
newemb = {}
for v in source_n:
for k in range(L):
newemb[v, k] = [blocks[i][k] for i in emb[v]]
return newemb
|
def embed_with_quotient(source_graph, target_graph, M=16, N=16, L=4, **args):
"""
Produce an embedding in target_graph suitable to
check if source_graph is 4-colorable. More generally,
if target_graph is a (M,N,L) Chimera subgraph, the
test is for L-colorability. This depends heavily upon
the Chimera structure
Inputs:
source_graph, target_graph: networkx graphs
M,N,L: integers defining the base chimera topology
Outputs:
emb: a dictionary mapping (v,i)
"""
from random import sample
blocks = list(chimera_blocks(M, N, L))
BG = chimera_block_quotient(target_graph, blocks)
ublocks = {block: (block[0][2], i)
for (i, block) in enumerate(blocks) if BG.has_node(i)}
source_e = list(source_graph.edges())
source_n = {x for e in source_e for x in e}
fabric_e = list(BG.edges())
# Construct the hints:
# Goal: each source node must be connected to one horizontal block and one
# vertical block (by Chimera structure, each source node will
# contain a full (horizontal and vertical) unit cell
# Construction:
# 0. for each source node `z`, construct two dummy nodes (z,0) and (z,1)
# in both the source and target graphs used by the embedder
# 1. fix the embedding `(z,u) -> (z,u)` for all dummy nodes
# 2. for each target block `i` with orientation `u`, and for each source
# node `z`, add the target edge `((z,u), i)`
# 3. for each source node `z` and each orientation `u`, add the source
# edge `((z,u), z)`
fix_chains = {}
for z in source_n:
for u in (0, 1):
source_e.append(((z, u), z))
fix_chains[z, u] = [(z, u)]
for u, i in ublocks.values():
fabric_e.append(((z, u), i))
# first, grab a few embeddings in the quotient graph. this is super fast
embs = filter(None, [find_embedding(source_e, fabric_e,
fixed_chains=fix_chains,
chainlength_patience=0,
**args) for _ in range(10)])
# select the best-looking candidate so far
emb = min(embs, key=lambda e: sorted((len(c)
for c in e.values()), reverse=True))
# work down the chainlengths in our embedding
for _ in range(10):
emb = find_embedding(source_e, fabric_e,
fixed_chains=fix_chains,
initial_chains=emb,
chainlength_patience=3,
skip_initialization=True,
**args)
# next, translate the block-embedding to a qubit-embedding
newemb = {}
for v in source_n:
for k in range(L):
newemb[v, k] = [blocks[i][k] for i in emb[v]]
return newemb
|
[
"Produce",
"an",
"embedding",
"in",
"target_graph",
"suitable",
"to",
"check",
"if",
"source_graph",
"is",
"4",
"-",
"colorable",
".",
"More",
"generally",
"if",
"target_graph",
"is",
"a",
"(",
"M",
"N",
"L",
")",
"Chimera",
"subgraph",
"the",
"test",
"is",
"for",
"L",
"-",
"colorability",
".",
"This",
"depends",
"heavily",
"upon",
"the",
"Chimera",
"structure"
] |
dwavesystems/minorminer
|
python
|
https://github.com/dwavesystems/minorminer/blob/05cac6db180adf8223a613dff808248e3048b07d/examples/fourcolor.py#L129-L202
|
[
"def",
"embed_with_quotient",
"(",
"source_graph",
",",
"target_graph",
",",
"M",
"=",
"16",
",",
"N",
"=",
"16",
",",
"L",
"=",
"4",
",",
"*",
"*",
"args",
")",
":",
"from",
"random",
"import",
"sample",
"blocks",
"=",
"list",
"(",
"chimera_blocks",
"(",
"M",
",",
"N",
",",
"L",
")",
")",
"BG",
"=",
"chimera_block_quotient",
"(",
"target_graph",
",",
"blocks",
")",
"ublocks",
"=",
"{",
"block",
":",
"(",
"block",
"[",
"0",
"]",
"[",
"2",
"]",
",",
"i",
")",
"for",
"(",
"i",
",",
"block",
")",
"in",
"enumerate",
"(",
"blocks",
")",
"if",
"BG",
".",
"has_node",
"(",
"i",
")",
"}",
"source_e",
"=",
"list",
"(",
"source_graph",
".",
"edges",
"(",
")",
")",
"source_n",
"=",
"{",
"x",
"for",
"e",
"in",
"source_e",
"for",
"x",
"in",
"e",
"}",
"fabric_e",
"=",
"list",
"(",
"BG",
".",
"edges",
"(",
")",
")",
"# Construct the hints:",
"# Goal: each source node must be connected to one horizontal block and one",
"# vertical block (by Chimera structure, each source node will",
"# contain a full (horizontal and vertical) unit cell",
"# Construction:",
"# 0. for each source node `z`, construct two dummy nodes (z,0) and (z,1)",
"# in both the source and target graphs used by the embedder",
"# 1. fix the embedding `(z,u) -> (z,u)` for all dummy nodes",
"# 2. for each target block `i` with orientation `u`, and for each source",
"# node `z`, add the target edge `((z,u), i)`",
"# 3. for each source node `z` and each orientation `u`, add the source",
"# edge `((z,u), z)`",
"fix_chains",
"=",
"{",
"}",
"for",
"z",
"in",
"source_n",
":",
"for",
"u",
"in",
"(",
"0",
",",
"1",
")",
":",
"source_e",
".",
"append",
"(",
"(",
"(",
"z",
",",
"u",
")",
",",
"z",
")",
")",
"fix_chains",
"[",
"z",
",",
"u",
"]",
"=",
"[",
"(",
"z",
",",
"u",
")",
"]",
"for",
"u",
",",
"i",
"in",
"ublocks",
".",
"values",
"(",
")",
":",
"fabric_e",
".",
"append",
"(",
"(",
"(",
"z",
",",
"u",
")",
",",
"i",
")",
")",
"# first, grab a few embeddings in the quotient graph. this is super fast",
"embs",
"=",
"filter",
"(",
"None",
",",
"[",
"find_embedding",
"(",
"source_e",
",",
"fabric_e",
",",
"fixed_chains",
"=",
"fix_chains",
",",
"chainlength_patience",
"=",
"0",
",",
"*",
"*",
"args",
")",
"for",
"_",
"in",
"range",
"(",
"10",
")",
"]",
")",
"# select the best-looking candidate so far",
"emb",
"=",
"min",
"(",
"embs",
",",
"key",
"=",
"lambda",
"e",
":",
"sorted",
"(",
"(",
"len",
"(",
"c",
")",
"for",
"c",
"in",
"e",
".",
"values",
"(",
")",
")",
",",
"reverse",
"=",
"True",
")",
")",
"# work down the chainlengths in our embeding",
"for",
"_",
"in",
"range",
"(",
"10",
")",
":",
"emb",
"=",
"find_embedding",
"(",
"source_e",
",",
"fabric_e",
",",
"fixed_chains",
"=",
"fix_chains",
",",
"initial_chains",
"=",
"emb",
",",
"chainlength_patience",
"=",
"3",
",",
"skip_initialization",
"=",
"True",
",",
"*",
"*",
"args",
")",
"# next, translate the block-embedding to a qubit-embedding",
"newemb",
"=",
"{",
"}",
"for",
"v",
"in",
"source_n",
":",
"for",
"k",
"in",
"range",
"(",
"L",
")",
":",
"newemb",
"[",
"v",
",",
"k",
"]",
"=",
"[",
"blocks",
"[",
"i",
"]",
"[",
"k",
"]",
"for",
"i",
"in",
"emb",
"[",
"v",
"]",
"]",
"return",
"newemb"
] |
05cac6db180adf8223a613dff808248e3048b07d
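End to end, the defaults expect a full-scale coordinate-labelled Chimera target (M=N=16, L=4); a hedged sketch with the Petersen graph as the source (assumes minorminer's find_embedding is importable, as in the example file):

import networkx as nx
import dwave_networkx as dnx

source = nx.petersen_graph()
target = dnx.chimera_graph(16, 16, 4, coordinates=True)
emb = embed_with_quotient(source, target)
# keys are (vertex, color) pairs; values are lists of (x, y, u, k) qubits
print(len(emb))  # 40 = 10 vertices * 4 colors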
|
test
|
enumerate_resonance_smiles
|
Return a set of resonance forms as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible resonance form.
:rtype: set of strings.
|
molvs/resonance.py
|
def enumerate_resonance_smiles(smiles):
"""Return a set of resonance forms as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible resonance form.
:rtype: set of strings.
"""
mol = Chem.MolFromSmiles(smiles)
#Chem.SanitizeMol(mol) # MolFromSmiles does Sanitize by default
mesomers = ResonanceEnumerator().enumerate(mol)
return {Chem.MolToSmiles(m, isomericSmiles=True) for m in mesomers}
|
def enumerate_resonance_smiles(smiles):
"""Return a set of resonance forms as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible resonance form.
:rtype: set of strings.
"""
mol = Chem.MolFromSmiles(smiles)
#Chem.SanitizeMol(mol) # MolFromSmiles does Sanitize by default
mesomers = ResonanceEnumerator().enumerate(mol)
return {Chem.MolToSmiles(m, isomericSmiles=True) for m in mesomers}
|
[
"Return",
"a",
"set",
"of",
"resonance",
"forms",
"as",
"SMILES",
"strings",
"given",
"a",
"SMILES",
"string",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/resonance.py#L81-L91
|
[
"def",
"enumerate_resonance_smiles",
"(",
"smiles",
")",
":",
"mol",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"smiles",
")",
"#Chem.SanitizeMol(mol) # MolFromSmiles does Sanitize by default",
"mesomers",
"=",
"ResonanceEnumerator",
"(",
")",
".",
"enumerate",
"(",
"mol",
")",
"return",
"{",
"Chem",
".",
"MolToSmiles",
"(",
"m",
",",
"isomericSmiles",
"=",
"True",
")",
"for",
"m",
"in",
"mesomers",
"}"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
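A small RDKit-backed example (assumption: rdkit is installed); benzoate's delocalized carboxylate is a classic case, and the returned set holds one canonical SMILES per distinct resonance form:

forms = enumerate_resonance_smiles('[O-]C(=O)c1ccccc1')
for smiles in sorted(forms):
    print(smiles)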
|
test
|
ResonanceEnumerator.enumerate
|
Enumerate all possible resonance forms and return them as a list.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: A list of all possible resonance forms of the molecule.
:rtype: list of rdkit.Chem.rdchem.Mol
|
molvs/resonance.py
|
def enumerate(self, mol):
"""Enumerate all possible resonance forms and return them as a list.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: A list of all possible resonance forms of the molecule.
:rtype: list of rdkit.Chem.rdchem.Mol
"""
flags = 0
if self.kekule_all:
flags = flags | Chem.KEKULE_ALL
if self.allow_incomplete_octets:
flags = flags | Chem.ALLOW_INCOMPLETE_OCTETS
if self.allow_charge_separation:
flags = flags | Chem.ALLOW_CHARGE_SEPARATION
if self.unconstrained_anions:
flags = flags | Chem.UNCONSTRAINED_ANIONS
if self.unconstrained_cations:
flags = flags | Chem.UNCONSTRAINED_CATIONS
results = []
for result in Chem.ResonanceMolSupplier(mol, flags=flags, maxStructs=self.max_structures):
# This seems necessary? ResonanceMolSupplier only does a partial sanitization
Chem.SanitizeMol(result)
results.append(result)
return results
|
def enumerate(self, mol):
"""Enumerate all possible resonance forms and return them as a list.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: A list of all possible resonance forms of the molecule.
:rtype: list of rdkit.Chem.rdchem.Mol
"""
flags = 0
if self.kekule_all:
flags = flags | Chem.KEKULE_ALL
if self.allow_incomplete_octets:
flags = flags | Chem.ALLOW_INCOMPLETE_OCTETS
if self.allow_charge_separation:
flags = flags | Chem.ALLOW_CHARGE_SEPARATION
if self.unconstrained_anions:
flags = flags | Chem.UNCONSTRAINED_ANIONS
if self.unconstrained_cations:
flags = flags | Chem.UNCONSTRAINED_CATIONS
results = []
for result in Chem.ResonanceMolSupplier(mol, flags=flags, maxStructs=self.max_structures):
# This seems necessary? ResonanceMolSupplier only does a partial sanitization
Chem.SanitizeMol(result)
results.append(result)
return results
|
[
"Enumerate",
"all",
"possible",
"resonance",
"forms",
"and",
"return",
"them",
"as",
"a",
"list",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/resonance.py#L52-L76
|
[
"def",
"enumerate",
"(",
"self",
",",
"mol",
")",
":",
"flags",
"=",
"0",
"if",
"self",
".",
"kekule_all",
":",
"flags",
"=",
"flags",
"|",
"Chem",
".",
"KEKULE_ALL",
"if",
"self",
".",
"allow_incomplete_octets",
":",
"flags",
"=",
"flags",
"|",
"Chem",
".",
"ALLOW_INCOMPLETE_OCTETS",
"if",
"self",
".",
"allow_charge_separation",
":",
"flags",
"=",
"flags",
"|",
"Chem",
".",
"ALLOW_CHARGE_SEPARATION",
"if",
"self",
".",
"unconstrained_anions",
":",
"flags",
"=",
"flags",
"|",
"Chem",
".",
"UNCONSTRAINED_ANIONS",
"if",
"self",
".",
"unconstrained_cations",
":",
"flags",
"=",
"flags",
"|",
"Chem",
".",
"UNCONSTRAINED_CATIONS",
"results",
"=",
"[",
"]",
"for",
"result",
"in",
"Chem",
".",
"ResonanceMolSupplier",
"(",
"mol",
",",
"flags",
"=",
"flags",
",",
"maxStructs",
"=",
"self",
".",
"max_structures",
")",
":",
"# This seems necessary? ResonanceMolSupplier only does a partial sanitization",
"Chem",
".",
"SanitizeMol",
"(",
"result",
")",
"results",
".",
"append",
"(",
"result",
")",
"return",
"results"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
test
|
Normalizer.normalize
|
Apply a series of Normalization transforms to correct functional groups and recombine charges.
A series of transforms are applied to the molecule. For each Normalization, the transform is applied repeatedly
until no further changes occur. If any changes occurred, we go back and start from the first Normalization
again, in case the changes mean an earlier transform is now applicable. The molecule is returned once the entire
series of Normalizations cause no further changes or if max_restarts (default 200) is reached.
:param mol: The molecule to normalize.
:type mol: rdkit.Chem.rdchem.Mol
:return: The normalized fragment.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/normalize.py
|
def normalize(self, mol):
"""Apply a series of Normalization transforms to correct functional groups and recombine charges.
A series of transforms are applied to the molecule. For each Normalization, the transform is applied repeatedly
until no further changes occur. If any changes occurred, we go back and start from the first Normalization
again, in case the changes mean an earlier transform is now applicable. The molecule is returned once the entire
series of Normalizations cause no further changes or if max_restarts (default 200) is reached.
:param mol: The molecule to normalize.
:type mol: rdkit.Chem.rdchem.Mol
:return: The normalized fragment.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running Normalizer')
# Normalize each fragment separately to get around quirky RunReactants behaviour
fragments = []
for fragment in Chem.GetMolFrags(mol, asMols=True):
fragments.append(self._normalize_fragment(fragment))
# Join normalized fragments into a single molecule again
outmol = fragments.pop()
for fragment in fragments:
outmol = Chem.CombineMols(outmol, fragment)
Chem.SanitizeMol(outmol)
return outmol
|
def normalize(self, mol):
"""Apply a series of Normalization transforms to correct functional groups and recombine charges.
A series of transforms are applied to the molecule. For each Normalization, the transform is applied repeatedly
until no further changes occur. If any changes occurred, we go back and start from the first Normalization
again, in case the changes mean an earlier transform is now applicable. The molecule is returned once the entire
series of Normalizations cause no further changes or if max_restarts (default 200) is reached.
:param mol: The molecule to normalize.
:type mol: rdkit.Chem.rdchem.Mol
:return: The normalized fragment.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running Normalizer')
# Normalize each fragment separately to get around quirky RunReactants behaviour
fragments = []
for fragment in Chem.GetMolFrags(mol, asMols=True):
fragments.append(self._normalize_fragment(fragment))
# Join normalized fragments into a single molecule again
outmol = fragments.pop()
for fragment in fragments:
outmol = Chem.CombineMols(outmol, fragment)
Chem.SanitizeMol(outmol)
return outmol
|
[
"Apply",
"a",
"series",
"of",
"Normalization",
"transforms",
"to",
"correct",
"functional",
"groups",
"and",
"recombine",
"charges",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/normalize.py#L114-L137
|
[
"def",
"normalize",
"(",
"self",
",",
"mol",
")",
":",
"log",
".",
"debug",
"(",
"'Running Normalizer'",
")",
"# Normalize each fragment separately to get around quirky RunReactants behaviour",
"fragments",
"=",
"[",
"]",
"for",
"fragment",
"in",
"Chem",
".",
"GetMolFrags",
"(",
"mol",
",",
"asMols",
"=",
"True",
")",
":",
"fragments",
".",
"append",
"(",
"self",
".",
"_normalize_fragment",
"(",
"fragment",
")",
")",
"# Join normalized fragments into a single molecule again",
"outmol",
"=",
"fragments",
".",
"pop",
"(",
")",
"for",
"fragment",
"in",
"fragments",
":",
"outmol",
"=",
"Chem",
".",
"CombineMols",
"(",
"outmol",
",",
"fragment",
")",
"Chem",
".",
"SanitizeMol",
"(",
"outmol",
")",
"return",
"outmol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
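A hedged usage sketch; the sulfone correction is among MolVS's stock normalizations, so the charge-separated input below should come back neutral (treat the printed form as an expectation, not a guarantee):

from rdkit import Chem

mol = Chem.MolFromSmiles('C[S+2]([O-])([O-])C')  # charge-separated sulfone
normalized = Normalizer().normalize(mol)
print(Chem.MolToSmiles(normalized))  # expected: CS(C)(=O)=O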
|
test
|
Normalizer._apply_transform
|
Repeatedly apply normalization transform to molecule until no changes occur.
It is possible for multiple products to be produced when a rule is applied. The rule is applied repeatedly to
each of the products, until no further changes occur or after 20 attempts. If there are multiple unique products
after the final application, the first product (sorted alphabetically by SMILES) is chosen.
|
molvs/normalize.py
|
def _apply_transform(self, mol, rule):
"""Repeatedly apply normalization transform to molecule until no changes occur.
It is possible for multiple products to be produced when a rule is applied. The rule is applied repeatedly to
each of the products, until no further changes occur or after 20 attempts. If there are multiple unique products
after the final application, the first product (sorted alphabetically by SMILES) is chosen.
"""
mols = [mol]
for n in six.moves.range(20):
products = {}
for mol in mols:
for product in [x[0] for x in rule.RunReactants((mol,))]:
if Chem.SanitizeMol(product, catchErrors=True) == 0:
products[Chem.MolToSmiles(product, isomericSmiles=True)] = product
if products:
mols = [products[s] for s in sorted(products)]
else:
# If n == 0, the rule was not applicable and we return None
return mols[0] if n > 0 else None
|
def _apply_transform(self, mol, rule):
"""Repeatedly apply normalization transform to molecule until no changes occur.
It is possible for multiple products to be produced when a rule is applied. The rule is applied repeatedly to
each of the products, until no further changes occur or after 20 attempts. If there are multiple unique products
after the final application, the first product (sorted alphabetically by SMILES) is chosen.
"""
mols = [mol]
for n in six.moves.range(20):
products = {}
for mol in mols:
for product in [x[0] for x in rule.RunReactants((mol,))]:
if Chem.SanitizeMol(product, catchErrors=True) == 0:
products[Chem.MolToSmiles(product, isomericSmiles=True)] = product
if products:
mols = [products[s] for s in sorted(products)]
else:
# If n == 0, the rule was not applicable and we return None
return mols[0] if n > 0 else None
|
[
"Repeatedly",
"apply",
"normalization",
"transform",
"to",
"molecule",
"until",
"no",
"changes",
"occur",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/normalize.py#L156-L174
|
[
"def",
"_apply_transform",
"(",
"self",
",",
"mol",
",",
"rule",
")",
":",
"mols",
"=",
"[",
"mol",
"]",
"for",
"n",
"in",
"six",
".",
"moves",
".",
"range",
"(",
"20",
")",
":",
"products",
"=",
"{",
"}",
"for",
"mol",
"in",
"mols",
":",
"for",
"product",
"in",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"rule",
".",
"RunReactants",
"(",
"(",
"mol",
",",
")",
")",
"]",
":",
"if",
"Chem",
".",
"SanitizeMol",
"(",
"product",
",",
"catchErrors",
"=",
"True",
")",
"==",
"0",
":",
"products",
"[",
"Chem",
".",
"MolToSmiles",
"(",
"product",
",",
"isomericSmiles",
"=",
"True",
")",
"]",
"=",
"product",
"if",
"products",
":",
"mols",
"=",
"[",
"products",
"[",
"s",
"]",
"for",
"s",
"in",
"sorted",
"(",
"products",
")",
"]",
"else",
":",
"# If n == 0, the rule was not applicable and we return None",
"return",
"mols",
"[",
"0",
"]",
"if",
"n",
">",
"0",
"else",
"None"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
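The control flow of _apply_transform is a bounded fixed-point iteration: keep applying a rule to every current product, stop when nothing new is produced, and break ties alphabetically. A hypothetical RDKit-free sketch of the same pattern (all names here are illustrative, not MolVS API):

def apply_until_stable(value, step, max_iterations=20):
    # step(v) returns an iterable of candidate results; empty means the rule no longer applies.
    current = {value}
    for n in range(max_iterations):
        candidates = set()
        for v in current:
            candidates.update(step(v))
        if not candidates:
            # An empty first pass means the rule never applied at all.
            return sorted(current)[0] if n > 0 else None
        current = candidates
    # Mirror the original: give up (returning None) if the iteration cap is hit.
    return None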
test
|
TautomerCanonicalizer.canonicalize
|
Return a canonical tautomer by enumerating and scoring all possible tautomers.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The canonical tautomer.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/tautomer.py
|
def canonicalize(self, mol):
"""Return a canonical tautomer by enumerating and scoring all possible tautomers.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The canonical tautomer.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: Overload the mol parameter to pass a list of pre-enumerated tautomers
tautomers = self._enumerate_tautomers(mol)
if len(tautomers) == 1:
return tautomers[0]
# Calculate score for each tautomer
highest = None
for t in tautomers:
smiles = Chem.MolToSmiles(t, isomericSmiles=True)
log.debug('Tautomer: %s', smiles)
score = 0
# Add aromatic ring scores
ssr = Chem.GetSymmSSSR(t)
for ring in ssr:
btypes = {t.GetBondBetweenAtoms(*pair).GetBondType() for pair in pairwise(ring)}
elements = {t.GetAtomWithIdx(idx).GetAtomicNum() for idx in ring}
if btypes == {BondType.AROMATIC}:
log.debug('Score +100 (aromatic ring)')
score += 100
if elements == {6}:
log.debug('Score +150 (carbocyclic aromatic ring)')
score += 150
# Add SMARTS scores
for tscore in self.scores:
for match in t.GetSubstructMatches(tscore.smarts):
log.debug('Score %+d (%s)', tscore.score, tscore.name)
score += tscore.score
# Add (P,S,Se,Te)-H scores
for atom in t.GetAtoms():
if atom.GetAtomicNum() in {15, 16, 34, 52}:
hs = atom.GetTotalNumHs()
if hs:
log.debug('Score %+d (%s-H bonds)', -hs, atom.GetSymbol())
score -= hs
# Set as highest if score higher or if score equal and smiles comes first alphabetically
if not highest or highest['score'] < score or (highest['score'] == score and smiles < highest['smiles']):
log.debug('New highest tautomer: %s (%s)', smiles, score)
highest = {'smiles': smiles, 'tautomer': t, 'score': score}
return highest['tautomer']
|
def canonicalize(self, mol):
"""Return a canonical tautomer by enumerating and scoring all possible tautomers.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The canonical tautomer.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: Overload the mol parameter to pass a list of pre-enumerated tautomers
tautomers = self._enumerate_tautomers(mol)
if len(tautomers) == 1:
return tautomers[0]
# Calculate score for each tautomer
highest = None
for t in tautomers:
smiles = Chem.MolToSmiles(t, isomericSmiles=True)
log.debug('Tautomer: %s', smiles)
score = 0
# Add aromatic ring scores
ssr = Chem.GetSymmSSSR(t)
for ring in ssr:
btypes = {t.GetBondBetweenAtoms(*pair).GetBondType() for pair in pairwise(ring)}
elements = {t.GetAtomWithIdx(idx).GetAtomicNum() for idx in ring}
if btypes == {BondType.AROMATIC}:
log.debug('Score +100 (aromatic ring)')
score += 100
if elements == {6}:
log.debug('Score +150 (carbocyclic aromatic ring)')
score += 150
# Add SMARTS scores
for tscore in self.scores:
for match in t.GetSubstructMatches(tscore.smarts):
log.debug('Score %+d (%s)', tscore.score, tscore.name)
score += tscore.score
# Add (P,S,Se,Te)-H scores
for atom in t.GetAtoms():
if atom.GetAtomicNum() in {15, 16, 34, 52}:
hs = atom.GetTotalNumHs()
if hs:
log.debug('Score %+d (%s-H bonds)', -hs, atom.GetSymbol())
score -= hs
# Set as highest if score higher or if score equal and smiles comes first alphabetically
if not highest or highest['score'] < score or (highest['score'] == score and smiles < highest['smiles']):
log.debug('New highest tautomer: %s (%s)', smiles, score)
highest = {'smiles': smiles, 'tautomer': t, 'score': score}
return highest['tautomer']
|
[
"Return",
"a",
"canonical",
"tautomer",
"by",
"enumerating",
"and",
"scoring",
"all",
"possible",
"tautomers",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/tautomer.py#L170-L215
|
[
"def",
"canonicalize",
"(",
"self",
",",
"mol",
")",
":",
"# TODO: Overload the mol parameter to pass a list of pre-enumerated tautomers",
"tautomers",
"=",
"self",
".",
"_enumerate_tautomers",
"(",
"mol",
")",
"if",
"len",
"(",
"tautomers",
")",
"==",
"1",
":",
"return",
"tautomers",
"[",
"0",
"]",
"# Calculate score for each tautomer",
"highest",
"=",
"None",
"for",
"t",
"in",
"tautomers",
":",
"smiles",
"=",
"Chem",
".",
"MolToSmiles",
"(",
"t",
",",
"isomericSmiles",
"=",
"True",
")",
"log",
".",
"debug",
"(",
"'Tautomer: %s'",
",",
"smiles",
")",
"score",
"=",
"0",
"# Add aromatic ring scores",
"ssr",
"=",
"Chem",
".",
"GetSymmSSSR",
"(",
"t",
")",
"for",
"ring",
"in",
"ssr",
":",
"btypes",
"=",
"{",
"t",
".",
"GetBondBetweenAtoms",
"(",
"*",
"pair",
")",
".",
"GetBondType",
"(",
")",
"for",
"pair",
"in",
"pairwise",
"(",
"ring",
")",
"}",
"elements",
"=",
"{",
"t",
".",
"GetAtomWithIdx",
"(",
"idx",
")",
".",
"GetAtomicNum",
"(",
")",
"for",
"idx",
"in",
"ring",
"}",
"if",
"btypes",
"==",
"{",
"BondType",
".",
"AROMATIC",
"}",
":",
"log",
".",
"debug",
"(",
"'Score +100 (aromatic ring)'",
")",
"score",
"+=",
"100",
"if",
"elements",
"==",
"{",
"6",
"}",
":",
"log",
".",
"debug",
"(",
"'Score +150 (carbocyclic aromatic ring)'",
")",
"score",
"+=",
"150",
"# Add SMARTS scores",
"for",
"tscore",
"in",
"self",
".",
"scores",
":",
"for",
"match",
"in",
"t",
".",
"GetSubstructMatches",
"(",
"tscore",
".",
"smarts",
")",
":",
"log",
".",
"debug",
"(",
"'Score %+d (%s)'",
",",
"tscore",
".",
"score",
",",
"tscore",
".",
"name",
")",
"score",
"+=",
"tscore",
".",
"score",
"# Add (P,S,Se,Te)-H scores",
"for",
"atom",
"in",
"t",
".",
"GetAtoms",
"(",
")",
":",
"if",
"atom",
".",
"GetAtomicNum",
"(",
")",
"in",
"{",
"15",
",",
"16",
",",
"34",
",",
"52",
"}",
":",
"hs",
"=",
"atom",
".",
"GetTotalNumHs",
"(",
")",
"if",
"hs",
":",
"log",
".",
"debug",
"(",
"'Score %+d (%s-H bonds)'",
",",
"-",
"hs",
",",
"atom",
".",
"GetSymbol",
"(",
")",
")",
"score",
"-=",
"hs",
"# Set as highest if score higher or if score equal and smiles comes first alphabetically",
"if",
"not",
"highest",
"or",
"highest",
"[",
"'score'",
"]",
"<",
"score",
"or",
"(",
"highest",
"[",
"'score'",
"]",
"==",
"score",
"and",
"smiles",
"<",
"highest",
"[",
"'smiles'",
"]",
")",
":",
"log",
".",
"debug",
"(",
"'New highest tautomer: %s (%s)'",
",",
"smiles",
",",
"score",
")",
"highest",
"=",
"{",
"'smiles'",
":",
"smiles",
",",
"'tautomer'",
":",
"t",
",",
"'score'",
":",
"score",
"}",
"return",
"highest",
"[",
"'tautomer'",
"]"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
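Usage sketch for the scoring-based canonicalization above (assumes MolVS and RDKit; the winning tautomer depends on the default transforms and scores):

from rdkit import Chem
from molvs.tautomer import TautomerCanonicalizer

canonicalizer = TautomerCanonicalizer()
# 2-hydroxypyridine and 2-pyridone are tautomers of each other; both inputs
# should converge on a single canonical form (typically the pyridone).
mol = Chem.MolFromSmiles('Oc1ccccn1')
canonical = canonicalizer.canonicalize(mol)
print(Chem.MolToSmiles(canonical))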
test
|
TautomerEnumerator.enumerate
|
Enumerate all possible tautomers and return them as a list.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: A list of all possible tautomers of the molecule.
:rtype: list of rdkit.Chem.rdchem.Mol
|
molvs/tautomer.py
|
def enumerate(self, mol):
"""Enumerate all possible tautomers and return them as a list.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: A list of all possible tautomers of the molecule.
:rtype: list of rdkit.Chem.rdchem.Mol
"""
smiles = Chem.MolToSmiles(mol, isomericSmiles=True)
tautomers = {smiles: copy.deepcopy(mol)}
# Create a kekulized form of the molecule to match the SMARTS against
kekulized = copy.deepcopy(mol)
Chem.Kekulize(kekulized)
kekulized = {smiles: kekulized}
done = set()
while len(tautomers) < self.max_tautomers:
for tsmiles in sorted(tautomers):
if tsmiles in done:
continue
for transform in self.transforms:
for match in kekulized[tsmiles].GetSubstructMatches(transform.tautomer):
# log.debug('Matched rule: %s to %s for %s', transform.name, tsmiles, match)
                    # Create a copy of the input molecule so we can modify it
# Use kekule form so bonds are explicitly single/double instead of aromatic
product = copy.deepcopy(kekulized[tsmiles])
# Remove a hydrogen from the first matched atom and add one to the last
first = product.GetAtomWithIdx(match[0])
last = product.GetAtomWithIdx(match[-1])
# log.debug('%s: H%s -> H%s' % (first.GetSymbol(), first.GetTotalNumHs(), first.GetTotalNumHs() - 1))
# log.debug('%s: H%s -> H%s' % (last.GetSymbol(), last.GetTotalNumHs(), last.GetTotalNumHs() + 1))
first.SetNumExplicitHs(max(0, first.GetTotalNumHs() - 1))
last.SetNumExplicitHs(last.GetTotalNumHs() + 1)
# Remove any implicit hydrogens from the first and last atoms now we have set the count explicitly
first.SetNoImplicit(True)
last.SetNoImplicit(True)
# Adjust bond orders
for bi, pair in enumerate(pairwise(match)):
if transform.bonds:
# Set the resulting bond types as manually specified in the transform
# log.debug('%s-%s: %s -> %s' % (product.GetAtomWithIdx(pair[0]).GetSymbol(), product.GetAtomWithIdx(pair[1]).GetSymbol(), product.GetBondBetweenAtoms(*pair).GetBondType(), transform.bonds[bi]))
product.GetBondBetweenAtoms(*pair).SetBondType(transform.bonds[bi])
else:
# If no manually specified bond types, just swap single and double bonds
current_bond_type = product.GetBondBetweenAtoms(*pair).GetBondType()
product.GetBondBetweenAtoms(*pair).SetBondType(BondType.DOUBLE if current_bond_type == BondType.SINGLE else BondType.SINGLE)
# log.debug('%s-%s: %s -> %s' % (product.GetAtomWithIdx(pair[0]).GetSymbol(), product.GetAtomWithIdx(pair[1]).GetSymbol(), current_bond_type, product.GetBondBetweenAtoms(*pair).GetBondType()))
# Adjust charges
if transform.charges:
for ci, idx in enumerate(match):
atom = product.GetAtomWithIdx(idx)
# log.debug('%s: C%s -> C%s' % (atom.GetSymbol(), atom.GetFormalCharge(), atom.GetFormalCharge() + transform.charges[ci]))
atom.SetFormalCharge(atom.GetFormalCharge() + transform.charges[ci])
try:
Chem.SanitizeMol(product)
smiles = Chem.MolToSmiles(product, isomericSmiles=True)
log.debug('Applied rule: %s to %s', transform.name, tsmiles)
if smiles not in tautomers:
log.debug('New tautomer produced: %s' % smiles)
kekulized_product = copy.deepcopy(product)
Chem.Kekulize(kekulized_product)
tautomers[smiles] = product
kekulized[smiles] = kekulized_product
else:
log.debug('Previous tautomer produced again: %s' % smiles)
except ValueError:
log.debug('ValueError Applying rule: %s', transform.name)
done.add(tsmiles)
if len(tautomers) == len(done):
break
else:
log.warning('Tautomer enumeration stopped at maximum %s', self.max_tautomers)
# Clean up stereochemistry
for tautomer in tautomers.values():
Chem.AssignStereochemistry(tautomer, force=True, cleanIt=True)
for bond in tautomer.GetBonds():
if bond.GetBondType() == BondType.DOUBLE and bond.GetStereo() > BondStereo.STEREOANY:
begin = bond.GetBeginAtomIdx()
end = bond.GetEndAtomIdx()
for othertautomer in tautomers.values():
if not othertautomer.GetBondBetweenAtoms(begin, end).GetBondType() == BondType.DOUBLE:
neighbours = tautomer.GetAtomWithIdx(begin).GetBonds() + tautomer.GetAtomWithIdx(end).GetBonds()
for otherbond in neighbours:
if otherbond.GetBondDir() in {BondDir.ENDUPRIGHT, BondDir.ENDDOWNRIGHT}:
otherbond.SetBondDir(BondDir.NONE)
Chem.AssignStereochemistry(tautomer, force=True, cleanIt=True)
log.debug('Removed stereochemistry from unfixed double bond')
break
return list(tautomers.values())
|
def enumerate(self, mol):
"""Enumerate all possible tautomers and return them as a list.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: A list of all possible tautomers of the molecule.
:rtype: list of rdkit.Chem.rdchem.Mol
"""
smiles = Chem.MolToSmiles(mol, isomericSmiles=True)
tautomers = {smiles: copy.deepcopy(mol)}
# Create a kekulized form of the molecule to match the SMARTS against
kekulized = copy.deepcopy(mol)
Chem.Kekulize(kekulized)
kekulized = {smiles: kekulized}
done = set()
while len(tautomers) < self.max_tautomers:
for tsmiles in sorted(tautomers):
if tsmiles in done:
continue
for transform in self.transforms:
for match in kekulized[tsmiles].GetSubstructMatches(transform.tautomer):
# log.debug('Matched rule: %s to %s for %s', transform.name, tsmiles, match)
                    # Create a copy of the input molecule so we can modify it
# Use kekule form so bonds are explicitly single/double instead of aromatic
product = copy.deepcopy(kekulized[tsmiles])
# Remove a hydrogen from the first matched atom and add one to the last
first = product.GetAtomWithIdx(match[0])
last = product.GetAtomWithIdx(match[-1])
# log.debug('%s: H%s -> H%s' % (first.GetSymbol(), first.GetTotalNumHs(), first.GetTotalNumHs() - 1))
# log.debug('%s: H%s -> H%s' % (last.GetSymbol(), last.GetTotalNumHs(), last.GetTotalNumHs() + 1))
first.SetNumExplicitHs(max(0, first.GetTotalNumHs() - 1))
last.SetNumExplicitHs(last.GetTotalNumHs() + 1)
# Remove any implicit hydrogens from the first and last atoms now we have set the count explicitly
first.SetNoImplicit(True)
last.SetNoImplicit(True)
# Adjust bond orders
for bi, pair in enumerate(pairwise(match)):
if transform.bonds:
# Set the resulting bond types as manually specified in the transform
# log.debug('%s-%s: %s -> %s' % (product.GetAtomWithIdx(pair[0]).GetSymbol(), product.GetAtomWithIdx(pair[1]).GetSymbol(), product.GetBondBetweenAtoms(*pair).GetBondType(), transform.bonds[bi]))
product.GetBondBetweenAtoms(*pair).SetBondType(transform.bonds[bi])
else:
# If no manually specified bond types, just swap single and double bonds
current_bond_type = product.GetBondBetweenAtoms(*pair).GetBondType()
product.GetBondBetweenAtoms(*pair).SetBondType(BondType.DOUBLE if current_bond_type == BondType.SINGLE else BondType.SINGLE)
# log.debug('%s-%s: %s -> %s' % (product.GetAtomWithIdx(pair[0]).GetSymbol(), product.GetAtomWithIdx(pair[1]).GetSymbol(), current_bond_type, product.GetBondBetweenAtoms(*pair).GetBondType()))
# Adjust charges
if transform.charges:
for ci, idx in enumerate(match):
atom = product.GetAtomWithIdx(idx)
# log.debug('%s: C%s -> C%s' % (atom.GetSymbol(), atom.GetFormalCharge(), atom.GetFormalCharge() + transform.charges[ci]))
atom.SetFormalCharge(atom.GetFormalCharge() + transform.charges[ci])
try:
Chem.SanitizeMol(product)
smiles = Chem.MolToSmiles(product, isomericSmiles=True)
log.debug('Applied rule: %s to %s', transform.name, tsmiles)
if smiles not in tautomers:
log.debug('New tautomer produced: %s' % smiles)
kekulized_product = copy.deepcopy(product)
Chem.Kekulize(kekulized_product)
tautomers[smiles] = product
kekulized[smiles] = kekulized_product
else:
log.debug('Previous tautomer produced again: %s' % smiles)
except ValueError:
log.debug('ValueError Applying rule: %s', transform.name)
done.add(tsmiles)
if len(tautomers) == len(done):
break
else:
log.warning('Tautomer enumeration stopped at maximum %s', self.max_tautomers)
# Clean up stereochemistry
for tautomer in tautomers.values():
Chem.AssignStereochemistry(tautomer, force=True, cleanIt=True)
for bond in tautomer.GetBonds():
if bond.GetBondType() == BondType.DOUBLE and bond.GetStereo() > BondStereo.STEREOANY:
begin = bond.GetBeginAtomIdx()
end = bond.GetEndAtomIdx()
for othertautomer in tautomers.values():
if not othertautomer.GetBondBetweenAtoms(begin, end).GetBondType() == BondType.DOUBLE:
neighbours = tautomer.GetAtomWithIdx(begin).GetBonds() + tautomer.GetAtomWithIdx(end).GetBonds()
for otherbond in neighbours:
if otherbond.GetBondDir() in {BondDir.ENDUPRIGHT, BondDir.ENDDOWNRIGHT}:
otherbond.SetBondDir(BondDir.NONE)
Chem.AssignStereochemistry(tautomer, force=True, cleanIt=True)
log.debug('Removed stereochemistry from unfixed double bond')
break
return list(tautomers.values())
|
[
"Enumerate",
"all",
"possible",
"tautomers",
"and",
"return",
"them",
"as",
"a",
"list",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/tautomer.py#L240-L327
|
[
"def",
"enumerate",
"(",
"self",
",",
"mol",
")",
":",
"smiles",
"=",
"Chem",
".",
"MolToSmiles",
"(",
"mol",
",",
"isomericSmiles",
"=",
"True",
")",
"tautomers",
"=",
"{",
"smiles",
":",
"copy",
".",
"deepcopy",
"(",
"mol",
")",
"}",
"# Create a kekulized form of the molecule to match the SMARTS against",
"kekulized",
"=",
"copy",
".",
"deepcopy",
"(",
"mol",
")",
"Chem",
".",
"Kekulize",
"(",
"kekulized",
")",
"kekulized",
"=",
"{",
"smiles",
":",
"kekulized",
"}",
"done",
"=",
"set",
"(",
")",
"while",
"len",
"(",
"tautomers",
")",
"<",
"self",
".",
"max_tautomers",
":",
"for",
"tsmiles",
"in",
"sorted",
"(",
"tautomers",
")",
":",
"if",
"tsmiles",
"in",
"done",
":",
"continue",
"for",
"transform",
"in",
"self",
".",
"transforms",
":",
"for",
"match",
"in",
"kekulized",
"[",
"tsmiles",
"]",
".",
"GetSubstructMatches",
"(",
"transform",
".",
"tautomer",
")",
":",
"# log.debug('Matched rule: %s to %s for %s', transform.name, tsmiles, match)",
"# Create a copy of in the input molecule so we can modify it",
"# Use kekule form so bonds are explicitly single/double instead of aromatic",
"product",
"=",
"copy",
".",
"deepcopy",
"(",
"kekulized",
"[",
"tsmiles",
"]",
")",
"# Remove a hydrogen from the first matched atom and add one to the last",
"first",
"=",
"product",
".",
"GetAtomWithIdx",
"(",
"match",
"[",
"0",
"]",
")",
"last",
"=",
"product",
".",
"GetAtomWithIdx",
"(",
"match",
"[",
"-",
"1",
"]",
")",
"# log.debug('%s: H%s -> H%s' % (first.GetSymbol(), first.GetTotalNumHs(), first.GetTotalNumHs() - 1))",
"# log.debug('%s: H%s -> H%s' % (last.GetSymbol(), last.GetTotalNumHs(), last.GetTotalNumHs() + 1))",
"first",
".",
"SetNumExplicitHs",
"(",
"max",
"(",
"0",
",",
"first",
".",
"GetTotalNumHs",
"(",
")",
"-",
"1",
")",
")",
"last",
".",
"SetNumExplicitHs",
"(",
"last",
".",
"GetTotalNumHs",
"(",
")",
"+",
"1",
")",
"# Remove any implicit hydrogens from the first and last atoms now we have set the count explicitly",
"first",
".",
"SetNoImplicit",
"(",
"True",
")",
"last",
".",
"SetNoImplicit",
"(",
"True",
")",
"# Adjust bond orders",
"for",
"bi",
",",
"pair",
"in",
"enumerate",
"(",
"pairwise",
"(",
"match",
")",
")",
":",
"if",
"transform",
".",
"bonds",
":",
"# Set the resulting bond types as manually specified in the transform",
"# log.debug('%s-%s: %s -> %s' % (product.GetAtomWithIdx(pair[0]).GetSymbol(), product.GetAtomWithIdx(pair[1]).GetSymbol(), product.GetBondBetweenAtoms(*pair).GetBondType(), transform.bonds[bi]))",
"product",
".",
"GetBondBetweenAtoms",
"(",
"*",
"pair",
")",
".",
"SetBondType",
"(",
"transform",
".",
"bonds",
"[",
"bi",
"]",
")",
"else",
":",
"# If no manually specified bond types, just swap single and double bonds",
"current_bond_type",
"=",
"product",
".",
"GetBondBetweenAtoms",
"(",
"*",
"pair",
")",
".",
"GetBondType",
"(",
")",
"product",
".",
"GetBondBetweenAtoms",
"(",
"*",
"pair",
")",
".",
"SetBondType",
"(",
"BondType",
".",
"DOUBLE",
"if",
"current_bond_type",
"==",
"BondType",
".",
"SINGLE",
"else",
"BondType",
".",
"SINGLE",
")",
"# log.debug('%s-%s: %s -> %s' % (product.GetAtomWithIdx(pair[0]).GetSymbol(), product.GetAtomWithIdx(pair[1]).GetSymbol(), current_bond_type, product.GetBondBetweenAtoms(*pair).GetBondType()))",
"# Adjust charges",
"if",
"transform",
".",
"charges",
":",
"for",
"ci",
",",
"idx",
"in",
"enumerate",
"(",
"match",
")",
":",
"atom",
"=",
"product",
".",
"GetAtomWithIdx",
"(",
"idx",
")",
"# log.debug('%s: C%s -> C%s' % (atom.GetSymbol(), atom.GetFormalCharge(), atom.GetFormalCharge() + transform.charges[ci]))",
"atom",
".",
"SetFormalCharge",
"(",
"atom",
".",
"GetFormalCharge",
"(",
")",
"+",
"transform",
".",
"charges",
"[",
"ci",
"]",
")",
"try",
":",
"Chem",
".",
"SanitizeMol",
"(",
"product",
")",
"smiles",
"=",
"Chem",
".",
"MolToSmiles",
"(",
"product",
",",
"isomericSmiles",
"=",
"True",
")",
"log",
".",
"debug",
"(",
"'Applied rule: %s to %s'",
",",
"transform",
".",
"name",
",",
"tsmiles",
")",
"if",
"smiles",
"not",
"in",
"tautomers",
":",
"log",
".",
"debug",
"(",
"'New tautomer produced: %s'",
"%",
"smiles",
")",
"kekulized_product",
"=",
"copy",
".",
"deepcopy",
"(",
"product",
")",
"Chem",
".",
"Kekulize",
"(",
"kekulized_product",
")",
"tautomers",
"[",
"smiles",
"]",
"=",
"product",
"kekulized",
"[",
"smiles",
"]",
"=",
"kekulized_product",
"else",
":",
"log",
".",
"debug",
"(",
"'Previous tautomer produced again: %s'",
"%",
"smiles",
")",
"except",
"ValueError",
":",
"log",
".",
"debug",
"(",
"'ValueError Applying rule: %s'",
",",
"transform",
".",
"name",
")",
"done",
".",
"add",
"(",
"tsmiles",
")",
"if",
"len",
"(",
"tautomers",
")",
"==",
"len",
"(",
"done",
")",
":",
"break",
"else",
":",
"log",
".",
"warning",
"(",
"'Tautomer enumeration stopped at maximum %s'",
",",
"self",
".",
"max_tautomers",
")",
"# Clean up stereochemistry",
"for",
"tautomer",
"in",
"tautomers",
".",
"values",
"(",
")",
":",
"Chem",
".",
"AssignStereochemistry",
"(",
"tautomer",
",",
"force",
"=",
"True",
",",
"cleanIt",
"=",
"True",
")",
"for",
"bond",
"in",
"tautomer",
".",
"GetBonds",
"(",
")",
":",
"if",
"bond",
".",
"GetBondType",
"(",
")",
"==",
"BondType",
".",
"DOUBLE",
"and",
"bond",
".",
"GetStereo",
"(",
")",
">",
"BondStereo",
".",
"STEREOANY",
":",
"begin",
"=",
"bond",
".",
"GetBeginAtomIdx",
"(",
")",
"end",
"=",
"bond",
".",
"GetEndAtomIdx",
"(",
")",
"for",
"othertautomer",
"in",
"tautomers",
".",
"values",
"(",
")",
":",
"if",
"not",
"othertautomer",
".",
"GetBondBetweenAtoms",
"(",
"begin",
",",
"end",
")",
".",
"GetBondType",
"(",
")",
"==",
"BondType",
".",
"DOUBLE",
":",
"neighbours",
"=",
"tautomer",
".",
"GetAtomWithIdx",
"(",
"begin",
")",
".",
"GetBonds",
"(",
")",
"+",
"tautomer",
".",
"GetAtomWithIdx",
"(",
"end",
")",
".",
"GetBonds",
"(",
")",
"for",
"otherbond",
"in",
"neighbours",
":",
"if",
"otherbond",
".",
"GetBondDir",
"(",
")",
"in",
"{",
"BondDir",
".",
"ENDUPRIGHT",
",",
"BondDir",
".",
"ENDDOWNRIGHT",
"}",
":",
"otherbond",
".",
"SetBondDir",
"(",
"BondDir",
".",
"NONE",
")",
"Chem",
".",
"AssignStereochemistry",
"(",
"tautomer",
",",
"force",
"=",
"True",
",",
"cleanIt",
"=",
"True",
")",
"log",
".",
"debug",
"(",
"'Removed stereochemistry from unfixed double bond'",
")",
"break",
"return",
"list",
"(",
"tautomers",
".",
"values",
"(",
")",
")"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
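Usage sketch for the enumerator above (assumes MolVS and RDKit are installed):

from rdkit import Chem
from molvs.tautomer import TautomerEnumerator

enumerator = TautomerEnumerator()
# Acetylacetone has keto and enol tautomers; each enumerated form is printed.
mol = Chem.MolFromSmiles('CC(=O)CC(C)=O')
for tautomer in enumerator.enumerate(mol):
    print(Chem.MolToSmiles(tautomer))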
test
|
validate_smiles
|
Return log messages for a given SMILES string using the default validations.
Note: This is a convenience function for quickly validating a single SMILES string. It is more efficient to use
the :class:`~molvs.validate.Validator` class directly when working with many molecules or when custom options
are needed.
:param string smiles: The SMILES for the molecule.
:returns: A list of log messages.
:rtype: list of strings.
|
molvs/validate.py
|
def validate_smiles(smiles):
"""Return log messages for a given SMILES string using the default validations.
Note: This is a convenience function for quickly validating a single SMILES string. It is more efficient to use
the :class:`~molvs.validate.Validator` class directly when working with many molecules or when custom options
are needed.
:param string smiles: The SMILES for the molecule.
:returns: A list of log messages.
:rtype: list of strings.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles)
logs = Validator().validate(mol)
return logs
|
def validate_smiles(smiles):
"""Return log messages for a given SMILES string using the default validations.
Note: This is a convenience function for quickly validating a single SMILES string. It is more efficient to use
the :class:`~molvs.validate.Validator` class directly when working with many molecules or when custom options
are needed.
:param string smiles: The SMILES for the molecule.
:returns: A list of log messages.
:rtype: list of strings.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles)
logs = Validator().validate(mol)
return logs
|
[
"Return",
"log",
"messages",
"for",
"a",
"given",
"SMILES",
"string",
"using",
"the",
"default",
"validations",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/validate.py#L105-L119
|
[
"def",
"validate_smiles",
"(",
"smiles",
")",
":",
"# Skip sanitize as standardize does this anyway",
"mol",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"smiles",
")",
"logs",
"=",
"Validator",
"(",
")",
".",
"validate",
"(",
"mol",
")",
"return",
"logs"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
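Usage sketch for the convenience function above, using the isotope example from the MolVS README (assumes the default validations):

from molvs import validate_smiles

logs = validate_smiles('[2H]C(Cl)(Cl)Cl')
print(logs)
# expected: ['INFO: [IsotopeValidation] Molecule contains isotope 2H']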
test
|
MetalDisconnector.disconnect
|
Break covalent bonds between metals and organic atoms under certain conditions.
The algorithm works as follows:
- Disconnect N, O, F from any metal.
- Disconnect other non-metals from transition metals + Al (but not Hg, Ga, Ge, In, Sn, As, Tl, Pb, Bi, Po).
- For every bond broken, adjust the charges of the begin and end atoms accordingly.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with metals disconnected.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/metal.py
|
def disconnect(self, mol):
"""Break covalent bonds between metals and organic atoms under certain conditions.
The algorithm works as follows:
- Disconnect N, O, F from any metal.
- Disconnect other non-metals from transition metals + Al (but not Hg, Ga, Ge, In, Sn, As, Tl, Pb, Bi, Po).
- For every bond broken, adjust the charges of the begin and end atoms accordingly.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with metals disconnected.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running MetalDisconnector')
# Remove bonds that match SMARTS
for smarts in [self._metal_nof, self._metal_non]:
pairs = mol.GetSubstructMatches(smarts)
rwmol = Chem.RWMol(mol)
orders = []
for i, j in pairs:
# TODO: Could get the valence contributions of the bond instead of GetBondTypeAsDouble?
orders.append(int(mol.GetBondBetweenAtoms(i, j).GetBondTypeAsDouble()))
rwmol.RemoveBond(i, j)
# Adjust neighbouring charges accordingly
mol = rwmol.GetMol()
for n, (i, j) in enumerate(pairs):
chg = orders[n]
atom1 = mol.GetAtomWithIdx(i)
atom1.SetFormalCharge(atom1.GetFormalCharge() + chg)
atom2 = mol.GetAtomWithIdx(j)
atom2.SetFormalCharge(atom2.GetFormalCharge() - chg)
log.info('Removed covalent bond between %s and %s', atom1.GetSymbol(), atom2.GetSymbol())
Chem.SanitizeMol(mol)
return mol
|
def disconnect(self, mol):
"""Break covalent bonds between metals and organic atoms under certain conditions.
The algorithm works as follows:
- Disconnect N, O, F from any metal.
- Disconnect other non-metals from transition metals + Al (but not Hg, Ga, Ge, In, Sn, As, Tl, Pb, Bi, Po).
- For every bond broken, adjust the charges of the begin and end atoms accordingly.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with metals disconnected.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running MetalDisconnector')
# Remove bonds that match SMARTS
for smarts in [self._metal_nof, self._metal_non]:
pairs = mol.GetSubstructMatches(smarts)
rwmol = Chem.RWMol(mol)
orders = []
for i, j in pairs:
# TODO: Could get the valence contributions of the bond instead of GetBondTypeAsDouble?
orders.append(int(mol.GetBondBetweenAtoms(i, j).GetBondTypeAsDouble()))
rwmol.RemoveBond(i, j)
# Adjust neighbouring charges accordingly
mol = rwmol.GetMol()
for n, (i, j) in enumerate(pairs):
chg = orders[n]
atom1 = mol.GetAtomWithIdx(i)
atom1.SetFormalCharge(atom1.GetFormalCharge() + chg)
atom2 = mol.GetAtomWithIdx(j)
atom2.SetFormalCharge(atom2.GetFormalCharge() - chg)
log.info('Removed covalent bond between %s and %s', atom1.GetSymbol(), atom2.GetSymbol())
Chem.SanitizeMol(mol)
return mol
|
[
"Break",
"covalent",
"bonds",
"between",
"metals",
"and",
"organic",
"atoms",
"under",
"certain",
"conditions",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/metal.py#L38-L72
|
[
"def",
"disconnect",
"(",
"self",
",",
"mol",
")",
":",
"log",
".",
"debug",
"(",
"'Running MetalDisconnector'",
")",
"# Remove bonds that match SMARTS",
"for",
"smarts",
"in",
"[",
"self",
".",
"_metal_nof",
",",
"self",
".",
"_metal_non",
"]",
":",
"pairs",
"=",
"mol",
".",
"GetSubstructMatches",
"(",
"smarts",
")",
"rwmol",
"=",
"Chem",
".",
"RWMol",
"(",
"mol",
")",
"orders",
"=",
"[",
"]",
"for",
"i",
",",
"j",
"in",
"pairs",
":",
"# TODO: Could get the valence contributions of the bond instead of GetBondTypeAsDouble?",
"orders",
".",
"append",
"(",
"int",
"(",
"mol",
".",
"GetBondBetweenAtoms",
"(",
"i",
",",
"j",
")",
".",
"GetBondTypeAsDouble",
"(",
")",
")",
")",
"rwmol",
".",
"RemoveBond",
"(",
"i",
",",
"j",
")",
"# Adjust neighbouring charges accordingly",
"mol",
"=",
"rwmol",
".",
"GetMol",
"(",
")",
"for",
"n",
",",
"(",
"i",
",",
"j",
")",
"in",
"enumerate",
"(",
"pairs",
")",
":",
"chg",
"=",
"orders",
"[",
"n",
"]",
"atom1",
"=",
"mol",
".",
"GetAtomWithIdx",
"(",
"i",
")",
"atom1",
".",
"SetFormalCharge",
"(",
"atom1",
".",
"GetFormalCharge",
"(",
")",
"+",
"chg",
")",
"atom2",
"=",
"mol",
".",
"GetAtomWithIdx",
"(",
"j",
")",
"atom2",
".",
"SetFormalCharge",
"(",
"atom2",
".",
"GetFormalCharge",
"(",
")",
"-",
"chg",
")",
"log",
".",
"info",
"(",
"'Removed covalent bond between %s and %s'",
",",
"atom1",
".",
"GetSymbol",
"(",
")",
",",
"atom2",
".",
"GetSymbol",
"(",
")",
")",
"Chem",
".",
"SanitizeMol",
"(",
"mol",
")",
"return",
"mol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
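Usage sketch for the disconnection algorithm above (assumes MolVS and RDKit; O is disconnected from any metal, so the covalent Na-O bond is broken and the charges adjusted):

from rdkit import Chem
from molvs.metal import MetalDisconnector

disconnector = MetalDisconnector()
mol = Chem.MolFromSmiles('CC(=O)O[Na]')  # sodium acetate drawn covalently
print(Chem.MolToSmiles(disconnector.disconnect(mol)))
# expected: 'CC(=O)[O-].[Na+]'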
test
|
standardize_smiles
|
Return a standardized canonical SMILES string given a SMILES string.
Note: This is a convenience function for quickly standardizing a single SMILES string. It is more efficient to use
the :class:`~molvs.standardize.Standardizer` class directly when working with many molecules or when custom options
are needed.
:param string smiles: The SMILES for the molecule.
:returns: The SMILES for the standardized molecule.
:rtype: string.
|
molvs/standardize.py
|
def standardize_smiles(smiles):
"""Return a standardized canonical SMILES string given a SMILES string.
Note: This is a convenience function for quickly standardizing a single SMILES string. It is more efficient to use
the :class:`~molvs.standardize.Standardizer` class directly when working with many molecules or when custom options
are needed.
:param string smiles: The SMILES for the molecule.
:returns: The SMILES for the standardized molecule.
:rtype: string.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
return Chem.MolToSmiles(mol, isomericSmiles=True)
|
def standardize_smiles(smiles):
"""Return a standardized canonical SMILES string given a SMILES string.
Note: This is a convenience function for quickly standardizing a single SMILES string. It is more efficient to use
the :class:`~molvs.standardize.Standardizer` class directly when working with many molecules or when custom options
are needed.
:param string smiles: The SMILES for the molecule.
:returns: The SMILES for the standardized molecule.
:rtype: string.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
return Chem.MolToSmiles(mol, isomericSmiles=True)
|
[
"Return",
"a",
"standardized",
"canonical",
"SMILES",
"string",
"given",
"a",
"SMILES",
"string",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L289-L303
|
[
"def",
"standardize_smiles",
"(",
"smiles",
")",
":",
"# Skip sanitize as standardize does this anyway",
"mol",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"smiles",
",",
"sanitize",
"=",
"False",
")",
"mol",
"=",
"Standardizer",
"(",
")",
".",
"standardize",
"(",
"mol",
")",
"return",
"Chem",
".",
"MolToSmiles",
"(",
"mol",
",",
"isomericSmiles",
"=",
"True",
")"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
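Usage sketch, reproducing the example from the MolVS README:

from molvs import standardize_smiles

print(standardize_smiles('C[n+]1c([N-](C))cccc1'))
# expected: 'CN=c1ccccn1C'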
test
|
enumerate_tautomers_smiles
|
Return a set of tautomers as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible tautomer.
:rtype: set of strings.
|
molvs/standardize.py
|
def enumerate_tautomers_smiles(smiles):
"""Return a set of tautomers as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible tautomer.
:rtype: set of strings.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
tautomers = TautomerEnumerator().enumerate(mol)
return {Chem.MolToSmiles(m, isomericSmiles=True) for m in tautomers}
|
def enumerate_tautomers_smiles(smiles):
"""Return a set of tautomers as SMILES strings, given a SMILES string.
:param smiles: A SMILES string.
:returns: A set containing SMILES strings for every possible tautomer.
:rtype: set of strings.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
tautomers = TautomerEnumerator().enumerate(mol)
return {Chem.MolToSmiles(m, isomericSmiles=True) for m in tautomers}
|
[
"Return",
"a",
"set",
"of",
"tautomers",
"as",
"SMILES",
"strings",
"given",
"a",
"SMILES",
"string",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L306-L317
|
[
"def",
"enumerate_tautomers_smiles",
"(",
"smiles",
")",
":",
"# Skip sanitize as standardize does this anyway",
"mol",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"smiles",
",",
"sanitize",
"=",
"False",
")",
"mol",
"=",
"Standardizer",
"(",
")",
".",
"standardize",
"(",
"mol",
")",
"tautomers",
"=",
"TautomerEnumerator",
"(",
")",
".",
"enumerate",
"(",
"mol",
")",
"return",
"{",
"Chem",
".",
"MolToSmiles",
"(",
"m",
",",
"isomericSmiles",
"=",
"True",
")",
"for",
"m",
"in",
"tautomers",
"}"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
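Usage sketch (assumes the default transforms; the exact SMILES in the set may vary by rule set and RDKit version):

from molvs import enumerate_tautomers_smiles

print(enumerate_tautomers_smiles('O=C1CCCCC1'))
# e.g. {'O=C1CCCCC1', 'OC1=CCCCC1'}  (cyclohexanone and its enol)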
test
|
canonicalize_tautomer_smiles
|
Return a standardized canonical tautomer SMILES string given a SMILES string.
Note: This is a convenience function for quickly standardizing and finding the canonical tautomer for a single
SMILES string. It is more efficient to use the :class:`~molvs.standardize.Standardizer` class directly when working
with many molecules or when custom options are needed.
:param string smiles: The SMILES for the molecule.
:returns: The SMILES for the standardized canonical tautomer.
:rtype: string.
|
molvs/standardize.py
|
def canonicalize_tautomer_smiles(smiles):
"""Return a standardized canonical tautomer SMILES string given a SMILES string.
Note: This is a convenience function for quickly standardizing and finding the canonical tautomer for a single
SMILES string. It is more efficient to use the :class:`~molvs.standardize.Standardizer` class directly when working
with many molecules or when custom options are needed.
:param string smiles: The SMILES for the molecule.
:returns: The SMILES for the standardized canonical tautomer.
:rtype: string.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
tautomer = TautomerCanonicalizer().canonicalize(mol)
return Chem.MolToSmiles(tautomer, isomericSmiles=True)
|
def canonicalize_tautomer_smiles(smiles):
"""Return a standardized canonical tautomer SMILES string given a SMILES string.
Note: This is a convenience function for quickly standardizing and finding the canonical tautomer for a single
SMILES string. It is more efficient to use the :class:`~molvs.standardize.Standardizer` class directly when working
with many molecules or when custom options are needed.
:param string smiles: The SMILES for the molecule.
:returns: The SMILES for the standardized canonical tautomer.
:rtype: string.
"""
# Skip sanitize as standardize does this anyway
mol = Chem.MolFromSmiles(smiles, sanitize=False)
mol = Standardizer().standardize(mol)
tautomer = TautomerCanonicalizer().canonicalize(mol)
return Chem.MolToSmiles(tautomer, isomericSmiles=True)
|
[
"Return",
"a",
"standardized",
"canonical",
"tautomer",
"SMILES",
"string",
"given",
"a",
"SMILES",
"string",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L320-L335
|
[
"def",
"canonicalize_tautomer_smiles",
"(",
"smiles",
")",
":",
"# Skip sanitize as standardize does this anyway",
"mol",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"smiles",
",",
"sanitize",
"=",
"False",
")",
"mol",
"=",
"Standardizer",
"(",
")",
".",
"standardize",
"(",
"mol",
")",
"tautomer",
"=",
"TautomerCanonicalizer",
"(",
")",
".",
"canonicalize",
"(",
"mol",
")",
"return",
"Chem",
".",
"MolToSmiles",
"(",
"tautomer",
",",
"isomericSmiles",
"=",
"True",
")"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
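Usage sketch (assumes the default scores favour the carbonyl form):

from molvs import canonicalize_tautomer_smiles

print(canonicalize_tautomer_smiles('CC(=C)O'))  # enol of acetone
# expected: 'CC(C)=O'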
test
|
Standardizer.standardize
|
Return a standardized version of the given molecule.
The standardization process consists of the following stages: RDKit
:py:func:`~rdkit.Chem.rdmolops.RemoveHs`, RDKit :py:func:`~rdkit.Chem.rdmolops.SanitizeMol`,
:class:`~molvs.metal.MetalDisconnector`, :class:`~molvs.normalize.Normalizer`,
:class:`~molvs.charge.Reionizer`, RDKit :py:func:`~rdkit.Chem.rdmolops.AssignStereochemistry`.
:param mol: The molecule to standardize.
:type mol: rdkit.Chem.rdchem.Mol
:returns: The standardized molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/standardize.py
|
def standardize(self, mol):
"""Return a standardized version the given molecule.
The standardization process consists of the following stages: RDKit
:py:func:`~rdkit.Chem.rdmolops.RemoveHs`, RDKit :py:func:`~rdkit.Chem.rdmolops.SanitizeMol`,
:class:`~molvs.metal.MetalDisconnector`, :class:`~molvs.normalize.Normalizer`,
:class:`~molvs.charge.Reionizer`, RDKit :py:func:`~rdkit.Chem.rdmolops.AssignStereochemistry`.
:param mol: The molecule to standardize.
:type mol: rdkit.Chem.rdchem.Mol
:returns: The standardized molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
mol = copy.deepcopy(mol)
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
mol = self.disconnect_metals(mol)
mol = self.normalize(mol)
mol = self.reionize(mol)
Chem.AssignStereochemistry(mol, force=True, cleanIt=True)
# TODO: Check this removes symmetric stereocenters
return mol
|
def standardize(self, mol):
"""Return a standardized version the given molecule.
The standardization process consists of the following stages: RDKit
:py:func:`~rdkit.Chem.rdmolops.RemoveHs`, RDKit :py:func:`~rdkit.Chem.rdmolops.SanitizeMol`,
:class:`~molvs.metal.MetalDisconnector`, :class:`~molvs.normalize.Normalizer`,
:class:`~molvs.charge.Reionizer`, RDKit :py:func:`~rdkit.Chem.rdmolops.AssignStereochemistry`.
:param mol: The molecule to standardize.
:type mol: rdkit.Chem.rdchem.Mol
:returns: The standardized molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
mol = copy.deepcopy(mol)
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
mol = self.disconnect_metals(mol)
mol = self.normalize(mol)
mol = self.reionize(mol)
Chem.AssignStereochemistry(mol, force=True, cleanIt=True)
# TODO: Check this removes symmetric stereocenters
return mol
|
[
"Return",
"a",
"standardized",
"version",
"the",
"given",
"molecule",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L78-L99
|
[
"def",
"standardize",
"(",
"self",
",",
"mol",
")",
":",
"mol",
"=",
"copy",
".",
"deepcopy",
"(",
"mol",
")",
"Chem",
".",
"SanitizeMol",
"(",
"mol",
")",
"mol",
"=",
"Chem",
".",
"RemoveHs",
"(",
"mol",
")",
"mol",
"=",
"self",
".",
"disconnect_metals",
"(",
"mol",
")",
"mol",
"=",
"self",
".",
"normalize",
"(",
"mol",
")",
"mol",
"=",
"self",
".",
"reionize",
"(",
"mol",
")",
"Chem",
".",
"AssignStereochemistry",
"(",
"mol",
",",
"force",
"=",
"True",
",",
"cleanIt",
"=",
"True",
")",
"# TODO: Check this removes symmetric stereocenters",
"return",
"mol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
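Usage sketch for the full pipeline above (assumes MolVS and RDKit; note that standardize disconnects the metal but keeps both fragments, since fragment selection is a separate "parent" step):

from rdkit import Chem
from molvs import Standardizer

standardizer = Standardizer()
mol = Chem.MolFromSmiles('[Na]OC(=O)c1ccccc1')  # covalently drawn sodium benzoate
print(Chem.MolToSmiles(standardizer.standardize(mol)))
# expected: 'O=C([O-])c1ccccc1.[Na+]'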
test
|
Standardizer.tautomer_parent
|
Return the tautomer parent of a given molecule.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The tautomer parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/standardize.py
|
def tautomer_parent(self, mol, skip_standardize=False):
"""Return the tautomer parent of a given molecule.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The tautomer parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
tautomer = self.canonicalize_tautomer(mol)
tautomer = self.standardize(tautomer)
return tautomer
|
def tautomer_parent(self, mol, skip_standardize=False):
"""Return the tautomer parent of a given molecule.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The tautomer parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
tautomer = self.canonicalize_tautomer(mol)
tautomer = self.standardize(tautomer)
return tautomer
|
[
"Return",
"the",
"tautomer",
"parent",
"of",
"a",
"given",
"molecule",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L101-L114
|
[
"def",
"tautomer_parent",
"(",
"self",
",",
"mol",
",",
"skip_standardize",
"=",
"False",
")",
":",
"if",
"not",
"skip_standardize",
":",
"mol",
"=",
"self",
".",
"standardize",
"(",
"mol",
")",
"tautomer",
"=",
"self",
".",
"canonicalize_tautomer",
"(",
"mol",
")",
"tautomer",
"=",
"self",
".",
"standardize",
"(",
"tautomer",
")",
"return",
"tautomer"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
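Usage sketch (assumes the default tautomer rules prefer the ketone):

from rdkit import Chem
from molvs import Standardizer

standardizer = Standardizer()
mol = Chem.MolFromSmiles('OC1=CCCCC1')  # enol of cyclohexanone
print(Chem.MolToSmiles(standardizer.tautomer_parent(mol)))
# expected: 'O=C1CCCCC1'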
test
|
Standardizer.fragment_parent
|
Return the fragment parent of a given molecule.
The fragment parent is the largest organic covalent unit in the molecule.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The fragment parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/standardize.py
|
def fragment_parent(self, mol, skip_standardize=False):
"""Return the fragment parent of a given molecule.
The fragment parent is the largest organic covalent unit in the molecule.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The fragment parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
# TODO: Consider applying FragmentRemover first to remove salts, solvents?
fragment = self.largest_fragment(mol)
return fragment
|
def fragment_parent(self, mol, skip_standardize=False):
"""Return the fragment parent of a given molecule.
The fragment parent is the largest organic covalent unit in the molecule.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The fragment parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
# TODO: Consider applying FragmentRemover first to remove salts, solvents?
fragment = self.largest_fragment(mol)
return fragment
|
[
"Return",
"the",
"fragment",
"parent",
"of",
"a",
"given",
"molecule",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L116-L131
|
[
"def",
"fragment_parent",
"(",
"self",
",",
"mol",
",",
"skip_standardize",
"=",
"False",
")",
":",
"if",
"not",
"skip_standardize",
":",
"mol",
"=",
"self",
".",
"standardize",
"(",
"mol",
")",
"# TODO: Consider applying FragmentRemover first to remove salts, solvents?",
"fragment",
"=",
"self",
".",
"largest_fragment",
"(",
"mol",
")",
"return",
"fragment"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
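Usage sketch: the acetate ion is the largest organic covalent unit, so it is kept and the counterion dropped (assumes the default fragment chooser):

from rdkit import Chem
from molvs import Standardizer

standardizer = Standardizer()
mol = Chem.MolFromSmiles('CC(=O)[O-].[Na+]')
print(Chem.MolToSmiles(standardizer.fragment_parent(mol)))
# expected: 'CC(=O)[O-]'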
test
|
Standardizer.stereo_parent
|
Return the stereo parent of a given molecule.
The stereo parent has all stereochemistry information removed from tetrahedral centers and double bonds.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The stereo parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/standardize.py
|
def stereo_parent(self, mol, skip_standardize=False):
"""Return the stereo parent of a given molecule.
The stereo parent has all stereochemistry information removed from tetrahedral centers and double bonds.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The stereo parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
else:
mol = copy.deepcopy(mol)
Chem.RemoveStereochemistry(mol)
return mol
|
def stereo_parent(self, mol, skip_standardize=False):
"""Return the stereo parent of a given molecule.
The stereo parent has all stereochemistry information removed from tetrahedral centers and double bonds.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The stereo parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
else:
mol = copy.deepcopy(mol)
Chem.RemoveStereochemistry(mol)
return mol
|
[
"Return",
"the",
"stereo",
"parent",
"of",
"a",
"given",
"molecule",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L133-L149
|
[
"def",
"stereo_parent",
"(",
"self",
",",
"mol",
",",
"skip_standardize",
"=",
"False",
")",
":",
"if",
"not",
"skip_standardize",
":",
"mol",
"=",
"self",
".",
"standardize",
"(",
"mol",
")",
"else",
":",
"mol",
"=",
"copy",
".",
"deepcopy",
"(",
"mol",
")",
"Chem",
".",
"RemoveStereochemistry",
"(",
"mol",
")",
"return",
"mol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
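Usage sketch: tetrahedral and double-bond stereo markers are stripped:

from rdkit import Chem
from molvs import Standardizer

standardizer = Standardizer()
mol = Chem.MolFromSmiles('C[C@H](O)/C=C/C')
print(Chem.MolToSmiles(standardizer.stereo_parent(mol)))
# expected: a SMILES with no @ or / markers, e.g. 'CC=CC(C)O'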
test
|
Standardizer.isotope_parent
|
Return the isotope parent of a given molecule.
The isotope parent has all atoms replaced with the most abundant isotope for that element.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The isotope parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/standardize.py
|
def isotope_parent(self, mol, skip_standardize=False):
"""Return the isotope parent of a given molecule.
The isotope parent has all atoms replaced with the most abundant isotope for that element.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The isotope parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
else:
mol = copy.deepcopy(mol)
# Replace isotopes with common weight
for atom in mol.GetAtoms():
atom.SetIsotope(0)
return mol
|
def isotope_parent(self, mol, skip_standardize=False):
"""Return the isotope parent of a given molecule.
The isotope parent has all atoms replaced with the most abundant isotope for that element.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The isotope parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
else:
mol = copy.deepcopy(mol)
# Replace isotopes with common weight
for atom in mol.GetAtoms():
atom.SetIsotope(0)
return mol
|
[
"Return",
"the",
"isotope",
"parent",
"of",
"a",
"given",
"molecule",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L151-L169
|
[
"def",
"isotope_parent",
"(",
"self",
",",
"mol",
",",
"skip_standardize",
"=",
"False",
")",
":",
"if",
"not",
"skip_standardize",
":",
"mol",
"=",
"self",
".",
"standardize",
"(",
"mol",
")",
"else",
":",
"mol",
"=",
"copy",
".",
"deepcopy",
"(",
"mol",
")",
"# Replace isotopes with common weight",
"for",
"atom",
"in",
"mol",
".",
"GetAtoms",
"(",
")",
":",
"atom",
".",
"SetIsotope",
"(",
"0",
")",
"return",
"mol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
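Usage sketch: setting every isotope to 0 restores the default mass number:

from rdkit import Chem
from molvs import Standardizer

standardizer = Standardizer()
mol = Chem.MolFromSmiles('[13CH3]C(=O)O')  # 13C-labelled acetic acid
print(Chem.MolToSmiles(standardizer.isotope_parent(mol)))
# expected: 'CC(=O)O'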
test
|
Standardizer.charge_parent
|
Return the charge parent of a given molecule.
The charge parent is the uncharged version of the fragment parent.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The charge parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/standardize.py
|
def charge_parent(self, mol, skip_standardize=False):
"""Return the charge parent of a given molecule.
The charge parent is the uncharged version of the fragment parent.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The charge parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: All ionized acids and bases should be neutralised.
if not skip_standardize:
mol = self.standardize(mol)
fragment = self.fragment_parent(mol, skip_standardize=True)
if fragment:
uncharged = self.uncharge(fragment)
# During final standardization, the Reionizer ensures any remaining charges are in the right places
uncharged = self.standardize(uncharged)
return uncharged
|
def charge_parent(self, mol, skip_standardize=False):
"""Return the charge parent of a given molecule.
The charge parent is the uncharged version of the fragment parent.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The charge parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: All ionized acids and bases should be neutralised.
if not skip_standardize:
mol = self.standardize(mol)
fragment = self.fragment_parent(mol, skip_standardize=True)
if fragment:
uncharged = self.uncharge(fragment)
# During final standardization, the Reionizer ensures any remaining charges are in the right places
uncharged = self.standardize(uncharged)
return uncharged
|
[
"Return",
"the",
"charge",
"parent",
"of",
"a",
"given",
"molecule",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L171-L190
|
[
"def",
"charge_parent",
"(",
"self",
",",
"mol",
",",
"skip_standardize",
"=",
"False",
")",
":",
"# TODO: All ionized acids and bases should be neutralised.",
"if",
"not",
"skip_standardize",
":",
"mol",
"=",
"self",
".",
"standardize",
"(",
"mol",
")",
"fragment",
"=",
"self",
".",
"fragment_parent",
"(",
"mol",
",",
"skip_standardize",
"=",
"True",
")",
"if",
"fragment",
":",
"uncharged",
"=",
"self",
".",
"uncharge",
"(",
"fragment",
")",
"# During final standardization, the Reionizer ensures any remaining charges are in the right places",
"uncharged",
"=",
"self",
".",
"standardize",
"(",
"uncharged",
")",
"return",
"uncharged"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
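Usage sketch: the largest fragment is taken first, then neutralised:

from rdkit import Chem
from molvs import Standardizer

standardizer = Standardizer()
mol = Chem.MolFromSmiles('CC(=O)[O-].[Na+]')
print(Chem.MolToSmiles(standardizer.charge_parent(mol)))
# expected: 'CC(=O)O'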
test
|
Standardizer.super_parent
|
Return the super parent of a given molecule.
The super parent is fragment, charge, isotope, stereochemistry and tautomer insensitive. From the input
molecule, the largest fragment is taken. This is uncharged and then isotope and stereochemistry information is
discarded. Finally, the canonical tautomer is determined and returned.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The super parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/standardize.py
|
def super_parent(self, mol, skip_standardize=False):
"""Return the super parent of a given molecule.
The super parent is fragment, charge, isotope, stereochemistry and tautomer insensitive. From the input
molecule, the largest fragment is taken. This is uncharged and then isotope and stereochemistry information is
discarded. Finally, the canonical tautomer is determined and returned.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The super parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
# We don't need to get fragment parent, because the charge parent is the largest fragment
mol = self.charge_parent(mol, skip_standardize=True)
mol = self.isotope_parent(mol, skip_standardize=True)
mol = self.stereo_parent(mol, skip_standardize=True)
mol = self.tautomer_parent(mol, skip_standardize=True)
mol = self.standardize(mol)
return mol
|
def super_parent(self, mol, skip_standardize=False):
"""Return the super parent of a given molecule.
The super parent is fragment, charge, isotope, stereochemistry and tautomer insensitive. From the input
molecule, the largest fragment is taken. This is uncharged and then isotope and stereochemistry information is
discarded. Finally, the canonical tautomer is determined and returned.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The super parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
if not skip_standardize:
mol = self.standardize(mol)
# We don't need to get fragment parent, because the charge parent is the largest fragment
mol = self.charge_parent(mol, skip_standardize=True)
mol = self.isotope_parent(mol, skip_standardize=True)
mol = self.stereo_parent(mol, skip_standardize=True)
mol = self.tautomer_parent(mol, skip_standardize=True)
mol = self.standardize(mol)
return mol
|
[
"Return",
"the",
"super",
"parent",
"of",
"a",
"given",
"molecule",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L192-L213
|
[
"def",
"super_parent",
"(",
"self",
",",
"mol",
",",
"skip_standardize",
"=",
"False",
")",
":",
"if",
"not",
"skip_standardize",
":",
"mol",
"=",
"self",
".",
"standardize",
"(",
"mol",
")",
"# We don't need to get fragment parent, because the charge parent is the largest fragment",
"mol",
"=",
"self",
".",
"charge_parent",
"(",
"mol",
",",
"skip_standardize",
"=",
"True",
")",
"mol",
"=",
"self",
".",
"isotope_parent",
"(",
"mol",
",",
"skip_standardize",
"=",
"True",
")",
"mol",
"=",
"self",
".",
"stereo_parent",
"(",
"mol",
",",
"skip_standardize",
"=",
"True",
")",
"mol",
"=",
"self",
".",
"tautomer_parent",
"(",
"mol",
",",
"skip_standardize",
"=",
"True",
")",
"mol",
"=",
"self",
".",
"standardize",
"(",
"mol",
")",
"return",
"mol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
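Usage sketch combining all the parent steps (assumes the default tautomer rules; the labelled enol collapses to plain acetone):

from rdkit import Chem
from molvs import Standardizer

standardizer = Standardizer()
mol = Chem.MolFromSmiles('[13CH3]C(=C)O')  # 13C-labelled enol of acetone
print(Chem.MolToSmiles(standardizer.super_parent(mol)))
# expected: 'CC(C)=O'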
test
|
Standardizer.canonicalize_tautomer
|
:returns: A callable :class:`~molvs.tautomer.TautomerCanonicalizer` instance.
|
molvs/standardize.py
|
def canonicalize_tautomer(self):
"""
:returns: A callable :class:`~molvs.tautomer.TautomerCanonicalizer` instance.
"""
return TautomerCanonicalizer(transforms=self.tautomer_transforms, scores=self.tautomer_scores,
max_tautomers=self.max_tautomers)
|
def canonicalize_tautomer(self):
"""
:returns: A callable :class:`~molvs.tautomer.TautomerCanonicalizer` instance.
"""
return TautomerCanonicalizer(transforms=self.tautomer_transforms, scores=self.tautomer_scores,
max_tautomers=self.max_tautomers)
|
[
":",
"returns",
":",
"A",
"callable",
":",
"class",
":",
"~molvs",
".",
"tautomer",
".",
"TautomerCanonicalizer",
"instance",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/standardize.py#L281-L286
|
[
"def",
"canonicalize_tautomer",
"(",
"self",
")",
":",
"return",
"TautomerCanonicalizer",
"(",
"transforms",
"=",
"self",
".",
"tautomer_transforms",
",",
"scores",
"=",
"self",
".",
"tautomer_scores",
",",
"max_tautomers",
"=",
"self",
".",
"max_tautomers",
")"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
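A short usage sketch for the record above; it assumes the MolVS convention that this accessor is exposed as a memoized property (the docstring's "callable instance" phrasing suggests property-style access, but that is an assumption here), and the 2-hydroxypyridine input is an invented example.

from rdkit import Chem
from molvs import Standardizer

s = Standardizer()
canonicalizer = s.canonicalize_tautomer      # assumed property access returning a TautomerCanonicalizer
mol = Chem.MolFromSmiles('Oc1ccccn1')        # 2-hydroxypyridine
print(Chem.MolToSmiles(canonicalizer(mol)))  # expected canonical tautomer: 2-pyridone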
test
|
main
|
Main function for molvs command line interface.
|
molvs/cli.py
|
def main():
"""Main function for molvs command line interface."""
# Root options
parser = MolvsParser(epilog='use "molvs <command> -h" to show help for a specific command')
subparsers = parser.add_subparsers(title='Available commands')
# Options common to all commands
common_parser = MolvsParser(add_help=False)
common_parser.add_argument('infile', nargs='?', help='input filename', type=argparse.FileType('r'), default=sys.stdin)
common_parser.add_argument('-i', '--intype', help='input filetype', choices=FILETYPES)
common_parser.add_argument('-:', '--smiles', help='input SMILES instead of file', metavar='<smiles>')
common_parser.add_argument('-O', '--outfile', help='output filename', type=argparse.FileType('w'), default=sys.stdout, metavar='<outfile>')
# Standardize options
standardize_parser = subparsers.add_parser('standardize', help='standardize a molecule', parents=[common_parser])
standardize_parser.add_argument('-o', '--outtype', help='output filetype', choices=FILETYPES)
standardize_parser.set_defaults(func=standardize_main)
# Validate options
validate_parser = subparsers.add_parser('validate', help='validate a molecule', parents=[common_parser])
validate_parser.set_defaults(func=validate_main)
args = parser.parse_args()
try:
args.func(args)
except Exception as e:
        sys.stderr.write('Error: %s\n\n' % e)  # e.message is unavailable in Python 3; format the exception directly
parser.print_help()
sys.exit(2)
|
def main():
"""Main function for molvs command line interface."""
# Root options
parser = MolvsParser(epilog='use "molvs <command> -h" to show help for a specific command')
subparsers = parser.add_subparsers(title='Available commands')
# Options common to all commands
common_parser = MolvsParser(add_help=False)
common_parser.add_argument('infile', nargs='?', help='input filename', type=argparse.FileType('r'), default=sys.stdin)
common_parser.add_argument('-i', '--intype', help='input filetype', choices=FILETYPES)
common_parser.add_argument('-:', '--smiles', help='input SMILES instead of file', metavar='<smiles>')
common_parser.add_argument('-O', '--outfile', help='output filename', type=argparse.FileType('w'), default=sys.stdout, metavar='<outfile>')
# Standardize options
standardize_parser = subparsers.add_parser('standardize', help='standardize a molecule', parents=[common_parser])
standardize_parser.add_argument('-o', '--outtype', help='output filetype', choices=FILETYPES)
standardize_parser.set_defaults(func=standardize_main)
# Validate options
validate_parser = subparsers.add_parser('validate', help='validate a molecule', parents=[common_parser])
validate_parser.set_defaults(func=validate_main)
args = parser.parse_args()
try:
args.func(args)
except Exception as e:
        sys.stderr.write('Error: %s\n\n' % e)  # e.message is unavailable in Python 3; format the exception directly
parser.print_help()
sys.exit(2)
|
[
"Main",
"function",
"for",
"molvs",
"command",
"line",
"interface",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/cli.py#L35-L65
|
[
"def",
"main",
"(",
")",
":",
"# Root options",
"parser",
"=",
"MolvsParser",
"(",
"epilog",
"=",
"'use \"molvs <command> -h\" to show help for a specific command'",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"'Available commands'",
")",
"# Options common to all commands",
"common_parser",
"=",
"MolvsParser",
"(",
"add_help",
"=",
"False",
")",
"common_parser",
".",
"add_argument",
"(",
"'infile'",
",",
"nargs",
"=",
"'?'",
",",
"help",
"=",
"'input filename'",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'r'",
")",
",",
"default",
"=",
"sys",
".",
"stdin",
")",
"common_parser",
".",
"add_argument",
"(",
"'-i'",
",",
"'--intype'",
",",
"help",
"=",
"'input filetype'",
",",
"choices",
"=",
"FILETYPES",
")",
"common_parser",
".",
"add_argument",
"(",
"'-:'",
",",
"'--smiles'",
",",
"help",
"=",
"'input SMILES instead of file'",
",",
"metavar",
"=",
"'<smiles>'",
")",
"common_parser",
".",
"add_argument",
"(",
"'-O'",
",",
"'--outfile'",
",",
"help",
"=",
"'output filename'",
",",
"type",
"=",
"argparse",
".",
"FileType",
"(",
"'w'",
")",
",",
"default",
"=",
"sys",
".",
"stdout",
",",
"metavar",
"=",
"'<outfile>'",
")",
"# Standardize options",
"standardize_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'standardize'",
",",
"help",
"=",
"'standardize a molecule'",
",",
"parents",
"=",
"[",
"common_parser",
"]",
")",
"standardize_parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--outtype'",
",",
"help",
"=",
"'output filetype'",
",",
"choices",
"=",
"FILETYPES",
")",
"standardize_parser",
".",
"set_defaults",
"(",
"func",
"=",
"standardize_main",
")",
"# Validate options",
"validate_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'validate'",
",",
"help",
"=",
"'validate a molecule'",
",",
"parents",
"=",
"[",
"common_parser",
"]",
")",
"validate_parser",
".",
"set_defaults",
"(",
"func",
"=",
"validate_main",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"try",
":",
"args",
".",
"func",
"(",
"args",
")",
"except",
"Exception",
"as",
"e",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Error: %s\\n\\n'",
".",
"encode",
"(",
")",
"%",
"e",
".",
"message",
")",
"parser",
".",
"print_help",
"(",
")",
"sys",
".",
"exit",
"(",
"2",
")"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
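A minimal in-process driver for the parser wired up in main(), assuming molvs/cli.py is importable as molvs.cli; the subcommand and the -: flag are exactly those defined by the add_argument calls above, and the SMILES is an invented example.

import sys
from molvs import cli

# Fake argv so parse_args() routes through set_defaults(func=standardize_main).
sys.argv = ['molvs', 'standardize', '-:', 'C1=CC=CC=C1']
cli.main()  # writes the standardized SMILES to stdout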
test
|
Reionizer.reionize
|
Enforce charges on certain atoms, then perform competitive reionization.
First, charge corrections are applied to ensure, for example, that free metals are correctly ionized. Then, if
a molecule with multiple acid groups is partially ionized, ensure the strongest acids ionize first.
The algorithm works as follows:
- Use SMARTS to find the strongest protonated acid and the weakest ionized acid.
- If the ionized acid is weaker than the protonated acid, swap proton and repeat.
:param mol: The molecule to reionize.
:type mol: rdkit.Chem.rdchem.Mol
:return: The reionized molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/charge.py
|
def reionize(self, mol):
"""Enforce charges on certain atoms, then perform competitive reionization.
First, charge corrections are applied to ensure, for example, that free metals are correctly ionized. Then, if
a molecule with multiple acid groups is partially ionized, ensure the strongest acids ionize first.
The algorithm works as follows:
- Use SMARTS to find the strongest protonated acid and the weakest ionized acid.
- If the ionized acid is weaker than the protonated acid, swap proton and repeat.
:param mol: The molecule to reionize.
:type mol: rdkit.Chem.rdchem.Mol
:return: The reionized molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running Reionizer')
start_charge = Chem.GetFormalCharge(mol)
# Apply forced charge corrections
for cc in self.charge_corrections:
for match in mol.GetSubstructMatches(cc.smarts):
atom = mol.GetAtomWithIdx(match[0])
log.info('Applying charge correction %s (%s %+d)', cc.name, atom.GetSymbol(), cc.charge)
atom.SetFormalCharge(cc.charge)
current_charge = Chem.GetFormalCharge(mol)
charge_diff = Chem.GetFormalCharge(mol) - start_charge
# If molecule is now neutral, assume everything is now fixed
# But otherwise, if charge has become more positive, look for additional protonated acid groups to ionize
if not current_charge == 0:
while charge_diff > 0:
ppos, poccur = self._strongest_protonated(mol)
if ppos is None:
break
log.info('Ionizing %s to balance previous charge corrections', self.acid_base_pairs[ppos].name)
patom = mol.GetAtomWithIdx(poccur[-1])
patom.SetFormalCharge(patom.GetFormalCharge() - 1)
if patom.GetNumExplicitHs() > 0:
patom.SetNumExplicitHs(patom.GetNumExplicitHs() - 1)
# else:
patom.UpdatePropertyCache()
charge_diff -= 1
already_moved = set()
while True:
ppos, poccur = self._strongest_protonated(mol)
ipos, ioccur = self._weakest_ionized(mol)
if ioccur and poccur and ppos < ipos:
if poccur[-1] == ioccur[-1]:
# Bad! H wouldn't be moved, resulting in infinite loop.
log.warning('Aborted reionization due to unexpected situation')
break
key = tuple(sorted([poccur[-1], ioccur[-1]]))
if key in already_moved:
log.warning('Aborting reionization to avoid infinite loop due to it being ambiguous where to put a Hydrogen')
break
already_moved.add(key)
log.info('Moved proton from %s to %s', self.acid_base_pairs[ppos].name, self.acid_base_pairs[ipos].name)
# Remove hydrogen from strongest protonated
patom = mol.GetAtomWithIdx(poccur[-1])
patom.SetFormalCharge(patom.GetFormalCharge() - 1)
# If no implicit Hs to autoremove, and at least 1 explicit H to remove, reduce explicit count by 1
if patom.GetNumImplicitHs() == 0 and patom.GetNumExplicitHs() > 0:
patom.SetNumExplicitHs(patom.GetNumExplicitHs() - 1)
# TODO: Remove any chiral label on patom?
patom.UpdatePropertyCache()
# Add hydrogen to weakest ionized
iatom = mol.GetAtomWithIdx(ioccur[-1])
iatom.SetFormalCharge(iatom.GetFormalCharge() + 1)
# Increase explicit H count if no implicit, or aromatic N or P, or non default valence state
if (iatom.GetNoImplicit() or
((patom.GetAtomicNum() == 7 or patom.GetAtomicNum() == 15) and patom.GetIsAromatic()) or
iatom.GetTotalValence() not in list(Chem.GetPeriodicTable().GetValenceList(iatom.GetAtomicNum()))):
iatom.SetNumExplicitHs(iatom.GetNumExplicitHs() + 1)
iatom.UpdatePropertyCache()
else:
break
# TODO: Canonical ionization position if multiple equivalent positions?
Chem.SanitizeMol(mol)
return mol
|
def reionize(self, mol):
"""Enforce charges on certain atoms, then perform competitive reionization.
First, charge corrections are applied to ensure, for example, that free metals are correctly ionized. Then, if
a molecule with multiple acid groups is partially ionized, ensure the strongest acids ionize first.
The algorithm works as follows:
- Use SMARTS to find the strongest protonated acid and the weakest ionized acid.
- If the ionized acid is weaker than the protonated acid, swap proton and repeat.
:param mol: The molecule to reionize.
:type mol: rdkit.Chem.rdchem.Mol
:return: The reionized molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running Reionizer')
start_charge = Chem.GetFormalCharge(mol)
# Apply forced charge corrections
for cc in self.charge_corrections:
for match in mol.GetSubstructMatches(cc.smarts):
atom = mol.GetAtomWithIdx(match[0])
log.info('Applying charge correction %s (%s %+d)', cc.name, atom.GetSymbol(), cc.charge)
atom.SetFormalCharge(cc.charge)
current_charge = Chem.GetFormalCharge(mol)
charge_diff = Chem.GetFormalCharge(mol) - start_charge
# If molecule is now neutral, assume everything is now fixed
# But otherwise, if charge has become more positive, look for additional protonated acid groups to ionize
if not current_charge == 0:
while charge_diff > 0:
ppos, poccur = self._strongest_protonated(mol)
if ppos is None:
break
log.info('Ionizing %s to balance previous charge corrections', self.acid_base_pairs[ppos].name)
patom = mol.GetAtomWithIdx(poccur[-1])
patom.SetFormalCharge(patom.GetFormalCharge() - 1)
if patom.GetNumExplicitHs() > 0:
patom.SetNumExplicitHs(patom.GetNumExplicitHs() - 1)
# else:
patom.UpdatePropertyCache()
charge_diff -= 1
already_moved = set()
while True:
ppos, poccur = self._strongest_protonated(mol)
ipos, ioccur = self._weakest_ionized(mol)
if ioccur and poccur and ppos < ipos:
if poccur[-1] == ioccur[-1]:
# Bad! H wouldn't be moved, resulting in infinite loop.
log.warning('Aborted reionization due to unexpected situation')
break
key = tuple(sorted([poccur[-1], ioccur[-1]]))
if key in already_moved:
log.warning('Aborting reionization to avoid infinite loop due to it being ambiguous where to put a Hydrogen')
break
already_moved.add(key)
log.info('Moved proton from %s to %s', self.acid_base_pairs[ppos].name, self.acid_base_pairs[ipos].name)
# Remove hydrogen from strongest protonated
patom = mol.GetAtomWithIdx(poccur[-1])
patom.SetFormalCharge(patom.GetFormalCharge() - 1)
# If no implicit Hs to autoremove, and at least 1 explicit H to remove, reduce explicit count by 1
if patom.GetNumImplicitHs() == 0 and patom.GetNumExplicitHs() > 0:
patom.SetNumExplicitHs(patom.GetNumExplicitHs() - 1)
# TODO: Remove any chiral label on patom?
patom.UpdatePropertyCache()
# Add hydrogen to weakest ionized
iatom = mol.GetAtomWithIdx(ioccur[-1])
iatom.SetFormalCharge(iatom.GetFormalCharge() + 1)
# Increase explicit H count if no implicit, or aromatic N or P, or non default valence state
if (iatom.GetNoImplicit() or
((patom.GetAtomicNum() == 7 or patom.GetAtomicNum() == 15) and patom.GetIsAromatic()) or
iatom.GetTotalValence() not in list(Chem.GetPeriodicTable().GetValenceList(iatom.GetAtomicNum()))):
iatom.SetNumExplicitHs(iatom.GetNumExplicitHs() + 1)
iatom.UpdatePropertyCache()
else:
break
# TODO: Canonical ionization position if multiple equivalent positions?
Chem.SanitizeMol(mol)
return mol
|
[
"Enforce",
"charges",
"on",
"certain",
"atoms",
"then",
"perform",
"competitive",
"reionization",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/charge.py#L154-L241
|
[
"def",
"reionize",
"(",
"self",
",",
"mol",
")",
":",
"log",
".",
"debug",
"(",
"'Running Reionizer'",
")",
"start_charge",
"=",
"Chem",
".",
"GetFormalCharge",
"(",
"mol",
")",
"# Apply forced charge corrections",
"for",
"cc",
"in",
"self",
".",
"charge_corrections",
":",
"for",
"match",
"in",
"mol",
".",
"GetSubstructMatches",
"(",
"cc",
".",
"smarts",
")",
":",
"atom",
"=",
"mol",
".",
"GetAtomWithIdx",
"(",
"match",
"[",
"0",
"]",
")",
"log",
".",
"info",
"(",
"'Applying charge correction %s (%s %+d)'",
",",
"cc",
".",
"name",
",",
"atom",
".",
"GetSymbol",
"(",
")",
",",
"cc",
".",
"charge",
")",
"atom",
".",
"SetFormalCharge",
"(",
"cc",
".",
"charge",
")",
"current_charge",
"=",
"Chem",
".",
"GetFormalCharge",
"(",
"mol",
")",
"charge_diff",
"=",
"Chem",
".",
"GetFormalCharge",
"(",
"mol",
")",
"-",
"start_charge",
"# If molecule is now neutral, assume everything is now fixed",
"# But otherwise, if charge has become more positive, look for additional protonated acid groups to ionize",
"if",
"not",
"current_charge",
"==",
"0",
":",
"while",
"charge_diff",
">",
"0",
":",
"ppos",
",",
"poccur",
"=",
"self",
".",
"_strongest_protonated",
"(",
"mol",
")",
"if",
"ppos",
"is",
"None",
":",
"break",
"log",
".",
"info",
"(",
"'Ionizing %s to balance previous charge corrections'",
",",
"self",
".",
"acid_base_pairs",
"[",
"ppos",
"]",
".",
"name",
")",
"patom",
"=",
"mol",
".",
"GetAtomWithIdx",
"(",
"poccur",
"[",
"-",
"1",
"]",
")",
"patom",
".",
"SetFormalCharge",
"(",
"patom",
".",
"GetFormalCharge",
"(",
")",
"-",
"1",
")",
"if",
"patom",
".",
"GetNumExplicitHs",
"(",
")",
">",
"0",
":",
"patom",
".",
"SetNumExplicitHs",
"(",
"patom",
".",
"GetNumExplicitHs",
"(",
")",
"-",
"1",
")",
"# else:",
"patom",
".",
"UpdatePropertyCache",
"(",
")",
"charge_diff",
"-=",
"1",
"already_moved",
"=",
"set",
"(",
")",
"while",
"True",
":",
"ppos",
",",
"poccur",
"=",
"self",
".",
"_strongest_protonated",
"(",
"mol",
")",
"ipos",
",",
"ioccur",
"=",
"self",
".",
"_weakest_ionized",
"(",
"mol",
")",
"if",
"ioccur",
"and",
"poccur",
"and",
"ppos",
"<",
"ipos",
":",
"if",
"poccur",
"[",
"-",
"1",
"]",
"==",
"ioccur",
"[",
"-",
"1",
"]",
":",
"# Bad! H wouldn't be moved, resulting in infinite loop.",
"log",
".",
"warning",
"(",
"'Aborted reionization due to unexpected situation'",
")",
"break",
"key",
"=",
"tuple",
"(",
"sorted",
"(",
"[",
"poccur",
"[",
"-",
"1",
"]",
",",
"ioccur",
"[",
"-",
"1",
"]",
"]",
")",
")",
"if",
"key",
"in",
"already_moved",
":",
"log",
".",
"warning",
"(",
"'Aborting reionization to avoid infinite loop due to it being ambiguous where to put a Hydrogen'",
")",
"break",
"already_moved",
".",
"add",
"(",
"key",
")",
"log",
".",
"info",
"(",
"'Moved proton from %s to %s'",
",",
"self",
".",
"acid_base_pairs",
"[",
"ppos",
"]",
".",
"name",
",",
"self",
".",
"acid_base_pairs",
"[",
"ipos",
"]",
".",
"name",
")",
"# Remove hydrogen from strongest protonated",
"patom",
"=",
"mol",
".",
"GetAtomWithIdx",
"(",
"poccur",
"[",
"-",
"1",
"]",
")",
"patom",
".",
"SetFormalCharge",
"(",
"patom",
".",
"GetFormalCharge",
"(",
")",
"-",
"1",
")",
"# If no implicit Hs to autoremove, and at least 1 explicit H to remove, reduce explicit count by 1",
"if",
"patom",
".",
"GetNumImplicitHs",
"(",
")",
"==",
"0",
"and",
"patom",
".",
"GetNumExplicitHs",
"(",
")",
">",
"0",
":",
"patom",
".",
"SetNumExplicitHs",
"(",
"patom",
".",
"GetNumExplicitHs",
"(",
")",
"-",
"1",
")",
"# TODO: Remove any chiral label on patom?",
"patom",
".",
"UpdatePropertyCache",
"(",
")",
"# Add hydrogen to weakest ionized",
"iatom",
"=",
"mol",
".",
"GetAtomWithIdx",
"(",
"ioccur",
"[",
"-",
"1",
"]",
")",
"iatom",
".",
"SetFormalCharge",
"(",
"iatom",
".",
"GetFormalCharge",
"(",
")",
"+",
"1",
")",
"# Increase explicit H count if no implicit, or aromatic N or P, or non default valence state",
"if",
"(",
"iatom",
".",
"GetNoImplicit",
"(",
")",
"or",
"(",
"(",
"patom",
".",
"GetAtomicNum",
"(",
")",
"==",
"7",
"or",
"patom",
".",
"GetAtomicNum",
"(",
")",
"==",
"15",
")",
"and",
"patom",
".",
"GetIsAromatic",
"(",
")",
")",
"or",
"iatom",
".",
"GetTotalValence",
"(",
")",
"not",
"in",
"list",
"(",
"Chem",
".",
"GetPeriodicTable",
"(",
")",
".",
"GetValenceList",
"(",
"iatom",
".",
"GetAtomicNum",
"(",
")",
")",
")",
")",
":",
"iatom",
".",
"SetNumExplicitHs",
"(",
"iatom",
".",
"GetNumExplicitHs",
"(",
")",
"+",
"1",
")",
"iatom",
".",
"UpdatePropertyCache",
"(",
")",
"else",
":",
"break",
"# TODO: Canonical ionization position if multiple equivalent positions?",
"Chem",
".",
"SanitizeMol",
"(",
"mol",
")",
"return",
"mol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
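A minimal sketch, assuming Reionizer is importable from molvs.charge as the path above suggests. The input is an invented example in which the weaker sulfinic acid is ionized while the stronger sulfonic acid is still protonated, so reionize() should move the proton between them.

from rdkit import Chem
from molvs.charge import Reionizer

mol = Chem.MolFromSmiles('OS(=O)(=O)c1ccccc1S(=O)[O-]')
fixed = Reionizer().reionize(mol)
# Expect the sulfonic acid ionized and the sulfinic acid protonated.
print(Chem.MolToSmiles(fixed))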
test
|
Uncharger.uncharge
|
Neutralize molecule by adding/removing hydrogens.
:param mol: The molecule to uncharge.
:type mol: rdkit.Chem.rdchem.Mol
:return: The uncharged molecule.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/charge.py
|
def uncharge(self, mol):
"""Neutralize molecule by adding/removing hydrogens.
:param mol: The molecule to uncharge.
:type mol: rdkit.Chem.rdchem.Mol
:return: The uncharged molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running Uncharger')
mol = copy.deepcopy(mol)
# Neutralize positive charges
pos_remainder = 0
neg_count = 0
for atom in mol.GetAtoms():
        # Remove hydrogen from positive atoms and reduce formal charge until neutral or no more hydrogens
while atom.GetFormalCharge() > 0 and atom.GetNumExplicitHs() > 0:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() - 1)
atom.SetFormalCharge(atom.GetFormalCharge() - 1)
log.info('Removed positive charge')
chg = atom.GetFormalCharge()
if chg > 0:
# Record number of non-neutralizable positive charges
pos_remainder += chg
elif chg < 0:
# Record total number of negative charges
neg_count += -chg
# Choose negative charges to leave in order to balance non-neutralizable positive charges
neg_skip = self._get_neg_skip(mol, pos_remainder)
# Neutralize remaining negative charges
for atom in mol.GetAtoms():
log.info(atom.GetIdx())
if atom.GetIdx() in neg_skip:
continue
# Make sure to stop when neg_count <= pos_remainder, as it is possible that neg_skip is not large enough
while atom.GetFormalCharge() < 0 and neg_count > pos_remainder:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
atom.SetFormalCharge(atom.GetFormalCharge() + 1)
neg_count -= 1
log.info('Removed negative charge')
return mol
|
def uncharge(self, mol):
"""Neutralize molecule by adding/removing hydrogens.
:param mol: The molecule to uncharge.
:type mol: rdkit.Chem.rdchem.Mol
:return: The uncharged molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running Uncharger')
mol = copy.deepcopy(mol)
# Neutralize positive charges
pos_remainder = 0
neg_count = 0
for atom in mol.GetAtoms():
        # Remove hydrogen from positive atoms and reduce formal charge until neutral or no more hydrogens
while atom.GetFormalCharge() > 0 and atom.GetNumExplicitHs() > 0:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() - 1)
atom.SetFormalCharge(atom.GetFormalCharge() - 1)
log.info('Removed positive charge')
chg = atom.GetFormalCharge()
if chg > 0:
# Record number of non-neutralizable positive charges
pos_remainder += chg
elif chg < 0:
# Record total number of negative charges
neg_count += -chg
# Choose negative charges to leave in order to balance non-neutralizable positive charges
neg_skip = self._get_neg_skip(mol, pos_remainder)
# Neutralize remaining negative charges
for atom in mol.GetAtoms():
log.info(atom.GetIdx())
if atom.GetIdx() in neg_skip:
continue
# Make sure to stop when neg_count <= pos_remainder, as it is possible that neg_skip is not large enough
while atom.GetFormalCharge() < 0 and neg_count > pos_remainder:
atom.SetNumExplicitHs(atom.GetNumExplicitHs() + 1)
atom.SetFormalCharge(atom.GetFormalCharge() + 1)
neg_count -= 1
log.info('Removed negative charge')
return mol
|
[
"Neutralize",
"molecule",
"by",
"adding",
"/",
"removing",
"hydrogens",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/charge.py#L272-L314
|
[
"def",
"uncharge",
"(",
"self",
",",
"mol",
")",
":",
"log",
".",
"debug",
"(",
"'Running Uncharger'",
")",
"mol",
"=",
"copy",
".",
"deepcopy",
"(",
"mol",
")",
"# Neutralize positive charges",
"pos_remainder",
"=",
"0",
"neg_count",
"=",
"0",
"for",
"atom",
"in",
"mol",
".",
"GetAtoms",
"(",
")",
":",
"# Remove hydrogen from positive atoms and reduce formal change until neutral or no more hydrogens",
"while",
"atom",
".",
"GetFormalCharge",
"(",
")",
">",
"0",
"and",
"atom",
".",
"GetNumExplicitHs",
"(",
")",
">",
"0",
":",
"atom",
".",
"SetNumExplicitHs",
"(",
"atom",
".",
"GetNumExplicitHs",
"(",
")",
"-",
"1",
")",
"atom",
".",
"SetFormalCharge",
"(",
"atom",
".",
"GetFormalCharge",
"(",
")",
"-",
"1",
")",
"log",
".",
"info",
"(",
"'Removed positive charge'",
")",
"chg",
"=",
"atom",
".",
"GetFormalCharge",
"(",
")",
"if",
"chg",
">",
"0",
":",
"# Record number of non-neutralizable positive charges",
"pos_remainder",
"+=",
"chg",
"elif",
"chg",
"<",
"0",
":",
"# Record total number of negative charges",
"neg_count",
"+=",
"-",
"chg",
"# Choose negative charges to leave in order to balance non-neutralizable positive charges",
"neg_skip",
"=",
"self",
".",
"_get_neg_skip",
"(",
"mol",
",",
"pos_remainder",
")",
"# Neutralize remaining negative charges",
"for",
"atom",
"in",
"mol",
".",
"GetAtoms",
"(",
")",
":",
"log",
".",
"info",
"(",
"atom",
".",
"GetIdx",
"(",
")",
")",
"if",
"atom",
".",
"GetIdx",
"(",
")",
"in",
"neg_skip",
":",
"continue",
"# Make sure to stop when neg_count <= pos_remainder, as it is possible that neg_skip is not large enough",
"while",
"atom",
".",
"GetFormalCharge",
"(",
")",
"<",
"0",
"and",
"neg_count",
">",
"pos_remainder",
":",
"atom",
".",
"SetNumExplicitHs",
"(",
"atom",
".",
"GetNumExplicitHs",
"(",
")",
"+",
"1",
")",
"atom",
".",
"SetFormalCharge",
"(",
"atom",
".",
"GetFormalCharge",
"(",
")",
"+",
"1",
")",
"neg_count",
"-=",
"1",
"log",
".",
"info",
"(",
"'Removed negative charge'",
")",
"return",
"mol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
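A minimal sketch, assuming Uncharger is importable from molvs.charge; both SMILES are invented examples chosen to exercise the two branches of uncharge().

from rdkit import Chem
from molvs.charge import Uncharger

u = Uncharger()
# Free carboxylate: a hydrogen is added and the negative charge removed.
print(Chem.MolToSmiles(u.uncharge(Chem.MolFromSmiles('CC(=O)[O-]'))))  # expected: CC(=O)O
# Betaine zwitterion: the quaternary N+ carries no hydrogen to remove, so one
# negative charge is kept to balance the non-neutralizable positive charge.
print(Chem.MolToSmiles(u.uncharge(Chem.MolFromSmiles('C[N+](C)(C)CC(=O)[O-]'))))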
test
|
Uncharger._get_neg_skip
|
Get negatively charged atoms to skip (up to pos_count).
|
molvs/charge.py
|
def _get_neg_skip(self, mol, pos_count):
"""Get negatively charged atoms to skip (up to pos_count)."""
neg_skip = set()
if pos_count:
# Get negative oxygens in charge-separated nitro groups TODO: Any other special cases to skip?
for occurrence in mol.GetSubstructMatches(self.nitro):
neg_skip.add(occurrence[-1])
if len(neg_skip) >= pos_count:
return neg_skip
# Get strongest ionized acids
for position, pair in enumerate(self.acid_base_pairs):
for occurrence in mol.GetSubstructMatches(pair.base):
neg_skip.add(occurrence[-1])
if len(neg_skip) >= pos_count:
return neg_skip
return neg_skip
|
def _get_neg_skip(self, mol, pos_count):
"""Get negatively charged atoms to skip (up to pos_count)."""
neg_skip = set()
if pos_count:
# Get negative oxygens in charge-separated nitro groups TODO: Any other special cases to skip?
for occurrence in mol.GetSubstructMatches(self.nitro):
neg_skip.add(occurrence[-1])
if len(neg_skip) >= pos_count:
return neg_skip
# Get strongest ionized acids
for position, pair in enumerate(self.acid_base_pairs):
for occurrence in mol.GetSubstructMatches(pair.base):
neg_skip.add(occurrence[-1])
if len(neg_skip) >= pos_count:
return neg_skip
return neg_skip
|
[
"Get",
"negatively",
"charged",
"atoms",
"to",
"skip",
"(",
"up",
"to",
"pos_count",
")",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/charge.py#L316-L331
|
[
"def",
"_get_neg_skip",
"(",
"self",
",",
"mol",
",",
"pos_count",
")",
":",
"neg_skip",
"=",
"set",
"(",
")",
"if",
"pos_count",
":",
"# Get negative oxygens in charge-separated nitro groups TODO: Any other special cases to skip?",
"for",
"occurrence",
"in",
"mol",
".",
"GetSubstructMatches",
"(",
"self",
".",
"nitro",
")",
":",
"neg_skip",
".",
"add",
"(",
"occurrence",
"[",
"-",
"1",
"]",
")",
"if",
"len",
"(",
"neg_skip",
")",
">=",
"pos_count",
":",
"return",
"neg_skip",
"# Get strongest ionized acids",
"for",
"position",
",",
"pair",
"in",
"enumerate",
"(",
"self",
".",
"acid_base_pairs",
")",
":",
"for",
"occurrence",
"in",
"mol",
".",
"GetSubstructMatches",
"(",
"pair",
".",
"base",
")",
":",
"neg_skip",
".",
"add",
"(",
"occurrence",
"[",
"-",
"1",
"]",
")",
"if",
"len",
"(",
"neg_skip",
")",
">=",
"pos_count",
":",
"return",
"neg_skip",
"return",
"neg_skip"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
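A short illustration of why this helper exists, using an invented input and assuming the same molvs.charge import as above: in charge-separated nitromethane the nitro oxygen lands in neg_skip, so uncharging leaves the valid N+/O- pair alone.

from rdkit import Chem
from molvs.charge import Uncharger

mol = Chem.MolFromSmiles('C[N+](=O)[O-]')
# The nitro O- is skipped (and also balances the N+), so nothing changes.
print(Chem.MolToSmiles(Uncharger().uncharge(mol)))  # expected unchanged: C[N+](=O)[O-]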
test
|
FragmentRemover.remove
|
Return the molecule with specified fragments removed.
:param mol: The molecule to remove fragments from.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with fragments removed.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/fragment.py
|
def remove(self, mol):
"""Return the molecule with specified fragments removed.
:param mol: The molecule to remove fragments from.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with fragments removed.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running FragmentRemover')
# Iterate FragmentPatterns and remove matching fragments
for frag in self.fragments:
# If nothing is left or leave_last and only one fragment, end here
if mol.GetNumAtoms() == 0 or (self.leave_last and len(Chem.GetMolFrags(mol)) <= 1):
break
# Apply removal for this FragmentPattern
removed = Chem.DeleteSubstructs(mol, frag.smarts, onlyFrags=True)
if not mol.GetNumAtoms() == removed.GetNumAtoms():
log.info('Removed fragment: %s', frag.name)
if self.leave_last and removed.GetNumAtoms() == 0:
# All the remaining fragments match this pattern - leave them all
break
mol = removed
return mol
|
def remove(self, mol):
"""Return the molecule with specified fragments removed.
:param mol: The molecule to remove fragments from.
:type mol: rdkit.Chem.rdchem.Mol
:return: The molecule with fragments removed.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running FragmentRemover')
# Iterate FragmentPatterns and remove matching fragments
for frag in self.fragments:
# If nothing is left or leave_last and only one fragment, end here
if mol.GetNumAtoms() == 0 or (self.leave_last and len(Chem.GetMolFrags(mol)) <= 1):
break
# Apply removal for this FragmentPattern
removed = Chem.DeleteSubstructs(mol, frag.smarts, onlyFrags=True)
if not mol.GetNumAtoms() == removed.GetNumAtoms():
log.info('Removed fragment: %s', frag.name)
if self.leave_last and removed.GetNumAtoms() == 0:
# All the remaining fragments match this pattern - leave them all
break
mol = removed
return mol
|
[
"Return",
"the",
"molecule",
"with",
"specified",
"fragments",
"removed",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/fragment.py#L157-L179
|
[
"def",
"remove",
"(",
"self",
",",
"mol",
")",
":",
"log",
".",
"debug",
"(",
"'Running FragmentRemover'",
")",
"# Iterate FragmentPatterns and remove matching fragments",
"for",
"frag",
"in",
"self",
".",
"fragments",
":",
"# If nothing is left or leave_last and only one fragment, end here",
"if",
"mol",
".",
"GetNumAtoms",
"(",
")",
"==",
"0",
"or",
"(",
"self",
".",
"leave_last",
"and",
"len",
"(",
"Chem",
".",
"GetMolFrags",
"(",
"mol",
")",
")",
"<=",
"1",
")",
":",
"break",
"# Apply removal for this FragmentPattern",
"removed",
"=",
"Chem",
".",
"DeleteSubstructs",
"(",
"mol",
",",
"frag",
".",
"smarts",
",",
"onlyFrags",
"=",
"True",
")",
"if",
"not",
"mol",
".",
"GetNumAtoms",
"(",
")",
"==",
"removed",
".",
"GetNumAtoms",
"(",
")",
":",
"log",
".",
"info",
"(",
"'Removed fragment: %s'",
",",
"frag",
".",
"name",
")",
"if",
"self",
".",
"leave_last",
"and",
"removed",
".",
"GetNumAtoms",
"(",
")",
"==",
"0",
":",
"# All the remaining fragments match this pattern - leave them all",
"break",
"mol",
"=",
"removed",
"return",
"mol"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
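A minimal sketch, assuming FragmentRemover is importable from molvs.fragment as the path above suggests, and that sodium and chloride ions match its default fragment patterns; the benzene/salt mixture is an invented example.

from rdkit import Chem
from molvs.fragment import FragmentRemover

mol = Chem.MolFromSmiles('c1ccccc1.[Na+].[Cl-]')
# Both ions should be deleted, leaving the organic component intact.
print(Chem.MolToSmiles(FragmentRemover().remove(mol)))  # expected: c1ccccc1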
test
|
LargestFragmentChooser.choose
|
Return the largest covalent unit.
The largest fragment is determined by number of atoms (including hydrogens). Ties are broken by taking the
fragment with the higher molecular weight, and then by taking the first alphabetically by SMILES if needed.
:param mol: The molecule to choose the largest fragment from.
:type mol: rdkit.Chem.rdchem.Mol
:return: The largest fragment.
:rtype: rdkit.Chem.rdchem.Mol
|
molvs/fragment.py
|
def choose(self, mol):
"""Return the largest covalent unit.
The largest fragment is determined by number of atoms (including hydrogens). Ties are broken by taking the
fragment with the higher molecular weight, and then by taking the first alphabetically by SMILES if needed.
:param mol: The molecule to choose the largest fragment from.
:type mol: rdkit.Chem.rdchem.Mol
:return: The largest fragment.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running LargestFragmentChooser')
# TODO: Alternatively allow a list of fragments to be passed as the mol parameter
fragments = Chem.GetMolFrags(mol, asMols=True)
largest = None
for f in fragments:
smiles = Chem.MolToSmiles(f, isomericSmiles=True)
log.debug('Fragment: %s', smiles)
organic = is_organic(f)
if self.prefer_organic:
# Skip this fragment if not organic and we already have an organic fragment as the largest so far
if largest and largest['organic'] and not organic:
continue
# Reset largest if it wasn't organic and this fragment is organic
if largest and organic and not largest['organic']:
largest = None
# Count atoms
atoms = 0
for a in f.GetAtoms():
atoms += 1 + a.GetTotalNumHs()
# Skip this fragment if fewer atoms than the largest
if largest and atoms < largest['atoms']:
continue
# Skip this fragment if equal number of atoms but weight is lower
weight = rdMolDescriptors.CalcExactMolWt(f)
if largest and atoms == largest['atoms'] and weight < largest['weight']:
continue
# Skip this fragment if equal atoms and equal weight but smiles comes last alphabetically
if largest and atoms == largest['atoms'] and weight == largest['weight'] and smiles > largest['smiles']:
continue
# Otherwise this is the largest so far
log.debug('New largest fragment: %s (%s)', smiles, atoms)
largest = {'smiles': smiles, 'fragment': f, 'atoms': atoms, 'weight': weight, 'organic': organic}
return largest['fragment']
|
def choose(self, mol):
"""Return the largest covalent unit.
The largest fragment is determined by number of atoms (including hydrogens). Ties are broken by taking the
fragment with the higher molecular weight, and then by taking the first alphabetically by SMILES if needed.
:param mol: The molecule to choose the largest fragment from.
:type mol: rdkit.Chem.rdchem.Mol
:return: The largest fragment.
:rtype: rdkit.Chem.rdchem.Mol
"""
log.debug('Running LargestFragmentChooser')
# TODO: Alternatively allow a list of fragments to be passed as the mol parameter
fragments = Chem.GetMolFrags(mol, asMols=True)
largest = None
for f in fragments:
smiles = Chem.MolToSmiles(f, isomericSmiles=True)
log.debug('Fragment: %s', smiles)
organic = is_organic(f)
if self.prefer_organic:
# Skip this fragment if not organic and we already have an organic fragment as the largest so far
if largest and largest['organic'] and not organic:
continue
# Reset largest if it wasn't organic and this fragment is organic
if largest and organic and not largest['organic']:
largest = None
# Count atoms
atoms = 0
for a in f.GetAtoms():
atoms += 1 + a.GetTotalNumHs()
# Skip this fragment if fewer atoms than the largest
if largest and atoms < largest['atoms']:
continue
# Skip this fragment if equal number of atoms but weight is lower
weight = rdMolDescriptors.CalcExactMolWt(f)
if largest and atoms == largest['atoms'] and weight < largest['weight']:
continue
# Skip this fragment if equal atoms and equal weight but smiles comes last alphabetically
if largest and atoms == largest['atoms'] and weight == largest['weight'] and smiles > largest['smiles']:
continue
# Otherwise this is the largest so far
log.debug('New largest fragment: %s (%s)', smiles, atoms)
largest = {'smiles': smiles, 'fragment': f, 'atoms': atoms, 'weight': weight, 'organic': organic}
return largest['fragment']
|
[
"Return",
"the",
"largest",
"covalent",
"unit",
"."
] |
mcs07/MolVS
|
python
|
https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/fragment.py#L200-L243
|
[
"def",
"choose",
"(",
"self",
",",
"mol",
")",
":",
"log",
".",
"debug",
"(",
"'Running LargestFragmentChooser'",
")",
"# TODO: Alternatively allow a list of fragments to be passed as the mol parameter",
"fragments",
"=",
"Chem",
".",
"GetMolFrags",
"(",
"mol",
",",
"asMols",
"=",
"True",
")",
"largest",
"=",
"None",
"for",
"f",
"in",
"fragments",
":",
"smiles",
"=",
"Chem",
".",
"MolToSmiles",
"(",
"f",
",",
"isomericSmiles",
"=",
"True",
")",
"log",
".",
"debug",
"(",
"'Fragment: %s'",
",",
"smiles",
")",
"organic",
"=",
"is_organic",
"(",
"f",
")",
"if",
"self",
".",
"prefer_organic",
":",
"# Skip this fragment if not organic and we already have an organic fragment as the largest so far",
"if",
"largest",
"and",
"largest",
"[",
"'organic'",
"]",
"and",
"not",
"organic",
":",
"continue",
"# Reset largest if it wasn't organic and this fragment is organic",
"if",
"largest",
"and",
"organic",
"and",
"not",
"largest",
"[",
"'organic'",
"]",
":",
"largest",
"=",
"None",
"# Count atoms",
"atoms",
"=",
"0",
"for",
"a",
"in",
"f",
".",
"GetAtoms",
"(",
")",
":",
"atoms",
"+=",
"1",
"+",
"a",
".",
"GetTotalNumHs",
"(",
")",
"# Skip this fragment if fewer atoms than the largest",
"if",
"largest",
"and",
"atoms",
"<",
"largest",
"[",
"'atoms'",
"]",
":",
"continue",
"# Skip this fragment if equal number of atoms but weight is lower",
"weight",
"=",
"rdMolDescriptors",
".",
"CalcExactMolWt",
"(",
"f",
")",
"if",
"largest",
"and",
"atoms",
"==",
"largest",
"[",
"'atoms'",
"]",
"and",
"weight",
"<",
"largest",
"[",
"'weight'",
"]",
":",
"continue",
"# Skip this fragment if equal atoms and equal weight but smiles comes last alphabetically",
"if",
"largest",
"and",
"atoms",
"==",
"largest",
"[",
"'atoms'",
"]",
"and",
"weight",
"==",
"largest",
"[",
"'weight'",
"]",
"and",
"smiles",
">",
"largest",
"[",
"'smiles'",
"]",
":",
"continue",
"# Otherwise this is the largest so far",
"log",
".",
"debug",
"(",
"'New largest fragment: %s (%s)'",
",",
"smiles",
",",
"atoms",
")",
"largest",
"=",
"{",
"'smiles'",
":",
"smiles",
",",
"'fragment'",
":",
"f",
",",
"'atoms'",
":",
"atoms",
",",
"'weight'",
":",
"weight",
",",
"'organic'",
":",
"organic",
"}",
"return",
"largest",
"[",
"'fragment'",
"]"
] |
d815fe52d160abcecbcbf117e6437bf727dbd8ad
|
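A minimal sketch, assuming LargestFragmentChooser is importable from molvs.fragment; the ethanol/water mixture is an invented example showing that atom counts include hydrogens (ethanol: 9 atoms, water: 3).

from rdkit import Chem
from molvs.fragment import LargestFragmentChooser

mol = Chem.MolFromSmiles('CCO.O')
print(Chem.MolToSmiles(LargestFragmentChooser().choose(mol)))  # expected: CCO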
test
|
integrate_ivp
|
Example program integrating an IVP problem of van der Pol oscillator
|
examples/van_der_pol.py
|
def integrate_ivp(u0=1.0, v0=0.0, mu=1.0, tend=10.0, dt0=1e-8, nt=0,
nsteps=600, t0=0.0, atol=1e-8, rtol=1e-8, plot=False,
savefig='None', method='bdf', dpi=100, verbose=False):
"""
Example program integrating an IVP problem of van der Pol oscillator
"""
f, j = get_f_and_j(mu)
if nt > 1:
tout = np.linspace(t0, tend, nt)
yout, nfo = integrate_predefined(
f, j, [u0, v0], tout, dt0, atol, rtol, nsteps=nsteps,
check_indexing=False, method=method)
else:
tout, yout, nfo = integrate_adaptive(
f, j, [u0, v0], t0, tend, dt0, atol, rtol, nsteps=nsteps,
check_indexing=False, method=method) # dfdt[:] also for len == 1
if verbose:
print(nfo)
if plot:
import matplotlib.pyplot as plt
plt.plot(tout, yout[:, 1], 'g--')
plt.plot(tout, yout[:, 0], 'k-', linewidth=2)
if savefig == 'None':
plt.show()
else:
plt.savefig(savefig, dpi=dpi)
|
def integrate_ivp(u0=1.0, v0=0.0, mu=1.0, tend=10.0, dt0=1e-8, nt=0,
nsteps=600, t0=0.0, atol=1e-8, rtol=1e-8, plot=False,
savefig='None', method='bdf', dpi=100, verbose=False):
"""
Example program integrating an IVP problem of van der Pol oscillator
"""
f, j = get_f_and_j(mu)
if nt > 1:
tout = np.linspace(t0, tend, nt)
yout, nfo = integrate_predefined(
f, j, [u0, v0], tout, dt0, atol, rtol, nsteps=nsteps,
check_indexing=False, method=method)
else:
tout, yout, nfo = integrate_adaptive(
f, j, [u0, v0], t0, tend, dt0, atol, rtol, nsteps=nsteps,
check_indexing=False, method=method) # dfdt[:] also for len == 1
if verbose:
print(nfo)
if plot:
import matplotlib.pyplot as plt
plt.plot(tout, yout[:, 1], 'g--')
plt.plot(tout, yout[:, 0], 'k-', linewidth=2)
if savefig == 'None':
plt.show()
else:
plt.savefig(savefig, dpi=dpi)
|
[
"Example",
"program",
"integrating",
"an",
"IVP",
"problem",
"of",
"van",
"der",
"Pol",
"oscillator"
] |
bjodah/pycvodes
|
python
|
https://github.com/bjodah/pycvodes/blob/00637a682d363319bc5c7c73a78f033556fde8a5/examples/van_der_pol.py#L25-L50
|
[
"def",
"integrate_ivp",
"(",
"u0",
"=",
"1.0",
",",
"v0",
"=",
"0.0",
",",
"mu",
"=",
"1.0",
",",
"tend",
"=",
"10.0",
",",
"dt0",
"=",
"1e-8",
",",
"nt",
"=",
"0",
",",
"nsteps",
"=",
"600",
",",
"t0",
"=",
"0.0",
",",
"atol",
"=",
"1e-8",
",",
"rtol",
"=",
"1e-8",
",",
"plot",
"=",
"False",
",",
"savefig",
"=",
"'None'",
",",
"method",
"=",
"'bdf'",
",",
"dpi",
"=",
"100",
",",
"verbose",
"=",
"False",
")",
":",
"f",
",",
"j",
"=",
"get_f_and_j",
"(",
"mu",
")",
"if",
"nt",
">",
"1",
":",
"tout",
"=",
"np",
".",
"linspace",
"(",
"t0",
",",
"tend",
",",
"nt",
")",
"yout",
",",
"nfo",
"=",
"integrate_predefined",
"(",
"f",
",",
"j",
",",
"[",
"u0",
",",
"v0",
"]",
",",
"tout",
",",
"dt0",
",",
"atol",
",",
"rtol",
",",
"nsteps",
"=",
"nsteps",
",",
"check_indexing",
"=",
"False",
",",
"method",
"=",
"method",
")",
"else",
":",
"tout",
",",
"yout",
",",
"nfo",
"=",
"integrate_adaptive",
"(",
"f",
",",
"j",
",",
"[",
"u0",
",",
"v0",
"]",
",",
"t0",
",",
"tend",
",",
"dt0",
",",
"atol",
",",
"rtol",
",",
"nsteps",
"=",
"nsteps",
",",
"check_indexing",
"=",
"False",
",",
"method",
"=",
"method",
")",
"# dfdt[:] also for len == 1",
"if",
"verbose",
":",
"print",
"(",
"nfo",
")",
"if",
"plot",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"plt",
".",
"plot",
"(",
"tout",
",",
"yout",
"[",
":",
",",
"1",
"]",
",",
"'g--'",
")",
"plt",
".",
"plot",
"(",
"tout",
",",
"yout",
"[",
":",
",",
"0",
"]",
",",
"'k-'",
",",
"linewidth",
"=",
"2",
")",
"if",
"savefig",
"==",
"'None'",
":",
"plt",
".",
"show",
"(",
")",
"else",
":",
"plt",
".",
"savefig",
"(",
"savefig",
",",
"dpi",
"=",
"dpi",
")"
] |
00637a682d363319bc5c7c73a78f033556fde8a5
|
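get_f_and_j(mu) is not shown in this record; the following is a hypothetical reconstruction of the rhs/jacobian pair it presumably returns for the van der Pol system u' = v, v' = mu*(1 - u**2)*v - u, written against the in-place callback signatures documented for the pycvodes integrators below.

mu = 1.0  # hypothetical stand-in for the closure variable bound by get_f_and_j(mu)

def f(t, y, fout):
    fout[0] = y[1]                                # du/dt = v
    fout[1] = -y[0] + mu * (1 - y[0]**2) * y[1]   # dv/dt = mu*(1 - u^2)*v - u

def j(t, y, jmat_out, dfdx_out):
    jmat_out[0, 0] = 0.0
    jmat_out[0, 1] = 1.0
    jmat_out[1, 0] = -1.0 - 2 * mu * y[0] * y[1]
    jmat_out[1, 1] = mu * (1 - y[0]**2)
    dfdx_out[0] = 0.0                             # autonomous: no explicit t-dependence
    dfdx_out[1] = 0.0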
test
|
integrate_adaptive
|
Integrates a system of ordinary differential equations.
Solves the initial value problem (IVP) defined by the user supplied
arguments. The solver chooses at what values of the independent variable
results should be reported.
Parameters
----------
rhs : callable
Function with signature f(t, y, fout) which modifies fout *inplace*.
jac : callable
Function with signature either jac(t, y, jmat_out, dfdx_out) for
dense/banded jacobians, or jac(t, y, data, colptrs, rowvals) for
sparse (CSC) jacobians. ``jac`` should modify ``jmat_out``, ``dfdx_out``
(dense, banded) or (``data``, ``colptrs``, ``rowvals``) *inplace*.
(see also ``lband``, ``uband``, ``nnz``)
y0 : array_like
Initial values of the dependent variables.
x0 : float
Initial value of the independent variable.
xend : float
Stopping value for the independent variable.
dx0 : float
Initial step-size.
atol : float
Absolute tolerance.
rtol : float
Relative tolerance.
dx_min : float
Minimum step (default: 0.0).
dx_max : float
Maximum step (default: 0.0).
nsteps : int
Maximum number of steps (default: 500).
method : str
One of: 'adams' or 'bdf' (default: 'bdf')
nderiv : int
Number of derivatives (default: 0).
roots : callback
With signature ``roots(x, yarr[:ny], out[:nroots]) -> None``.
nroots : int
Number of root functions in roots.
return_on_root : bool
Exit early (on first found root).
check_callable : bool
        Perform signature sanity checks on ``rhs`` and ``jac``.
check_indexing : bool
Perform item setting sanity checks on ``rhs`` and ``jac``.
\*\*kwargs:
'lband' : int
Number of lower bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'uband' : int
Number of upper bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'iter_type' : str (default: 'default')
One of: 'default', 'functional', 'newton'
'linear_solver': str (default: 'default')
One of: 'default', 'dense', 'banded', 'gmres',
'gmres_classic', 'bicgstab', 'tfqmr'
'return_on_error' : bool
            Returns on error without raising an exception (with ``'success'==False``).
'autorestart' : int
Useful for autonomous systems where conditions change during integration.
Will restart the integration with ``x==0``. Maximum number of steps is then
given by ``autorestart * nsteps``.
'record_rhs_xvals' : bool
When True: will return x values for rhs calls in ``info['rhs_xvals']``.
'record_jac_xvals' : bool
When True will return x values for jac calls in ``info['jac_xvals']``.
'record_order' : bool
When True will return used time stepper order in ``info['orders']``.
'record_fpe' : bool
When True will return observed floating point errors in ``info['fpes']``. (see ``fpes``)
'record_steps' : bool
When True will return stepsizes taken in ``info['steps']``.
'dx0cb' : callable
Callback for calculating dx0 (make sure to pass ``dx0==0.0``) to enable.
Signature: ``f(x, y[:]) -> float``.
'dx_max_cb' : callable
Callback for calculating dx_max.
Signature: ``f(x, y[:]) -> float``.
        'autonomous_exprs' : bool
            Whether expressions contain the independent variable (if not, autorestart
            is allowed to shift the independent variable to zero at restart).
'nnz' : int
Maximum number of nonzero entries in the sparse (CSC) jacobian (default: -1).
Must set ``nnz >= 0`` and ``linear_solver`` to 'klu' to enable use of sparse
``jac`` signature.
'jtimes' : callable
Function with signature f(v, Jv, t, y, fy) to calculate the product of the
Jacobian evaluated at t, y with a vector v. Should modify Jv *inplace*.
For use with linear solvers 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'.
'ew_ele' : bool
Whether to return error_weights, estimated_local_errors in info dict.
'constraints': array
Per component constraints 0.0: no constraint, 1.0: >=0, -1.0: <=0, 2.0: >0.0, -2.0: <0.0.
Returns
-------
(xout, yout, info):
xout: 1-dimensional array of values for the independent variable
yout: 2-dimensional array of the dependent variables (axis 1) for
values corresponding to xout (axis 0).
info: Dictionary with information about the integration.
|
pycvodes/__init__.py
|
def integrate_adaptive(rhs, jac, y0, x0, xend, atol, rtol, dx0=.0,
dx_min=.0, dx_max=.0, nsteps=500, method=None, nderiv=0,
roots=None, nroots=0, return_on_root=False,
check_callable=False, check_indexing=False,
**kwargs):
""" Integrates a system of ordinary differential equations.
Solves the initial value problem (IVP) defined by the user supplied
arguments. The solver chooses at what values of the independent variable
results should be reported.
Parameters
----------
rhs : callable
Function with signature f(t, y, fout) which modifies fout *inplace*.
jac : callable
Function with signature either jac(t, y, jmat_out, dfdx_out) for
dense/banded jacobians, or jac(t, y, data, colptrs, rowvals) for
sparse (CSC) jacobians. ``jac`` should modify ``jmat_out``, ``dfdx_out``
(dense, banded) or (``data``, ``colptrs``, ``rowvals``) *inplace*.
(see also ``lband``, ``uband``, ``nnz``)
y0 : array_like
Initial values of the dependent variables.
x0 : float
Initial value of the independent variable.
xend : float
Stopping value for the independent variable.
dx0 : float
Initial step-size.
atol : float
Absolute tolerance.
rtol : float
Relative tolerance.
dx_min : float
Minimum step (default: 0.0).
dx_max : float
Maximum step (default: 0.0).
nsteps : int
Maximum number of steps (default: 500).
method : str
One of: 'adams' or 'bdf' (default: 'bdf')
nderiv : int
Number of derivatives (default: 0).
roots : callback
With signature ``roots(x, yarr[:ny], out[:nroots]) -> None``.
nroots : int
Number of root functions in roots.
return_on_root : bool
Exit early (on first found root).
check_callable : bool
        Perform signature sanity checks on ``rhs`` and ``jac``.
check_indexing : bool
Perform item setting sanity checks on ``rhs`` and ``jac``.
\*\*kwargs:
'lband' : int
Number of lower bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'uband' : int
Number of upper bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'iter_type' : str (default: 'default')
One of: 'default', 'functional', 'newton'
'linear_solver': str (default: 'default')
One of: 'default', 'dense', 'banded', 'gmres',
'gmres_classic', 'bicgstab', 'tfqmr'
'return_on_error' : bool
            Returns on error without raising an exception (with ``'success'==False``).
'autorestart' : int
Useful for autonomous systems where conditions change during integration.
Will restart the integration with ``x==0``. Maximum number of steps is then
given by ``autorestart * nsteps``.
'record_rhs_xvals' : bool
When True: will return x values for rhs calls in ``info['rhs_xvals']``.
'record_jac_xvals' : bool
When True will return x values for jac calls in ``info['jac_xvals']``.
'record_order' : bool
When True will return used time stepper order in ``info['orders']``.
'record_fpe' : bool
When True will return observed floating point errors in ``info['fpes']``. (see ``fpes``)
'record_steps' : bool
When True will return stepsizes taken in ``info['steps']``.
'dx0cb' : callable
Callback for calculating dx0 (make sure to pass ``dx0==0.0``) to enable.
Signature: ``f(x, y[:]) -> float``.
'dx_max_cb' : callable
Callback for calculating dx_max.
Signature: ``f(x, y[:]) -> float``.
        'autonomous_exprs' : bool
            Whether expressions contain the independent variable (if not, autorestart
            is allowed to shift the independent variable to zero at restart).
'nnz' : int
Maximum number of nonzero entries in the sparse (CSC) jacobian (default: -1).
Must set ``nnz >= 0`` and ``linear_solver`` to 'klu' to enable use of sparse
``jac`` signature.
'jtimes' : callable
Function with signature f(v, Jv, t, y, fy) to calculate the product of the
Jacobian evaluated at t, y with a vector v. Should modify Jv *inplace*.
For use with linear solvers 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'.
'ew_ele' : bool
Whether to return error_weights, estimated_local_errors in info dict.
'constraints': array
Per component constraints 0.0: no constraint, 1.0: >=0, -1.0: <=0, 2.0: >0.0, -2.0: <0.0.
Returns
-------
(xout, yout, info):
xout: 1-dimensional array of values for the independent variable
yout: 2-dimensional array of the dependent variables (axis 1) for
values corresponding to xout (axis 0).
info: Dictionary with information about the integration.
"""
# Sanity checks to reduce risk of having a segfault:
lband, uband = kwargs.get('lband', None), kwargs.get('uband', None)
nnz = kwargs.get('nnz', None)
_check_jac_type(lband=lband, uband=uband, nnz=nnz)
if check_callable:
_check_callable(rhs, jac, x0, y0, lband, uband, nnz)
if check_indexing:
_check_indexing(rhs, jac, x0, y0, lband, uband, nnz)
return adaptive(rhs, jac, np.ascontiguousarray(y0, dtype=np.float64), x0, xend,
atol, rtol, method or ('adams' if jac is None else 'bdf'),
nsteps, dx0, dx_min, dx_max, nderiv=nderiv, roots=roots, nroots=nroots,
return_on_root=return_on_root, **kwargs)
|
def integrate_adaptive(rhs, jac, y0, x0, xend, atol, rtol, dx0=.0,
dx_min=.0, dx_max=.0, nsteps=500, method=None, nderiv=0,
roots=None, nroots=0, return_on_root=False,
check_callable=False, check_indexing=False,
**kwargs):
""" Integrates a system of ordinary differential equations.
Solves the initial value problem (IVP) defined by the user supplied
arguments. The solver chooses at what values of the independent variable
results should be reported.
Parameters
----------
rhs : callable
Function with signature f(t, y, fout) which modifies fout *inplace*.
jac : callable
Function with signature either jac(t, y, jmat_out, dfdx_out) for
dense/banded jacobians, or jac(t, y, data, colptrs, rowvals) for
sparse (CSC) jacobians. ``jac`` should modify ``jmat_out``, ``dfdx_out``
(dense, banded) or (``data``, ``colptrs``, ``rowvals``) *inplace*.
(see also ``lband``, ``uband``, ``nnz``)
y0 : array_like
Initial values of the dependent variables.
x0 : float
Initial value of the independent variable.
xend : float
Stopping value for the independent variable.
dx0 : float
Initial step-size.
atol : float
Absolute tolerance.
rtol : float
Relative tolerance.
dx_min : float
Minimum step (default: 0.0).
dx_max : float
Maximum step (default: 0.0).
nsteps : int
Maximum number of steps (default: 500).
method : str
One of: 'adams' or 'bdf' (default: 'bdf')
nderiv : int
Number of derivatives (default: 0).
roots : callback
With signature ``roots(x, yarr[:ny], out[:nroots]) -> None``.
nroots : int
Number of root functions in roots.
return_on_root : bool
Exit early (on first found root).
check_callable : bool
        Perform signature sanity checks on ``rhs`` and ``jac``.
check_indexing : bool
Perform item setting sanity checks on ``rhs`` and ``jac``.
\*\*kwargs:
'lband' : int
Number of lower bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'uband' : int
Number of upper bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'iter_type' : str (default: 'default')
One of: 'default', 'functional', 'newton'
'linear_solver': str (default: 'default')
One of: 'default', 'dense', 'banded', 'gmres',
'gmres_classic', 'bicgstab', 'tfqmr'
'return_on_error' : bool
            Returns on error without raising an exception (with ``'success'==False``).
'autorestart' : int
Useful for autonomous systems where conditions change during integration.
Will restart the integration with ``x==0``. Maximum number of steps is then
given by ``autorestart * nsteps``.
'record_rhs_xvals' : bool
When True: will return x values for rhs calls in ``info['rhs_xvals']``.
'record_jac_xvals' : bool
When True will return x values for jac calls in ``info['jac_xvals']``.
'record_order' : bool
When True will return used time stepper order in ``info['orders']``.
'record_fpe' : bool
When True will return observed floating point errors in ``info['fpes']``. (see ``fpes``)
'record_steps' : bool
When True will return stepsizes taken in ``info['steps']``.
'dx0cb' : callable
Callback for calculating dx0 (make sure to pass ``dx0==0.0``) to enable.
Signature: ``f(x, y[:]) -> float``.
'dx_max_cb' : callable
Callback for calculating dx_max.
Signature: ``f(x, y[:]) -> float``.
        'autonomous_exprs' : bool
            Whether expressions contain the independent variable (if not, autorestart
            is allowed to shift the independent variable to zero at restart).
'nnz' : int
Maximum number of nonzero entries in the sparse (CSC) jacobian (default: -1).
Must set ``nnz >= 0`` and ``linear_solver`` to 'klu' to enable use of sparse
``jac`` signature.
'jtimes' : callable
Function with signature f(v, Jv, t, y, fy) to calculate the product of the
Jacobian evaluated at t, y with a vector v. Should modify Jv *inplace*.
For use with linear solvers 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'.
'ew_ele' : bool
Whether to return error_weights, estimated_local_errors in info dict.
'constraints': array
Per component constraints 0.0: no constraint, 1.0: >=0, -1.0: <=0, 2.0: >0.0, -2.0: <0.0.
Returns
-------
(xout, yout, info):
xout: 1-dimensional array of values for the independent variable
yout: 2-dimensional array of the dependent variables (axis 1) for
values corresponding to xout (axis 0).
info: Dictionary with information about the integration.
"""
# Sanity checks to reduce risk of having a segfault:
lband, uband = kwargs.get('lband', None), kwargs.get('uband', None)
nnz = kwargs.get('nnz', None)
_check_jac_type(lband=lband, uband=uband, nnz=nnz)
if check_callable:
_check_callable(rhs, jac, x0, y0, lband, uband, nnz)
if check_indexing:
_check_indexing(rhs, jac, x0, y0, lband, uband, nnz)
return adaptive(rhs, jac, np.ascontiguousarray(y0, dtype=np.float64), x0, xend,
atol, rtol, method or ('adams' if jac is None else 'bdf'),
nsteps, dx0, dx_min, dx_max, nderiv=nderiv, roots=roots, nroots=nroots,
return_on_root=return_on_root, **kwargs)
|
[
"Integrates",
"a",
"system",
"of",
"ordinary",
"differential",
"equations",
"."
] |
bjodah/pycvodes
|
python
|
https://github.com/bjodah/pycvodes/blob/00637a682d363319bc5c7c73a78f033556fde8a5/pycvodes/__init__.py#L21-L147
|
[
"def",
"integrate_adaptive",
"(",
"rhs",
",",
"jac",
",",
"y0",
",",
"x0",
",",
"xend",
",",
"atol",
",",
"rtol",
",",
"dx0",
"=",
".0",
",",
"dx_min",
"=",
".0",
",",
"dx_max",
"=",
".0",
",",
"nsteps",
"=",
"500",
",",
"method",
"=",
"None",
",",
"nderiv",
"=",
"0",
",",
"roots",
"=",
"None",
",",
"nroots",
"=",
"0",
",",
"return_on_root",
"=",
"False",
",",
"check_callable",
"=",
"False",
",",
"check_indexing",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# Sanity checks to reduce risk of having a segfault:",
"lband",
",",
"uband",
"=",
"kwargs",
".",
"get",
"(",
"'lband'",
",",
"None",
")",
",",
"kwargs",
".",
"get",
"(",
"'uband'",
",",
"None",
")",
"nnz",
"=",
"kwargs",
".",
"get",
"(",
"'nnz'",
",",
"None",
")",
"_check_jac_type",
"(",
"lband",
"=",
"lband",
",",
"uband",
"=",
"uband",
",",
"nnz",
"=",
"nnz",
")",
"if",
"check_callable",
":",
"_check_callable",
"(",
"rhs",
",",
"jac",
",",
"x0",
",",
"y0",
",",
"lband",
",",
"uband",
",",
"nnz",
")",
"if",
"check_indexing",
":",
"_check_indexing",
"(",
"rhs",
",",
"jac",
",",
"x0",
",",
"y0",
",",
"lband",
",",
"uband",
",",
"nnz",
")",
"return",
"adaptive",
"(",
"rhs",
",",
"jac",
",",
"np",
".",
"ascontiguousarray",
"(",
"y0",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
",",
"x0",
",",
"xend",
",",
"atol",
",",
"rtol",
",",
"method",
"or",
"(",
"'adams'",
"if",
"jac",
"is",
"None",
"else",
"'bdf'",
")",
",",
"nsteps",
",",
"dx0",
",",
"dx_min",
",",
"dx_max",
",",
"nderiv",
"=",
"nderiv",
",",
"roots",
"=",
"roots",
",",
"nroots",
"=",
"nroots",
",",
"return_on_root",
"=",
"return_on_root",
",",
"*",
"*",
"kwargs",
")"
] |
00637a682d363319bc5c7c73a78f033556fde8a5
|
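A minimal end-to-end sketch against the signature documented above; the first-order decay system is an invented example, and only the argument order and in-place callback conventions stated in the docstring are assumed.

import numpy as np
from pycvodes import integrate_adaptive

def rhs(t, y, fout):
    fout[0] = -0.7 * y[0]          # y' = -k*y with k = 0.7

def jac(t, y, jmat_out, dfdx_out):
    jmat_out[0, 0] = -0.7
    dfdx_out[0] = 0.0

xout, yout, info = integrate_adaptive(rhs, jac, [1.0], 0.0, 5.0, 1e-9, 1e-9,
                                      check_indexing=True)
print(yout[-1, 0], np.exp(-0.7 * 5.0))  # numeric endpoint vs analytic solution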
test
|
integrate_predefined
|
Integrates a system of ordinary differential equations.
Solves the initial value problem (IVP) defined by the user supplied
arguments. The user chooses at what values of the independent variable
results should be reported.
Parameters
----------
rhs : callable
Function with signature f(t, y, fout) which modifies fout *inplace*.
jac : callable
Function with signature either jac(t, y, jmat_out, dfdx_out) for
dense/banded jacobians, or jac(t, y, data, colptrs, rowvals) for
sparse (CSC) jacobians. ``jac`` should modify ``jmat_out``, ``dfdx_out``
(dense, banded) or (``data``, ``colptrs``, ``rowvals``) *inplace*.
(see also ``lband``, ``uband``, ``nnz``)
y0 : array_like
Initial values of the dependent variables.
xout : array_like
Values of the independent variable.
dx0 : float
Initial step-size.
atol : float
Absolute tolerance.
rtol : float
Relative tolerance.
dx_min : float
Minimum step (default: 0.0).
dx_max : float
Maximum step (default: 0.0).
nsteps : int
Maximum number of steps (default: 500).
method : str
One of: 'adams' or 'bdf' (default: 'bdf').
nderiv : int
Number of derivatives (default: 0).
roots : callback (default: None)
With signature ``roots(x, yarr[:ny], out[:nroots]) -> None``,
see info['root_indices'], note that xout is unaffected.
nroots : int (default: 0)
Number of root functions in roots.
check_callable : bool (default: False)
Perform signature sanity checks on ``rhs`` and ``jac``.
check_indexing : bool (default: False)
Perform item setting sanity checks on ``rhs`` and ``jac``.
\*\*kwargs:
'lband' : int
Number of lower bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'uband' : int
Number of upper bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'iter_type' : str (default: 'default')
One of: 'default', 'functional', 'newton'.
'linear_solver' : str (default: 'default')
One of: 'default', 'dense', 'banded', 'gmres',
'gmres_classic', 'bicgstab', 'tfqmr', 'klu'.
'return_on_error' : bool
            Returns on error without raising an exception (with ``'success'==False``).
'autorestart' : int
Useful for autonomous systems where conditions change during integration.
Will restart the integration with ``x==0``. Maximum number of steps is then
given by ``2**autorestart * nsteps``.
'record_rhs_xvals' : bool
When True: will return x values for rhs calls in ``info['rhs_xvals']``.
'record_jac_xvals' : bool
When True will return x values for jac calls in ``info['jac_xvals']``.
'record_order' : bool
When True will return used time stepper order in ``info['orders']``.
'record_fpe' : bool
When True will return observed floating point errors in ``info['fpes']``. (see ``fpes``)
'record_steps' : bool
When True will return stepsizes taken in ``info['steps']``.
'dx0cb': callable
Callback for calculating dx0 (make sure to pass ``dx0==0.0`` to enable).
Signature: ``f(x, y[:]) -> float``.
'dx_max_cb' : callable
Callback for calculating dx_max.
Signature: ``f(x, y[:]) -> float``.
'autonomous_exprs' : bool
Whether expressions contain the independent variable. If not, autorestart
is allowed to shift the independent variable to zero at restart.
'nnz' : int
Maximum number of nonzero entries in the sparse (CSC) jacobian (default: -1).
Must set ``nnz >= 0`` and ``linear_solver`` to 'klu' to enable use of sparse
``jac`` signature.
'jtimes' : callable
Function with signature f(v, Jv, t, y, fy) to calculate the product of the
Jacobian evaluated at t, y with a vector v. Should modify Jv *inplace*.
For use with linear solvers 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'.
'ew_ele' : bool
Whether to return error_weights, estimated_local_errors in info dict.
'constraints': array
Per component constraints 0.0: no constraint, 1.0: >=0, -1.0: <=0, 2.0: >0.0, -2.0: <0.0.
Returns
-------
(yout, info):
yout: 2-dimensional array of the dependent variables (axis 1) for
values corresponding to xout (axis 0)
info: Dictionary with information about the integration.
|
pycvodes/__init__.py
|
def integrate_predefined(rhs, jac, y0, xout, atol, rtol, jac_type="dense",
dx0=.0, dx_min=.0, dx_max=.0, nsteps=500, method=None,
nderiv=0, roots=None, nroots=0, check_callable=False,
check_indexing=False, **kwargs):
""" Integrates a system of ordinary differential equations.
Solves the initial value problem (IVP) defined by the user supplied
arguments. The user chooses at what values of the independent variable
results should be reported.
Parameters
----------
rhs : callable
Function with signature f(t, y, fout) which modifies fout *inplace*.
jac : callable
Function with signature either jac(t, y, jmat_out, dfdx_out) for
dense/banded jacobians, or jac(t, y, data, colptrs, rowvals) for
sparse (CSC) jacobians. ``jac`` should modify ``jmat_out``, ``dfdx_out``
(dense, banded) or (``data``, ``colptrs``, ``rowvals``) *inplace*.
(see also ``lband``, ``uband``, ``nnz``)
y0 : array_like
Initial values of the dependent variables.
xout : array_like
Values of the independent variable.
dx0 : float
Initial step-size.
atol : float
Absolute tolerance.
rtol : float
Relative tolerance.
dx_min : float
Minimum step (default: 0.0).
dx_max : float
Maximum step (default: 0.0).
nsteps : int
Maximum number of steps (default: 500).
method : str
One of: 'adams' or 'bdf' (default: 'bdf' when ``jac`` is given, 'adams' when ``jac`` is None).
nderiv : int
Number of derivatives (default: 0).
roots : callback (default: None)
With signature ``roots(x, yarr[:ny], out[:nroots]) -> None``,
see info['root_indices'], note that xout is unaffected.
nroots : int (default: 0)
Number of root functions in roots.
check_callable : bool (default: False)
Perform signature sanity checks on ``rhs`` and ``jac``.
check_indexing : bool (default: False)
Perform item setting sanity checks on ``rhs`` and ``jac``.
\*\*kwargs:
'lband' : int
Number of lower bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'uband' : int
Number of upper bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'iter_type' : str (default: 'default')
One of: 'default', 'functional', 'newton'.
'linear_solver' : str (default: 'default')
One of: 'default', 'dense', 'banded', 'gmres',
'gmres_classic', 'bicgstab', 'tfqmr', 'klu'.
'return_on_error' : bool
Returns on error without raising an exception (with ``'success'==False``).
'autorestart' : int
Useful for autonomous systems where conditions change during integration.
Will restart the integration with ``x==0``. Maximum number of steps is then
given by ``2**autorestart * nsteps``.
'record_rhs_xvals' : bool
When True: will return x values for rhs calls in ``info['rhs_xvals']``.
'record_jac_xvals' : bool
When True will return x values for jac calls in ``info['jac_xvals']``.
'record_order' : bool
When True will return used time stepper order in ``info['orders']``.
'record_fpe' : bool
When True will return observed floating point errors in ``info['fpes']``. (see ``fpes``)
'record_steps' : bool
When True will return stepsizes taken in ``info['steps']``.
'dx0cb': callable
Callback for calculating dx0 (make sure to pass ``dx0==0.0`` to enable).
Signature: ``f(x, y[:]) -> float``.
'dx_max_cb' : callable
Callback for calculating dx_max.
Signature: ``f(x, y[:]) -> float``.
'autonomous_exprs' : bool
Whether expressions contain the independent variable. If not, autorestart
is allowed to shift the independent variable to zero at restart.
'nnz' : int
Maximum number of nonzero entries in the sparse (CSC) jacobian (default: -1).
Must set ``nnz >= 0`` and ``linear_solver`` to 'klu' to enable use of sparse
``jac`` signature.
'jtimes' : callable
Function with signature f(v, Jv, t, y, fy) to calculate the product of the
Jacobian evaluated at t, y with a vector v. Should modify Jv *inplace*.
For use with linear solvers 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'.
'ew_ele' : bool
Whether to return error_weights, estimated_local_errors in info dict.
'constraints': array
Per component constraints 0.0: no constraint, 1.0: >=0, -1.0: <=0, 2.0: >0.0, -2.0: <0.0.
Returns
-------
(yout, info):
yout: 2-dimensional array of the dependent variables (axis 1) for
values corresponding to xout (axis 0)
info: Dictionary with information about the integration.
"""
# Sanity checks to reduce risk of having a segfault:
x0 = xout[0]
lband, uband = kwargs.get('lband', None), kwargs.get('uband', None)
nnz = kwargs.get('nnz', None)
_check_jac_type(lband=lband, uband=uband, nnz=nnz)
if check_callable:
_check_callable(rhs, jac, x0, y0, lband, uband, nnz)
if check_indexing:
_check_indexing(rhs, jac, x0, y0, lband, uband, nnz)
return predefined(
rhs, jac,
np.ascontiguousarray(y0, dtype=np.float64),
np.ascontiguousarray(xout, dtype=np.float64),
atol, rtol, method or ('adams' if jac is None else 'bdf'),
nsteps, dx0, dx_min, dx_max, nderiv=nderiv, roots=roots,
nroots=nroots, **kwargs)
|
def integrate_predefined(rhs, jac, y0, xout, atol, rtol, jac_type="dense",
dx0=.0, dx_min=.0, dx_max=.0, nsteps=500, method=None,
nderiv=0, roots=None, nroots=0, check_callable=False,
check_indexing=False, **kwargs):
""" Integrates a system of ordinary differential equations.
Solves the initial value problem (IVP) defined by the user supplied
arguments. The user chooses at what values of the independent variable
results should be reported.
Parameters
----------
rhs : callable
Function with signature f(t, y, fout) which modifies fout *inplace*.
jac : callable
Function with signature either jac(t, y, jmat_out, dfdx_out) for
dense/banded jacobians, or jac(t, y, data, colptrs, rowvals) for
sparse (CSC) jacobians. ``jac`` should modify ``jmat_out``, ``dfdx_out``
(dense, banded) or (``data``, ``colptrs``, ``rowvals``) *inplace*.
(see also ``lband``, ``uband``, ``nnz``)
y0 : array_like
Initial values of the dependent variables.
xout : array_like
Values of the independent variable.
dx0 : float
Initial step-size.
atol : float
Absolute tolerance.
rtol : float
Relative tolerance.
dx_min : float
Minimum step (default: 0.0).
dx_max : float
Maximum step (default: 0.0).
nsteps : int
Maximum number of steps (default: 500).
method : str
One of: 'adams' or 'bdf' (default: 'bdf' when ``jac`` is given, 'adams' when ``jac`` is None).
nderiv : int
Number of derivatives (default: 0).
roots : callback (default: None)
With signature ``roots(x, yarr[:ny], out[:nroots]) -> None``,
see info['root_indices'], note that xout is unaffected.
nroots : int (default: 0)
Number of root functions in roots.
check_callable : bool (default: False)
Perform signature sanity checks on ``rhs`` and ``jac``.
check_indexing : bool (default: False)
Perform item setting sanity checks on ``rhs`` and ``jac``.
\*\*kwargs:
'lband' : int
Number of lower bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'uband' : int
Number of upper bands.
Indexing: ``banded[row_i - col_i + uband, col_i]``.
'iter_type' : str (default: 'default')
One of: 'default', 'functional', 'newton'.
'linear_solver' : str (default: 'default')
One of: 'default', 'dense', 'banded', 'gmres',
'gmres_classic', 'bicgstab', 'tfqmr', 'klu'.
'return_on_error' : bool
Returns on error without raising an exception (with ``'success'==False``).
'autorestart' : int
Useful for autonomous systems where conditions change during integration.
Will restart the integration with ``x==0``. Maximum number of steps is then
given by ``2**autorestart * nsteps``.
'record_rhs_xvals' : bool
When True: will return x values for rhs calls in ``info['rhs_xvals']``.
'record_jac_xvals' : bool
When True will return x values for jac calls in ``info['jac_xvals']``.
'record_order' : bool
When True will return used time stepper order in ``info['orders']``.
'record_fpe' : bool
When True will return observed floating point errors in ``info['fpes']``. (see ``fpes``)
'record_steps' : bool
When True will return stepsizes taken in ``info['steps']``.
'dx0cb': callable
Callback for calculating dx0 (make sure to pass ``dx0==0.0`` to enable).
Signature: ``f(x, y[:]) -> float``.
'dx_max_cb' : callable
Callback for calculating dx_max.
Signature: ``f(x, y[:]) -> float``.
'autonomous_exprs' : bool
Whether expressions contain the independent variable. If not, autorestart
is allowed to shift the independent variable to zero at restart.
'nnz' : int
Maximum number of nonzero entries in the sparse (CSC) jacobian (default: -1).
Must set ``nnz >= 0`` and ``linear_solver`` to 'klu' to enable use of sparse
``jac`` signature.
'jtimes' : callable
Function with signature f(v, Jv, t, y, fy) to calculate the product of the
Jacobian evaluated at t, y with a vector v. Should modify Jv *inplace*.
For use with linear solvers 'gmres', 'gmres_classic', 'bicgstab', 'tfqmr'.
'ew_ele' : bool
Whether to return error_weights, estimated_local_errors in info dict.
'constraints': array
Per component constraints 0.0: no constraint, 1.0: >=0, -1.0: <=0, 2.0: >0.0, -2.0: <0.0.
Returns
-------
(yout, info):
yout: 2-dimensional array of the dependent variables (axis 1) for
values corresponding to xout (axis 0)
info: Dictionary with information about the integration.
"""
# Sanity checks to reduce risk of having a segfault:
x0 = xout[0]
lband, uband = kwargs.get('lband', None), kwargs.get('uband', None)
nnz = kwargs.get('nnz', None)
_check_jac_type(lband=lband, uband=uband, nnz=nnz)
if check_callable:
_check_callable(rhs, jac, x0, y0, lband, uband, nnz)
if check_indexing:
_check_indexing(rhs, jac, x0, y0, lband, uband, nnz)
return predefined(
rhs, jac,
np.ascontiguousarray(y0, dtype=np.float64),
np.ascontiguousarray(xout, dtype=np.float64),
atol, rtol, method or ('adams' if jac is None else 'bdf'),
nsteps, dx0, dx_min, dx_max, nderiv=nderiv, roots=roots,
nroots=nroots, **kwargs)
|
[
"Integrates",
"a",
"system",
"of",
"ordinary",
"differential",
"equations",
"."
] |
bjodah/pycvodes
|
python
|
https://github.com/bjodah/pycvodes/blob/00637a682d363319bc5c7c73a78f033556fde8a5/pycvodes/__init__.py#L150-L275
|
[
"def",
"integrate_predefined",
"(",
"rhs",
",",
"jac",
",",
"y0",
",",
"xout",
",",
"atol",
",",
"rtol",
",",
"jac_type",
"=",
"\"dense\"",
",",
"dx0",
"=",
".0",
",",
"dx_min",
"=",
".0",
",",
"dx_max",
"=",
".0",
",",
"nsteps",
"=",
"500",
",",
"method",
"=",
"None",
",",
"nderiv",
"=",
"0",
",",
"roots",
"=",
"None",
",",
"nroots",
"=",
"0",
",",
"check_callable",
"=",
"False",
",",
"check_indexing",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# Sanity checks to reduce risk of having a segfault:",
"x0",
"=",
"xout",
"[",
"0",
"]",
"lband",
",",
"uband",
"=",
"kwargs",
".",
"get",
"(",
"'lband'",
",",
"None",
")",
",",
"kwargs",
".",
"get",
"(",
"'uband'",
",",
"None",
")",
"nnz",
"=",
"kwargs",
".",
"get",
"(",
"'nnz'",
",",
"None",
")",
"_check_jac_type",
"(",
"lband",
"=",
"lband",
",",
"uband",
"=",
"uband",
",",
"nnz",
"=",
"nnz",
")",
"if",
"check_callable",
":",
"_check_callable",
"(",
"rhs",
",",
"jac",
",",
"x0",
",",
"y0",
",",
"lband",
",",
"uband",
",",
"nnz",
")",
"if",
"check_indexing",
":",
"_check_indexing",
"(",
"rhs",
",",
"jac",
",",
"x0",
",",
"y0",
",",
"lband",
",",
"uband",
",",
"nnz",
")",
"return",
"predefined",
"(",
"rhs",
",",
"jac",
",",
"np",
".",
"ascontiguousarray",
"(",
"y0",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
",",
"np",
".",
"ascontiguousarray",
"(",
"xout",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
",",
"atol",
",",
"rtol",
",",
"method",
"or",
"(",
"'adams'",
"if",
"jac",
"is",
"None",
"else",
"'bdf'",
")",
",",
"nsteps",
",",
"dx0",
",",
"dx_min",
",",
"dx_max",
",",
"nderiv",
"=",
"nderiv",
",",
"roots",
"=",
"roots",
",",
"nroots",
"=",
"nroots",
",",
"*",
"*",
"kwargs",
")"
] |
00637a682d363319bc5c7c73a78f033556fde8a5
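A minimal usage sketch for ``integrate_predefined`` follows; the decay model, rate constant, grid, and tolerances are illustrative only and not taken from the record. The callback signatures match the dense-jacobian forms documented above:

import numpy as np
from pycvodes import integrate_predefined

k = 0.5  # hypothetical decay rate

def rhs(t, y, fout):
    fout[0] = -k * y[0]          # dy/dt = -k*y, written in place

def jac(t, y, jmat_out, dfdx_out):
    jmat_out[0, 0] = -k          # d(rhs)/dy
    dfdx_out[0] = 0.0            # d(rhs)/dt (autonomous system)

xout = np.linspace(0.0, 10.0, 50)
yout, info = integrate_predefined(rhs, jac, [1.0], xout, atol=1e-9, rtol=1e-9)
print(yout[-1, 0])               # should be close to np.exp(-k * 10.0)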
|
test
|
GitHub_LLNL_Stats.get_stats
|
Retrieves the statistics from the given organization with the given
credentials. Will not retrieve data if file exists and force hasn't been
set to True. This is to save GH API requests.
|
scripts/github_stats.py
|
def get_stats(self, username='', password='', organization='llnl',
force=True, repo_type='public'):
"""
Retrieves the statistics from the given organization with the given
credentials. Will not retrieve data if file exists and force hasn't been
set to True. This is to save GH API requests.
"""
date = str(datetime.date.today())
file_path = ('../github_stats_output/' + date[:4] + '/' + date[:7] + '/'
+ date + '.csv')
if force or not os.path.isfile(file_path):
my_github.login(username, password)
calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
print 'Rate Limit: ' + str(calls_beginning)
my_github.get_org(organization)
count_members = my_github.get_mems_of_org()
count_teams = my_github.get_teams_of_org()
my_github.repos(repo_type=repo_type, organization=organization)
#Write JSON
my_github.write_org_json(dict_to_write=self.members_json,
path_ending_type='members', is_list=True)
my_github.write_org_json(dict_to_write=
{'singleton': self.org_retrieved.to_json()},
path_ending_type='organization')
my_github.write_org_json(dict_to_write=self.teams_json,
path_ending_type='teams', is_list=True)
my_github.write_repo_json(dict_to_write=self.repos_json,
path_ending_type='repo')
my_github.write_repo_json(dict_to_write=self.contributors_json,
path_ending_type='contributors', is_list=True)
my_github.write_repo_json(dict_to_write=self.pull_requests_json,
path_ending_type='pull-requests', is_list=True)
my_github.write_repo_json(dict_to_write=self.issues_json,
path_ending_type='issues', is_list=True)
my_github.write_repo_json(dict_to_write=self.languages_json,
path_ending_type='languages', is_dict=True)
my_github.write_repo_json(dict_to_write=self.commits_json,
path_ending_type='commits', is_list=True)
#Write CSV
my_github.write_to_file(file_path,
date,
organization,
count_members,
count_teams)
calls_remaining = self.logged_in_gh.ratelimit_remaining
calls_used = calls_beginning - calls_remaining
print ('Rate Limit Remaining: ' + str(calls_remaining) + '\nUsed '
+ str(calls_used) + ' API calls.')
|
def get_stats(self, username='', password='', organization='llnl',
force=True, repo_type='public'):
"""
Retrieves the statistics from the given organization with the given
credentials. Will not retrieve data if file exists and force hasn't been
set to True. This is to save GH API requests.
"""
date = str(datetime.date.today())
file_path = ('../github_stats_output/' + date[:4] + '/' + date[:7] + '/'
+ date + '.csv')
if force or not os.path.isfile(file_path):
my_github.login(username, password)
calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
print 'Rate Limit: ' + str(calls_beginning)
my_github.get_org(organization)
count_members = my_github.get_mems_of_org()
count_teams = my_github.get_teams_of_org()
my_github.repos(repo_type=repo_type, organization=organization)
#Write JSON
my_github.write_org_json(dict_to_write=self.members_json,
path_ending_type='members', is_list=True)
my_github.write_org_json(dict_to_write=
{'singleton': self.org_retrieved.to_json()},
path_ending_type='organization')
my_github.write_org_json(dict_to_write=self.teams_json,
path_ending_type='teams', is_list=True)
my_github.write_repo_json(dict_to_write=self.repos_json,
path_ending_type='repo')
my_github.write_repo_json(dict_to_write=self.contributors_json,
path_ending_type='contributors', is_list=True)
my_github.write_repo_json(dict_to_write=self.pull_requests_json,
path_ending_type='pull-requests', is_list=True)
my_github.write_repo_json(dict_to_write=self.issues_json,
path_ending_type='issues', is_list=True)
my_github.write_repo_json(dict_to_write=self.languages_json,
path_ending_type='languages', is_dict=True)
my_github.write_repo_json(dict_to_write=self.commits_json,
path_ending_type='commits', is_list=True)
#Write CSV
my_github.write_to_file(file_path,
date,
organization,
count_members,
count_teams)
calls_remaining = self.logged_in_gh.ratelimit_remaining
calls_used = calls_beginning - calls_remaining
print ('Rate Limit Remaining: ' + str(calls_remaining) + '\nUsed '
+ str(calls_used) + ' API calls.')
|
[
"Retrieves",
"the",
"statistics",
"from",
"the",
"given",
"organization",
"with",
"the",
"given",
"credentials",
".",
"Will",
"not",
"retreive",
"data",
"if",
"file",
"exists",
"and",
"force",
"hasn",
"t",
"been",
"set",
"to",
"True",
".",
"This",
"is",
"to",
"save",
"GH",
"API",
"requests",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L38-L86
|
[
"def",
"get_stats",
"(",
"self",
",",
"username",
"=",
"''",
",",
"password",
"=",
"''",
",",
"organization",
"=",
"'llnl'",
",",
"force",
"=",
"True",
",",
"repo_type",
"=",
"'public'",
")",
":",
"date",
"=",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
"file_path",
"=",
"(",
"'../github_stats_output/'",
"+",
"date",
"[",
":",
"4",
"]",
"+",
"'/'",
"+",
"date",
"[",
":",
"7",
"]",
"+",
"'/'",
"+",
"date",
"+",
"'.csv'",
")",
"if",
"force",
"or",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"my_github",
".",
"login",
"(",
"username",
",",
"password",
")",
"calls_beginning",
"=",
"self",
".",
"logged_in_gh",
".",
"ratelimit_remaining",
"+",
"1",
"print",
"'Rate Limit: '",
"+",
"str",
"(",
"calls_beginning",
")",
"my_github",
".",
"get_org",
"(",
"organization",
")",
"count_members",
"=",
"my_github",
".",
"get_mems_of_org",
"(",
")",
"count_teams",
"=",
"my_github",
".",
"get_teams_of_org",
"(",
")",
"my_github",
".",
"repos",
"(",
"repo_type",
"=",
"repo_type",
",",
"organization",
"=",
"organization",
")",
"#Write JSON",
"my_github",
".",
"write_org_json",
"(",
"dict_to_write",
"=",
"self",
".",
"members_json",
",",
"path_ending_type",
"=",
"'members'",
",",
"is_list",
"=",
"True",
")",
"my_github",
".",
"write_org_json",
"(",
"dict_to_write",
"=",
"{",
"'singleton'",
":",
"self",
".",
"org_retrieved",
".",
"to_json",
"(",
")",
"}",
",",
"path_ending_type",
"=",
"'organization'",
")",
"my_github",
".",
"write_org_json",
"(",
"dict_to_write",
"=",
"self",
".",
"teams_json",
",",
"path_ending_type",
"=",
"'teams'",
",",
"is_list",
"=",
"True",
")",
"my_github",
".",
"write_repo_json",
"(",
"dict_to_write",
"=",
"self",
".",
"repos_json",
",",
"path_ending_type",
"=",
"'repo'",
")",
"my_github",
".",
"write_repo_json",
"(",
"dict_to_write",
"=",
"self",
".",
"contributors_json",
",",
"path_ending_type",
"=",
"'contributors'",
",",
"is_list",
"=",
"True",
")",
"my_github",
".",
"write_repo_json",
"(",
"dict_to_write",
"=",
"self",
".",
"pull_requests_json",
",",
"path_ending_type",
"=",
"'pull-requests'",
",",
"is_list",
"=",
"True",
")",
"my_github",
".",
"write_repo_json",
"(",
"dict_to_write",
"=",
"self",
".",
"issues_json",
",",
"path_ending_type",
"=",
"'issues'",
",",
"is_list",
"=",
"True",
")",
"my_github",
".",
"write_repo_json",
"(",
"dict_to_write",
"=",
"self",
".",
"languages_json",
",",
"path_ending_type",
"=",
"'languages'",
",",
"is_dict",
"=",
"True",
")",
"my_github",
".",
"write_repo_json",
"(",
"dict_to_write",
"=",
"self",
".",
"commits_json",
",",
"path_ending_type",
"=",
"'commits'",
",",
"is_list",
"=",
"True",
")",
"#Write CSV",
"my_github",
".",
"write_to_file",
"(",
"file_path",
",",
"date",
",",
"organization",
",",
"count_members",
",",
"count_teams",
")",
"calls_remaining",
"=",
"self",
".",
"logged_in_gh",
".",
"ratelimit_remaining",
"calls_used",
"=",
"calls_beginning",
"-",
"calls_remaining",
"print",
"(",
"'Rate Limit Remaining: '",
"+",
"str",
"(",
"calls_remaining",
")",
"+",
"'\\nUsed '",
"+",
"str",
"(",
"calls_used",
")",
"+",
"' API calls.'",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
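A compact sketch of the force/file-exists gate that ``get_stats`` uses to conserve GitHub API calls (path layout copied from the record; the helper name is illustrative):

import datetime
import os

def should_fetch(force=True):
    date = str(datetime.date.today())
    file_path = ('../github_stats_output/' + date[:4] + '/' + date[:7]
                 + '/' + date + '.csv')
    return force or not os.path.isfile(file_path)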
|
test
|
GitHub_LLNL_Stats.get_mems_of_org
|
Retrieves the number of members of the organization.
|
scripts/github_stats.py
|
def get_mems_of_org(self):
"""
Retrieves the number of members of the organization.
"""
print 'Getting members.'
counter = 0
for member in self.org_retrieved.iter_members():
self.members_json[member.id] = member.to_json()
counter += 1
return counter
|
def get_mems_of_org(self):
"""
Retrieves the number of members of the organization.
"""
print 'Getting members.'
counter = 0
for member in self.org_retrieved.iter_members():
self.members_json[member.id] = member.to_json()
counter += 1
return counter
|
[
"Retrieves",
"the",
"number",
"of",
"members",
"of",
"the",
"organization",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L147-L156
|
[
"def",
"get_mems_of_org",
"(",
"self",
")",
":",
"print",
"'Getting members.'",
"counter",
"=",
"0",
"for",
"member",
"in",
"self",
".",
"org_retrieved",
".",
"iter_members",
"(",
")",
":",
"self",
".",
"members_json",
"[",
"member",
".",
"id",
"]",
"=",
"member",
".",
"to_json",
"(",
")",
"counter",
"+=",
"1",
"return",
"counter"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
GitHub_LLNL_Stats.get_teams_of_org
|
Retrieves the number of teams of the organization.
|
scripts/github_stats.py
|
def get_teams_of_org(self):
"""
Retrieves the number of teams of the organization.
"""
print 'Getting teams.'
counter = 0
for team in self.org_retrieved.iter_teams():
self.teams_json[team.id] = team.to_json()
counter += 1
return counter
|
def get_teams_of_org(self):
"""
Retrieves the number of teams of the organization.
"""
print 'Getting teams.'
counter = 0
for team in self.org_retrieved.iter_teams():
self.teams_json[team.id] = team.to_json()
counter += 1
return counter
|
[
"Retrieves",
"the",
"number",
"of",
"teams",
"of",
"the",
"organization",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L158-L167
|
[
"def",
"get_teams_of_org",
"(",
"self",
")",
":",
"print",
"'Getting teams.'",
"counter",
"=",
"0",
"for",
"team",
"in",
"self",
".",
"org_retrieved",
".",
"iter_teams",
"(",
")",
":",
"self",
".",
"teams_json",
"[",
"team",
".",
"id",
"]",
"=",
"team",
".",
"to_json",
"(",
")",
"counter",
"+=",
"1",
"return",
"counter"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
GitHub_LLNL_Stats.repos
|
Retrieves info about the repos of the current organization.
|
scripts/github_stats.py
|
def repos(self, repo_type='public', organization='llnl'):
"""
Retrieves info about the repos of the current organization.
"""
print 'Getting repos.'
for repo in self.org_retrieved.iter_repos(type=repo_type):
#JSON
json = repo.to_json()
self.repos_json[repo.name] = json
#CSV
temp_repo = my_repo.My_Repo()
temp_repo.name = repo.full_name
self.total_repos += 1
temp_repo.contributors = my_github.get_total_contributors(repo)
self.total_contributors += temp_repo.contributors
temp_repo.forks = repo.forks_count
self.total_forks += temp_repo.forks
temp_repo.stargazers = repo.stargazers
self.total_stars += temp_repo.stargazers
temp_repo.pull_requests_open, temp_repo.pull_requests_closed = \
my_github.get_pull_reqs(repo)
temp_repo.pull_requests = (temp_repo.pull_requests_open
+ temp_repo.pull_requests_closed)
self.total_pull_reqs += temp_repo.pull_requests_open
self.total_pull_reqs += temp_repo.pull_requests_closed
self.total_pull_reqs_open += temp_repo.pull_requests_open
self.total_pull_reqs_closed += temp_repo.pull_requests_closed
temp_repo.open_issues = repo.open_issues_count
self.total_open_issues += temp_repo.open_issues
temp_repo.closed_issues = my_github.get_issues(repo, organization=organization)
temp_repo.issues = temp_repo.closed_issues + temp_repo.open_issues
self.total_closed_issues += temp_repo.closed_issues
self.total_issues += temp_repo.issues
my_github.get_languages(repo, temp_repo)
temp_repo.readme = my_github.get_readme(repo)
#temp_repo.license = my_github.get_license(repo)
temp_repo.commits = self.get_commits(repo=repo, organization=organization)
self.total_commits += temp_repo.commits
self.all_repos.append(temp_repo)
|
def repos(self, repo_type='public', organization='llnl'):
"""
Retrieves info about the repos of the current organization.
"""
print 'Getting repos.'
for repo in self.org_retrieved.iter_repos(type=repo_type):
#JSON
json = repo.to_json()
self.repos_json[repo.name] = json
#CSV
temp_repo = my_repo.My_Repo()
temp_repo.name = repo.full_name
self.total_repos += 1
temp_repo.contributors = my_github.get_total_contributors(repo)
self.total_contributors += temp_repo.contributors
temp_repo.forks = repo.forks_count
self.total_forks += temp_repo.forks
temp_repo.stargazers = repo.stargazers
self.total_stars += temp_repo.stargazers
temp_repo.pull_requests_open, temp_repo.pull_requests_closed = \
my_github.get_pull_reqs(repo)
temp_repo.pull_requests = (temp_repo.pull_requests_open
+ temp_repo.pull_requests_closed)
self.total_pull_reqs += temp_repo.pull_requests_open
self.total_pull_reqs += temp_repo.pull_requests_closed
self.total_pull_reqs_open += temp_repo.pull_requests_open
self.total_pull_reqs_closed += temp_repo.pull_requests_closed
temp_repo.open_issues = repo.open_issues_count
self.total_open_issues += temp_repo.open_issues
temp_repo.closed_issues = my_github.get_issues(repo, organization=organization)
temp_repo.issues = temp_repo.closed_issues + temp_repo.open_issues
self.total_closed_issues += temp_repo.closed_issues
self.total_issues += temp_repo.issues
my_github.get_languages(repo, temp_repo)
temp_repo.readme = my_github.get_readme(repo)
#temp_repo.license = my_github.get_license(repo)
temp_repo.commits = self.get_commits(repo=repo, organization=organization)
self.total_commits += temp_repo.commits
self.all_repos.append(temp_repo)
|
[
"Retrieves",
"info",
"about",
"the",
"repos",
"of",
"the",
"current",
"organization",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L169-L207
|
[
"def",
"repos",
"(",
"self",
",",
"repo_type",
"=",
"'public'",
",",
"organization",
"=",
"'llnl'",
")",
":",
"print",
"'Getting repos.'",
"for",
"repo",
"in",
"self",
".",
"org_retrieved",
".",
"iter_repos",
"(",
"type",
"=",
"repo_type",
")",
":",
"#JSON",
"json",
"=",
"repo",
".",
"to_json",
"(",
")",
"self",
".",
"repos_json",
"[",
"repo",
".",
"name",
"]",
"=",
"json",
"#CSV",
"temp_repo",
"=",
"my_repo",
".",
"My_Repo",
"(",
")",
"temp_repo",
".",
"name",
"=",
"repo",
".",
"full_name",
"self",
".",
"total_repos",
"+=",
"1",
"temp_repo",
".",
"contributors",
"=",
"my_github",
".",
"get_total_contributors",
"(",
"repo",
")",
"self",
".",
"total_contributors",
"+=",
"temp_repo",
".",
"contributors",
"temp_repo",
".",
"forks",
"=",
"repo",
".",
"forks_count",
"self",
".",
"total_forks",
"+=",
"temp_repo",
".",
"forks",
"temp_repo",
".",
"stargazers",
"=",
"repo",
".",
"stargazers",
"self",
".",
"total_stars",
"+=",
"temp_repo",
".",
"stargazers",
"temp_repo",
".",
"pull_requests_open",
",",
"temp_repo",
".",
"pull_requests_closed",
"=",
"my_github",
".",
"get_pull_reqs",
"(",
"repo",
")",
"temp_repo",
".",
"pull_requests",
"=",
"(",
"temp_repo",
".",
"pull_requests_open",
"+",
"temp_repo",
".",
"pull_requests_closed",
")",
"self",
".",
"total_pull_reqs",
"+=",
"temp_repo",
".",
"pull_requests_open",
"self",
".",
"total_pull_reqs",
"+=",
"temp_repo",
".",
"pull_requests_closed",
"self",
".",
"total_pull_reqs_open",
"+=",
"temp_repo",
".",
"pull_requests_open",
"self",
".",
"total_pull_reqs_closed",
"+=",
"temp_repo",
".",
"pull_requests_closed",
"temp_repo",
".",
"open_issues",
"=",
"repo",
".",
"open_issues_count",
"self",
".",
"total_open_issues",
"+=",
"temp_repo",
".",
"open_issues",
"temp_repo",
".",
"closed_issues",
"=",
"my_github",
".",
"get_issues",
"(",
"repo",
",",
"organization",
"=",
"organization",
")",
"temp_repo",
".",
"issues",
"=",
"temp_repo",
".",
"closed_issues",
"+",
"temp_repo",
".",
"open_issues",
"self",
".",
"total_closed_issues",
"+=",
"temp_repo",
".",
"closed_issues",
"self",
".",
"total_issues",
"+=",
"temp_repo",
".",
"issues",
"my_github",
".",
"get_languages",
"(",
"repo",
",",
"temp_repo",
")",
"temp_repo",
".",
"readme",
"=",
"my_github",
".",
"get_readme",
"(",
"repo",
")",
"#temp_repo.license = my_github.get_license(repo)",
"temp_repo",
".",
"commits",
"=",
"self",
".",
"get_commits",
"(",
"repo",
"=",
"repo",
",",
"organization",
"=",
"organization",
")",
"self",
".",
"total_commits",
"+=",
"temp_repo",
".",
"commits",
"self",
".",
"all_repos",
".",
"append",
"(",
"temp_repo",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
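``repos`` folds each repository's counts into running totals on the instance. A stripped-down sketch of the same aggregation pattern, using only github3 attributes that appear in the record (``iter_repos``, ``forks_count``, ``stargazers``):

def tally_org_repos(org, repo_type='public'):
    totals = {'repos': 0, 'forks': 0, 'stars': 0}
    for repo in org.iter_repos(type=repo_type):
        totals['repos'] += 1
        totals['forks'] += repo.forks_count
        totals['stars'] += repo.stargazers
    return totals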
|
test
|
GitHub_LLNL_Stats.get_total_contributors
|
Retrieves the number of contributors to a repo in the organization.
Also adds to unique contributor list.
|
scripts/github_stats.py
|
def get_total_contributors(self, repo):
"""
Retrieves the number of contributors to a repo in the organization.
Also adds to unique contributor list.
"""
repo_contributors = 0
for contributor in repo.iter_contributors():
repo_contributors += 1
self.unique_contributors[contributor.id].append(repo.name)
self.contributors_json[repo.name].append(contributor.to_json())
return repo_contributors
|
def get_total_contributors(self, repo):
"""
Retrieves the number of contributors to a repo in the organization.
Also adds to unique contributor list.
"""
repo_contributors = 0
for contributor in repo.iter_contributors():
repo_contributors += 1
self.unique_contributors[contributor.id].append(repo.name)
self.contributors_json[repo.name].append(contributor.to_json())
return repo_contributors
|
[
"Retrieves",
"the",
"number",
"of",
"contributors",
"to",
"a",
"repo",
"in",
"the",
"organization",
".",
"Also",
"adds",
"to",
"unique",
"contributor",
"list",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L209-L219
|
[
"def",
"get_total_contributors",
"(",
"self",
",",
"repo",
")",
":",
"repo_contributors",
"=",
"0",
"for",
"contributor",
"in",
"repo",
".",
"iter_contributors",
"(",
")",
":",
"repo_contributors",
"+=",
"1",
"self",
".",
"unique_contributors",
"[",
"contributor",
".",
"id",
"]",
".",
"append",
"(",
"repo",
".",
"name",
")",
"self",
".",
"contributors_json",
"[",
"repo",
".",
"name",
"]",
".",
"append",
"(",
"contributor",
".",
"to_json",
"(",
")",
")",
"return",
"repo_contributors"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
GitHub_LLNL_Stats.get_pull_reqs
|
Retrieves the number of pull requests on a repo in the organization.
|
scripts/github_stats.py
|
def get_pull_reqs(self, repo):
"""
Retrieves the number of pull requests on a repo in the organization.
"""
pull_reqs_open = 0
pull_reqs_closed = 0
for pull_request in repo.iter_pulls(state='all'):
self.pull_requests_json[repo.name].append(pull_request.to_json())
if pull_request.closed_at is not None:
pull_reqs_closed += 1
else:
pull_reqs_open += 1
return pull_reqs_open, pull_reqs_closed
|
def get_pull_reqs(self, repo):
"""
Retrieves the number of pull requests on a repo in the organization.
"""
pull_reqs_open = 0
pull_reqs_closed = 0
for pull_request in repo.iter_pulls(state='all'):
self.pull_requests_json[repo.name].append(pull_request.to_json())
if pull_request.closed_at is not None:
pull_reqs_closed += 1
else:
pull_reqs_open += 1
return pull_reqs_open, pull_reqs_closed
|
[
"Retrieves",
"the",
"number",
"of",
"pull",
"requests",
"on",
"a",
"repo",
"in",
"the",
"organization",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L221-L233
|
[
"def",
"get_pull_reqs",
"(",
"self",
",",
"repo",
")",
":",
"pull_reqs_open",
"=",
"0",
"pull_reqs_closed",
"=",
"0",
"for",
"pull_request",
"in",
"repo",
".",
"iter_pulls",
"(",
"state",
"=",
"'all'",
")",
":",
"self",
".",
"pull_requests_json",
"[",
"repo",
".",
"name",
"]",
".",
"append",
"(",
"pull_request",
".",
"to_json",
"(",
")",
")",
"if",
"pull_request",
".",
"closed_at",
"is",
"not",
"None",
":",
"pull_reqs_closed",
"+=",
"1",
"else",
":",
"pull_reqs_open",
"+=",
"1",
"return",
"pull_reqs_open",
",",
"pull_reqs_closed"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
GitHub_LLNL_Stats.get_issues
|
Retrieves the number of closed issues.
|
scripts/github_stats.py
|
def get_issues(self, repo, organization='llnl'):
"""
Retrieves the number of closed issues.
"""
#JSON
path = ('../github-data/' + organization + '/' + repo.name + '/issues')
is_only_today = False
if not os.path.exists(path): #no previous path, get all issues
all_issues = repo.iter_issues(state='all')
is_only_today = True
else:
files = os.listdir(path)
date = str(files[-1][:-5])
if date == str(datetime.date.today()):
#most recent date is actually today, get previous most recent date
if len(files) > 2:
date = str(files[-2][:-5])
else:
#This means there is only one file, today. Retrieve every issue
all_issues = repo.iter_issues(state='all')
is_only_today = True
if not is_only_today:#there's a previous saved JSON that's not today
all_issues = repo.iter_issues(since=date, state='all')
for issue in all_issues:
self.issues_json[repo.name].append(issue.to_json())
#CSV
closed_issues = 0
for issue in repo.iter_issues(state='closed'):
if issue is not None:
closed_issues += 1
return closed_issues
|
def get_issues(self, repo, organization='llnl'):
"""
Retrieves the number of closed issues.
"""
#JSON
path = ('../github-data/' + organization + '/' + repo.name + '/issues')
is_only_today = False
if not os.path.exists(path): #no previous path, get all issues
all_issues = repo.iter_issues(state='all')
is_only_today = True
else:
files = os.listdir(path)
date = str(files[-1][:-5])
if date == str(datetime.date.today()):
#most recent date is actually today, get previous most recent date
if len(files) > 2:
date = str(files[-2][:-5])
else:
#This means there is only one file, today. Retrieve every issue
all_issues = repo.iter_issues(state='all')
is_only_today = True
if not is_only_today:#there's a previous saved JSON that's not today
all_issues = repo.iter_issues(since=date, state='all')
for issue in all_issues:
self.issues_json[repo.name].append(issue.to_json())
#CSV
closed_issues = 0
for issue in repo.iter_issues(state='closed'):
if issue is not None:
closed_issues += 1
return closed_issues
|
[
"Retrieves",
"the",
"number",
"of",
"closed",
"issues",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L235-L265
|
[
"def",
"get_issues",
"(",
"self",
",",
"repo",
",",
"organization",
"=",
"'llnl'",
")",
":",
"#JSON",
"path",
"=",
"(",
"'../github-data/'",
"+",
"organization",
"+",
"'/'",
"+",
"repo",
".",
"name",
"+",
"'/issues'",
")",
"is_only_today",
"=",
"False",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"#no previous path, get all issues",
"all_issues",
"=",
"repo",
".",
"iter_issues",
"(",
"state",
"=",
"'all'",
")",
"is_only_today",
"=",
"True",
"else",
":",
"files",
"=",
"os",
".",
"listdir",
"(",
"path",
")",
"date",
"=",
"str",
"(",
"files",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"5",
"]",
")",
"if",
"date",
"==",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
":",
"#most recent date is actually today, get previous most recent date",
"if",
"len",
"(",
"files",
")",
">",
"2",
":",
"date",
"=",
"str",
"(",
"files",
"[",
"-",
"2",
"]",
"[",
":",
"-",
"5",
"]",
")",
"else",
":",
"#This means there is only one file, today. Retrieve every issue",
"all_issues",
"=",
"repo",
".",
"iter_issues",
"(",
"state",
"=",
"'all'",
")",
"is_only_today",
"=",
"True",
"if",
"not",
"is_only_today",
":",
"#there's a previous saved JSON that's not today",
"all_issues",
"=",
"repo",
".",
"iter_issues",
"(",
"since",
"=",
"date",
",",
"state",
"=",
"'all'",
")",
"for",
"issue",
"in",
"all_issues",
":",
"self",
".",
"issues_json",
"[",
"repo",
".",
"name",
"]",
".",
"append",
"(",
"issue",
".",
"to_json",
"(",
")",
")",
"#CSV",
"closed_issues",
"=",
"0",
"for",
"issue",
"in",
"repo",
".",
"iter_issues",
"(",
"state",
"=",
"'closed'",
")",
":",
"if",
"issue",
"is",
"not",
"None",
":",
"closed_issues",
"+=",
"1",
"return",
"closed_issues"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
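``get_issues`` (and ``get_commits`` below) derive a ``since`` date from previously saved JSON filenames. A sketch of that selection, assuming files are named ``YYYY-MM-DD.json`` so ``[:-5]`` strips the extension. Note that the record tests ``len(files) > 2``, which re-fetches everything when exactly two files exist; ``> 1`` may be what was intended:

import datetime
import os

def last_saved_date(path):
    """Return the newest non-today date string, or None to fetch all."""
    if not os.path.exists(path):
        return None
    files = sorted(os.listdir(path))
    date = files[-1][:-5]
    if date != str(datetime.date.today()):
        return date
    if len(files) > 1:   # record uses > 2; > 1 also covers the two-file case
        return files[-2][:-5]
    return None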
|
test
|
GitHub_LLNL_Stats.get_languages
|
Retrieves the languages used in the repo and increments the respective
counts of those languages. Only increments languages that have names.
Anything else is not incremented (i.e. numbers).
|
scripts/github_stats.py
|
def get_languages(self, repo, temp_repo):
"""
Retrieves the languages used in the repo and increments the respective
counts of those languages. Only increments languages that have names.
Anything else is not incremented (i.e. numbers).
"""
try:
self.languages[repo.language] += 1
except KeyError:
count = self.languages[repo.language] = 1
for repo_languages in repo.iter_languages():
self.languages_json[repo.name][repo_languages[0]] = repo_languages[1]
for language in repo_languages:
if isinstance(language, basestring):#is language
temp_repo.languages.append(language)
self.previous_language = language
else:#record size bytes of language
try:
self.languages_size[self.previous_language] += \
language
except KeyError:
size = self.languages_size[self.previous_language] \
= language
|
def get_languages(self, repo, temp_repo):
"""
Retrieves the languages used in the repo and increments the respective
counts of those languages. Only increments languages that have names.
Anything else is not incremented (i.e. numbers).
"""
try:
self.languages[repo.language] += 1
except KeyError:
count = self.languages[repo.language] = 1
for repo_languages in repo.iter_languages():
self.languages_json[repo.name][repo_languages[0]] = repo_languages[1]
for language in repo_languages:
if isinstance(language, basestring):#is language
temp_repo.languages.append(language)
self.previous_language = language
else:#record size bytes of language
try:
self.languages_size[self.previous_language] += \
language
except KeyError:
size = self.languages_size[self.previous_language] \
= language
|
[
"Retrieves",
"the",
"languages",
"used",
"in",
"the",
"repo",
"and",
"increments",
"the",
"respective",
"counts",
"of",
"those",
"languages",
".",
"Only",
"increments",
"languages",
"that",
"have",
"names",
".",
"Anything",
"else",
"is",
"not",
"incremented",
"(",
"i",
".",
"e",
".",
"numbers",
")",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L267-L289
|
[
"def",
"get_languages",
"(",
"self",
",",
"repo",
",",
"temp_repo",
")",
":",
"try",
":",
"self",
".",
"languages",
"[",
"repo",
".",
"language",
"]",
"+=",
"1",
"except",
"KeyError",
":",
"count",
"=",
"self",
".",
"languages",
"[",
"repo",
".",
"language",
"]",
"=",
"1",
"for",
"repo_languages",
"in",
"repo",
".",
"iter_languages",
"(",
")",
":",
"self",
".",
"languages_json",
"[",
"repo",
".",
"name",
"]",
"[",
"repo_languages",
"[",
"0",
"]",
"]",
"=",
"repo_languages",
"[",
"1",
"]",
"for",
"language",
"in",
"repo_languages",
":",
"if",
"isinstance",
"(",
"language",
",",
"basestring",
")",
":",
"#is language",
"temp_repo",
".",
"languages",
".",
"append",
"(",
"language",
")",
"self",
".",
"previous_language",
"=",
"language",
"else",
":",
"#record size bytes of language",
"try",
":",
"self",
".",
"languages_size",
"[",
"self",
".",
"previous_language",
"]",
"+=",
"language",
"except",
"KeyError",
":",
"size",
"=",
"self",
".",
"languages_size",
"[",
"self",
".",
"previous_language",
"]",
"=",
"language"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
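``get_languages`` walks each (name, bytes) pair element by element, using ``isinstance`` and a ``previous_language`` attribute to match sizes to names. Tuple unpacking expresses the same accumulation directly; a sketch, assuming ``iter_languages`` yields 2-tuples as the record's ``[0]``/``[1]`` indexing implies:

from collections import defaultdict

def tally_language_sizes(repo):
    sizes = defaultdict(int)
    for name, nbytes in repo.iter_languages():  # (language, bytes) pairs
        sizes[name] += nbytes
    return dict(sizes)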
|
test
|
GitHub_LLNL_Stats.get_readme
|
Checks to see if the given repo has a ReadMe. MD means it has a correct
Readme recognized by GitHub.
|
scripts/github_stats.py
|
def get_readme(self, repo):
"""
Checks to see if the given repo has a ReadMe. MD means it has a correct
Readme recognized by GitHub.
"""
readme_contents = repo.readme()
if readme_contents is not None:
self.total_readmes += 1
return 'MD'
if self.search_limit >= 28:
print 'Hit search limit. Sleeping for 60 sec.'
time.sleep(60)
self.search_limit = 0
self.search_limit += 1
search_results = self.logged_in_gh.search_code('readme'
+ 'in:path repo:' + repo.full_name)
try:
for result in search_results:
path = result.path[1:]
if '/' not in path and 'readme' in path.lower():
self.total_readmes += 1
return path
return 'MISS'
except (github3.models.GitHubError, StopIteration) as e:
return 'MISS'
|
def get_readme(self, repo):
"""
Checks to see if the given repo has a ReadMe. MD means it has a correct
Readme recognized by GitHub.
"""
readme_contents = repo.readme()
if readme_contents is not None:
self.total_readmes += 1
return 'MD'
if self.search_limit >= 28:
print 'Hit search limit. Sleeping for 60 sec.'
time.sleep(60)
self.search_limit = 0
self.search_limit += 1
search_results = self.logged_in_gh.search_code('readme'
+ 'in:path repo:' + repo.full_name)
try:
for result in search_results:
path = result.path[1:]
if '/' not in path and 'readme' in path.lower():
self.total_readmes += 1
return path
return 'MISS'
except (github3.models.GitHubError, StopIteration) as e:
return 'MISS'
|
[
"Checks",
"to",
"see",
"if",
"the",
"given",
"repo",
"has",
"a",
"ReadMe",
".",
"MD",
"means",
"it",
"has",
"a",
"correct",
"Readme",
"recognized",
"by",
"GitHub",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L291-L315
|
[
"def",
"get_readme",
"(",
"self",
",",
"repo",
")",
":",
"readme_contents",
"=",
"repo",
".",
"readme",
"(",
")",
"if",
"readme_contents",
"is",
"not",
"None",
":",
"self",
".",
"total_readmes",
"+=",
"1",
"return",
"'MD'",
"if",
"self",
".",
"search_limit",
">=",
"28",
":",
"print",
"'Hit search limit. Sleeping for 60 sec.'",
"time",
".",
"sleep",
"(",
"60",
")",
"self",
".",
"search_limit",
"=",
"0",
"self",
".",
"search_limit",
"+=",
"1",
"search_results",
"=",
"self",
".",
"logged_in_gh",
".",
"search_code",
"(",
"'readme'",
"+",
"'in:path repo:'",
"+",
"repo",
".",
"full_name",
")",
"try",
":",
"for",
"result",
"in",
"search_results",
":",
"path",
"=",
"result",
".",
"path",
"[",
"1",
":",
"]",
"if",
"'/'",
"not",
"in",
"path",
"and",
"'readme'",
"in",
"path",
".",
"lower",
"(",
")",
":",
"self",
".",
"total_readmes",
"+=",
"1",
"return",
"path",
"return",
"'MISS'",
"except",
"(",
"github3",
".",
"models",
".",
"GitHubError",
",",
"StopIteration",
")",
"as",
"e",
":",
"return",
"'MISS'"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
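Worth flagging in ``get_readme`` (and ``get_license`` below): the query is built as ``'readme' + 'in:path repo:' + repo.full_name``, which concatenates to ``'readmein:path repo:...'``. A hedged sketch of the presumably intended query string, with a separating space:

def readme_search_query(full_name):
    # yields 'readme in:path repo:owner/name' (note the space after 'readme')
    return 'readme ' + 'in:path repo:' + full_name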
|
test
|
GitHub_LLNL_Stats.get_license
|
Checks to see if the given repo has a top level LICENSE file.
|
scripts/github_stats.py
|
def get_license(self, repo):
"""
Checks to see if the given repo has a top level LICENSE file.
"""
if self.search_limit >= 28:
print 'Hit search limit. Sleeping for 60 sec.'
time.sleep(60)
self.search_limit = 0
self.search_limit += 1
search_results = self.logged_in_gh.search_code('license'
+ 'in:path repo:' + repo.full_name)
try:
for result in search_results:
path = result.path[1:]
if '/' not in path and 'license' in path.lower():
self.total_licenses += 1
return path
return 'MISS'
except (StopIteration) as e:
return 'MISS'
|
def get_license(self, repo):
"""
Checks to see if the given repo has a top level LICENSE file.
"""
if self.search_limit >= 28:
print 'Hit search limit. Sleeping for 60 sec.'
time.sleep(60)
self.search_limit = 0
self.search_limit += 1
search_results = self.logged_in_gh.search_code('license'
+ 'in:path repo:' + repo.full_name)
try:
for result in search_results:
path = result.path[1:]
if '/' not in path and 'license' in path.lower():
self.total_licenses += 1
return path
return 'MISS'
except (StopIteration) as e:
return 'MISS'
|
[
"Checks",
"to",
"see",
"if",
"the",
"given",
"repo",
"has",
"a",
"top",
"level",
"LICENSE",
"file",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L317-L336
|
[
"def",
"get_license",
"(",
"self",
",",
"repo",
")",
":",
"if",
"self",
".",
"search_limit",
">=",
"28",
":",
"print",
"'Hit search limit. Sleeping for 60 sec.'",
"time",
".",
"sleep",
"(",
"60",
")",
"self",
".",
"search_limit",
"=",
"0",
"self",
".",
"search_limit",
"+=",
"1",
"search_results",
"=",
"self",
".",
"logged_in_gh",
".",
"search_code",
"(",
"'license'",
"+",
"'in:path repo:'",
"+",
"repo",
".",
"full_name",
")",
"try",
":",
"for",
"result",
"in",
"search_results",
":",
"path",
"=",
"result",
".",
"path",
"[",
"1",
":",
"]",
"if",
"'/'",
"not",
"in",
"path",
"and",
"'license'",
"in",
"path",
".",
"lower",
"(",
")",
":",
"self",
".",
"total_licenses",
"+=",
"1",
"return",
"path",
"return",
"'MISS'",
"except",
"(",
"StopIteration",
")",
"as",
"e",
":",
"return",
"'MISS'"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
GitHub_LLNL_Stats.get_commits
|
Retrieves the number of commits to a repo in the organization. If it is
the first time getting commits for a repo, it will get all commits and
save them to JSON. If there are previous commits saved, it will only get
commits that have not been saved to disk since the last date of commits.
|
scripts/github_stats.py
|
def get_commits(self, repo, organization='llnl'):
"""
Retrieves the number of commits to a repo in the organization. If it is
the first time getting commits for a repo, it will get all commits and
save them to JSON. If there are previous commits saved, it will only get
commits that have not been saved to disk since the last date of commits.
"""
#JSON
path = ('../github-data/' + organization + '/' + repo.name + '/commits')
is_only_today = False
if not os.path.exists(path): #no previous path, get all commits
all_commits = repo.iter_commits()
is_only_today = True
else:
files = os.listdir(path)
date = str(files[-1][:-5])
if date == str(datetime.date.today()):
#most recent date is actually today, get previous most recent date
if len(files) > 2:
date = str(files[-2][:-5])
else:
#This means there is only one file, today. Retrieve every commit
all_commits = repo.iter_commits()
is_only_today = True
if not is_only_today:#there's a previous saved JSON that's not today
all_commits = repo.iter_commits(since=date)
for commit in all_commits:
self.commits_json[repo.name].append(commit.to_json())
#for csv
count = 0
for commit in repo.iter_commits():
count += 1
return count
|
def get_commits(self, repo, organization='llnl'):
"""
Retrieves the number of commits to a repo in the organization. If it is
the first time getting commits for a repo, it will get all commits and
save them to JSON. If there are previous commits saved, it will only get
commits that have not been saved to disk since the last date of commits.
"""
#JSON
path = ('../github-data/' + organization + '/' + repo.name + '/commits')
is_only_today = False
if not os.path.exists(path): #no previous path, get all commits
all_commits = repo.iter_commits()
is_only_today = True
else:
files = os.listdir(path)
date = str(files[-1][:-5])
if date == str(datetime.date.today()):
#most recent date is actually today, get previous most recent date
if len(files) > 2:
date = str(files[-2][:-5])
else:
#This means there is only one file, today. Retrieve every commit
all_commits = repo.iter_commits()
is_only_today = True
if not is_only_today:#there's a previous saved JSON that's not today
all_commits = repo.iter_commits(since=date)
for commit in all_commits:
self.commits_json[repo.name].append(commit.to_json())
#for csv
count = 0
for commit in repo.iter_commits():
count += 1
return count
|
[
"Retrieves",
"the",
"number",
"of",
"commits",
"to",
"a",
"repo",
"in",
"the",
"organization",
".",
"If",
"it",
"is",
"the",
"first",
"time",
"getting",
"commits",
"for",
"a",
"repo",
"it",
"will",
"get",
"all",
"commits",
"and",
"save",
"them",
"to",
"JSON",
".",
"If",
"there",
"are",
"previous",
"commits",
"saved",
"it",
"will",
"only",
"get",
"commits",
"that",
"have",
"not",
"been",
"saved",
"to",
"disk",
"since",
"the",
"last",
"date",
"of",
"commits",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L338-L370
|
[
"def",
"get_commits",
"(",
"self",
",",
"repo",
",",
"organization",
"=",
"'llnl'",
")",
":",
"#JSON",
"path",
"=",
"(",
"'../github-data/'",
"+",
"organization",
"+",
"'/'",
"+",
"repo",
".",
"name",
"+",
"'/commits'",
")",
"is_only_today",
"=",
"False",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"#no previous path, get all commits",
"all_commits",
"=",
"repo",
".",
"iter_commits",
"(",
")",
"is_only_today",
"=",
"True",
"else",
":",
"files",
"=",
"os",
".",
"listdir",
"(",
"path",
")",
"date",
"=",
"str",
"(",
"files",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"5",
"]",
")",
"if",
"date",
"==",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
":",
"#most recent date is actually today, get previous most recent date",
"if",
"len",
"(",
"files",
")",
">",
"2",
":",
"date",
"=",
"str",
"(",
"files",
"[",
"-",
"2",
"]",
"[",
":",
"-",
"5",
"]",
")",
"else",
":",
"#This means there is only one file, today. Retrieve every commit",
"all_commits",
"=",
"repo",
".",
"iter_commits",
"(",
")",
"is_only_today",
"=",
"True",
"if",
"not",
"is_only_today",
":",
"#there's a previous saved JSON that's not today",
"all_commits",
"=",
"repo",
".",
"iter_commits",
"(",
"since",
"=",
"date",
")",
"for",
"commit",
"in",
"all_commits",
":",
"self",
".",
"commits_json",
"[",
"repo",
".",
"name",
"]",
".",
"append",
"(",
"commit",
".",
"to_json",
"(",
")",
")",
"#for csv",
"count",
"=",
"0",
"for",
"commit",
"in",
"repo",
".",
"iter_commits",
"(",
")",
":",
"count",
"+=",
"1",
"return",
"count"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
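The per-repo commit count at the end of ``get_commits`` is a manual counter loop; a generator one-liner is equivalent (sketch only, using ``iter_commits`` exactly as the record does):

def count_commits(repo):
    return sum(1 for _ in repo.iter_commits())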
|
test
|
GitHub_LLNL_Stats.write_org_json
|
Writes stats from the organization to JSON.
|
scripts/github_stats.py
|
def write_org_json(self, date=(datetime.date.today()),
organization='llnl', dict_to_write={}, path_ending_type='',
is_list=False):
"""
Writes stats from the organization to JSON.
"""
path = ('../github-data/' + organization + '-org/'
+ path_ending_type + '/' + str(date) + '.json')
self.checkDir(path)
with open(path, 'w') as out_clear:#clear old data
out_clear.close()
with open(path, 'a') as out:
if is_list:#used for list of items
out.write('[')
for item in dict_to_write:
out.write(json.dumps(dict_to_write[item], sort_keys=True,
indent=4, separators=(',', ': ')) + ',')
out.seek(-1, os.SEEK_END)#kill last comma
out.truncate()
if is_list:
out.write(']')
out.close()
|
def write_org_json(self, date=(datetime.date.today()),
organization='llnl', dict_to_write={}, path_ending_type='',
is_list=False):
"""
Writes stats from the organization to JSON.
"""
path = ('../github-data/' + organization + '-org/'
+ path_ending_type + '/' + str(date) + '.json')
self.checkDir(path)
with open(path, 'w') as out_clear:#clear old data
out_clear.close()
with open(path, 'a') as out:
if is_list:#used for list of items
out.write('[')
for item in dict_to_write:
out.write(json.dumps(dict_to_write[item], sort_keys=True,
indent=4, separators=(',', ': ')) + ',')
out.seek(-1, os.SEEK_END)#kill last comma
out.truncate()
if is_list:
out.write(']')
out.close()
|
[
"Writes",
"stats",
"from",
"the",
"organization",
"to",
"JSON",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L372-L393
|
[
"def",
"write_org_json",
"(",
"self",
",",
"date",
"=",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
",",
"organization",
"=",
"'llnl'",
",",
"dict_to_write",
"=",
"{",
"}",
",",
"path_ending_type",
"=",
"''",
",",
"is_list",
"=",
"False",
")",
":",
"path",
"=",
"(",
"'../github-data/'",
"+",
"organization",
"+",
"'-org/'",
"+",
"path_ending_type",
"+",
"'/'",
"+",
"str",
"(",
"date",
")",
"+",
"'.json'",
")",
"self",
".",
"checkDir",
"(",
"path",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"out_clear",
":",
"#clear old data",
"out_clear",
".",
"close",
"(",
")",
"with",
"open",
"(",
"path",
",",
"'a'",
")",
"as",
"out",
":",
"if",
"is_list",
":",
"#used for list of items",
"out",
".",
"write",
"(",
"'['",
")",
"for",
"item",
"in",
"dict_to_write",
":",
"out",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"dict_to_write",
"[",
"item",
"]",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"+",
"','",
")",
"out",
".",
"seek",
"(",
"-",
"1",
",",
"os",
".",
"SEEK_END",
")",
"#kill last comma",
"out",
".",
"truncate",
"(",
")",
"if",
"is_list",
":",
"out",
".",
"write",
"(",
"']'",
")",
"out",
".",
"close",
"(",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
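``write_org_json`` builds a JSON array by appending comma-terminated objects, then seeking back to truncate the final comma. Serializing the collected values in one ``json.dump`` call yields the same file without the seek/truncate step, and also handles an empty collection gracefully (a sketch, not the repo's API):

import json

def write_json_list(path, dict_to_write):
    with open(path, 'w') as out:
        json.dump(list(dict_to_write.values()), out, sort_keys=True,
                  indent=4, separators=(',', ': '))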
|
test
|
GitHub_LLNL_Stats.write_repo_json
|
Writes repo specific data to JSON.
|
scripts/github_stats.py
|
def write_repo_json(self, date=(datetime.date.today()),
organization='llnl', dict_to_write={}, path_ending_type='',
is_list=False, is_dict=False):
"""
Writes repo specific data to JSON.
"""
for repo in dict_to_write:
path = ('../github-data/' + organization + '/' + repo + '/' +
path_ending_type + '/' + str(date) + '.json')
self.checkDir(path)
with open(path, 'w') as out:
if is_list:
out.write('[')
for value in dict_to_write[repo]:
if is_dict:
for inner_dict in value:
out.write(json.dumps(inner_dict, sort_keys=True,
indent=4, separators=(',', ': ')) + ',')
else:
out.write(json.dumps(value, sort_keys=True,
indent=4, separators=(',', ': ')) + ',')
out.seek(-1, os.SEEK_END)#kill last comma
out.truncate()
out.write(']')
else:
out.write(json.dumps(dict_to_write[repo], sort_keys=True,
indent=4, separators=(',', ': ')))
out.close()
|
def write_repo_json(self, date=(datetime.date.today()),
organization='llnl', dict_to_write={}, path_ending_type='',
is_list=False, is_dict=False):
"""
Writes repo specific data to JSON.
"""
for repo in dict_to_write:
path = ('../github-data/' + organization + '/' + repo + '/' +
path_ending_type + '/' + str(date) + '.json')
self.checkDir(path)
with open(path, 'w') as out:
if is_list:
out.write('[')
for value in dict_to_write[repo]:
if is_dict:
for inner_dict in value:
out.write(json.dumps(inner_dict, sort_keys=True,
indent=4, separators=(',', ': ')) + ',')
else:
out.write(json.dumps(value, sort_keys=True,
indent=4, separators=(',', ': ')) + ',')
out.seek(-1, os.SEEK_END)#kill last comma
out.truncate()
out.write(']')
else:
out.write(json.dumps(dict_to_write[repo], sort_keys=True,
indent=4, separators=(',', ': ')))
out.close()
|
[
"#Writes",
"repo",
"specific",
"data",
"to",
"JSON",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L395-L422
|
[
"def",
"write_repo_json",
"(",
"self",
",",
"date",
"=",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
",",
"organization",
"=",
"'llnl'",
",",
"dict_to_write",
"=",
"{",
"}",
",",
"path_ending_type",
"=",
"''",
",",
"is_list",
"=",
"False",
",",
"is_dict",
"=",
"False",
")",
":",
"for",
"repo",
"in",
"dict_to_write",
":",
"path",
"=",
"(",
"'../github-data/'",
"+",
"organization",
"+",
"'/'",
"+",
"repo",
"+",
"'/'",
"+",
"path_ending_type",
"+",
"'/'",
"+",
"str",
"(",
"date",
")",
"+",
"'.json'",
")",
"self",
".",
"checkDir",
"(",
"path",
")",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"out",
":",
"if",
"is_list",
":",
"out",
".",
"write",
"(",
"'['",
")",
"for",
"value",
"in",
"dict_to_write",
"[",
"repo",
"]",
":",
"if",
"is_dict",
":",
"for",
"inner_dict",
"in",
"value",
":",
"out",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"inner_dict",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"+",
"','",
")",
"else",
":",
"out",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"value",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
"+",
"','",
")",
"out",
".",
"seek",
"(",
"-",
"1",
",",
"os",
".",
"SEEK_END",
")",
"#kill last comma",
"out",
".",
"truncate",
"(",
")",
"out",
".",
"write",
"(",
"']'",
")",
"else",
":",
"out",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"dict_to_write",
"[",
"repo",
"]",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"out",
".",
"close",
"(",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
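
The seek(-1, os.SEEK_END)/truncate() trailing-comma trick in write_repo_json is fragile: it assumes at least one element was written and that the stream position is at EOF. A minimal sketch of the same serialization built with a single json.dump call; the function name and argument shapes here are illustrative, not from the repo:

import json

def write_repo_json_simple(dict_to_write, path, is_list=False):
    # Build the whole structure first, then serialize in one pass;
    # json.dump emits valid JSON with no trailing comma to patch out.
    with open(path, 'w') as out:
        payload = list(dict_to_write.values()) if is_list else dict_to_write
        json.dump(payload, out, sort_keys=True, indent=4,
                  separators=(',', ': '))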
|
test
|
GitHub_LLNL_Stats.write_to_file
|
Writes the current organization information to file (csv).
|
scripts/github_stats.py
|
def write_to_file(self, file_path='', date=str(datetime.date.today()),
organization='N/A', members=0, teams=0):
"""
Writes the current organization information to file (csv).
"""
self.checkDir(file_path)
with open(file_path, 'w+') as output:
output.write('date,organization,members,teams,unique_contributors,'
+ 'repository,contributors,forks,stargazers,pull_requests,'
+ 'open_issues,has_readme,has_license,languages,pull_requests_open,'
+ 'pull_requests_closed,commits,closed_issues,issues\n' + date + ','
+ organization + ',' + str(members) + ',' + str(teams) + ','
+ str(len(self.unique_contributors)) + '\n')
for repo in self.all_repos:
output.write(',,,,,' + repo.name + ',' + str(repo.contributors)
+ ',' + str(repo.forks) + ','
+ str(repo.stargazers) + ',' + str(repo.pull_requests) + ','
+ str(repo.open_issues) + ',' + str(repo.readme) + ','
+ str(repo.license) + ',' + ' '.join(sorted(repo.languages))
+ ',' + str(repo.pull_requests_open) + ','
+ str(repo.pull_requests_closed) + ',' + str(repo.commits)
+ ',' + str(repo.closed_issues) + ',' + str(repo.issues)
+ '\n')
output.write(',,,,total,' + str(self.total_repos) + ','
+ str(self.total_contributors) + ','
+ str(self.total_forks) + ',' + str(self.total_stars) + ','
+ str(self.total_pull_reqs) + ',' + str(self.total_open_issues)
+ ',' + str(self.total_readmes) + ',' + str(self.total_licenses)
+ ',,' + str(self.total_pull_reqs_open) + ','
+ str(self.total_pull_reqs_closed) + ','
+ str(self.total_commits) + ',' + str(self.total_closed_issues)
+ ',' + str(self.total_issues))
output.close()
#Update total
self.write_totals(file_path="../github_stats_output/total.csv", date=date,
organization=organization, members=members, teams=teams)
#Update language sizes
self.write_languages(file_path='../github_stats_output/languages.csv',
date=date)
|
def write_to_file(self, file_path='', date=str(datetime.date.today()),
organization='N/A', members=0, teams=0):
"""
Writes the current organization information to file (csv).
"""
self.checkDir(file_path)
with open(file_path, 'w+') as output:
output.write('date,organization,members,teams,unique_contributors,'
+ 'repository,contributors,forks,stargazers,pull_requests,'
+ 'open_issues,has_readme,has_license,languages,pull_requests_open,'
+ 'pull_requests_closed,commits,closed_issues,issues\n' + date + ','
+ organization + ',' + str(members) + ',' + str(teams) + ','
+ str(len(self.unique_contributors)) + '\n')
for repo in self.all_repos:
output.write(',,,,,' + repo.name + ',' + str(repo.contributors)
+ ',' + str(repo.forks) + ','
+ str(repo.stargazers) + ',' + str(repo.pull_requests) + ','
+ str(repo.open_issues) + ',' + str(repo.readme) + ','
+ str(repo.license) + ',' + ' '.join(sorted(repo.languages))
+ ',' + str(repo.pull_requests_open) + ','
+ str(repo.pull_requests_closed) + ',' + str(repo.commits)
+ ',' + str(repo.closed_issues) + ',' + str(repo.issues)
+ '\n')
output.write(',,,,total,' + str(self.total_repos) + ','
+ str(self.total_contributors) + ','
+ str(self.total_forks) + ',' + str(self.total_stars) + ','
+ str(self.total_pull_reqs) + ',' + str(self.total_open_issues)
+ ',' + str(self.total_readmes) + ',' + str(self.total_licenses)
+ ',,' + str(self.total_pull_reqs_open) + ','
+ str(self.total_pull_reqs_closed) + ','
+ str(self.total_commits) + ',' + str(self.total_closed_issues)
+ ',' + str(self.total_issues))
output.close()
#Update total
self.write_totals(file_path="../github_stats_output/total.csv", date=date,
organization=organization, members=members, teams=teams)
#Update language sizes
self.write_languages(file_path='../github_stats_output/languages.csv',
date=date)
|
[
"Writes",
"the",
"current",
"organization",
"information",
"to",
"file",
"(",
"csv",
")",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L424-L462
|
[
"def",
"write_to_file",
"(",
"self",
",",
"file_path",
"=",
"''",
",",
"date",
"=",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
",",
"organization",
"=",
"'N/A'",
",",
"members",
"=",
"0",
",",
"teams",
"=",
"0",
")",
":",
"self",
".",
"checkDir",
"(",
"file_path",
")",
"with",
"open",
"(",
"file_path",
",",
"'w+'",
")",
"as",
"output",
":",
"output",
".",
"write",
"(",
"'date,organization,members,teams,unique_contributors,'",
"+",
"'repository,contributors,forks,stargazers,pull_requests,'",
"+",
"'open_issues,has_readme,has_license,languages,pull_requests_open,'",
"+",
"'pull_requests_closed,commits,closed_issues,issues\\n'",
"+",
"date",
"+",
"','",
"+",
"organization",
"+",
"','",
"+",
"str",
"(",
"members",
")",
"+",
"','",
"+",
"str",
"(",
"teams",
")",
"+",
"','",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"unique_contributors",
")",
")",
"+",
"'\\n'",
")",
"for",
"repo",
"in",
"self",
".",
"all_repos",
":",
"output",
".",
"write",
"(",
"',,,,,'",
"+",
"repo",
".",
"name",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"contributors",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"forks",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"stargazers",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"pull_requests",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"open_issues",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"readme",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"license",
")",
"+",
"','",
"+",
"' '",
".",
"join",
"(",
"sorted",
"(",
"repo",
".",
"languages",
")",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"pull_requests_open",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"pull_requests_closed",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"commits",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"closed_issues",
")",
"+",
"','",
"+",
"str",
"(",
"repo",
".",
"issues",
")",
"+",
"'\\n'",
")",
"output",
".",
"write",
"(",
"',,,,total,'",
"+",
"str",
"(",
"self",
".",
"total_repos",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_contributors",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_forks",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_stars",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_pull_reqs",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_open_issues",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_readmes",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_licenses",
")",
"+",
"',,'",
"+",
"str",
"(",
"self",
".",
"total_pull_reqs_open",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_pull_reqs_closed",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_commits",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_closed_issues",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_issues",
")",
")",
"output",
".",
"close",
"(",
")",
"#Update total",
"self",
".",
"write_totals",
"(",
"file_path",
"=",
"\"../github_stats_output/total.csv\"",
",",
"date",
"=",
"date",
",",
"organization",
"=",
"organization",
",",
"members",
"=",
"members",
",",
"teams",
"=",
"teams",
")",
"#Update language sizes",
"self",
".",
"write_languages",
"(",
"file_path",
"=",
"'../github_stats_output/languages.csv'",
",",
"date",
"=",
"date",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
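
The rows in write_to_file are assembled by string concatenation, which would break if any field (for example a repo name) ever contained a comma. A hedged sketch of the same per-repo rows using csv.writer, which quotes such fields automatically; the attribute names mirror the ones used above and the column subset is illustrative:

import csv

def write_repo_rows(file_path, repos):
    # repos: iterable of objects carrying the per-repo attributes above.
    with open(file_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['repository', 'contributors', 'forks',
                         'stargazers', 'pull_requests', 'open_issues'])
        for repo in repos:
            # csv.writer handles quoting/escaping; no manual '+' joins.
            writer.writerow([repo.name, repo.contributors, repo.forks,
                             repo.stargazers, repo.pull_requests,
                             repo.open_issues])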
|
test
|
GitHub_LLNL_Stats.write_totals
|
Updates the total.csv file with current data.
|
scripts/github_stats.py
|
def write_totals(self, file_path='', date=str(datetime.date.today()),
organization='N/A', members=0, teams=0):
"""
Updates the total.csv file with current data.
"""
total_exists = os.path.isfile(file_path)
with open(file_path, 'a') as out_total:
if not total_exists:
out_total.write('date,organization,repos,members,teams,'
+ 'unique_contributors,total_contributors,forks,'
+ 'stargazers,pull_requests,open_issues,has_readme,'
+ 'has_license,pull_requests_open,pull_requests_closed,'
+ 'commits,id,closed_issues,issues\n')
self.delete_last_line(date=date, file_path=file_path)
out_total.close()
with open(file_path, 'r') as file_read:
row_count = sum(1 for row in file_read) - 1
file_read.close()
with open(file_path, 'a') as out_total:
out_total.write(date + ',' + organization + ','
+ str(self.total_repos) + ',' + str(members) + ',' + str(teams)
+ ',' + str(len(self.unique_contributors)) + ','
+ str(self.total_contributors) + ',' + str(self.total_forks)
+ ',' + str(self.total_stars) + ',' + str(self.total_pull_reqs)
+ ',' + str(self.total_open_issues) + ','
+ str(self.total_readmes) + ',' + str(self.total_licenses) + ','
+ str(self.total_pull_reqs_open) + ','
+ str(self.total_pull_reqs_closed) + ','
+ str(self.total_commits) + ',' + str(row_count) + ','
+ str(self.total_closed_issues) + ',' + str(self.total_issues)
+ '\n')
out_total.close()
|
def write_totals(self, file_path='', date=str(datetime.date.today()),
organization='N/A', members=0, teams=0):
"""
Updates the total.csv file with current data.
"""
total_exists = os.path.isfile(file_path)
with open(file_path, 'a') as out_total:
if not total_exists:
out_total.write('date,organization,repos,members,teams,'
+ 'unique_contributors,total_contributors,forks,'
+ 'stargazers,pull_requests,open_issues,has_readme,'
+ 'has_license,pull_requests_open,pull_requests_closed,'
+ 'commits,id,closed_issues,issues\n')
self.delete_last_line(date=date, file_path=file_path)
out_total.close()
with open(file_path, 'r') as file_read:
row_count = sum(1 for row in file_read) - 1
file_read.close()
with open(file_path, 'a') as out_total:
out_total.write(date + ',' + organization + ','
+ str(self.total_repos) + ',' + str(members) + ',' + str(teams)
+ ',' + str(len(self.unique_contributors)) + ','
+ str(self.total_contributors) + ',' + str(self.total_forks)
+ ',' + str(self.total_stars) + ',' + str(self.total_pull_reqs)
+ ',' + str(self.total_open_issues) + ','
+ str(self.total_readmes) + ',' + str(self.total_licenses) + ','
+ str(self.total_pull_reqs_open) + ','
+ str(self.total_pull_reqs_closed) + ','
+ str(self.total_commits) + ',' + str(row_count) + ','
+ str(self.total_closed_issues) + ',' + str(self.total_issues)
+ '\n')
out_total.close()
|
[
"Updates",
"the",
"total",
".",
"csv",
"file",
"with",
"current",
"data",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L464-L496
|
[
"def",
"write_totals",
"(",
"self",
",",
"file_path",
"=",
"''",
",",
"date",
"=",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
",",
"organization",
"=",
"'N/A'",
",",
"members",
"=",
"0",
",",
"teams",
"=",
"0",
")",
":",
"total_exists",
"=",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
"with",
"open",
"(",
"file_path",
",",
"'a'",
")",
"as",
"out_total",
":",
"if",
"not",
"total_exists",
":",
"out_total",
".",
"write",
"(",
"'date,organization,repos,members,teams,'",
"+",
"'unique_contributors,total_contributors,forks,'",
"+",
"'stargazers,pull_requests,open_issues,has_readme,'",
"+",
"'has_license,pull_requests_open,pull_requests_closed,'",
"+",
"'commits,id,closed_issues,issues\\n'",
")",
"self",
".",
"delete_last_line",
"(",
"date",
"=",
"date",
",",
"file_path",
"=",
"file_path",
")",
"out_total",
".",
"close",
"(",
")",
"with",
"open",
"(",
"file_path",
",",
"'r'",
")",
"as",
"file_read",
":",
"row_count",
"=",
"sum",
"(",
"1",
"for",
"row",
"in",
"file_read",
")",
"-",
"1",
"file_read",
".",
"close",
"(",
")",
"with",
"open",
"(",
"file_path",
",",
"'a'",
")",
"as",
"out_total",
":",
"out_total",
".",
"write",
"(",
"date",
"+",
"','",
"+",
"organization",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_repos",
")",
"+",
"','",
"+",
"str",
"(",
"members",
")",
"+",
"','",
"+",
"str",
"(",
"teams",
")",
"+",
"','",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"unique_contributors",
")",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_contributors",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_forks",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_stars",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_pull_reqs",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_open_issues",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_readmes",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_licenses",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_pull_reqs_open",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_pull_reqs_closed",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_commits",
")",
"+",
"','",
"+",
"str",
"(",
"row_count",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_closed_issues",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"total_issues",
")",
"+",
"'\\n'",
")",
"out_total",
".",
"close",
"(",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
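
The id column in write_totals is derived by re-reading the file and counting lines minus the header. A small sketch of that computation in isolation, assuming the file exists and begins with a header row:

def data_row_count(file_path):
    # Total lines minus the header row; this is the value written into
    # the 'id' column above.
    with open(file_path) as f:
        return sum(1 for _ in f) - 1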
|
test
|
GitHub_LLNL_Stats.write_languages
|
Updates languages.csv file with current data.
|
scripts/github_stats.py
|
def write_languages(self, file_path='',date=str(datetime.date.today())):
"""
Updates languages.csv file with current data.
"""
self.remove_date(file_path=file_path, date=date)
languages_exists = os.path.isfile(file_path)
with open(file_path, 'a') as out_languages:
if not languages_exists:
out_languages.write('date,language,count,size,size_log\n')
languages_sorted = sorted(self.languages_size)
#self.delete_last_line(date=date, file_path=file_path)
for language in languages_sorted:
try:
out_languages.write(date + ',' + language + ','
+ str(self.languages[language]) + ','
+ str(self.languages_size[language]) + ','
+ str(math.log10(int(self.languages_size[language])))
+ '\n')
except (TypeError, KeyError) as e:
out_languages.write(date + ',' + language + ','
+ str(0) + ','
+ str(self.languages_size[language]) + ','
+ str(math.log10(int(self.languages_size[language])))
+ '\n')
|
def write_languages(self, file_path='',date=str(datetime.date.today())):
"""
Updates languages.csv file with current data.
"""
self.remove_date(file_path=file_path, date=date)
languages_exists = os.path.isfile(file_path)
with open(file_path, 'a') as out_languages:
if not languages_exists:
out_languages.write('date,language,count,size,size_log\n')
languages_sorted = sorted(self.languages_size)
#self.delete_last_line(date=date, file_path=file_path)
for language in languages_sorted:
try:
out_languages.write(date + ',' + language + ','
+ str(self.languages[language]) + ','
+ str(self.languages_size[language]) + ','
+ str(math.log10(int(self.languages_size[language])))
+ '\n')
except (TypeError, KeyError) as e:
out_languages.write(date + ',' + language + ','
+ str(0) + ','
+ str(self.languages_size[language]) + ','
+ str(math.log10(int(self.languages_size[language])))
+ '\n')
|
[
"Updates",
"languages",
".",
"csv",
"file",
"with",
"current",
"data",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L498-L521
|
[
"def",
"write_languages",
"(",
"self",
",",
"file_path",
"=",
"''",
",",
"date",
"=",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
")",
":",
"self",
".",
"remove_date",
"(",
"file_path",
"=",
"file_path",
",",
"date",
"=",
"date",
")",
"languages_exists",
"=",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
"with",
"open",
"(",
"file_path",
",",
"'a'",
")",
"as",
"out_languages",
":",
"if",
"not",
"languages_exists",
":",
"out_languages",
".",
"write",
"(",
"'date,language,count,size,size_log\\n'",
")",
"languages_sorted",
"=",
"sorted",
"(",
"self",
".",
"languages_size",
")",
"#self.delete_last_line(date=date, file_path=file_path)",
"for",
"language",
"in",
"languages_sorted",
":",
"try",
":",
"out_languages",
".",
"write",
"(",
"date",
"+",
"','",
"+",
"language",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"languages",
"[",
"language",
"]",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"languages_size",
"[",
"language",
"]",
")",
"+",
"','",
"+",
"str",
"(",
"math",
".",
"log10",
"(",
"int",
"(",
"self",
".",
"languages_size",
"[",
"language",
"]",
")",
")",
")",
"+",
"'\\n'",
")",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
"as",
"e",
":",
"out_languages",
".",
"write",
"(",
"date",
"+",
"','",
"+",
"language",
"+",
"','",
"+",
"str",
"(",
"0",
")",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"languages_size",
"[",
"language",
"]",
")",
"+",
"','",
"+",
"str",
"(",
"math",
".",
"log10",
"(",
"int",
"(",
"self",
".",
"languages_size",
"[",
"language",
"]",
")",
")",
")",
"+",
"'\\n'",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
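
One caveat in write_languages: math.log10 raises ValueError for a size of 0, which the except clause above does not catch, and a None size raises TypeError again inside the handler, since the same log10 expression is repeated there. A defensive sketch for the size_log column; the helper name is illustrative:

import math

def size_log(size):
    # log10 of a language's byte count; guard the size <= 0 case that
    # math.log10 rejects with ValueError (unhandled above).
    try:
        size = int(size)
    except (TypeError, ValueError):
        return 0.0
    return math.log10(size) if size > 0 else 0.0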
|
test
|
GitHub_LLNL_Stats.checkDir
|
Checks if a directory exists. If not, it creates one with the specified
file_path.
|
scripts/github_stats.py
|
def checkDir(self, file_path=''):
"""
Checks if a directory exists. If not, it creates one with the specified
file_path.
"""
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except OSError as e:
if e.errno != errno.EEXIST:
raise
|
def checkDir(self, file_path=''):
"""
Checks if a directory exists. If not, it creates one with the specified
file_path.
"""
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except OSError as e:
if e.errno != errno.EEXIST:
raise
|
[
"Checks",
"if",
"a",
"directory",
"exists",
".",
"If",
"not",
"it",
"creates",
"one",
"with",
"the",
"specified",
"file_path",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L523-L533
|
[
"def",
"checkDir",
"(",
"self",
",",
"file_path",
"=",
"''",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file_path",
")",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file_path",
")",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
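
On Python 3, the exists()/makedirs()/errno dance in checkDir collapses to exist_ok=True, which also closes the race between the check and the create. One extra edge case worth guarding: os.path.dirname() returns '' for a bare filename, and makedirs('') raises. A sketch, with an illustrative name:

import os

def ensure_parent_dir(file_path):
    # Python 3 equivalent of checkDir above; exist_ok=True makes the
    # call idempotent and race-free.
    dirname = os.path.dirname(file_path)
    if dirname:                      # '' means the CWD; nothing to create
        os.makedirs(dirname, exist_ok=True)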
|
test
|
GitHub_LLNL_Stats.remove_date
|
Removes all rows of the associated date from the given csv file.
Defaults to today.
|
scripts/github_stats.py
|
def remove_date(self, file_path='', date=str(datetime.date.today())):
"""
Removes all rows of the associated date from the given csv file.
Defaults to today.
"""
languages_exists = os.path.isfile(file_path)
if languages_exists:
with open(file_path, 'rb') as inp, open('temp.csv', 'wb') as out:
writer = csv.writer(out)
for row in csv.reader(inp):
if row[0] != date:
writer.writerow(row)
inp.close()
out.close()
os.remove(file_path)
os.rename("temp.csv",file_path)
|
def remove_date(self, file_path='', date=str(datetime.date.today())):
"""
Removes all rows of the associated date from the given csv file.
Defaults to today.
"""
languages_exists = os.path.isfile(file_path)
if languages_exists:
with open(file_path, 'rb') as inp, open('temp.csv', 'wb') as out:
writer = csv.writer(out)
for row in csv.reader(inp):
if row[0] != date:
writer.writerow(row)
inp.close()
out.close()
os.remove(file_path)
os.rename("temp.csv",file_path)
|
[
"Removes",
"all",
"rows",
"of",
"the",
"associated",
"date",
"from",
"the",
"given",
"csv",
"file",
".",
"Defaults",
"to",
"today",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L535-L550
|
[
"def",
"remove_date",
"(",
"self",
",",
"file_path",
"=",
"''",
",",
"date",
"=",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
")",
":",
"languages_exists",
"=",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
"if",
"languages_exists",
":",
"with",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"as",
"inp",
",",
"open",
"(",
"'temp.csv'",
",",
"'wb'",
")",
"as",
"out",
":",
"writer",
"=",
"csv",
".",
"writer",
"(",
"out",
")",
"for",
"row",
"in",
"csv",
".",
"reader",
"(",
"inp",
")",
":",
"if",
"row",
"[",
"0",
"]",
"!=",
"date",
":",
"writer",
".",
"writerow",
"(",
"row",
")",
"inp",
".",
"close",
"(",
")",
"out",
".",
"close",
"(",
")",
"os",
".",
"remove",
"(",
"file_path",
")",
"os",
".",
"rename",
"(",
"\"temp.csv\"",
",",
"file_path",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
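
remove_date stages rows in a fixed 'temp.csv' in the working directory, so two overlapping runs would clobber each other, and a crash between os.remove and os.rename loses the original file. A Python 3 sketch using a unique temp file in the target directory with an atomic replace; names are illustrative:

import csv
import os
import tempfile

def remove_date_rows(file_path, date):
    # Filter out rows whose first column equals `date`, then atomically
    # swap the filtered copy into place.
    if not os.path.isfile(file_path):
        return
    dir_name = os.path.dirname(file_path) or '.'
    fd, tmp_path = tempfile.mkstemp(dir=dir_name, suffix='.csv')
    with os.fdopen(fd, 'w', newline='') as out, \
            open(file_path, newline='') as inp:
        writer = csv.writer(out)
        writer.writerows(row for row in csv.reader(inp)
                         if row and row[0] != date)
    os.replace(tmp_path, file_path)  # atomic on POSIX and Windows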
|
test
|
GitHub_LLNL_Stats.delete_last_line
|
The following code was modified from
http://stackoverflow.com/a/10289740 &
http://stackoverflow.com/a/17309010
It essentially will check if the total for the current date already
exists in total.csv. If it does, it just removes the last line.
This is so the script could be run more than once a day and not
create many entries in the total.csv file for the same date.
|
scripts/github_stats.py
|
def delete_last_line(self, file_path='', date=str(datetime.date.today())):
"""
The following code was modified from
http://stackoverflow.com/a/10289740 &
http://stackoverflow.com/a/17309010
It essentially will check if the total for the current date already
exists in total.csv. If it does, it just removes the last line.
This is so the script could be run more than once a day and not
create many entries in the total.csv file for the same date.
"""
deleted_line = False
if os.path.isfile(file_path):
with open(file_path, 'r+') as file:
reader = csv.reader(file, delimiter=',')
for row in reader:
if date == row[0]:
file.seek(0, os.SEEK_END)
pos = file.tell() - 1
while pos > 0 and file.read(1) != "\n":
pos -= 1
file.seek(pos, os.SEEK_SET)
if pos > 0:
file.seek(pos, os.SEEK_SET)
file.truncate()
deleted_line = True
break
if deleted_line: file.write('\n')
file.close()
|
def delete_last_line(self, file_path='', date=str(datetime.date.today())):
"""
The following code was modified from
http://stackoverflow.com/a/10289740 &
http://stackoverflow.com/a/17309010
It essentially will check if the total for the current date already
exists in total.csv. If it does, it just removes the last line.
This is so the script could be run more than once a day and not
create many entries in the total.csv file for the same date.
"""
deleted_line = False
if os.path.isfile(file_path):
with open(file_path, 'r+') as file:
reader = csv.reader(file, delimiter=',')
for row in reader:
if date == row[0]:
file.seek(0, os.SEEK_END)
pos = file.tell() - 1
while pos > 0 and file.read(1) != "\n":
pos -= 1
file.seek(pos, os.SEEK_SET)
if pos > 0:
file.seek(pos, os.SEEK_SET)
file.truncate()
deleted_line = True
break
if deleted_line: file.write('\n')
file.close()
|
[
"The",
"following",
"code",
"was",
"modified",
"from",
"http",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"a",
"/",
"10289740",
"&",
"http",
":",
"//",
"stackoverflow",
".",
"com",
"/",
"a",
"/",
"17309010",
"It",
"essentially",
"will",
"check",
"if",
"the",
"total",
"for",
"the",
"current",
"date",
"already",
"exists",
"in",
"total",
".",
"csv",
".",
"If",
"it",
"does",
"it",
"just",
"removes",
"the",
"last",
"line",
".",
"This",
"is",
"so",
"the",
"script",
"could",
"be",
"run",
"more",
"than",
"once",
"a",
"day",
"and",
"not",
"create",
"many",
"entries",
"in",
"the",
"total",
".",
"csv",
"file",
"for",
"the",
"same",
"date",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/github_stats.py#L552-L579
|
[
"def",
"delete_last_line",
"(",
"self",
",",
"file_path",
"=",
"''",
",",
"date",
"=",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
")",
":",
"deleted_line",
"=",
"False",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'r+'",
")",
"as",
"file",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"file",
",",
"delimiter",
"=",
"','",
")",
"for",
"row",
"in",
"reader",
":",
"if",
"date",
"==",
"row",
"[",
"0",
"]",
":",
"file",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_END",
")",
"pos",
"=",
"file",
".",
"tell",
"(",
")",
"-",
"1",
"while",
"pos",
">",
"0",
"and",
"file",
".",
"read",
"(",
"1",
")",
"!=",
"\"\\n\"",
":",
"pos",
"-=",
"1",
"file",
".",
"seek",
"(",
"pos",
",",
"os",
".",
"SEEK_SET",
")",
"if",
"pos",
">",
"0",
":",
"file",
".",
"seek",
"(",
"pos",
",",
"os",
".",
"SEEK_SET",
")",
"file",
".",
"truncate",
"(",
")",
"deleted_line",
"=",
"True",
"break",
"if",
"deleted_line",
":",
"file",
".",
"write",
"(",
"'\\n'",
")",
"file",
".",
"close",
"(",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
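
The backward scan in delete_last_line is easy to get wrong, since it reads one byte per iteration and repositions the file manually. A standalone sketch of the same idea that drops the final line of a file; note that a one-line file ends up empty here, whereas the version above leaves it untouched:

import os

def truncate_last_line(file_path):
    # Scan backwards from EOF to the previous newline, then truncate
    # just after it, removing the final line.
    with open(file_path, 'rb+') as f:
        f.seek(0, os.SEEK_END)
        pos = f.tell() - 1
        while pos > 0 and f.read(1) != b'\n':
            pos -= 1
            f.seek(pos, os.SEEK_SET)
        f.truncate(pos + 1 if pos > 0 else 0)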
|
test
|
gov_orgs
|
Returns a list of the names of US Government GitHub organizations
Based on: https://government.github.com/community/
Example return:
['llnl', '18f', 'gsa', 'dhs-ncats', 'spack', ...]
|
scraper/github/__init__.py
|
def gov_orgs():
"""
Returns a list of the names of US Government GitHub organizations
Based on: https://government.github.com/community/
    Example return:
    ['llnl', '18f', 'gsa', 'dhs-ncats', 'spack', ...]
"""
us_gov_github_orgs = set()
gov_orgs = requests.get('https://government.github.com/organizations.json').json()
us_gov_github_orgs.update(gov_orgs['governments']['U.S. Federal'])
us_gov_github_orgs.update(gov_orgs['governments']['U.S. Military and Intelligence'])
us_gov_github_orgs.update(gov_orgs['research']['U.S. Research Labs'])
return list(us_gov_github_orgs)
|
def gov_orgs():
"""
Returns a list of the names of US Government GitHub organizations
Based on: https://government.github.com/community/
    Example return:
    ['llnl', '18f', 'gsa', 'dhs-ncats', 'spack', ...]
"""
us_gov_github_orgs = set()
gov_orgs = requests.get('https://government.github.com/organizations.json').json()
us_gov_github_orgs.update(gov_orgs['governments']['U.S. Federal'])
us_gov_github_orgs.update(gov_orgs['governments']['U.S. Military and Intelligence'])
us_gov_github_orgs.update(gov_orgs['research']['U.S. Research Labs'])
return list(us_gov_github_orgs)
|
[
"Returns",
"a",
"list",
"of",
"the",
"names",
"of",
"US",
"Government",
"GitHub",
"organizations"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/__init__.py#L14-L31
|
[
"def",
"gov_orgs",
"(",
")",
":",
"us_gov_github_orgs",
"=",
"set",
"(",
")",
"gov_orgs",
"=",
"requests",
".",
"get",
"(",
"'https://government.github.com/organizations.json'",
")",
".",
"json",
"(",
")",
"us_gov_github_orgs",
".",
"update",
"(",
"gov_orgs",
"[",
"'governments'",
"]",
"[",
"'U.S. Federal'",
"]",
")",
"us_gov_github_orgs",
".",
"update",
"(",
"gov_orgs",
"[",
"'governments'",
"]",
"[",
"'U.S. Military and Intelligence'",
"]",
")",
"us_gov_github_orgs",
".",
"update",
"(",
"gov_orgs",
"[",
"'research'",
"]",
"[",
"'U.S. Research Labs'",
"]",
")",
"return",
"list",
"(",
"us_gov_github_orgs",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
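
A possible usage sketch for gov_orgs; it requires network access to government.github.com, and the printed values are illustrative:

orgs = gov_orgs()
print('%d US government GitHub organizations' % len(orgs))
print(sorted(orgs)[:5])   # peek at a few names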
|
test
|
create_session
|
Create a github3.py session connected to GitHub.com
If token is not provided, will attempt to use the GITHUB_API_TOKEN
environment variable if present.
|
scraper/github/__init__.py
|
def create_session(token=None):
"""
Create a github3.py session connected to GitHub.com
If token is not provided, will attempt to use the GITHUB_API_TOKEN
environment variable if present.
"""
if token is None:
token = os.environ.get('GITHUB_API_TOKEN', None)
gh_session = github3.login(token=token)
if gh_session is None:
raise RuntimeError('Invalid or missing GITHUB_API_TOKEN')
return gh_session
|
def create_session(token=None):
"""
Create a github3.py session connected to GitHub.com
If token is not provided, will attempt to use the GITHUB_API_TOKEN
environment variable if present.
"""
if token is None:
token = os.environ.get('GITHUB_API_TOKEN', None)
gh_session = github3.login(token=token)
if gh_session is None:
raise RuntimeError('Invalid or missing GITHUB_API_TOKEN')
return gh_session
|
[
"Create",
"a",
"github3",
".",
"py",
"session",
"connected",
"to",
"GitHub",
".",
"com"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/__init__.py#L34-L49
|
[
"def",
"create_session",
"(",
"token",
"=",
"None",
")",
":",
"if",
"token",
"is",
"None",
":",
"token",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'GITHUB_API_TOKEN'",
",",
"None",
")",
"gh_session",
"=",
"github3",
".",
"login",
"(",
"token",
"=",
"token",
")",
"if",
"gh_session",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'Invalid or missing GITHUB_API_TOKEN'",
")",
"return",
"gh_session"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
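
A possible usage sketch for create_session; the token value is a placeholder, and me() is the github3.py 1.x accessor for the authenticated user:

import os

# Hypothetical usage; the token value below is a placeholder.
os.environ.setdefault('GITHUB_API_TOKEN', 'ghp_placeholder')
gh = create_session()   # falls back to the environment variable
print(gh.me().login)    # github3.py 1.x: the authenticated user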
|
test
|
create_enterprise_session
|
Create a github3.py session for a GitHub Enterprise instance
If token is not provided, will attempt to use the GITHUB_API_TOKEN
environment variable if present.
|
scraper/github/__init__.py
|
def create_enterprise_session(url, token=None):
"""
Create a github3.py session for a GitHub Enterprise instance
If token is not provided, will attempt to use the GITHUB_API_TOKEN
environment variable if present.
"""
gh_session = github3.enterprise_login(url=url, token=token)
if gh_session is None:
msg = 'Unable to connect to GitHub Enterprise (%s) with provided token.'
raise RuntimeError(msg, url)
return gh_session
|
def create_enterprise_session(url, token=None):
"""
Create a github3.py session for a GitHub Enterprise instance
If token is not provided, will attempt to use the GITHUB_API_TOKEN
environment variable if present.
"""
gh_session = github3.enterprise_login(url=url, token=token)
if gh_session is None:
msg = 'Unable to connect to GitHub Enterprise (%s) with provided token.'
raise RuntimeError(msg, url)
return gh_session
|
[
"Create",
"a",
"github3",
".",
"py",
"session",
"for",
"a",
"GitHub",
"Enterprise",
"instance"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/__init__.py#L52-L66
|
[
"def",
"create_enterprise_session",
"(",
"url",
",",
"token",
"=",
"None",
")",
":",
"gh_session",
"=",
"github3",
".",
"enterprise_login",
"(",
"url",
"=",
"url",
",",
"token",
"=",
"token",
")",
"if",
"gh_session",
"is",
"None",
":",
"msg",
"=",
"'Unable to connect to GitHub Enterprise (%s) with provided token.'",
"raise",
"RuntimeError",
"(",
"msg",
",",
"url",
")",
"return",
"gh_session"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
_check_api_limits
|
Simplified check for API limits
If necessary, spin in place waiting for API to reset before returning.
See: https://developer.github.com/v3/#rate-limiting
|
scraper/github/__init__.py
|
def _check_api_limits(gh_session, api_required=250, sleep_time=15):
"""
Simplified check for API limits
If necessary, spin in place waiting for API to reset before returning.
See: https://developer.github.com/v3/#rate-limiting
"""
api_rates = gh_session.rate_limit()
api_remaining = api_rates['rate']['remaining']
api_reset = api_rates['rate']['reset']
logger.debug('Rate Limit - %d requests remaining', api_remaining)
if api_remaining > api_required:
return
now_time = time.time()
time_to_reset = int(api_reset - now_time)
logger.warn('Rate Limit Depleted - Sleeping for %d seconds', time_to_reset)
while now_time < api_reset:
time.sleep(10)
now_time = time.time()
return
|
def _check_api_limits(gh_session, api_required=250, sleep_time=15):
"""
Simplified check for API limits
If necessary, spin in place waiting for API to reset before returning.
See: https://developer.github.com/v3/#rate-limiting
"""
api_rates = gh_session.rate_limit()
api_remaining = api_rates['rate']['remaining']
api_reset = api_rates['rate']['reset']
logger.debug('Rate Limit - %d requests remaining', api_remaining)
if api_remaining > api_required:
return
now_time = time.time()
time_to_reset = int(api_reset - now_time)
logger.warn('Rate Limit Depleted - Sleeping for %d seconds', time_to_reset)
while now_time < api_reset:
time.sleep(10)
now_time = time.time()
return
|
[
"Simplified",
"check",
"for",
"API",
"limits"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/__init__.py#L76-L101
|
[
"def",
"_check_api_limits",
"(",
"gh_session",
",",
"api_required",
"=",
"250",
",",
"sleep_time",
"=",
"15",
")",
":",
"api_rates",
"=",
"gh_session",
".",
"rate_limit",
"(",
")",
"api_remaining",
"=",
"api_rates",
"[",
"'rate'",
"]",
"[",
"'remaining'",
"]",
"api_reset",
"=",
"api_rates",
"[",
"'rate'",
"]",
"[",
"'reset'",
"]",
"logger",
".",
"debug",
"(",
"'Rate Limit - %d requests remaining'",
",",
"api_remaining",
")",
"if",
"api_remaining",
">",
"api_required",
":",
"return",
"now_time",
"=",
"time",
".",
"time",
"(",
")",
"time_to_reset",
"=",
"int",
"(",
"api_reset",
"-",
"now_time",
")",
"logger",
".",
"warn",
"(",
"'Rate Limit Depleted - Sleeping for %d seconds'",
",",
"time_to_reset",
")",
"while",
"now_time",
"<",
"api_reset",
":",
"time",
".",
"sleep",
"(",
"10",
")",
"now_time",
"=",
"time",
".",
"time",
"(",
")",
"return"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
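
Two details worth noting in _check_api_limits: the sleep_time parameter is never used (the loop always sleeps 10 seconds), and the wait polls rather than sleeping once. A minimal single-sleep variant, assuming the same github3.py rate_limit() payload shape; the name is illustrative:

import time

def wait_for_rate_limit(gh_session, api_required=250):
    # Sleep once until the reset timestamp instead of polling.
    rate = gh_session.rate_limit()['rate']
    if rate['remaining'] > api_required:
        return
    delay = max(0, int(rate['reset'] - time.time()) + 1)
    time.sleep(delay)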
|
test
|
connect
|
Create a GitHub session for making requests
|
scraper/github/__init__.py
|
def connect(url='https://github.com', token=None):
"""
Create a GitHub session for making requests
"""
gh_session = None
if url == 'https://github.com':
gh_session = create_session(token)
else:
gh_session = create_enterprise_session(url, token)
if gh_session is None:
msg = 'Unable to connect to (%s) with provided token.'
raise RuntimeError(msg, url)
logger.info('Connected to: %s', url)
return gh_session
|
def connect(url='https://github.com', token=None):
"""
Create a GitHub session for making requests
"""
gh_session = None
if url == 'https://github.com':
gh_session = create_session(token)
else:
gh_session = create_enterprise_session(url, token)
if gh_session is None:
msg = 'Unable to connect to (%s) with provided token.'
raise RuntimeError(msg, url)
logger.info('Connected to: %s', url)
return gh_session
|
[
"Create",
"a",
"GitHub",
"session",
"for",
"making",
"requests"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/__init__.py#L104-L121
|
[
"def",
"connect",
"(",
"url",
"=",
"'https://github.com'",
",",
"token",
"=",
"None",
")",
":",
"gh_session",
"=",
"None",
"if",
"url",
"==",
"'https://github.com'",
":",
"gh_session",
"=",
"create_session",
"(",
"token",
")",
"else",
":",
"gh_session",
"=",
"create_enterprise_session",
"(",
"url",
",",
"token",
")",
"if",
"gh_session",
"is",
"None",
":",
"msg",
"=",
"'Unable to connect to (%s) with provided token.'",
"raise",
"RuntimeError",
"(",
"msg",
",",
"url",
")",
"logger",
".",
"info",
"(",
"'Connected to: %s'",
",",
"url",
")",
"return",
"gh_session"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
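
A possible usage sketch for connect; the Enterprise URL is a placeholder. Note that RuntimeError(msg, url) above attaches the URL as a second exception argument rather than interpolating the %s:

gh = connect()   # github.com, token from GITHUB_API_TOKEN

# Hypothetical Enterprise instance:
# gh = connect('https://github.example.com', token='ghp_placeholder')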
|
test
|
query_repos
|
Yields GitHub3.py repo objects for provided orgs and repo names
If orgs and repos are BOTH empty, execute special mode of getting ALL
repositories from the GitHub Server.
If public_only is True, will return only those repos that are marked as
public. Set this to false to return all organizations that the session has
permissions to access.
|
scraper/github/__init__.py
|
def query_repos(gh_session, orgs=None, repos=None, public_only=True):
"""
Yields GitHub3.py repo objects for provided orgs and repo names
If orgs and repos are BOTH empty, execute special mode of getting ALL
repositories from the GitHub Server.
If public_only is True, will return only those repos that are marked as
public. Set this to false to return all organizations that the session has
permissions to access.
"""
if orgs is None:
orgs = []
if repos is None:
repos = []
if public_only:
privacy = 'public'
else:
privacy = 'all'
_check_api_limits(gh_session, 10)
for org_name in orgs:
org = gh_session.organization(org_name)
num_repos = org.public_repos_count
_check_api_limits(gh_session, _num_requests_needed(num_repos))
for repo in org.repositories(type=privacy):
_check_api_limits(gh_session, 10)
yield repo
for repo_name in repos:
_check_api_limits(gh_session, 10)
org, name = repo_name.split('/')
yield gh_session.repository(org, name)
if not (orgs or repos):
for repo in gh_session.all_repositories():
yield repo
|
def query_repos(gh_session, orgs=None, repos=None, public_only=True):
"""
Yields GitHub3.py repo objects for provided orgs and repo names
If orgs and repos are BOTH empty, execute special mode of getting ALL
repositories from the GitHub Server.
If public_only is True, will return only those repos that are marked as
public. Set this to false to return all organizations that the session has
permissions to access.
"""
if orgs is None:
orgs = []
if repos is None:
repos = []
if public_only:
privacy = 'public'
else:
privacy = 'all'
_check_api_limits(gh_session, 10)
for org_name in orgs:
org = gh_session.organization(org_name)
num_repos = org.public_repos_count
_check_api_limits(gh_session, _num_requests_needed(num_repos))
for repo in org.repositories(type=privacy):
_check_api_limits(gh_session, 10)
yield repo
for repo_name in repos:
_check_api_limits(gh_session, 10)
org, name = repo_name.split('/')
yield gh_session.repository(org, name)
if not (orgs or repos):
for repo in gh_session.all_repositories():
yield repo
|
[
"Yields",
"GitHub3",
".",
"py",
"repo",
"objects",
"for",
"provided",
"orgs",
"and",
"repo",
"names"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/__init__.py#L124-L164
|
[
"def",
"query_repos",
"(",
"gh_session",
",",
"orgs",
"=",
"None",
",",
"repos",
"=",
"None",
",",
"public_only",
"=",
"True",
")",
":",
"if",
"orgs",
"is",
"None",
":",
"orgs",
"=",
"[",
"]",
"if",
"repos",
"is",
"None",
":",
"repos",
"=",
"[",
"]",
"if",
"public_only",
":",
"privacy",
"=",
"'public'",
"else",
":",
"privacy",
"=",
"'all'",
"_check_api_limits",
"(",
"gh_session",
",",
"10",
")",
"for",
"org_name",
"in",
"orgs",
":",
"org",
"=",
"gh_session",
".",
"organization",
"(",
"org_name",
")",
"num_repos",
"=",
"org",
".",
"public_repos_count",
"_check_api_limits",
"(",
"gh_session",
",",
"_num_requests_needed",
"(",
"num_repos",
")",
")",
"for",
"repo",
"in",
"org",
".",
"repositories",
"(",
"type",
"=",
"privacy",
")",
":",
"_check_api_limits",
"(",
"gh_session",
",",
"10",
")",
"yield",
"repo",
"for",
"repo_name",
"in",
"repos",
":",
"_check_api_limits",
"(",
"gh_session",
",",
"10",
")",
"org",
",",
"name",
"=",
"repo_name",
".",
"split",
"(",
"'/'",
")",
"yield",
"gh_session",
".",
"repository",
"(",
"org",
",",
"name",
")",
"if",
"not",
"(",
"orgs",
"or",
"repos",
")",
":",
"for",
"repo",
"in",
"gh_session",
".",
"all_repositories",
"(",
")",
":",
"yield",
"repo"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
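
A possible usage sketch chaining connect() and query_repos(); the org and repo names are illustrative:

gh = connect()
for repo in query_repos(gh, orgs=['llnl'], repos=['LLNL/spack']):
    print(repo.full_name)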
|
test
|
GitHub_Stargazers.get_stats
|
Retrieves the traffic for the users of the given organization.
Requires organization admin credentials token to access the data.
|
scripts/get_stargazers.py
|
def get_stats(self, username='', password='', organization='llnl', force=True):
"""
Retrieves the traffic for the users of the given organization.
Requires organization admin credentials token to access the data.
"""
date = str(datetime.date.today())
stargazers_file_path = ('../github_stats_output/stargazers.csv')
    if force or not os.path.isfile(stargazers_file_path):
my_github.login(username, password)
calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
print 'Rate Limit: ' + str(calls_beginning)
my_github.get_org(organization)
my_github.get_repos()
my_github.write_to_file(file_path=stargazers_file_path)
#my_github.write_to_file(file_path=stargazers_file_path)
calls_remaining = self.logged_in_gh.ratelimit_remaining
calls_used = calls_beginning - calls_remaining
print ('Rate Limit Remaining: ' + str(calls_remaining) + '\nUsed '
+ str(calls_used) + ' API calls.')
|
def get_stats(self, username='', password='', organization='llnl', force=True):
"""
Retrieves the traffic for the users of the given organization.
Requires organization admin credentials token to access the data.
"""
date = str(datetime.date.today())
stargazers_file_path = ('../github_stats_output/stargazers.csv')
    if force or not os.path.isfile(stargazers_file_path):
my_github.login(username, password)
calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
print 'Rate Limit: ' + str(calls_beginning)
my_github.get_org(organization)
my_github.get_repos()
my_github.write_to_file(file_path=stargazers_file_path)
#my_github.write_to_file(file_path=stargazers_file_path)
calls_remaining = self.logged_in_gh.ratelimit_remaining
calls_used = calls_beginning - calls_remaining
print ('Rate Limit Remaining: ' + str(calls_remaining) + '\nUsed '
+ str(calls_used) + ' API calls.')
|
[
"Retrieves",
"the",
"traffic",
"for",
"the",
"users",
"of",
"the",
"given",
"organization",
".",
"Requires",
"organization",
"admin",
"credentials",
"token",
"to",
"access",
"the",
"data",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_stargazers.py#L11-L29
|
[
"def",
"get_stats",
"(",
"self",
",",
"username",
"=",
"''",
",",
"password",
"=",
"''",
",",
"organization",
"=",
"'llnl'",
",",
"force",
"=",
"True",
")",
":",
"date",
"=",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
"stargazers_file_path",
"=",
"(",
"'../github_stats_output/stargazers.csv'",
")",
"if",
"force",
"or",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"my_github",
".",
"login",
"(",
"username",
",",
"password",
")",
"calls_beginning",
"=",
"self",
".",
"logged_in_gh",
".",
"ratelimit_remaining",
"+",
"1",
"print",
"'Rate Limit: '",
"+",
"str",
"(",
"calls_beginning",
")",
"my_github",
".",
"get_org",
"(",
"organization",
")",
"my_github",
".",
"get_repos",
"(",
")",
"my_github",
".",
"write_to_file",
"(",
"file_path",
"=",
"stargazers_file_path",
")",
"#my_github.write_to_file(file_path=stargazers_file_path)",
"calls_remaining",
"=",
"self",
".",
"logged_in_gh",
".",
"ratelimit_remaining",
"calls_used",
"=",
"calls_beginning",
"-",
"calls_remaining",
"print",
"(",
"'Rate Limit Remaining: '",
"+",
"str",
"(",
"calls_remaining",
")",
"+",
"'\\nUsed '",
"+",
"str",
"(",
"calls_used",
")",
"+",
"' API calls.'",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
GitHub_Stargazers.get_org
|
Retrieves an organization via given org name. If given
empty string, prompts user for an org name.
|
scripts/get_stargazers.py
|
def get_org(self, organization_name=''):
"""
Retrieves an organization via given org name. If given
empty string, prompts user for an org name.
"""
self.organization_name = organization_name
if(organization_name == ''):
self.organization_name = raw_input('Organization: ')
print 'Getting organization.'
self.org_retrieved = self.logged_in_gh.organization(organization_name)
|
def get_org(self, organization_name=''):
"""
Retrieves an organization via given org name. If given
empty string, prompts user for an org name.
"""
self.organization_name = organization_name
if(organization_name == ''):
self.organization_name = raw_input('Organization: ')
print 'Getting organization.'
self.org_retrieved = self.logged_in_gh.organization(organization_name)
|
[
"Retrieves",
"an",
"organization",
"via",
"given",
"org",
"name",
".",
"If",
"given",
"empty",
"string",
"prompts",
"user",
"for",
"an",
"org",
"name",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_stargazers.py#L80-L89
|
[
"def",
"get_org",
"(",
"self",
",",
"organization_name",
"=",
"''",
")",
":",
"self",
".",
"organization_name",
"=",
"organization_name",
"if",
"(",
"organization_name",
"==",
"''",
")",
":",
"self",
".",
"organization_name",
"=",
"raw_input",
"(",
"'Organization: '",
")",
"print",
"'Getting organization.'",
"self",
".",
"org_retrieved",
"=",
"self",
".",
"logged_in_gh",
".",
"organization",
"(",
"organization_name",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
GitHub_Stargazers.get_repos
|
Gets the repos for the organization and builds the URL/headers for
getting timestamps of stargazers.
|
scripts/get_stargazers.py
|
def get_repos(self):
"""
Gets the repos for the organization and builds the URL/headers for
getting timestamps of stargazers.
"""
print 'Getting repos.'
#Uses the developer API. Note this could change.
headers = {'Accept': 'application/vnd.github.v3.star+json', 'Authorization': 'token ' + self.token}
temp_count = 0
for repo in self.org_retrieved.iter_repos():
temp_count += 1
url = ('https://api.github.com/repos/' + self.organization_name + '/' + repo.name)
self.repos[repo.name] = self.get_stargazers(url=url, headers=headers)
self.calc_stargazers(start_count=650)
print 'total count: \t' + str(self.total_count)
print str(temp_count) + ' repos'
|
def get_repos(self):
"""
Gets the repos for the organization and builds the URL/headers for
getting timestamps of stargazers.
"""
print 'Getting repos.'
#Uses the developer API. Note this could change.
headers = {'Accept': 'application/vnd.github.v3.star+json', 'Authorization': 'token ' + self.token}
temp_count = 0
for repo in self.org_retrieved.iter_repos():
temp_count += 1
url = ('https://api.github.com/repos/' + self.organization_name + '/' + repo.name)
self.repos[repo.name] = self.get_stargazers(url=url, headers=headers)
self.calc_stargazers(start_count=650)
print 'total count: \t' + str(self.total_count)
print str(temp_count) + ' repos'
|
[
"Gets",
"the",
"repos",
"for",
"the",
"organization",
"and",
"builds",
"the",
"URL",
"/",
"headers",
"for",
"getting",
"timestamps",
"of",
"stargazers",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_stargazers.py#L91-L107
|
[
"def",
"get_repos",
"(",
"self",
")",
":",
"print",
"'Getting repos.'",
"#Uses the developer API. Note this could change.",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/vnd.github.v3.star+json'",
",",
"'Authorization'",
":",
"'token '",
"+",
"self",
".",
"token",
"}",
"temp_count",
"=",
"0",
"for",
"repo",
"in",
"self",
".",
"org_retrieved",
".",
"iter_repos",
"(",
")",
":",
"temp_count",
"+=",
"1",
"url",
"=",
"(",
"'https://api.github.com/repos/'",
"+",
"self",
".",
"organization_name",
"+",
"'/'",
"+",
"repo",
".",
"name",
")",
"self",
".",
"repos",
"[",
"repo",
".",
"name",
"]",
"=",
"self",
".",
"get_stargazers",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
")",
"self",
".",
"calc_stargazers",
"(",
"start_count",
"=",
"650",
")",
"print",
"'total count: \\t'",
"+",
"str",
"(",
"self",
".",
"total_count",
")",
"print",
"str",
"(",
"temp_count",
")",
"+",
"' repos'"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
GitHub_Stargazers.get_stargazers
|
Return a list of the stargazers of a GitHub repo
Includes both the 'starred_at' and 'user' data.
param: url
url is the 'stargazers_url' of the form:
https://api.github.com/repos/LLNL/spack/stargazers
|
scripts/get_stargazers.py
|
def get_stargazers(self, url, headers={}):
"""
Return a list of the stargazers of a GitHub repo
Includes both the 'starred_at' and 'user' data.
param: url
url is the 'stargazers_url' of the form:
https://api.github.com/repos/LLNL/spack/stargazers
"""
url = url + '/stargazers?per_page=100&page=%s'
page = 1
gazers = []
json_data = requests.get(url % page, headers=headers).json()
while json_data:
gazers.extend(json_data)
page += 1
json_data = requests.get(url % page, headers=headers).json()
return gazers
|
def get_stargazers(self, url, headers={}):
"""
Return a list of the stargazers of a GitHub repo
Includes both the 'starred_at' and 'user' data.
param: url
url is the 'stargazers_url' of the form:
https://api.github.com/repos/LLNL/spack/stargazers
"""
url = url + '/stargazers?per_page=100&page=%s'
page = 1
gazers = []
json_data = requests.get(url % page, headers=headers).json()
while json_data:
gazers.extend(json_data)
page += 1
json_data = requests.get(url % page, headers=headers).json()
return gazers
|
[
"Return",
"a",
"list",
"of",
"the",
"stargazers",
"of",
"a",
"GitHub",
"repo"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_stargazers.py#L109-L128
|
[
"def",
"get_stargazers",
"(",
"self",
",",
"url",
",",
"headers",
"=",
"{",
"}",
")",
":",
"url",
"=",
"url",
"+",
"'/stargazers?per_page=100&page=%s'",
"page",
"=",
"1",
"gazers",
"=",
"[",
"]",
"json_data",
"=",
"requests",
".",
"get",
"(",
"url",
"%",
"page",
",",
"headers",
"=",
"headers",
")",
".",
"json",
"(",
")",
"while",
"json_data",
":",
"gazers",
".",
"extend",
"(",
"json_data",
")",
"page",
"+=",
"1",
"json_data",
"=",
"requests",
".",
"get",
"(",
"url",
"%",
"page",
",",
"headers",
"=",
"headers",
")",
".",
"json",
"(",
")",
"return",
"gazers"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
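
One caveat in the loop above: the requests.get().json() calls are never checked for HTTP errors, and a rate-limit error body is a JSON object (truthy), so the while loop would try to extend the list with its keys. A sketch with the same pagination plus an explicit status check; the endpoint and media type match those used above, and the generator name is illustrative:

import requests

def iter_stargazers(org, repo, token):
    # Page through /stargazers, yielding {'starred_at': ..., 'user': ...}
    # records; the star+json media type adds the timestamps.
    headers = {
        'Accept': 'application/vnd.github.v3.star+json',
        'Authorization': 'token ' + token,
    }
    url = 'https://api.github.com/repos/%s/%s/stargazers' % (org, repo)
    page = 1
    while True:
        resp = requests.get(url, headers=headers,
                            params={'per_page': 100, 'page': page})
        resp.raise_for_status()        # surface 4xx/5xx instead of looping
        batch = resp.json()
        if not batch:                  # empty page: done
            return
        for gazer in batch:
            yield gazer
        page += 1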
|
test
|
GitHub_Stargazers.write_to_file
|
Writes stargazers data to file.
|
scripts/get_stargazers.py
|
def write_to_file(self, file_path='', date=(datetime.date.today()),
organization='llnl'):
"""
Writes stargazers data to file.
"""
with open(file_path, 'w+') as out:
out.write('date,organization,stargazers\n')
sorted_stargazers = sorted(self.stargazers)#sort based on lowercase
for star in sorted_stargazers:
out.write(star + ',' + str(self.stargazers[star]) + '\n')
out.close()
|
def write_to_file(self, file_path='', date=(datetime.date.today()),
organization='llnl'):
"""
Writes stargazers data to file.
"""
with open(file_path, 'w+') as out:
out.write('date,organization,stargazers\n')
sorted_stargazers = sorted(self.stargazers)#sort based on lowercase
for star in sorted_stargazers:
out.write(star + ',' + str(self.stargazers[star]) + '\n')
out.close()
|
[
"Writes",
"stargazers",
"data",
"to",
"file",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_stargazers.py#L147-L157
|
[
"def",
"write_to_file",
"(",
"self",
",",
"file_path",
"=",
"''",
",",
"date",
"=",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
",",
"organization",
"=",
"'llnl'",
")",
":",
"with",
"open",
"(",
"file_path",
",",
"'w+'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"'date,organization,stargazers\\n'",
")",
"sorted_stargazers",
"=",
"sorted",
"(",
"self",
".",
"stargazers",
")",
"#sort based on lowercase",
"for",
"star",
"in",
"sorted_stargazers",
":",
"out",
".",
"write",
"(",
"star",
"+",
"','",
"+",
"str",
"(",
"self",
".",
"stargazers",
"[",
"star",
"]",
")",
"+",
"'\\n'",
")",
"out",
".",
"close",
"(",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
test
|
Project.from_github3
|
Create CodeGovProject object from github3 Repository object
|
scraper/code_gov/models.py
|
def from_github3(klass, repository, labor_hours=True):
"""
Create CodeGovProject object from github3 Repository object
"""
if not isinstance(repository, github3.repos.repo._Repository):
raise TypeError('Repository must be a github3 Repository object')
logger.info('Processing: %s', repository.full_name)
project = klass()
logger.debug('GitHub3: repository=%s', repository)
# -- REQUIRED FIELDS --
project['name'] = repository.name
project['repositoryURL'] = repository.git_url
project['description'] = repository.description
try:
repo_license = repository.license()
except github3.exceptions.NotFoundError:
logger.debug('no license found for repo=%s', repository)
repo_license = None
if repo_license:
license = repo_license.license
if license:
logger.debug('license spdx=%s; url=%s', license.spdx_id, license.url)
if license.url is None:
project['permissions']['licenses'] = [{"name": license.spdx_id}]
else:
project['permissions']['licenses'] = [{"URL": license.url, "name": license.spdx_id}]
else:
project['permissions']['licenses'] = None
public_server = repository.html_url.startswith('https://github.com')
if not repository.private and public_server:
project['permissions']['usageType'] = 'openSource'
elif date_parse(repository.created_at) < POLICY_START_DATE:
project['permissions']['usageType'] = 'exemptByPolicyDate'
if labor_hours:
project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
project['tags'] = ['github']
old_accept = repository.session.headers['Accept']
repository.session.headers['Accept'] = 'application/vnd.github.mercy-preview+json'
topics = repository._get(repository.url + '/topics').json()
project['tags'].extend(topics.get('names', []))
repository.session.headers['Accept'] = old_accept
# Hacky way to get an Organization object back with GitHub3.py >= 1.2.0
owner_url = repository.owner.url
owner_api_response = repository._get(owner_url)
organization = repository._json(owner_api_response, 200)
project['contact']['email'] = organization['email']
project['contact']['URL'] = organization['html_url']
# -- OPTIONAL FIELDS --
# project['version'] = ''
project['organization'] = organization['name']
# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
project['status'] = 'Development'
project['vcs'] = 'git'
project['homepageURL'] = repository.html_url
project['downloadURL'] = repository.downloads_url
project['languages'] = [l for l, _ in repository.languages()]
# project['partners'] = []
# project['relatedCode'] = []
# project['reusedCode'] = []
# date: [object] A date object describing the release.
# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
try:
created_at = repository.created_at.date()
except AttributeError:
created_at = date_parse(repository.created_at).date()
try:
updated_at = repository.updated_at.date()
except AttributeError:
updated_at = date_parse(repository.updated_at).date()
project['date'] = {
'created': created_at.isoformat(),
'lastModified': updated_at.isoformat(),
'metadataLastUpdated': '',
}
_prune_dict_null_str(project)
return project
|
def from_github3(klass, repository, labor_hours=True):
"""
Create CodeGovProject object from github3 Repository object
"""
if not isinstance(repository, github3.repos.repo._Repository):
raise TypeError('Repository must be a github3 Repository object')
logger.info('Processing: %s', repository.full_name)
project = klass()
logger.debug('GitHub3: repository=%s', repository)
# -- REQUIRED FIELDS --
project['name'] = repository.name
project['repositoryURL'] = repository.git_url
project['description'] = repository.description
try:
repo_license = repository.license()
except github3.exceptions.NotFoundError:
logger.debug('no license found for repo=%s', repository)
repo_license = None
if repo_license:
license = repo_license.license
if license:
logger.debug('license spdx=%s; url=%s', license.spdx_id, license.url)
if license.url is None:
project['permissions']['licenses'] = [{"name": license.spdx_id}]
else:
project['permissions']['licenses'] = [{"URL": license.url, "name": license.spdx_id}]
else:
project['permissions']['licenses'] = None
public_server = repository.html_url.startswith('https://github.com')
if not repository.private and public_server:
project['permissions']['usageType'] = 'openSource'
elif date_parse(repository.created_at) < POLICY_START_DATE:
project['permissions']['usageType'] = 'exemptByPolicyDate'
if labor_hours:
project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
project['tags'] = ['github']
old_accept = repository.session.headers['Accept']
repository.session.headers['Accept'] = 'application/vnd.github.mercy-preview+json'
topics = repository._get(repository.url + '/topics').json()
project['tags'].extend(topics.get('names', []))
repository.session.headers['Accept'] = old_accept
# Hacky way to get an Organization object back with GitHub3.py >= 1.2.0
owner_url = repository.owner.url
owner_api_response = repository._get(owner_url)
organization = repository._json(owner_api_response, 200)
project['contact']['email'] = organization['email']
project['contact']['URL'] = organization['html_url']
# -- OPTIONAL FIELDS --
# project['version'] = ''
project['organization'] = organization['name']
# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
project['status'] = 'Development'
project['vcs'] = 'git'
project['homepageURL'] = repository.html_url
project['downloadURL'] = repository.downloads_url
project['languages'] = [l for l, _ in repository.languages()]
# project['partners'] = []
# project['relatedCode'] = []
# project['reusedCode'] = []
# date: [object] A date object describing the release.
# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
try:
created_at = repository.created_at.date()
except AttributeError:
created_at = date_parse(repository.created_at).date()
try:
updated_at = repository.updated_at.date()
except AttributeError:
updated_at = date_parse(repository.updated_at).date()
project['date'] = {
'created': created_at.isoformat(),
'lastModified': updated_at.isoformat(),
'metadataLastUpdated': '',
}
_prune_dict_null_str(project)
return project
|
[
"Create",
"CodeGovProject",
"object",
"from",
"github3",
"Repository",
"object"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/code_gov/models.py#L178-L283
|
[
"def",
"from_github3",
"(",
"klass",
",",
"repository",
",",
"labor_hours",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"repository",
",",
"github3",
".",
"repos",
".",
"repo",
".",
"_Repository",
")",
":",
"raise",
"TypeError",
"(",
"'Repository must be a github3 Repository object'",
")",
"logger",
".",
"info",
"(",
"'Processing: %s'",
",",
"repository",
".",
"full_name",
")",
"project",
"=",
"klass",
"(",
")",
"logger",
".",
"debug",
"(",
"'GitHub3: repository=%s'",
",",
"repository",
")",
"# -- REQUIRED FIELDS --",
"project",
"[",
"'name'",
"]",
"=",
"repository",
".",
"name",
"project",
"[",
"'repositoryURL'",
"]",
"=",
"repository",
".",
"git_url",
"project",
"[",
"'description'",
"]",
"=",
"repository",
".",
"description",
"try",
":",
"repo_license",
"=",
"repository",
".",
"license",
"(",
")",
"except",
"github3",
".",
"exceptions",
".",
"NotFoundError",
":",
"logger",
".",
"debug",
"(",
"'no license found for repo=%s'",
",",
"repository",
")",
"repo_license",
"=",
"None",
"if",
"repo_license",
":",
"license",
"=",
"repo_license",
".",
"license",
"if",
"license",
":",
"logger",
".",
"debug",
"(",
"'license spdx=%s; url=%s'",
",",
"license",
".",
"spdx_id",
",",
"license",
".",
"url",
")",
"if",
"license",
".",
"url",
"is",
"None",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'licenses'",
"]",
"=",
"[",
"{",
"\"name\"",
":",
"license",
".",
"spdx_id",
"}",
"]",
"else",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'licenses'",
"]",
"=",
"[",
"{",
"\"URL\"",
":",
"license",
".",
"url",
",",
"\"name\"",
":",
"license",
".",
"spdx_id",
"}",
"]",
"else",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'licenses'",
"]",
"=",
"None",
"public_server",
"=",
"repository",
".",
"html_url",
".",
"startswith",
"(",
"'https://github.com'",
")",
"if",
"not",
"repository",
".",
"private",
"and",
"public_server",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"'openSource'",
"elif",
"date_parse",
"(",
"repository",
".",
"created_at",
")",
"<",
"POLICY_START_DATE",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"'exemptByPolicyDate'",
"if",
"labor_hours",
":",
"project",
"[",
"'laborHours'",
"]",
"=",
"labor_hours_from_url",
"(",
"project",
"[",
"'repositoryURL'",
"]",
")",
"else",
":",
"project",
"[",
"'laborHours'",
"]",
"=",
"0",
"project",
"[",
"'tags'",
"]",
"=",
"[",
"'github'",
"]",
"old_accept",
"=",
"repository",
".",
"session",
".",
"headers",
"[",
"'Accept'",
"]",
"repository",
".",
"session",
".",
"headers",
"[",
"'Accept'",
"]",
"=",
"'application/vnd.github.mercy-preview+json'",
"topics",
"=",
"repository",
".",
"_get",
"(",
"repository",
".",
"url",
"+",
"'/topics'",
")",
".",
"json",
"(",
")",
"project",
"[",
"'tags'",
"]",
".",
"extend",
"(",
"topics",
".",
"get",
"(",
"'names'",
",",
"[",
"]",
")",
")",
"repository",
".",
"session",
".",
"headers",
"[",
"'Accept'",
"]",
"=",
"old_accept",
"# Hacky way to get an Organization object back with GitHub3.py >= 1.2.0",
"owner_url",
"=",
"repository",
".",
"owner",
".",
"url",
"owner_api_response",
"=",
"repository",
".",
"_get",
"(",
"owner_url",
")",
"organization",
"=",
"repository",
".",
"_json",
"(",
"owner_api_response",
",",
"200",
")",
"project",
"[",
"'contact'",
"]",
"[",
"'email'",
"]",
"=",
"organization",
"[",
"'email'",
"]",
"project",
"[",
"'contact'",
"]",
"[",
"'URL'",
"]",
"=",
"organization",
"[",
"'html_url'",
"]",
"# -- OPTIONAL FIELDS --",
"# project['version'] = ''",
"project",
"[",
"'organization'",
"]",
"=",
"organization",
"[",
"'name'",
"]",
"# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370",
"project",
"[",
"'status'",
"]",
"=",
"'Development'",
"project",
"[",
"'vcs'",
"]",
"=",
"'git'",
"project",
"[",
"'homepageURL'",
"]",
"=",
"repository",
".",
"html_url",
"project",
"[",
"'downloadURL'",
"]",
"=",
"repository",
".",
"downloads_url",
"project",
"[",
"'languages'",
"]",
"=",
"[",
"l",
"for",
"l",
",",
"_",
"in",
"repository",
".",
"languages",
"(",
")",
"]",
"# project['partners'] = []",
"# project['relatedCode'] = []",
"# project['reusedCode'] = []",
"# date: [object] A date object describing the release.",
"# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.",
"# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.",
"# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.",
"try",
":",
"created_at",
"=",
"repository",
".",
"created_at",
".",
"date",
"(",
")",
"except",
"AttributeError",
":",
"created_at",
"=",
"date_parse",
"(",
"repository",
".",
"created_at",
")",
".",
"date",
"(",
")",
"try",
":",
"updated_at",
"=",
"repository",
".",
"updated_at",
".",
"date",
"(",
")",
"except",
"AttributeError",
":",
"updated_at",
"=",
"date_parse",
"(",
"repository",
".",
"updated_at",
")",
".",
"date",
"(",
")",
"project",
"[",
"'date'",
"]",
"=",
"{",
"'created'",
":",
"created_at",
".",
"isoformat",
"(",
")",
",",
"'lastModified'",
":",
"updated_at",
".",
"isoformat",
"(",
")",
",",
"'metadataLastUpdated'",
":",
"''",
",",
"}",
"_prune_dict_null_str",
"(",
"project",
")",
"return",
"project"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
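A minimal usage sketch for from_github3 above. It is illustrative, not from the source: the GH_TOKEN variable name and the target repository are placeholders, while github3.login() and gh.repository() are standard github3.py calls.

# Hedged example: fetch one repository with github3.py and convert it.
# GH_TOKEN and the 'LLNL'/'scraper' target are placeholders, not from the source.
import os
import github3
from scraper.code_gov.models import Project

gh = github3.login(token=os.environ['GH_TOKEN'])   # authenticate against github.com
repo = gh.repository('LLNL', 'scraper')            # any readable owner/name pair
project = Project.from_github3(repo, labor_hours=False)  # skip labor-hour estimation
print(project['name'], project['permissions']['usageType'])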
test
|
Project.from_gitlab
|
Create CodeGovProject object from GitLab Repository
|
scraper/code_gov/models.py
|
def from_gitlab(klass, repository, labor_hours=True):
"""
Create CodeGovProject object from GitLab Repository
"""
if not isinstance(repository, gitlab.v4.objects.Project):
raise TypeError('Repository must be a gitlab Repository object')
project = klass()
logger.debug(
'GitLab: repository_id=%d path_with_namespace=%s',
repository.id,
repository.path_with_namespace,
)
# -- REQUIRED FIELDS --
project['name'] = repository.name
project['repositoryURL'] = repository.http_url_to_repo
project['description'] = repository.description
# TODO: Update licenses from GitLab API
project['permissions']['licenses'] = None
web_url = repository.web_url
public_server = web_url.startswith('https://gitlab.com')
if repository.visibility == 'public' and public_server:
project['permissions']['usageType'] = 'openSource'
elif date_parse(repository.created_at) < POLICY_START_DATE:
project['permissions']['usageType'] = 'exemptByPolicyDate'
if labor_hours:
project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
project['tags'] = ['gitlab'] + repository.tag_list
project['contact'] = {
'email': '',
'URL': web_url,
}
# -- OPTIONAL FIELDS --
# project['version'] = ''
project['organization'] = repository.namespace['name']
# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
project['status'] = 'Development'
project['vcs'] = 'git'
project['homepageURL'] = repository.web_url
api_url = repository.manager.gitlab._url
archive_suffix = '/projects/%s/repository/archive' % repository.get_id()
project['downloadURL'] = api_url + archive_suffix
# project['languages'] = [l for l, _ in repository.languages()]
# project['partners'] = []
# project['relatedCode'] = []
# project['reusedCode'] = []
project['date'] = {
'created': date_parse(repository.created_at).date().isoformat(),
'lastModified': date_parse(repository.last_activity_at).date().isoformat(),
'metadataLastUpdated': '',
}
_prune_dict_null_str(project)
return project
|
def from_gitlab(klass, repository, labor_hours=True):
"""
Create CodeGovProject object from GitLab Repository
"""
if not isinstance(repository, gitlab.v4.objects.Project):
raise TypeError('Repository must be a gitlab Repository object')
project = klass()
logger.debug(
'GitLab: repository_id=%d path_with_namespace=%s',
repository.id,
repository.path_with_namespace,
)
# -- REQUIRED FIELDS --
project['name'] = repository.name
project['repositoryURL'] = repository.http_url_to_repo
project['description'] = repository.description
# TODO: Update licenses from GitLab API
project['permissions']['licenses'] = None
web_url = repository.web_url
public_server = web_url.startswith('https://gitlab.com')
if repository.visibility == 'public' and public_server:
project['permissions']['usageType'] = 'openSource'
elif date_parse(repository.created_at) < POLICY_START_DATE:
project['permissions']['usageType'] = 'exemptByPolicyDate'
if labor_hours:
project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
project['tags'] = ['gitlab'] + repository.tag_list
project['contact'] = {
'email': '',
'URL': web_url,
}
# -- OPTIONAL FIELDS --
# project['version'] = ''
project['organization'] = repository.namespace['name']
# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
project['status'] = 'Development'
project['vcs'] = 'git'
project['homepageURL'] = repository.web_url
api_url = repository.manager.gitlab._url
archive_suffix = '/projects/%s/repository/archive' % repository.get_id()
project['downloadURL'] = api_url + archive_suffix
# project['languages'] = [l for l, _ in repository.languages()]
# project['partners'] = []
# project['relatedCode'] = []
# project['reusedCode'] = []
project['date'] = {
'created': date_parse(repository.created_at).date().isoformat(),
'lastModified': date_parse(repository.last_activity_at).date().isoformat(),
'metadataLastUpdated': '',
}
_prune_dict_null_str(project)
return project
|
[
"Create",
"CodeGovProject",
"object",
"from",
"GitLab",
"Repository"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/code_gov/models.py#L286-L360
|
[
"def",
"from_gitlab",
"(",
"klass",
",",
"repository",
",",
"labor_hours",
"=",
"True",
")",
":",
"if",
"not",
"isinstance",
"(",
"repository",
",",
"gitlab",
".",
"v4",
".",
"objects",
".",
"Project",
")",
":",
"raise",
"TypeError",
"(",
"'Repository must be a gitlab Repository object'",
")",
"project",
"=",
"klass",
"(",
")",
"logger",
".",
"debug",
"(",
"'GitLab: repository_id=%d path_with_namespace=%s'",
",",
"repository",
".",
"id",
",",
"repository",
".",
"path_with_namespace",
",",
")",
"# -- REQUIRED FIELDS --",
"project",
"[",
"'name'",
"]",
"=",
"repository",
".",
"name",
"project",
"[",
"'repositoryURL'",
"]",
"=",
"repository",
".",
"http_url_to_repo",
"project",
"[",
"'description'",
"]",
"=",
"repository",
".",
"description",
"# TODO: Update licenses from GitLab API",
"project",
"[",
"'permissions'",
"]",
"[",
"'licenses'",
"]",
"=",
"None",
"web_url",
"=",
"repository",
".",
"web_url",
"public_server",
"=",
"web_url",
".",
"startswith",
"(",
"'https://gitlab.com'",
")",
"if",
"repository",
".",
"visibility",
"in",
"(",
"'public'",
")",
"and",
"public_server",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"'openSource'",
"elif",
"date_parse",
"(",
"repository",
".",
"created_at",
")",
"<",
"POLICY_START_DATE",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"'exemptByPolicyDate'",
"if",
"labor_hours",
":",
"project",
"[",
"'laborHours'",
"]",
"=",
"labor_hours_from_url",
"(",
"project",
"[",
"'repositoryURL'",
"]",
")",
"else",
":",
"project",
"[",
"'laborHours'",
"]",
"=",
"0",
"project",
"[",
"'tags'",
"]",
"=",
"[",
"'gitlab'",
"]",
"+",
"repository",
".",
"tag_list",
"project",
"[",
"'contact'",
"]",
"=",
"{",
"'email'",
":",
"''",
",",
"'URL'",
":",
"web_url",
",",
"}",
"# -- OPTIONAL FIELDS --",
"# project['version'] = ''",
"project",
"[",
"'organization'",
"]",
"=",
"repository",
".",
"namespace",
"[",
"'name'",
"]",
"# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370",
"project",
"[",
"'status'",
"]",
"=",
"'Development'",
"project",
"[",
"'vcs'",
"]",
"=",
"'git'",
"project",
"[",
"'homepageURL'",
"]",
"=",
"repository",
".",
"web_url",
"api_url",
"=",
"repository",
".",
"manager",
".",
"gitlab",
".",
"_url",
"archive_suffix",
"=",
"'/projects/%s/repository/archive'",
"%",
"repository",
".",
"get_id",
"(",
")",
"project",
"[",
"'downloadURL'",
"]",
"=",
"api_url",
"+",
"archive_suffix",
"# project['languages'] = [l for l, _ in repository.languages()]",
"# project['partners'] = []",
"# project['relatedCode'] = []",
"# project['reusedCode'] = []",
"project",
"[",
"'date'",
"]",
"=",
"{",
"'created'",
":",
"date_parse",
"(",
"repository",
".",
"created_at",
")",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
",",
"'lastModified'",
":",
"date_parse",
"(",
"repository",
".",
"last_activity_at",
")",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
",",
"'metadataLastUpdated'",
":",
"''",
",",
"}",
"_prune_dict_null_str",
"(",
"project",
")",
"return",
"project"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
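A comparable sketch for from_gitlab, assuming python-gitlab and a version whose Project objects still expose tag_list; the token variable and project path are placeholders.

# Hedged example: fetch one GitLab project with python-gitlab and convert it.
import os
import gitlab
from scraper.code_gov.models import Project

gl = gitlab.Gitlab('https://gitlab.com', private_token=os.environ.get('GITLAB_TOKEN'))
repo = gl.projects.get('gitlab-org/gitlab-runner')  # placeholder project path
project = Project.from_gitlab(repo, labor_hours=False)
print(project['repositoryURL'], project['date']['created'])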
test
|
Project.from_stashy
|
Handles crafting Code.gov Project for Bitbucket Server repositories
|
scraper/code_gov/models.py
|
def from_stashy(klass, repository, labor_hours=True):
"""
Handles crafting Code.gov Project for Bitbucket Server repositories
"""
# if not isinstance(repository, stashy.repos.Repository):
# raise TypeError('Repository must be a stashy Repository object')
if not isinstance(repository, dict):
raise TypeError('Repository must be a dict')
project = klass()
logger.debug(
'Stashy: project_key=%s repository_slug=%s',
repository['project']['key'],
repository['slug'],
)
# -- REQUIRED FIELDS --
project['name'] = repository['name']
clone_urls = [clone['href'] for clone in repository['links']['clone']]
for url in clone_urls:
# Only rely on SSH URLs for repository URLs
if url.startswith('ssh://'):
project['repositoryURL'] = url
break
description = repository['project'].get('description', '')
if description:
project['description'] = 'Project description: %s' % description
project['permissions']['licenses'] = None
web_url = repository['links']['self'][0]['href']
public_server = web_url.startswith('https://bitbucket.org')
if repository['public'] and public_server:
project['permissions']['usageType'] = 'openSource'
if labor_hours:
project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
project['tags'] = ['bitbucket']
project['contact']['email'] = ''
project['contact']['URL'] = repository['links']['self'][0]['href']
# -- OPTIONAL FIELDS --
# project['version'] = ''
# project['organization'] = organization.name
# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
project['status'] = 'Development'
project['vcs'] = repository['scmId']
project['homepageURL'] = repository['links']['self'][0]['href']
# project['downloadURL'] =
# project['languages'] =
# project['partners'] = []
# project['relatedCode'] = []
# project['reusedCode'] = []
# date: [object] A date object describing the release.
# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
# project['date'] = {
# 'created': repository.pushed_at.isoformat(),
# 'lastModified': repository.updated_at.isoformat(),
# 'metadataLastUpdated': '',
# }
_prune_dict_null_str(project)
return project
|
def from_stashy(klass, repository, labor_hours=True):
"""
Handles crafting Code.gov Project for Bitbucket Server repositories
"""
# if not isinstance(repository, stashy.repos.Repository):
# raise TypeError('Repository must be a stashy Repository object')
if not isinstance(repository, dict):
raise TypeError('Repository must be a dict')
project = klass()
logger.debug(
'Stashy: project_key=%s repository_slug=%s',
repository['project']['key'],
repository['slug'],
)
# -- REQUIRED FIELDS --
project['name'] = repository['name']
clone_urls = [clone['href'] for clone in repository['links']['clone']]
for url in clone_urls:
# Only rely on SSH URLs for repository URLs
if url.startswith('ssh://'):
project['repositoryURL'] = url
break
description = repository['project'].get('description', '')
if description:
project['description'] = 'Project description: %s' % description
project['permissions']['licenses'] = None
web_url = repository['links']['self'][0]['href']
public_server = web_url.startswith('https://bitbucket.org')
if repository['public'] and public_server:
project['permissions']['usageType'] = 'openSource'
if labor_hours:
project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
project['tags'] = ['bitbucket']
project['contact']['email'] = ''
project['contact']['URL'] = repository['links']['self'][0]['href']
# -- OPTIONAL FIELDS --
# project['version'] = ''
# project['organization'] = organization.name
# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
project['status'] = 'Development'
project['vcs'] = repository['scmId']
project['homepageURL'] = repository['links']['self'][0]['href']
# project['downloadURL'] =
# project['languages'] =
# project['partners'] = []
# project['relatedCode'] = []
# project['reusedCode'] = []
# date: [object] A date object describing the release.
# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
# project['date'] = {
# 'created': repository.pushed_at.isoformat(),
# 'lastModified': repository.updated_at.isoformat(),
# 'metadataLastUpdated': '',
# }
_prune_dict_null_str(project)
return project
|
[
"Handles",
"crafting",
"Code",
".",
"gov",
"Project",
"for",
"Bitbucket",
"Server",
"repositories"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/code_gov/models.py#L363-L447
|
[
"def",
"from_stashy",
"(",
"klass",
",",
"repository",
",",
"labor_hours",
"=",
"True",
")",
":",
"# if not isinstance(repository, stashy.repos.Repository):",
"# raise TypeError('Repository must be a stashy Repository object')",
"if",
"not",
"isinstance",
"(",
"repository",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'Repository must be a dict'",
")",
"project",
"=",
"klass",
"(",
")",
"logger",
".",
"debug",
"(",
"'Stashy: project_key=%s repository_slug=%s'",
",",
"repository",
"[",
"'name'",
"]",
",",
"repository",
"[",
"'project'",
"]",
"[",
"'key'",
"]",
",",
")",
"# -- REQUIRED FIELDS --",
"project",
"[",
"'name'",
"]",
"=",
"repository",
"[",
"'name'",
"]",
"clone_urls",
"=",
"[",
"clone",
"[",
"'href'",
"]",
"for",
"clone",
"in",
"repository",
"[",
"'links'",
"]",
"[",
"'clone'",
"]",
"]",
"for",
"url",
"in",
"clone_urls",
":",
"# Only rely on SSH Urls for repository urls",
"if",
"url",
".",
"startswith",
"(",
"'ssh://'",
")",
":",
"project",
"[",
"'repositoryURL'",
"]",
"=",
"url",
"break",
"description",
"=",
"repository",
"[",
"'project'",
"]",
".",
"get",
"(",
"'description'",
",",
"''",
")",
"if",
"description",
":",
"project",
"[",
"'description'",
"]",
"=",
"'Project description: %s'",
"%",
"description",
"project",
"[",
"'permissions'",
"]",
"[",
"'licenses'",
"]",
"=",
"None",
"web_url",
"=",
"repository",
"[",
"'links'",
"]",
"[",
"'self'",
"]",
"[",
"0",
"]",
"[",
"'href'",
"]",
"public_server",
"=",
"web_url",
".",
"startswith",
"(",
"'https://bitbucket.org'",
")",
"if",
"repository",
"[",
"'public'",
"]",
"and",
"public_server",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"'openSource'",
"if",
"labor_hours",
":",
"project",
"[",
"'laborHours'",
"]",
"=",
"labor_hours_from_url",
"(",
"project",
"[",
"'repositoryURL'",
"]",
")",
"else",
":",
"project",
"[",
"'laborHours'",
"]",
"=",
"0",
"project",
"[",
"'tags'",
"]",
"=",
"[",
"'bitbucket'",
"]",
"project",
"[",
"'contact'",
"]",
"[",
"'email'",
"]",
"=",
"''",
"project",
"[",
"'contact'",
"]",
"[",
"'URL'",
"]",
"=",
"repository",
"[",
"'links'",
"]",
"[",
"'self'",
"]",
"[",
"0",
"]",
"[",
"'href'",
"]",
"# -- OPTIONAL FIELDS --",
"# project['version'] = ''",
"# project['organization'] = organization.name",
"# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370",
"project",
"[",
"'status'",
"]",
"=",
"'Development'",
"project",
"[",
"'vcs'",
"]",
"=",
"repository",
"[",
"'scmId'",
"]",
"project",
"[",
"'homepageURL'",
"]",
"=",
"repository",
"[",
"'links'",
"]",
"[",
"'self'",
"]",
"[",
"0",
"]",
"[",
"'href'",
"]",
"# project['downloadURL'] =",
"# project['languages'] =",
"# project['partners'] = []",
"# project['relatedCode'] = []",
"# project['reusedCode'] = []",
"# date: [object] A date object describing the release.",
"# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.",
"# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.",
"# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.",
"# project['date'] = {",
"# 'created': repository.pushed_at.isoformat(),",
"# 'lastModified': repository.updated_at.isoformat(),",
"# 'metadataLastUpdated': '',",
"# }",
"_prune_dict_null_str",
"(",
"project",
")",
"return",
"project"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
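Because from_stashy takes a plain dict, it can be exercised without a Bitbucket Server; the keys below mirror exactly what the function reads, and all values are illustrative.

# Hedged example: minimal Bitbucket Server payload accepted by from_stashy.
from scraper.code_gov.models import Project

repo = {
    'name': 'example-repo',
    'slug': 'example-repo',
    'scmId': 'git',
    'public': False,
    'project': {'key': 'EX', 'description': 'Placeholder project description'},
    'links': {
        'clone': [{'href': 'ssh://git@bitbucket.example.gov/ex/example-repo.git'}],
        'self': [{'href': 'https://bitbucket.example.gov/projects/EX/repos/example-repo'}],
    },
}
project = Project.from_stashy(repo, labor_hours=False)
print(project['name'], project['repositoryURL'])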
test
|
Project.from_doecode
|
Create CodeGovProject object from DOE CODE record
Handles crafting Code.gov Project
|
scraper/code_gov/models.py
|
def from_doecode(klass, record):
"""
Create CodeGovProject object from DOE CODE record
Handles crafting Code.gov Project
"""
if not isinstance(record, dict):
raise TypeError('`record` must be a dict')
project = klass()
# -- REQUIRED FIELDS --
project['name'] = record['software_title']
logger.debug('DOE CODE: software_title="%s"', record['software_title'])
link = record.get('repository_link', '')
if not link:
link = record.get('landing_page')
logger.warning('DOE CODE: No repositoryURL, using landing_page: %s', link)
project['repositoryURL'] = link
project['description'] = record['description']
licenses = set(record['licenses'])
licenses.discard(None)
logger.debug('DOE CODE: licenses=%s', licenses)
license_objects = []
if 'Other' in licenses:
licenses.remove('Other')
license_objects = [{
'name': 'Other',
'URL': record['proprietary_url']
}]
if licenses:
license_objects.extend([_license_obj(license) for license in licenses])
project['permissions']['licenses'] = license_objects
if record['open_source']:
usage_type = 'openSource'
else:
usage_type = 'exemptByLaw'
project['permissions']['exemptionText'] = 'This source code is restricted by patent and / or intellectual property law.'
project['permissions']['usageType'] = usage_type
# TODO: Compute from git repo
project['laborHours'] = 0
project['tags'] = ['DOE CODE']
lab_name = record.get('lab_display_name')
if lab_name is not None:
project['tags'].append(lab_name)
project['contact']['email'] = record['owner']
# project['contact']['URL'] = ''
# project['contact']['name'] = ''
# project['contact']['phone'] = ''
# -- OPTIONAL FIELDS --
if 'version_number' in record and record['version_number']:
project['version'] = record['version_number']
if lab_name is not None:
project['organization'] = lab_name
# Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
status = record.get('ever_announced')
if status is None:
raise ValueError('DOE CODE: Unable to determine "ever_announced" value!')
elif status:
status = 'Production'
else:
status = 'Development'
project['status'] = status
vcs = None
link = project['repositoryURL']
if 'github.com' in link:
vcs = 'git'
if vcs is None:
logger.debug('DOE CODE: Unable to determine vcs for: name="%s", repositoryURL=%s', project['name'], link)
vcs = ''
if vcs:
project['vcs'] = vcs
url = record.get('landing_page', '')
if url:
project['homepageURL'] = url
# record['downloadURL'] = ''
# self['disclaimerText'] = ''
# self['disclaimerURL'] = ''
if 'programming_languages' in record:
project['languages'] = record['programming_languages']
# self['partners'] = []
# TODO: Look into using record['contributing_organizations']
# self['relatedCode'] = []
# self['reusedCode'] = []
# date: [object] A date object describing the release.
# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
if 'date_record_added' in record and 'date_record_updated' in record:
project['date'] = {
'created': record['date_record_added'],
# 'lastModified': '',
'metadataLastUpdated': record['date_record_updated']
}
return project
|
def from_doecode(klass, record):
"""
Create CodeGovProject object from DOE CODE record
Handles crafting Code.gov Project
"""
if not isinstance(record, dict):
raise TypeError('`record` must be a dict')
project = klass()
# -- REQUIRED FIELDS --
project['name'] = record['software_title']
logger.debug('DOE CODE: software_title="%s"', record['software_title'])
link = record.get('repository_link', '')
if not link:
link = record.get('landing_page')
logger.warning('DOE CODE: No repositoryURL, using landing_page: %s', link)
project['repositoryURL'] = link
project['description'] = record['description']
licenses = set(record['licenses'])
licenses.discard(None)
logger.debug('DOE CODE: licenses=%s', licenses)
license_objects = []
if 'Other' in licenses:
licenses.remove('Other')
license_objects = [{
'name': 'Other',
'URL': record['proprietary_url']
}]
if licenses:
license_objects.extend([_license_obj(license) for license in licenses])
project['permissions']['licenses'] = license_objects
if record['open_source']:
usage_type = 'openSource'
else:
usage_type = 'exemptByLaw'
project['permissions']['exemptionText'] = 'This source code is restricted by patent and / or intellectual property law.'
project['permissions']['usageType'] = usage_type
# TODO: Compute from git repo
project['laborHours'] = 0
project['tags'] = ['DOE CODE']
lab_name = record.get('lab_display_name')
if lab_name is not None:
project['tags'].append(lab_name)
project['contact']['email'] = record['owner']
# project['contact']['URL'] = ''
# project['contact']['name'] = ''
# project['contact']['phone'] = ''
# -- OPTIONAL FIELDS --
if 'version_number' in record and record['version_number']:
project['version'] = record['version_number']
if lab_name is not None:
project['organization'] = lab_name
# Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
status = record.get('ever_announced')
if status is None:
raise ValueError('DOE CODE: Unable to determine "ever_announced" value!')
elif status:
status = 'Production'
else:
status = 'Development'
project['status'] = status
vcs = None
link = project['repositoryURL']
if 'github.com' in link:
vcs = 'git'
if vcs is None:
logger.debug('DOE CODE: Unable to determine vcs for: name="%s", repositoryURL=%s', project['name'], link)
vcs = ''
if vcs:
project['vcs'] = vcs
url = record.get('landing_page', '')
if url:
project['homepageURL'] = url
# record['downloadURL'] = ''
# self['disclaimerText'] = ''
# self['disclaimerURL'] = ''
if 'programming_languages' in record:
project['languages'] = record['programming_languages']
# self['partners'] = []
# TODO: Look into using record['contributing_organizations']
# self['relatedCode'] = []
# self['reusedCode'] = []
# date: [object] A date object describing the release.
# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
if 'date_record_added' in record and 'date_record_updated' in record:
project['date'] = {
'created': record['date_record_added'],
# 'lastModified': '',
'metadataLastUpdated': record['date_record_updated']
}
return project
|
[
"Create",
"CodeGovProject",
"object",
"from",
"DOE",
"CODE",
"record"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/code_gov/models.py#L450-L573
|
[
"def",
"from_doecode",
"(",
"klass",
",",
"record",
")",
":",
"if",
"not",
"isinstance",
"(",
"record",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'`record` must be a dict'",
")",
"project",
"=",
"klass",
"(",
")",
"# -- REQUIRED FIELDS --",
"project",
"[",
"'name'",
"]",
"=",
"record",
"[",
"'software_title'",
"]",
"logger",
".",
"debug",
"(",
"'DOE CODE: software_title=\"%s\"'",
",",
"record",
"[",
"'software_title'",
"]",
")",
"link",
"=",
"record",
".",
"get",
"(",
"'repository_link'",
",",
"''",
")",
"if",
"not",
"link",
":",
"link",
"=",
"record",
".",
"get",
"(",
"'landing_page'",
")",
"logger",
".",
"warning",
"(",
"'DOE CODE: No repositoryURL, using landing_page: %s'",
",",
"link",
")",
"project",
"[",
"'repositoryURL'",
"]",
"=",
"link",
"project",
"[",
"'description'",
"]",
"=",
"record",
"[",
"'description'",
"]",
"licenses",
"=",
"set",
"(",
"record",
"[",
"'licenses'",
"]",
")",
"licenses",
".",
"discard",
"(",
"None",
")",
"logger",
".",
"debug",
"(",
"'DOE CODE: licenses=%s'",
",",
"licenses",
")",
"license_objects",
"=",
"[",
"]",
"if",
"'Other'",
"in",
"licenses",
":",
"licenses",
".",
"remove",
"(",
"'Other'",
")",
"license_objects",
"=",
"[",
"{",
"'name'",
":",
"'Other'",
",",
"'URL'",
":",
"record",
"[",
"'proprietary_url'",
"]",
"}",
"]",
"if",
"licenses",
":",
"license_objects",
".",
"extend",
"(",
"[",
"_license_obj",
"(",
"license",
")",
"for",
"license",
"in",
"licenses",
"]",
")",
"project",
"[",
"'permissions'",
"]",
"[",
"'licenses'",
"]",
"=",
"license_objects",
"if",
"record",
"[",
"'open_source'",
"]",
":",
"usage_type",
"=",
"'openSource'",
"else",
":",
"usage_type",
"=",
"'exemptByLaw'",
"project",
"[",
"'permissions'",
"]",
"[",
"'exemptionText'",
"]",
"=",
"'This source code is restricted by patent and / or intellectual property law.'",
"project",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"usage_type",
"# TODO: Compute from git repo",
"project",
"[",
"'laborHours'",
"]",
"=",
"0",
"project",
"[",
"'tags'",
"]",
"=",
"[",
"'DOE CODE'",
"]",
"lab_name",
"=",
"record",
".",
"get",
"(",
"'lab_display_name'",
")",
"if",
"lab_name",
"is",
"not",
"None",
":",
"project",
"[",
"'tags'",
"]",
".",
"append",
"(",
"lab_name",
")",
"project",
"[",
"'contact'",
"]",
"[",
"'email'",
"]",
"=",
"record",
"[",
"'owner'",
"]",
"# project['contact']['URL'] = ''",
"# project['contact']['name'] = ''",
"# project['contact']['phone'] = ''",
"# -- OPTIONAL FIELDS --",
"if",
"'version_number'",
"in",
"record",
"and",
"record",
"[",
"'version_number'",
"]",
":",
"project",
"[",
"'version'",
"]",
"=",
"record",
"[",
"'version_number'",
"]",
"if",
"lab_name",
"is",
"not",
"None",
":",
"project",
"[",
"'organization'",
"]",
"=",
"lab_name",
"# Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370",
"status",
"=",
"record",
".",
"get",
"(",
"'ever_announced'",
")",
"if",
"status",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'DOE CODE: Unable to determine \"ever_announced\" value!'",
")",
"elif",
"status",
":",
"status",
"=",
"'Production'",
"else",
":",
"status",
"=",
"'Development'",
"project",
"[",
"'status'",
"]",
"=",
"status",
"vcs",
"=",
"None",
"link",
"=",
"project",
"[",
"'repositoryURL'",
"]",
"if",
"'github.com'",
"in",
"link",
":",
"vcs",
"=",
"'git'",
"if",
"vcs",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"'DOE CODE: Unable to determine vcs for: name=\"%s\", repositoryURL=%s'",
",",
"project",
"[",
"'name'",
"]",
",",
"link",
")",
"vcs",
"=",
"''",
"if",
"vcs",
":",
"project",
"[",
"'vcs'",
"]",
"=",
"vcs",
"url",
"=",
"record",
".",
"get",
"(",
"'landing_page'",
",",
"''",
")",
"if",
"url",
":",
"project",
"[",
"'homepageURL'",
"]",
"=",
"url",
"# record['downloadURL'] = ''",
"# self['disclaimerText'] = ''",
"# self['disclaimerURL'] = ''",
"if",
"'programming_languages'",
"in",
"record",
":",
"project",
"[",
"'languages'",
"]",
"=",
"record",
"[",
"'programming_languages'",
"]",
"# self['partners'] = []",
"# TODO: Look into using record['contributing_organizations']",
"# self['relatedCode'] = []",
"# self['reusedCode'] = []",
"# date: [object] A date object describing the release.",
"# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.",
"# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.",
"# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.",
"if",
"'date_record_added'",
"in",
"record",
"and",
"'date_record_updated'",
"in",
"record",
":",
"project",
"[",
"'date'",
"]",
"=",
"{",
"'created'",
":",
"record",
"[",
"'date_record_added'",
"]",
",",
"# 'lastModified': '',",
"'metadataLastUpdated'",
":",
"record",
"[",
"'date_record_updated'",
"]",
"}",
"return",
"project"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
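from_doecode also takes a plain dict; this minimal record uses only keys the function reads, with illustrative values.

# Hedged example: minimal DOE CODE record accepted by from_doecode.
from scraper.code_gov.models import Project

record = {
    'software_title': 'Example Code',
    'repository_link': 'https://github.com/example/example-code',
    'description': 'Placeholder DOE CODE record',
    'licenses': ['MIT License', None],  # None entries are discarded by the function
    'open_source': True,
    'owner': 'owner@example.gov',
    'ever_announced': True,             # True maps to status 'Production'
}
project = Project.from_doecode(record)
print(project['status'], project['vcs'])  # 'Production' 'git' (github.com link)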
test
|
Project.from_tfs
|
Creates CodeGovProject object from TFS/VSTS/AzureDevOps Instance
|
scraper/code_gov/models.py
|
def from_tfs(klass, tfs_project, labor_hours=True):
"""
Creates CodeGovProject object from TFS/VSTS/AzureDevOps Instance
"""
project = klass()
project_web_url = ''
# -- REQUIRED FIELDS --
project['name'] = tfs_project.projectInfo.name
if 'web' in tfs_project.projectInfo._links.additional_properties:
if 'href' in tfs_project.projectInfo._links.additional_properties['web']:
# URL Encodes spaces that are in the Project Name for the Project Web URL
project_web_url = requote_uri(tfs_project.projectInfo._links.additional_properties['web']['href'])
project['repositoryURL'] = project_web_url
project['homepageURL'] = project_web_url
project['description'] = tfs_project.projectInfo.description
project['vcs'] = 'TFS/AzureDevOps'
project['permissions']['licenses'] = None
project['tags'] = []
if labor_hours:
logger.debug('Sorry labor hour calculation not currently supported.')
# project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
if tfs_project.projectCreateInfo.last_update_time < POLICY_START_DATE:
project['permissions']['usageType'] = 'exemptByPolicyDate'
else:
project['permissions']['usageType'] = 'exemptByAgencyMission'
project['permissions']['exemptionText'] = 'This source code resides on a private server and has not been properly evaluated for releasability.'
project['contact'] = {
'email': '',
'URL': project_web_url
}
project['date'] = {
'lastModified': tfs_project.projectLastUpdateInfo.last_update_time.date().isoformat(),
'created': tfs_project.projectCreateInfo.last_update_time.date().isoformat(),
'metadataLastUpdated': ''
}
_prune_dict_null_str(project)
return project
|
def from_tfs(klass, tfs_project, labor_hours=True):
"""
Creates CodeGovProject object from TFS/VSTS/AzureDevOps Instance
"""
project = klass()
project_web_url = ''
# -- REQUIRED FIELDS --
project['name'] = tfs_project.projectInfo.name
if 'web' in tfs_project.projectInfo._links.additional_properties:
if 'href' in tfs_project.projectInfo._links.additional_properties['web']:
# URL Encodes spaces that are in the Project Name for the Project Web URL
project_web_url = requote_uri(tfs_project.projectInfo._links.additional_properties['web']['href'])
project['repositoryURL'] = project_web_url
project['homepageURL'] = project_web_url
project['description'] = tfs_project.projectInfo.description
project['vcs'] = 'TFS/AzureDevOps'
project['permissions']['licenses'] = None
project['tags'] = []
if labor_hours:
logger.debug('Sorry labor hour calculation not currently supported.')
# project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
if tfs_project.projectCreateInfo.last_update_time < POLICY_START_DATE:
project['permissions']['usageType'] = 'exemptByPolicyDate'
else:
project['permissions']['usageType'] = 'exemptByAgencyMission'
project['permissions']['exemptionText'] = 'This source code resides on a private server and has not been properly evaluated for releasability.'
project['contact'] = {
'email': '',
'URL': project_web_url
}
project['date'] = {
'lastModified': tfs_project.projectLastUpdateInfo.last_update_time.date().isoformat(),
'created': tfs_project.projectCreateInfo.last_update_time.date().isoformat(),
'metadataLastUpdated': ''
}
_prune_dict_null_str(project)
return project
|
[
"Creates",
"CodeGovProject",
"object",
"from",
"TFS",
"/",
"VSTS",
"/",
"AzureDevOps",
"Instance"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/code_gov/models.py#L576-L628
|
[
"def",
"from_tfs",
"(",
"klass",
",",
"tfs_project",
",",
"labor_hours",
"=",
"True",
")",
":",
"project",
"=",
"klass",
"(",
")",
"project_web_url",
"=",
"''",
"# -- REQUIRED FIELDS --",
"project",
"[",
"'name'",
"]",
"=",
"tfs_project",
".",
"projectInfo",
".",
"name",
"if",
"'web'",
"in",
"tfs_project",
".",
"projectInfo",
".",
"_links",
".",
"additional_properties",
":",
"if",
"'href'",
"in",
"tfs_project",
".",
"projectInfo",
".",
"_links",
".",
"additional_properties",
"[",
"'web'",
"]",
":",
"# URL Encodes spaces that are in the Project Name for the Project Web URL",
"project_web_url",
"=",
"requote_uri",
"(",
"tfs_project",
".",
"projectInfo",
".",
"_links",
".",
"additional_properties",
"[",
"'web'",
"]",
"[",
"'href'",
"]",
")",
"project",
"[",
"'repositoryURL'",
"]",
"=",
"project_web_url",
"project",
"[",
"'homepageURL'",
"]",
"=",
"project_web_url",
"project",
"[",
"'description'",
"]",
"=",
"tfs_project",
".",
"projectInfo",
".",
"description",
"project",
"[",
"'vcs'",
"]",
"=",
"'TFS/AzureDevOps'",
"project",
"[",
"'permissions'",
"]",
"[",
"'license'",
"]",
"=",
"None",
"project",
"[",
"'tags'",
"]",
"=",
"[",
"]",
"if",
"labor_hours",
":",
"logger",
".",
"debug",
"(",
"'Sorry labor hour calculation not currently supported.'",
")",
"# project['laborHours'] = labor_hours_from_url(project['repositoryURL'])",
"else",
":",
"project",
"[",
"'laborHours'",
"]",
"=",
"0",
"if",
"tfs_project",
".",
"projectCreateInfo",
".",
"last_update_time",
"<",
"POLICY_START_DATE",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"'exemptByPolicyDate'",
"else",
":",
"project",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"'exemptByAgencyMission'",
"project",
"[",
"'permissions'",
"]",
"[",
"'exemptionText'",
"]",
"=",
"'This source code resides on a private server and has not been properly evaluated for releaseability.'",
"project",
"[",
"'contact'",
"]",
"=",
"{",
"'email'",
":",
"''",
",",
"'URL'",
":",
"project_web_url",
"}",
"project",
"[",
"'date'",
"]",
"=",
"{",
"'lastModified'",
":",
"tfs_project",
".",
"projectLastUpdateInfo",
".",
"last_update_time",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
",",
"'created'",
":",
"tfs_project",
".",
"projectCreateInfo",
".",
"last_update_time",
".",
"date",
"(",
")",
".",
"isoformat",
"(",
")",
",",
"'metadataLastUpdated'",
":",
"''",
"}",
"_prune_dict_null_str",
"(",
"project",
")",
"return",
"project"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
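Real callers pass the project metadata returned by tfs.get_projects_metadata() (used in process_config below); for illustration, a SimpleNamespace stub exposing the attributes from_tfs reads works as well. The URL and dates are placeholders, and POLICY_START_DATE is assumed comparable with the naive datetimes used here.

# Hedged example: stand-in object exposing the attributes from_tfs reads.
from datetime import datetime
from types import SimpleNamespace
from scraper.code_gov.models import Project

links = SimpleNamespace(additional_properties={
    'web': {'href': 'https://tfs.example.gov/Org/Example Project'}})
tfs_project = SimpleNamespace(
    projectInfo=SimpleNamespace(name='Example Project',
                                description='Placeholder description',
                                _links=links),
    projectCreateInfo=SimpleNamespace(last_update_time=datetime(2015, 1, 1)),
    projectLastUpdateInfo=SimpleNamespace(last_update_time=datetime(2019, 6, 1)),
)
project = Project.from_tfs(tfs_project, labor_hours=False)
print(project['name'], project['date']['created'])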
test
|
process_config
|
Master function to process a Scraper config file
Returns a Code.gov Metadata file
|
scraper/code_gov/__init__.py
|
def process_config(config):
"""
Master function to process a Scraper config file
Returns a Code.gov Metadata file
"""
agency = config.get('agency', 'UNKNOWN')
logger.debug('Agency: %s', agency)
method = config.get('method', 'other')
logger.debug('Inventory Method: %s', method)
compute_labor_hours = config.get('compute_labor_hours', True)
if config.get('contact_email', None) is None:
# A default contact email is required to handle the (frequent) case
# where a project / repository has no available contact email.
logger.warning('Config file should contain a "contact_email"')
logger.debug('Creating inventory from config: %s', config)
code_gov_metadata = Metadata(agency, method)
# Parse config for GitHub repositories
github_instances = config.get('GitHub', [])
if config.get('github_gov_orgs', False):
github_instances.append({
'url': 'https://github.com',
'orgs': gov_orgs(),
})
for instance in github_instances:
url = instance.get('url', 'https://github.com')
orgs = instance.get('orgs', [])
repos = instance.get('repos', [])
public_only = instance.get('public_only', True)
excluded = instance.get('exclude', [])
token = instance.get('token', None)
gh_session = github.connect(url, token)
for repo in github.query_repos(gh_session, orgs, repos, public_only):
if repo.owner.login in excluded or repo.full_name in excluded:
logger.info('Excluding: %s', repo.full_name)
continue
code_gov_project = Project.from_github3(repo, labor_hours=compute_labor_hours)
code_gov_metadata['releases'].append(code_gov_project)
# Parse config for GitLab repositories
gitlab_instances = config.get('GitLab', [])
for instance in gitlab_instances:
url = instance.get('url')
# orgs = instance.get('orgs', [])
repos = instance.get('repos', [])
# public_only = instance.get('public_only', True)
excluded = instance.get('exclude', [])
token = instance.get('token', None)
gl_session = gitlab.connect(url, token)
for repo in gitlab.query_repos(gl_session, repos):
namespace = repo.namespace['path']
path_with_namespace = repo.path_with_namespace
if namespace in excluded or path_with_namespace in excluded:
logger.info('Excluding: %s', repo.path_with_namespace)
continue
code_gov_project = Project.from_gitlab(repo, labor_hours=compute_labor_hours)
code_gov_metadata['releases'].append(code_gov_project)
# Parse config for Bitbucket repositories
bitbucket_instances = config.get('Bitbucket', [])
for instance in bitbucket_instances:
url = instance.get('url')
# orgs = instance.get('orgs', None)
# public_only = instance.get('public_only', True)
# token = instance.get('token', None)
username = instance.get('username')
password = instance.get('password')
excluded = instance.get('exclude', [])
bb_session = bitbucket.connect(url, username, password)
for repo in bitbucket.all_repos(bb_session):
project = repo['project']['key']
project_repo = '%s/%s' % (project, repo['slug'])
if project in excluded or project_repo in excluded:
logger.info('Excluding: %s', project_repo)
continue
code_gov_project = Project.from_stashy(repo, labor_hours=compute_labor_hours)
code_gov_metadata['releases'].append(code_gov_project)
# Parse config for TFS repositories
tfs_instances = config.get('TFS', [])
for instance in tfs_instances:
url = instance.get('url')
token = instance.get('token', None)
projects = tfs.get_projects_metadata(url, token)
for project in projects:
code_gov_project = Project.from_tfs(project, labor_hours=compute_labor_hours)
code_gov_metadata['releases'].append(code_gov_project)
# Handle parsing of DOE CODE records
doecode_config = config.get('DOE CODE', {})
doecode_json = doecode_config.get('json', None)
doecode_url = doecode_config.get('url', None)
doecode_key = doecode_config.get('api_key', None)
for record in doecode.process(doecode_json, doecode_url, doecode_key):
code_gov_project = Project.from_doecode(record)
code_gov_metadata['releases'].append(code_gov_project)
return code_gov_metadata
|
def process_config(config):
"""
Master function to process a Scraper config file
Returns a Code.gov Metadata file
"""
agency = config.get('agency', 'UNKNOWN')
logger.debug('Agency: %s', agency)
method = config.get('method', 'other')
logger.debug('Inventory Method: %s', method)
compute_labor_hours = config.get('compute_labor_hours', True)
if config.get('contact_email', None) is None:
# A default contact email is required to handle the (frequent) case
# where a project / repository has no available contact email.
logger.warning('Config file should contain a "contact_email"')
logger.debug('Creating inventory from config: %s', config)
code_gov_metadata = Metadata(agency, method)
# Parse config for GitHub repositories
github_instances = config.get('GitHub', [])
if config.get('github_gov_orgs', False):
github_instances.append({
'url': 'https://github.com',
'orgs': gov_orgs(),
})
for instance in github_instances:
url = instance.get('url', 'https://github.com')
orgs = instance.get('orgs', [])
repos = instance.get('repos', [])
public_only = instance.get('public_only', True)
excluded = instance.get('exclude', [])
token = instance.get('token', None)
gh_session = github.connect(url, token)
for repo in github.query_repos(gh_session, orgs, repos, public_only):
if repo.owner.login in excluded or repo.full_name in excluded:
logger.info('Excluding: %s', repo.full_name)
continue
code_gov_project = Project.from_github3(repo, labor_hours=compute_labor_hours)
code_gov_metadata['releases'].append(code_gov_project)
# Parse config for GitLab repositories
gitlab_instances = config.get('GitLab', [])
for instance in gitlab_instances:
url = instance.get('url')
# orgs = instance.get('orgs', [])
repos = instance.get('repos', [])
# public_only = instance.get('public_only', True)
excluded = instance.get('exclude', [])
token = instance.get('token', None)
gl_session = gitlab.connect(url, token)
for repo in gitlab.query_repos(gl_session, repos):
namespace = repo.namespace['path']
path_with_namespace = repo.path_with_namespace
if namespace in excluded or path_with_namespace in excluded:
logger.info('Excluding: %s', repo.path_with_namespace)
continue
code_gov_project = Project.from_gitlab(repo, labor_hours=compute_labor_hours)
code_gov_metadata['releases'].append(code_gov_project)
# Parse config for Bitbucket repositories
bitbucket_instances = config.get('Bitbucket', [])
for instance in bitbucket_instances:
url = instance.get('url')
# orgs = instance.get('orgs', None)
# public_only = instance.get('public_only', True)
# token = instance.get('token', None)
username = instance.get('username')
password = instance.get('password')
excluded = instance.get('exclude', [])
bb_session = bitbucket.connect(url, username, password)
for repo in bitbucket.all_repos(bb_session):
project = repo['project']['key']
project_repo = '%s/%s' % (project, repo['slug'])
if project in excluded or project_repo in excluded:
logger.info('Excluding: %s', project_repo)
continue
code_gov_project = Project.from_stashy(repo, labor_hours=compute_labor_hours)
code_gov_metadata['releases'].append(code_gov_project)
# Parse config for TFS repositories
tfs_instances = config.get('TFS', [])
for instance in tfs_instances:
url = instance.get('url')
token = instance.get('token', None)
projects = tfs.get_projects_metadata(url, token)
for project in projects:
code_gov_project = Project.from_tfs(project, labor_hours=compute_labor_hours)
code_gov_metadata['releases'].append(code_gov_project)
# Handle parsing of DOE CODE records
doecode_config = config.get('DOE CODE', {})
doecode_json = doecode_config.get('json', None)
doecode_url = doecode_config.get('url', None)
doecode_key = doecode_config.get('api_key', None)
for record in doecode.process(doecode_json, doecode_url, doecode_key):
code_gov_project = Project.from_doecode(record)
code_gov_metadata['releases'].append(code_gov_project)
return code_gov_metadata
|
[
"Master",
"function",
"to",
"process",
"a",
"Scraper",
"config",
"file"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/code_gov/__init__.py#L13-L128
|
[
"def",
"process_config",
"(",
"config",
")",
":",
"agency",
"=",
"config",
".",
"get",
"(",
"'agency'",
",",
"'UNKNOWN'",
")",
"logger",
".",
"debug",
"(",
"'Agency: %s'",
",",
"agency",
")",
"method",
"=",
"config",
".",
"get",
"(",
"'method'",
",",
"'other'",
")",
"logger",
".",
"debug",
"(",
"'Inventory Method: %s'",
",",
"method",
")",
"compute_labor_hours",
"=",
"config",
".",
"get",
"(",
"'compute_labor_hours'",
",",
"True",
")",
"if",
"config",
".",
"get",
"(",
"'contact_email'",
",",
"None",
")",
"is",
"None",
":",
"# A default contact email is required to handle the (frequent) case",
"# where a project / repository has no available contact email.",
"logger",
".",
"warning",
"(",
"'Config file should contain a \"contact_email\"'",
")",
"logger",
".",
"debug",
"(",
"'Creating inventory from config: %s'",
",",
"config",
")",
"code_gov_metadata",
"=",
"Metadata",
"(",
"agency",
",",
"method",
")",
"# Parse config for GitHub repositories",
"github_instances",
"=",
"config",
".",
"get",
"(",
"'GitHub'",
",",
"[",
"]",
")",
"if",
"config",
".",
"get",
"(",
"'github_gov_orgs'",
",",
"False",
")",
":",
"github_instances",
".",
"append",
"(",
"{",
"'url'",
":",
"'https://github.com'",
",",
"'orgs'",
":",
"gov_orgs",
"(",
")",
",",
"}",
")",
"for",
"instance",
"in",
"github_instances",
":",
"url",
"=",
"instance",
".",
"get",
"(",
"'url'",
",",
"'https://github.com'",
")",
"orgs",
"=",
"instance",
".",
"get",
"(",
"'orgs'",
",",
"[",
"]",
")",
"repos",
"=",
"instance",
".",
"get",
"(",
"'repos'",
",",
"[",
"]",
")",
"public_only",
"=",
"instance",
".",
"get",
"(",
"'public_only'",
",",
"True",
")",
"excluded",
"=",
"instance",
".",
"get",
"(",
"'exclude'",
",",
"[",
"]",
")",
"token",
"=",
"instance",
".",
"get",
"(",
"'token'",
",",
"None",
")",
"gh_session",
"=",
"github",
".",
"connect",
"(",
"url",
",",
"token",
")",
"for",
"repo",
"in",
"github",
".",
"query_repos",
"(",
"gh_session",
",",
"orgs",
",",
"repos",
",",
"public_only",
")",
":",
"if",
"repo",
".",
"owner",
".",
"login",
"in",
"excluded",
"or",
"repo",
".",
"full_name",
"in",
"excluded",
":",
"logger",
".",
"info",
"(",
"'Excluding: %s'",
",",
"repo",
".",
"full_name",
")",
"continue",
"code_gov_project",
"=",
"Project",
".",
"from_github3",
"(",
"repo",
",",
"labor_hours",
"=",
"compute_labor_hours",
")",
"code_gov_metadata",
"[",
"'releases'",
"]",
".",
"append",
"(",
"code_gov_project",
")",
"# Parse config for GitLab repositories",
"gitlab_instances",
"=",
"config",
".",
"get",
"(",
"'GitLab'",
",",
"[",
"]",
")",
"for",
"instance",
"in",
"gitlab_instances",
":",
"url",
"=",
"instance",
".",
"get",
"(",
"'url'",
")",
"# orgs = instance.get('orgs', [])",
"repos",
"=",
"instance",
".",
"get",
"(",
"'repos'",
",",
"[",
"]",
")",
"# public_only = instance.get('public_only', True)",
"excluded",
"=",
"instance",
".",
"get",
"(",
"'exclude'",
",",
"[",
"]",
")",
"token",
"=",
"instance",
".",
"get",
"(",
"'token'",
",",
"None",
")",
"gl_session",
"=",
"gitlab",
".",
"connect",
"(",
"url",
",",
"token",
")",
"for",
"repo",
"in",
"gitlab",
".",
"query_repos",
"(",
"gl_session",
",",
"repos",
")",
":",
"namespace",
"=",
"repo",
".",
"namespace",
"[",
"'path'",
"]",
"path_with_namespace",
"=",
"repo",
".",
"path_with_namespace",
"if",
"namespace",
"in",
"excluded",
"or",
"path_with_namespace",
"in",
"excluded",
":",
"logger",
".",
"info",
"(",
"'Excluding: %s'",
",",
"repo",
".",
"path_with_namespace",
")",
"continue",
"code_gov_project",
"=",
"Project",
".",
"from_gitlab",
"(",
"repo",
",",
"labor_hours",
"=",
"compute_labor_hours",
")",
"code_gov_metadata",
"[",
"'releases'",
"]",
".",
"append",
"(",
"code_gov_project",
")",
"# Parse config for Bitbucket repositories",
"bitbucket_instances",
"=",
"config",
".",
"get",
"(",
"'Bitbucket'",
",",
"[",
"]",
")",
"for",
"instance",
"in",
"bitbucket_instances",
":",
"url",
"=",
"instance",
".",
"get",
"(",
"'url'",
")",
"# orgs = instance.get('orgs', None)",
"# public_only = instance.get('public_only', True)",
"# token = instance.get('token', None)",
"username",
"=",
"instance",
".",
"get",
"(",
"'username'",
")",
"password",
"=",
"instance",
".",
"get",
"(",
"'password'",
")",
"excluded",
"=",
"instance",
".",
"get",
"(",
"'exclude'",
",",
"[",
"]",
")",
"bb_session",
"=",
"bitbucket",
".",
"connect",
"(",
"url",
",",
"username",
",",
"password",
")",
"for",
"repo",
"in",
"bitbucket",
".",
"all_repos",
"(",
"bb_session",
")",
":",
"project",
"=",
"repo",
"[",
"'project'",
"]",
"[",
"'key'",
"]",
"project_repo",
"=",
"'%s/%s'",
"%",
"(",
"project",
",",
"repo",
"[",
"'slug'",
"]",
")",
"if",
"project",
"in",
"excluded",
"or",
"project_repo",
"in",
"excluded",
":",
"logger",
".",
"info",
"(",
"'Excluding: %s'",
",",
"project_repo",
")",
"continue",
"code_gov_project",
"=",
"Project",
".",
"from_stashy",
"(",
"repo",
",",
"labor_hours",
"=",
"compute_labor_hours",
")",
"code_gov_metadata",
"[",
"'releases'",
"]",
".",
"append",
"(",
"code_gov_project",
")",
"# Parse config for TFS repositories",
"tfs_instances",
"=",
"config",
".",
"get",
"(",
"'TFS'",
",",
"[",
"]",
")",
"for",
"instance",
"in",
"tfs_instances",
":",
"url",
"=",
"instance",
".",
"get",
"(",
"'url'",
")",
"token",
"=",
"instance",
".",
"get",
"(",
"'token'",
",",
"None",
")",
"projects",
"=",
"tfs",
".",
"get_projects_metadata",
"(",
"url",
",",
"token",
")",
"for",
"project",
"in",
"projects",
":",
"code_gov_project",
"=",
"Project",
".",
"from_tfs",
"(",
"project",
",",
"labor_hours",
"=",
"compute_labor_hours",
")",
"code_gov_metadata",
"[",
"'releases'",
"]",
".",
"append",
"(",
"code_gov_project",
")",
"# Handle parsing of DOE CODE records",
"doecode_config",
"=",
"config",
".",
"get",
"(",
"'DOE CODE'",
",",
"{",
"}",
")",
"doecode_json",
"=",
"doecode_config",
".",
"get",
"(",
"'json'",
",",
"None",
")",
"doecode_url",
"=",
"doecode_config",
".",
"get",
"(",
"'url'",
",",
"None",
")",
"doecode_key",
"=",
"doecode_config",
".",
"get",
"(",
"'api_key'",
",",
"None",
")",
"for",
"record",
"in",
"doecode",
".",
"process",
"(",
"doecode_json",
",",
"doecode_url",
",",
"doecode_key",
")",
":",
"code_gov_project",
"=",
"Project",
".",
"from_doecode",
"(",
"record",
")",
"code_gov_metadata",
"[",
"'releases'",
"]",
".",
"append",
"(",
"code_gov_project",
")",
"return",
"code_gov_metadata"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
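A minimal config for process_config; the agency, org, and email values are placeholders, and the keys mirror what the function reads above. A real token should be supplied via the environment for authenticated GitHub access.

# Hedged example: minimal Scraper config processed into a Code.gov inventory.
import os
from scraper.code_gov import process_config

config = {
    'agency': 'DOE',                        # placeholder agency acronym
    'method': 'other',
    'contact_email': 'opensource@example.gov',
    'compute_labor_hours': False,           # skip labor-hour estimation
    'GitHub': [{'url': 'https://github.com', 'orgs': ['LLNL'],
                'token': os.environ.get('GH_TOKEN')}],
}
metadata = process_config(config)
print(len(metadata['releases']), 'releases inventoried')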
test
|
force_attributes
|
Forces certain fields in the Code.gov Metadata json
|
scraper/code_gov/__init__.py
|
def force_attributes(metadata, config):
"""
Forces certain fields in the Code.gov Metadata json
"""
organization = config.get('organization', '')
logger.debug('Organization: %s', organization)
contact_email = config.get('contact_email')
logger.debug('Contact Email: %s', contact_email)
permissions = config.get('permissions', {})
default_usage = permissions.get('usageType', '')
default_exemption_text = permissions.get('exemptionText', '')
logger.debug('Default usageType: %s', default_usage)
logger.debug('Default exemptionText: %s', default_exemption_text)
# Force certain fields
if organization:
logger.debug('Forcing Organization to: %s', organization)
if contact_email:
logger.debug('Forcing Contact Email to: %s', contact_email)
for release in metadata['releases']:
if organization:
release['organization'] = organization
if contact_email:
release['contact']['email'] = contact_email
if 'licenses' not in release['permissions']:
release['permissions']['licenses'] = None
if 'description' not in release:
release['description'] = 'No description available...'
if 'usageType' not in release['permissions']:
release['permissions']['usageType'] = default_usage
release['permissions']['exemptionText'] = default_exemption_text
return metadata
|
def force_attributes(metadata, config):
"""
Forces certain fields in the Code.gov Metadata json
"""
organization = config.get('organization', '')
logger.debug('Organization: %s', organization)
contact_email = config.get('contact_email')
logger.debug('Contact Email: %s', contact_email)
permissions = config.get('permissions', {})
default_usage = permissions.get('usageType', '')
default_exemption_text = permissions.get('exemptionText', '')
logger.debug('Default usageType: %s', default_usage)
logger.debug('Default exemptionText: %s', default_exemption_text)
# Force certain fields
if organization:
logger.debug('Forcing Organization to: %s', organization)
if contact_email:
logger.debug('Forcing Contact Email to: %s', contact_email)
for release in metadata['releases']:
if organization:
release['organization'] = organization
if contact_email:
release['contact']['email'] = contact_email
if 'licenses' not in release['permissions']:
release['permissions']['licenses'] = None
if 'description' not in release:
release['description'] = 'No description available...'
if 'usageType' not in release['permissions']:
release['permissions']['usageType'] = default_usage
release['permissions']['exemptionText'] = default_exemption_text
return metadata
|
[
"Forces",
"certain",
"fields",
"in",
"the",
"Code",
".",
"gov",
"Metadata",
"json"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/code_gov/__init__.py#L131-L172
|
[
"def",
"force_attributes",
"(",
"metadata",
",",
"config",
")",
":",
"organization",
"=",
"config",
".",
"get",
"(",
"'organization'",
",",
"''",
")",
"logger",
".",
"debug",
"(",
"'Organization: %s'",
",",
"organization",
")",
"contact_email",
"=",
"config",
".",
"get",
"(",
"'contact_email'",
")",
"logger",
".",
"debug",
"(",
"'Contact Email: %s'",
",",
"contact_email",
")",
"permissions",
"=",
"config",
".",
"get",
"(",
"'permissions'",
",",
"{",
"}",
")",
"default_usage",
"=",
"permissions",
".",
"get",
"(",
"'usageType'",
",",
"''",
")",
"default_exemption_text",
"=",
"permissions",
".",
"get",
"(",
"'exemptionText'",
",",
"''",
")",
"logger",
".",
"debug",
"(",
"'Default usageType: %s'",
",",
"default_usage",
")",
"logger",
".",
"debug",
"(",
"'Default exemptionText: %s'",
",",
"default_exemption_text",
")",
"# Force certain fields",
"if",
"organization",
":",
"logger",
".",
"debug",
"(",
"'Forcing Organization to: %s'",
",",
"organization",
")",
"if",
"contact_email",
":",
"logger",
".",
"debug",
"(",
"'Forcing Contact Email to: %s'",
",",
"contact_email",
")",
"for",
"release",
"in",
"metadata",
"[",
"'releases'",
"]",
":",
"if",
"organization",
":",
"release",
"[",
"'organization'",
"]",
"=",
"organization",
"if",
"contact_email",
":",
"release",
"[",
"'contact'",
"]",
"[",
"'email'",
"]",
"=",
"contact_email",
"if",
"'licenses'",
"not",
"in",
"release",
"[",
"'permissions'",
"]",
":",
"release",
"[",
"'permissions'",
"]",
"[",
"'licenses'",
"]",
"=",
"None",
"if",
"'description'",
"not",
"in",
"release",
":",
"release",
"[",
"'description'",
"]",
"=",
"'No description available...'",
"if",
"'usageType'",
"not",
"in",
"release",
"[",
"'permissions'",
"]",
":",
"release",
"[",
"'permissions'",
"]",
"[",
"'usageType'",
"]",
"=",
"default_usage",
"release",
"[",
"'permissions'",
"]",
"[",
"'exemptionText'",
"]",
"=",
"default_exemption_text",
"return",
"metadata"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
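A short usage sketch for force_attributes; the import path follows from the record's scraper/code_gov/__init__.py location, while the config and metadata values are illustrative placeholders.

from scraper.code_gov import force_attributes

config = {
    'organization': 'LLNL',
    'contact_email': 'maintainer@example.gov',   # placeholder address
    'permissions': {'usageType': 'openSource', 'exemptionText': ''},
}
metadata = {'releases': [{'name': 'demo', 'permissions': {}, 'contact': {}}]}
metadata = force_attributes(metadata, config)
# each release now carries the forced organization and contact email, plus
# default description, licenses, usageType, and exemptionText fields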
test
|
_license_obj
|
A helper function to look up license object information
Use names from: https://api.github.com/licenses
|
scraper/github/util.py
|
def _license_obj(license):
"""
A helper function to look up license object information
Use names from: https://api.github.com/licenses
"""
obj = None
if license in ('MIT', 'MIT License'):
obj = {
'URL': 'https://api.github.com/licenses/mit',
'name': 'MIT'
}
    elif license in ('BSD 2-clause "Simplified" License',):
        obj = {
            'URL': 'https://api.github.com/licenses/bsd-2-clause',
            'name': 'BSD-2-Clause'
        }
    elif license in ('BSD 3-clause "New" or "Revised" License',):
        obj = {
            'URL': 'https://api.github.com/licenses/bsd-3-clause',
            'name': 'BSD-3-Clause'
        }
    elif license in ('Apache License 2.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/apache-2.0',
            'name': 'Apache-2.0'
        }
    elif license in ('GNU General Public License v2.1',):
        obj = {
            'URL': 'https://api.github.com/licenses/gpl-2.1',
            'name': 'GPL-2.1'
        }
    elif license in ('GNU General Public License v2.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/gpl-2.0',
            'name': 'GPL-2.0'
        }
    elif license in ('GNU Lesser General Public License v2.1',):
        obj = {
            'URL': 'https://api.github.com/licenses/lgpl-2.1',
            'name': 'LGPL-2.1'
        }
    elif license in ('GNU General Public License v3.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/gpl-3.0',
            'name': 'GPL-3.0'
        }
    elif license in ('GNU Lesser General Public License v3.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/lgpl-3.0',
            'name': 'LGPL-3.0'
        }
    elif license in ('Eclipse Public License 1.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/epl-1.0',
            'name': 'EPL-1.0',
        }
    elif license in ('Mozilla Public License 2.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/mpl-2.0',
            'name': 'MPL-2.0',
        }
    elif license in ('The Unlicense',):
        obj = {
            'URL': 'https://api.github.com/licenses/unlicense',
            'name': 'Unlicense',
        }
    elif license in ('GNU Affero General Public License v3.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/agpl-3.0',
            'name': 'AGPL-3.0',
        }
    elif license in ('Eclipse Public License 2.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/epl-2.0',
            'name': 'EPL-2.0',
        }
    if obj is None:
        logger.warning("I don't understand the license: %s", license)
        raise ValueError('Aborting!')
    return obj
|
def _license_obj(license):
"""
A helper function to look up license object information
Use names from: https://api.github.com/licenses
"""
obj = None
if license in ('MIT', 'MIT License'):
obj = {
'URL': 'https://api.github.com/licenses/mit',
'name': 'MIT'
}
    elif license in ('BSD 2-clause "Simplified" License',):
        obj = {
            'URL': 'https://api.github.com/licenses/bsd-2-clause',
            'name': 'BSD-2-Clause'
        }
    elif license in ('BSD 3-clause "New" or "Revised" License',):
        obj = {
            'URL': 'https://api.github.com/licenses/bsd-3-clause',
            'name': 'BSD-3-Clause'
        }
    elif license in ('Apache License 2.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/apache-2.0',
            'name': 'Apache-2.0'
        }
    elif license in ('GNU General Public License v2.1',):
        obj = {
            'URL': 'https://api.github.com/licenses/gpl-2.1',
            'name': 'GPL-2.1'
        }
    elif license in ('GNU General Public License v2.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/gpl-2.0',
            'name': 'GPL-2.0'
        }
    elif license in ('GNU Lesser General Public License v2.1',):
        obj = {
            'URL': 'https://api.github.com/licenses/lgpl-2.1',
            'name': 'LGPL-2.1'
        }
    elif license in ('GNU General Public License v3.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/gpl-3.0',
            'name': 'GPL-3.0'
        }
    elif license in ('GNU Lesser General Public License v3.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/lgpl-3.0',
            'name': 'LGPL-3.0'
        }
    elif license in ('Eclipse Public License 1.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/epl-1.0',
            'name': 'EPL-1.0',
        }
    elif license in ('Mozilla Public License 2.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/mpl-2.0',
            'name': 'MPL-2.0',
        }
    elif license in ('The Unlicense',):
        obj = {
            'URL': 'https://api.github.com/licenses/unlicense',
            'name': 'Unlicense',
        }
    elif license in ('GNU Affero General Public License v3.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/agpl-3.0',
            'name': 'AGPL-3.0',
        }
    elif license in ('Eclipse Public License 2.0',):
        obj = {
            'URL': 'https://api.github.com/licenses/epl-2.0',
            'name': 'EPL-2.0',
        }
    if obj is None:
        logger.warning("I don't understand the license: %s", license)
        raise ValueError('Aborting!')
    return obj
|
[
"A",
"helper",
"function",
"to",
"look",
"up",
"license",
"object",
"information"
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scraper/github/util.py#L6-L89
|
[
"def",
"_license_obj",
"(",
"license",
")",
":",
"obj",
"=",
"None",
"if",
"license",
"in",
"(",
"'MIT'",
",",
"'MIT License'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/mit'",
",",
"'name'",
":",
"'MIT'",
"}",
"elif",
"license",
"in",
"(",
"'BSD 2-clause \"Simplified\" License'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/bsd-2-clause'",
",",
"'name'",
":",
"'BSD-2-Clause'",
"}",
"elif",
"license",
"in",
"(",
"'BSD 3-clause \"New\" or \"Revised\" License'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/bsd-3-clause'",
",",
"'name'",
":",
"'BSD-3-Clause'",
"}",
"elif",
"license",
"in",
"(",
"'Apache License 2.0'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/apache-2.0'",
",",
"'name'",
":",
"'Apache-2.0'",
"}",
"elif",
"license",
"in",
"(",
"'GNU General Public License v2.1'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/gpl-2.1'",
",",
"'name'",
":",
"'GPL-2.1'",
"}",
"elif",
"license",
"in",
"(",
"'GNU General Public License v2.0'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/gpl-2.0'",
",",
"'name'",
":",
"'GPL-2.0'",
"}",
"elif",
"license",
"in",
"(",
"'GNU Lesser General Public License v2.1'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/lgpl-2.1'",
",",
"'name'",
":",
"'LGPL-2.1'",
"}",
"elif",
"license",
"in",
"(",
"'GNU General Public License v3.0'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/gpl-3.0'",
",",
"'name'",
":",
"'GPL-3.0'",
"}",
"elif",
"license",
"in",
"(",
"'GNU Lesser General Public License v3.0'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/lgpl-3.0'",
",",
"'name'",
":",
"'LGPL-3.0'",
"}",
"elif",
"license",
"in",
"(",
"'Eclipse Public License 1.0'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/epl-1.0'",
",",
"'name'",
":",
"'EPL-1.0'",
",",
"}",
"elif",
"license",
"in",
"(",
"'Mozilla Public License 2.0'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/mpl-2.0'",
",",
"'name'",
":",
"'MPL-2.0'",
",",
"}",
"elif",
"license",
"in",
"(",
"'The Unlicense'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/unlicense'",
",",
"'name'",
":",
"'Unlicense'",
",",
"}",
"elif",
"license",
"in",
"(",
"'GNU Affero General Public License v3.0'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/agpl-3.0'",
",",
"'name'",
":",
"'AGPL-3.0'",
",",
"}",
"elif",
"license",
"in",
"(",
"'Eclipse Public License 2.0'",
")",
":",
"obj",
"=",
"{",
"'URL'",
":",
"'https://api.github.com/licenses/epl-2.0'",
",",
"'name'",
":",
"'EPL-2.0'",
",",
"}",
"if",
"obj",
"is",
"None",
":",
"logger",
".",
"warn",
"(",
"'I dont understand the license: %s'",
",",
"license",
")",
"raise",
"ValueError",
"(",
"'Aborting!'",
")",
"return",
"obj"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
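Note the trailing commas in the single-element tuples above: without them, an expression like license in ('Apache License 2.0') tests substring membership in a plain string rather than tuple membership. A table-driven lookup avoids that pitfall and is easier to extend; this is a sketch built from the names and slugs in the record, not the repository's actual API.

_LICENSES = {
    'MIT': ('mit', 'MIT'),
    'MIT License': ('mit', 'MIT'),
    'BSD 2-clause "Simplified" License': ('bsd-2-clause', 'BSD-2-Clause'),
    'BSD 3-clause "New" or "Revised" License': ('bsd-3-clause', 'BSD-3-Clause'),
    'Apache License 2.0': ('apache-2.0', 'Apache-2.0'),
    'GNU General Public License v2.1': ('gpl-2.1', 'GPL-2.1'),
    'GNU General Public License v2.0': ('gpl-2.0', 'GPL-2.0'),
    'GNU General Public License v3.0': ('gpl-3.0', 'GPL-3.0'),
    'GNU Lesser General Public License v2.1': ('lgpl-2.1', 'LGPL-2.1'),
    'GNU Lesser General Public License v3.0': ('lgpl-3.0', 'LGPL-3.0'),
    'GNU Affero General Public License v3.0': ('agpl-3.0', 'AGPL-3.0'),
    'Mozilla Public License 2.0': ('mpl-2.0', 'MPL-2.0'),
    'Eclipse Public License 1.0': ('epl-1.0', 'EPL-1.0'),
    'Eclipse Public License 2.0': ('epl-2.0', 'EPL-2.0'),
    'The Unlicense': ('unlicense', 'Unlicense'),
}

def license_obj(name):
    # look up the API slug and SPDX-style name for a GitHub license string
    try:
        slug, spdx = _LICENSES[name]
    except KeyError:
        raise ValueError('Unknown license: %s' % name)
    return {'URL': 'https://api.github.com/licenses/' + slug, 'name': spdx}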
test
|
GitHub_Traffic.get_stats
|
Retrieves the traffic for the users of the given organization.
Requires organization admin credentials token to access the data.
|
scripts/get_traffic.py
|
def get_stats(self, username='', password='', organization='llnl', force=True):
"""
Retrieves the traffic for the users of the given organization.
Requires organization admin credentials token to access the data.
"""
date = str(datetime.date.today())
referrers_file_path = ('../github_stats_output/referrers.csv')
views_file_path = ('../github_stats_output/views.csv')
clones_file_path = ('../github_stats_output/clones.csv')
        # regenerate unless every output file already exists (assumed intent)
        if force or not all(os.path.isfile(p) for p in
                            (referrers_file_path, views_file_path,
                             clones_file_path)):
my_github.login(username, password)
calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
print 'Rate Limit: ' + str(calls_beginning)
my_github.get_org(organization)
my_github.get_traffic()
views_row_count = my_github.check_data_redundancy(file_path=views_file_path,
dict_to_check=self.views)
clones_row_count = my_github.check_data_redundancy(file_path=clones_file_path,
dict_to_check=self.clones)
my_github.write_to_file(referrers_file_path=referrers_file_path,
views_file_path=views_file_path,
clones_file_path=clones_file_path,
views_row_count=views_row_count,
clones_row_count=clones_row_count)
my_github.write_json(dict_to_write=self.referrers_json,
path_ending_type='traffic_popular_referrers')
my_github.write_json(dict_to_write=self.views_json,
path_ending_type='traffic_views')
my_github.write_json(dict_to_write=self.clones_json,
path_ending_type='traffic_clones')
my_github.write_json(dict_to_write=self.releases_json,
path_ending_type='releases')
calls_remaining = self.logged_in_gh.ratelimit_remaining
calls_used = calls_beginning - calls_remaining
print ('Rate Limit Remaining: ' + str(calls_remaining) + '\nUsed '
+ str(calls_used) + ' API calls.')
|
def get_stats(self, username='', password='', organization='llnl', force=True):
"""
Retrieves the traffic for the users of the given organization.
Requires organization admin credentials token to access the data.
"""
date = str(datetime.date.today())
referrers_file_path = ('../github_stats_output/referrers.csv')
views_file_path = ('../github_stats_output/views.csv')
clones_file_path = ('../github_stats_output/clones.csv')
        # regenerate unless every output file already exists (assumed intent)
        if force or not all(os.path.isfile(p) for p in
                            (referrers_file_path, views_file_path,
                             clones_file_path)):
my_github.login(username, password)
calls_beginning = self.logged_in_gh.ratelimit_remaining + 1
print 'Rate Limit: ' + str(calls_beginning)
my_github.get_org(organization)
my_github.get_traffic()
views_row_count = my_github.check_data_redundancy(file_path=views_file_path,
dict_to_check=self.views)
clones_row_count = my_github.check_data_redundancy(file_path=clones_file_path,
dict_to_check=self.clones)
my_github.write_to_file(referrers_file_path=referrers_file_path,
views_file_path=views_file_path,
clones_file_path=clones_file_path,
views_row_count=views_row_count,
clones_row_count=clones_row_count)
my_github.write_json(dict_to_write=self.referrers_json,
path_ending_type='traffic_popular_referrers')
my_github.write_json(dict_to_write=self.views_json,
path_ending_type='traffic_views')
my_github.write_json(dict_to_write=self.clones_json,
path_ending_type='traffic_clones')
my_github.write_json(dict_to_write=self.releases_json,
path_ending_type='releases')
calls_remaining = self.logged_in_gh.ratelimit_remaining
calls_used = calls_beginning - calls_remaining
print ('Rate Limit Remaining: ' + str(calls_remaining) + '\nUsed '
+ str(calls_used) + ' API calls.')
|
[
"Retrieves",
"the",
"traffic",
"for",
"the",
"users",
"of",
"the",
"given",
"organization",
".",
"Requires",
"organization",
"admin",
"credentials",
"token",
"to",
"access",
"the",
"data",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L18-L53
|
[
"def",
"get_stats",
"(",
"self",
",",
"username",
"=",
"''",
",",
"password",
"=",
"''",
",",
"organization",
"=",
"'llnl'",
",",
"force",
"=",
"True",
")",
":",
"date",
"=",
"str",
"(",
"datetime",
".",
"date",
".",
"today",
"(",
")",
")",
"referrers_file_path",
"=",
"(",
"'../github_stats_output/referrers.csv'",
")",
"views_file_path",
"=",
"(",
"'../github_stats_output/views.csv'",
")",
"clones_file_path",
"=",
"(",
"'../github_stats_output/clones.csv'",
")",
"if",
"force",
"or",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"my_github",
".",
"login",
"(",
"username",
",",
"password",
")",
"calls_beginning",
"=",
"self",
".",
"logged_in_gh",
".",
"ratelimit_remaining",
"+",
"1",
"print",
"'Rate Limit: '",
"+",
"str",
"(",
"calls_beginning",
")",
"my_github",
".",
"get_org",
"(",
"organization",
")",
"my_github",
".",
"get_traffic",
"(",
")",
"views_row_count",
"=",
"my_github",
".",
"check_data_redundancy",
"(",
"file_path",
"=",
"views_file_path",
",",
"dict_to_check",
"=",
"self",
".",
"views",
")",
"clones_row_count",
"=",
"my_github",
".",
"check_data_redundancy",
"(",
"file_path",
"=",
"clones_file_path",
",",
"dict_to_check",
"=",
"self",
".",
"clones",
")",
"my_github",
".",
"write_to_file",
"(",
"referrers_file_path",
"=",
"referrers_file_path",
",",
"views_file_path",
"=",
"views_file_path",
",",
"clones_file_path",
"=",
"clones_file_path",
",",
"views_row_count",
"=",
"views_row_count",
",",
"clones_row_count",
"=",
"clones_row_count",
")",
"my_github",
".",
"write_json",
"(",
"dict_to_write",
"=",
"self",
".",
"referrers_json",
",",
"path_ending_type",
"=",
"'traffic_popular_referrers'",
")",
"my_github",
".",
"write_json",
"(",
"dict_to_write",
"=",
"self",
".",
"views_json",
",",
"path_ending_type",
"=",
"'traffic_views'",
")",
"my_github",
".",
"write_json",
"(",
"dict_to_write",
"=",
"self",
".",
"clones_json",
",",
"path_ending_type",
"=",
"'traffic_clones'",
")",
"my_github",
".",
"write_json",
"(",
"dict_to_write",
"=",
"self",
".",
"releases_json",
",",
"path_ending_type",
"=",
"'releases'",
")",
"calls_remaining",
"=",
"self",
".",
"logged_in_gh",
".",
"ratelimit_remaining",
"calls_used",
"=",
"calls_beginning",
"-",
"calls_remaining",
"print",
"(",
"'Rate Limit Remaining: '",
"+",
"str",
"(",
"calls_remaining",
")",
"+",
"'\\nUsed '",
"+",
"str",
"(",
"calls_used",
")",
"+",
"' API calls.'",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
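Two things worth noting about get_stats: the record is Python 2 code (bare print statements), and the method drives a module-level my_github object rather than self, so the instance must already be bound to that name. A hypothetical driver follows; GitHub_Traffic's constructor arguments are not shown in the record, so the bare call is an assumption, and the credentials are placeholders.

my_github = GitHub_Traffic()   # constructor signature assumed
my_github.get_stats(username='someuser', password='app-token',
                    organization='llnl', force=True)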
test
|
GitHub_Traffic.get_traffic
|
Retrieves the traffic for the repositories of the given organization.
|
scripts/get_traffic.py
|
def get_traffic(self):
"""
Retrieves the traffic for the repositories of the given organization.
"""
print 'Getting traffic.'
#Uses the developer API. Note this could change.
headers = {'Accept': 'application/vnd.github.spiderman-preview', 'Authorization': 'token ' + self.token}
headers_release = {'Authorization': 'token ' + self.token}
for repo in self.org_retrieved.iter_repos(type='public'):
url = ('https://api.github.com/repos/' + self.organization_name
+ '/' + repo.name)
self.get_referrers(url=url, headers=headers, repo_name=repo.name)
self.get_paths(url=url, headers=headers)
self.get_data(url=url, headers=headers, dict_to_store=self.views,
type='views', repo_name=repo.name)
self.get_data(url=url, headers=headers, dict_to_store=self.clones,
type='clones', repo_name=repo.name)
self.get_releases(url=url, headers=headers_release, repo_name=repo.name)
|
def get_traffic(self):
"""
Retrieves the traffic for the repositories of the given organization.
"""
print 'Getting traffic.'
#Uses the developer API. Note this could change.
headers = {'Accept': 'application/vnd.github.spiderman-preview', 'Authorization': 'token ' + self.token}
headers_release = {'Authorization': 'token ' + self.token}
for repo in self.org_retrieved.iter_repos(type='public'):
url = ('https://api.github.com/repos/' + self.organization_name
+ '/' + repo.name)
self.get_referrers(url=url, headers=headers, repo_name=repo.name)
self.get_paths(url=url, headers=headers)
self.get_data(url=url, headers=headers, dict_to_store=self.views,
type='views', repo_name=repo.name)
self.get_data(url=url, headers=headers, dict_to_store=self.clones,
type='clones', repo_name=repo.name)
self.get_releases(url=url, headers=headers_release, repo_name=repo.name)
|
[
"Retrieves",
"the",
"traffic",
"for",
"the",
"repositories",
"of",
"the",
"given",
"organization",
"."
] |
LLNL/scraper
|
python
|
https://github.com/LLNL/scraper/blob/881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea/scripts/get_traffic.py#L115-L132
|
[
"def",
"get_traffic",
"(",
"self",
")",
":",
"print",
"'Getting traffic.'",
"#Uses the developer API. Note this could change.",
"headers",
"=",
"{",
"'Accept'",
":",
"'application/vnd.github.spiderman-preview'",
",",
"'Authorization'",
":",
"'token '",
"+",
"self",
".",
"token",
"}",
"headers_release",
"=",
"{",
"'Authorization'",
":",
"'token '",
"+",
"self",
".",
"token",
"}",
"for",
"repo",
"in",
"self",
".",
"org_retrieved",
".",
"iter_repos",
"(",
"type",
"=",
"'public'",
")",
":",
"url",
"=",
"(",
"'https://api.github.com/repos/'",
"+",
"self",
".",
"organization_name",
"+",
"'/'",
"+",
"repo",
".",
"name",
")",
"self",
".",
"get_referrers",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"repo_name",
"=",
"repo",
".",
"name",
")",
"self",
".",
"get_paths",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
")",
"self",
".",
"get_data",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"dict_to_store",
"=",
"self",
".",
"views",
",",
"type",
"=",
"'views'",
",",
"repo_name",
"=",
"repo",
".",
"name",
")",
"self",
".",
"get_data",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"dict_to_store",
"=",
"self",
".",
"clones",
",",
"type",
"=",
"'clones'",
",",
"repo_name",
"=",
"repo",
".",
"name",
")",
"self",
".",
"get_releases",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"headers_release",
",",
"repo_name",
"=",
"repo",
".",
"name",
")"
] |
881a316e4c04dfa5a9cf491b7c7f9f997a7c56ea
|
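get_traffic sends the application/vnd.github.spiderman-preview media type that the traffic endpoints required while in preview; those endpoints have since been promoted to the stable REST API. A standalone sketch of one such call using requests; the token is a placeholder and the repository is used only for illustration.

import requests

token = 'ghp_example'   # placeholder personal access token
url = 'https://api.github.com/repos/llnl/scraper/traffic/views'
resp = requests.get(url, headers={'Authorization': 'token ' + token})
resp.raise_for_status()
# the stable traffic API returns daily view counts for the last 14 days
for day in resp.json().get('views', []):
    print(day['timestamp'], day['count'], day['uniques'])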