Column schema for this split (string columns report min–max lengths; list columns report min–max item counts; `stringclasses` columns take a fixed number of distinct values):

| column | dtype | range |
|---|---|---|
| partition | string (3 classes) | — |
| func_name | string | 1–134 chars |
| docstring | string | 1–46.9k chars |
| path | string | 4–223 chars |
| original_string | string | 75–104k chars |
| code | string | 75–104k chars |
| docstring_tokens | list | 1–1.97k items |
| repo | string | 7–55 chars |
| language | string (1 class) | — |
| url | string | 87–315 chars |
| code_tokens | list | 19–28.4k items |
| sha | string | 40–40 chars |
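
The rows below can also be read programmatically. A minimal sketch with the `datasets` library, assuming the split is stored as JSON lines under a hypothetical local path (`data/test.jsonl` is a placeholder, not a path given by this card):

```python
from datasets import load_dataset

# "data/test.jsonl" is a hypothetical placeholder; substitute the dataset's
# published name or actual data files.
ds = load_dataset("json", data_files="data/test.jsonl", split="train")

row = ds[0]
print(row["func_name"])         # e.g. "Screen.__render_left_panel"
print(row["path"])              # file the function came from
print(len(row["code_tokens"]))  # tokenized form of row["code"]
```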

---

**partition:** test
**func_name:** `Screen.__render_left_panel`
**docstring:** Render left blocks
**path:** yandextank/plugins/Console/screen.py
**code:**

```python
def __render_left_panel(self):
''' Render left blocks '''
self.log.debug("Rendering left blocks")
left_block = self.left_panel
left_block.render()
blank_space = self.left_panel_width - left_block.width
lines = []
pre_space = ' ' * int(blank_space / 2)
if not left_block.lines:
lines = [(''), (self.markup.RED + 'BROKEN LEFT PANEL' + self.markup.RESET)]
else:
while self.left_panel.lines:
src_line = self.left_panel.lines.pop(0)
line = pre_space + self.__truncate(src_line, self.left_panel_width)
post_space = ' ' * (self.left_panel_width - len(self.markup.clean_markup(line)))
line += post_space + self.markup.RESET
lines.append(line)
return lines
```

**docstring_tokens:** ["Render", "left", "blocks"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Console/screen.py#L365-L383
**code_tokens:**

```json
[
"def",
"__render_left_panel",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Rendering left blocks\"",
")",
"left_block",
"=",
"self",
".",
"left_panel",
"left_block",
".",
"render",
"(",
")",
"blank_space",
"=",
"self",
".",
"left_panel_width",
"-",
"left_block",
".",
"width",
"lines",
"=",
"[",
"]",
"pre_space",
"=",
"' '",
"*",
"int",
"(",
"blank_space",
"/",
"2",
")",
"if",
"not",
"left_block",
".",
"lines",
":",
"lines",
"=",
"[",
"(",
"''",
")",
",",
"(",
"self",
".",
"markup",
".",
"RED",
"+",
"'BROKEN LEFT PANEL'",
"+",
"self",
".",
"markup",
".",
"RESET",
")",
"]",
"else",
":",
"while",
"self",
".",
"left_panel",
".",
"lines",
":",
"src_line",
"=",
"self",
".",
"left_panel",
".",
"lines",
".",
"pop",
"(",
"0",
")",
"line",
"=",
"pre_space",
"+",
"self",
".",
"__truncate",
"(",
"src_line",
",",
"self",
".",
"left_panel_width",
")",
"post_space",
"=",
"' '",
"*",
"(",
"self",
".",
"left_panel_width",
"-",
"len",
"(",
"self",
".",
"markup",
".",
"clean_markup",
"(",
"line",
")",
")",
")",
"line",
"+=",
"post_space",
"+",
"self",
".",
"markup",
".",
"RESET",
"lines",
".",
"append",
"(",
"line",
")",
"return",
"lines"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `Screen.render_screen`
**docstring:** Main method to render screen view
**path:** yandextank/plugins/Console/screen.py
**code:**

```python
def render_screen(self):
''' Main method to render screen view '''
self.term_width, self.term_height = get_terminal_size()
self.log.debug(
"Terminal size: %sx%s", self.term_width, self.term_height)
self.right_panel_width = int(
(self.term_width - len(self.RIGHT_PANEL_SEPARATOR))
* (float(self.info_panel_percent) / 100)) - 1
if self.right_panel_width > 0:
self.left_panel_width = self.term_width - \
self.right_panel_width - len(self.RIGHT_PANEL_SEPARATOR) - 2
else:
self.right_panel_width = 0
self.left_panel_width = self.term_width - 1
self.log.debug(
"Left/right panels width: %s/%s", self.left_panel_width,
self.right_panel_width)
widget_output = []
if self.right_panel_width:
widget_output = []
self.log.debug("There are %d info widgets" % len(self.info_widgets))
for index, widget in sorted(
self.info_widgets.iteritems(),
key=lambda item: (item[1].get_index(), item[0])):
self.log.debug("Rendering info widget #%s: %s", index, widget)
widget_out = widget.render(self).strip()
if widget_out:
widget_output += widget_out.split("\n")
widget_output += [""]
left_lines = self.__render_left_panel()
self.log.debug("Composing final screen output")
output = []
for line_no in range(1, self.term_height):
line = " "
if line_no > 1 and left_lines:
left_line = left_lines.pop(0)
left_line_plain = self.markup.clean_markup(left_line)
left_line += (
' ' * (self.left_panel_width - len(left_line_plain)))
line += left_line
else:
line += ' ' * self.left_panel_width
if self.right_panel_width:
line += self.markup.RESET
line += self.markup.WHITE
line += self.RIGHT_PANEL_SEPARATOR
line += self.markup.RESET
right_line = self.__get_right_line(widget_output)
line += right_line
output.append(line)
return self.markup.new_line.join(output) + self.markup.new_line
```

**docstring_tokens:** ["Main", "method", "to", "render", "screen", "view"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Console/screen.py#L385-L441
**code_tokens:**

```json
[
"def",
"render_screen",
"(",
"self",
")",
":",
"self",
".",
"term_width",
",",
"self",
".",
"term_height",
"=",
"get_terminal_size",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Terminal size: %sx%s\"",
",",
"self",
".",
"term_width",
",",
"self",
".",
"term_height",
")",
"self",
".",
"right_panel_width",
"=",
"int",
"(",
"(",
"self",
".",
"term_width",
"-",
"len",
"(",
"self",
".",
"RIGHT_PANEL_SEPARATOR",
")",
")",
"*",
"(",
"float",
"(",
"self",
".",
"info_panel_percent",
")",
"/",
"100",
")",
")",
"-",
"1",
"if",
"self",
".",
"right_panel_width",
">",
"0",
":",
"self",
".",
"left_panel_width",
"=",
"self",
".",
"term_width",
"-",
"self",
".",
"right_panel_width",
"-",
"len",
"(",
"self",
".",
"RIGHT_PANEL_SEPARATOR",
")",
"-",
"2",
"else",
":",
"self",
".",
"right_panel_width",
"=",
"0",
"self",
".",
"left_panel_width",
"=",
"self",
".",
"term_width",
"-",
"1",
"self",
".",
"log",
".",
"debug",
"(",
"\"Left/right panels width: %s/%s\"",
",",
"self",
".",
"left_panel_width",
",",
"self",
".",
"right_panel_width",
")",
"widget_output",
"=",
"[",
"]",
"if",
"self",
".",
"right_panel_width",
":",
"widget_output",
"=",
"[",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"\"There are %d info widgets\"",
"%",
"len",
"(",
"self",
".",
"info_widgets",
")",
")",
"for",
"index",
",",
"widget",
"in",
"sorted",
"(",
"self",
".",
"info_widgets",
".",
"iteritems",
"(",
")",
",",
"key",
"=",
"lambda",
"item",
":",
"(",
"item",
"[",
"1",
"]",
".",
"get_index",
"(",
")",
",",
"item",
"[",
"0",
"]",
")",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Rendering info widget #%s: %s\"",
",",
"index",
",",
"widget",
")",
"widget_out",
"=",
"widget",
".",
"render",
"(",
"self",
")",
".",
"strip",
"(",
")",
"if",
"widget_out",
":",
"widget_output",
"+=",
"widget_out",
".",
"split",
"(",
"\"\\n\"",
")",
"widget_output",
"+=",
"[",
"\"\"",
"]",
"left_lines",
"=",
"self",
".",
"__render_left_panel",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Composing final screen output\"",
")",
"output",
"=",
"[",
"]",
"for",
"line_no",
"in",
"range",
"(",
"1",
",",
"self",
".",
"term_height",
")",
":",
"line",
"=",
"\" \"",
"if",
"line_no",
">",
"1",
"and",
"left_lines",
":",
"left_line",
"=",
"left_lines",
".",
"pop",
"(",
"0",
")",
"left_line_plain",
"=",
"self",
".",
"markup",
".",
"clean_markup",
"(",
"left_line",
")",
"left_line",
"+=",
"(",
"' '",
"*",
"(",
"self",
".",
"left_panel_width",
"-",
"len",
"(",
"left_line_plain",
")",
")",
")",
"line",
"+=",
"left_line",
"else",
":",
"line",
"+=",
"' '",
"*",
"self",
".",
"left_panel_width",
"if",
"self",
".",
"right_panel_width",
":",
"line",
"+=",
"self",
".",
"markup",
".",
"RESET",
"line",
"+=",
"self",
".",
"markup",
".",
"WHITE",
"line",
"+=",
"self",
".",
"RIGHT_PANEL_SEPARATOR",
"line",
"+=",
"self",
".",
"markup",
".",
"RESET",
"right_line",
"=",
"self",
".",
"__get_right_line",
"(",
"widget_output",
")",
"line",
"+=",
"right_line",
"output",
".",
"append",
"(",
"line",
")",
"return",
"self",
".",
"markup",
".",
"new_line",
".",
"join",
"(",
"output",
")",
"+",
"self",
".",
"markup",
".",
"new_line"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `Screen.add_info_widget`
**docstring:** Add widget string to right panel of the screen
**path:** yandextank/plugins/Console/screen.py
**code:**

```python
def add_info_widget(self, widget):
'''
Add widget string to right panel of the screen
'''
index = widget.get_index()
while index in self.info_widgets.keys():
index += 1
self.info_widgets[widget.get_index()] = widget
```

**docstring_tokens:** ["Add", "widget", "string", "to", "right", "panel", "of", "the", "screen"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Console/screen.py#L443-L450
**code_tokens:**

```json
[
"def",
"add_info_widget",
"(",
"self",
",",
"widget",
")",
":",
"index",
"=",
"widget",
".",
"get_index",
"(",
")",
"while",
"index",
"in",
"self",
".",
"info_widgets",
".",
"keys",
"(",
")",
":",
"index",
"+=",
"1",
"self",
".",
"info_widgets",
"[",
"widget",
".",
"get_index",
"(",
")",
"]",
"=",
"widget"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `AbstractBlock.fill_rectangle`
**docstring:** Right-pad lines of block to equal width
**path:** yandextank/plugins/Console/screen.py
**code:**

```python
def fill_rectangle(self, prepared):
''' Right-pad lines of block to equal width '''
result = []
width = max([self.clean_len(line) for line in prepared])
for line in prepared:
spacer = ' ' * (width - self.clean_len(line))
result.append(line + (self.screen.markup.RESET, spacer))
return (width, result)
```

**docstring_tokens:** ["Right", "-", "pad", "lines", "of", "block", "to", "equal", "width"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Console/screen.py#L476-L483
**code_tokens:**

```json
[
"def",
"fill_rectangle",
"(",
"self",
",",
"prepared",
")",
":",
"result",
"=",
"[",
"]",
"width",
"=",
"max",
"(",
"[",
"self",
".",
"clean_len",
"(",
"line",
")",
"for",
"line",
"in",
"prepared",
"]",
")",
"for",
"line",
"in",
"prepared",
":",
"spacer",
"=",
"' '",
"*",
"(",
"width",
"-",
"self",
".",
"clean_len",
"(",
"line",
")",
")",
"result",
".",
"append",
"(",
"line",
"+",
"(",
"self",
".",
"screen",
".",
"markup",
".",
"RESET",
",",
"spacer",
")",
")",
"return",
"(",
"width",
",",
"result",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `AbstractBlock.clean_len`
**docstring:** Calculate wisible length of string
**path:** yandextank/plugins/Console/screen.py
**code:**

```python
def clean_len(self, line):
''' Calculate wisible length of string '''
if isinstance(line, basestring):
return len(self.screen.markup.clean_markup(line))
elif isinstance(line, tuple) or isinstance(line, list):
markups = self.screen.markup.get_markup_vars()
length = 0
for i in line:
if i not in markups:
length += len(i)
return length
```

**docstring_tokens:** ["Calculate", "wisible", "length", "of", "string"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Console/screen.py#L485-L495
**code_tokens:**

```json
[
"def",
"clean_len",
"(",
"self",
",",
"line",
")",
":",
"if",
"isinstance",
"(",
"line",
",",
"basestring",
")",
":",
"return",
"len",
"(",
"self",
".",
"screen",
".",
"markup",
".",
"clean_markup",
"(",
"line",
")",
")",
"elif",
"isinstance",
"(",
"line",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"line",
",",
"list",
")",
":",
"markups",
"=",
"self",
".",
"screen",
".",
"markup",
".",
"get_markup_vars",
"(",
")",
"length",
"=",
"0",
"for",
"i",
"in",
"line",
":",
"if",
"i",
"not",
"in",
"markups",
":",
"length",
"+=",
"len",
"(",
"i",
")",
"return",
"length"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
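
To make the visible-length idea above concrete: a standalone sketch (not Yandex.Tank's own API; the ANSI constants are stand-ins for `RealConsoleMarkup`'s attributes) that strips markup before measuring, the way `clean_len` and `clean_markup` do:

```python
# Stand-in markup constants; RealConsoleMarkup defines its own set.
RED = '\033[31m'
RESET = '\033[0m'
MARKUP_VARS = (RED, RESET)

def clean_markup(s):
    # Remove every markup code so only printable characters remain.
    for code in MARKUP_VARS:
        s = s.replace(code, '')
    return s

line = RED + 'BROKEN LEFT PANEL' + RESET
print(len(line))                # raw length, escape codes included
print(len(clean_markup(line)))  # 17 -- the width actually drawn on screen
```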

---

**partition:** test
**func_name:** `create`
**docstring:**

```text
Creates load plan timestamps generator
>>> from util import take
>>> take(7, LoadPlanBuilder().ramp(5, 4000).create())
[0, 1000, 2000, 3000, 4000, 0, 0]
>>> take(7, create(['ramp(5, 4s)']))
[0, 1000, 2000, 3000, 4000, 0, 0]
>>> take(12, create(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)']))
[0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0]
>>> take(7, create(['wait(5s)', 'ramp(5, 0)']))
[5000, 5000, 5000, 5000, 5000, 0, 0]
>>> take(7, create([]))
[0, 0, 0, 0, 0, 0, 0]
>>> take(12, create(['line(1, 9, 4s)']))
[0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0]
>>> take(12, create(['const(3, 5s)', 'line(7, 11, 2s)']))
[0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0]
>>> take(12, create(['step(2, 10, 2, 3s)']))
[0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0]
>>> take(12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps)
[(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)]
>>> take(12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps)
[(100, 30), (200, 30), (300, 30), (400, 30), (500, 30), (600, 30), (700, 30), (800, 30), (900, 30), (950, 30)]
>>> LoadPlanBuilder().stairway(100, 950, 100, 30000).instances
950
>>> LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).instances
10
>>> LoadPlanBuilder().line(1, 100, 60000).instances
100
```

**path:** yandextank/stepper/instance_plan.py
**code:**

```python
def create(instances_schedule):
'''
Creates load plan timestamps generator
>>> from util import take
>>> take(7, LoadPlanBuilder().ramp(5, 4000).create())
[0, 1000, 2000, 3000, 4000, 0, 0]
>>> take(7, create(['ramp(5, 4s)']))
[0, 1000, 2000, 3000, 4000, 0, 0]
>>> take(12, create(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)']))
[0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0]
>>> take(7, create(['wait(5s)', 'ramp(5, 0)']))
[5000, 5000, 5000, 5000, 5000, 0, 0]
>>> take(7, create([]))
[0, 0, 0, 0, 0, 0, 0]
>>> take(12, create(['line(1, 9, 4s)']))
[0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0]
>>> take(12, create(['const(3, 5s)', 'line(7, 11, 2s)']))
[0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0]
>>> take(12, create(['step(2, 10, 2, 3s)']))
[0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0]
>>> take(12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps)
[(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)]
>>> take(12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps)
[(100, 30), (200, 30), (300, 30), (400, 30), (500, 30), (600, 30), (700, 30), (800, 30), (900, 30), (950, 30)]
>>> LoadPlanBuilder().stairway(100, 950, 100, 30000).instances
950
>>> LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).instances
10
>>> LoadPlanBuilder().line(1, 100, 60000).instances
100
'''
lpb = LoadPlanBuilder().add_all_steps(instances_schedule)
lp = lpb.create()
info.status.publish('duration', 0)
# info.status.publish('steps', lpb.steps)
info.status.publish('steps', [])
info.status.publish('instances', lpb.instances)
return lp
```

**docstring_tokens:** ["Creates", "load", "plan", "timestamps", "generator"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/stepper/instance_plan.py#L182-L233
**code_tokens:**

```json
[
"def",
"create",
"(",
"instances_schedule",
")",
":",
"lpb",
"=",
"LoadPlanBuilder",
"(",
")",
".",
"add_all_steps",
"(",
"instances_schedule",
")",
"lp",
"=",
"lpb",
".",
"create",
"(",
")",
"info",
".",
"status",
".",
"publish",
"(",
"'duration'",
",",
"0",
")",
"# info.status.publish('steps', lpb.steps)",
"info",
".",
"status",
".",
"publish",
"(",
"'steps'",
",",
"[",
"]",
")",
"info",
".",
"status",
".",
"publish",
"(",
"'instances'",
",",
"lpb",
".",
"instances",
")",
"return",
"lp"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TotalHTTPCodesCriterion.get_level_str`
**docstring:** format level str
**path:** yandextank/plugins/Autostop/cumulative_criterions.py
**code:**

```python
def get_level_str(self):
''' format level str '''
if self.is_relative:
level_str = str(self.level) + "%"
else:
level_str = self.level
return level_str
```

**docstring_tokens:** ["format", "level", "str"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Autostop/cumulative_criterions.py#L205-L211
**code_tokens:**

```json
[
"def",
"get_level_str",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_relative",
":",
"level_str",
"=",
"str",
"(",
"self",
".",
"level",
")",
"+",
"\"%\"",
"else",
":",
"level_str",
"=",
"self",
".",
"level",
"return",
"level_str"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TotalHTTPTrendCriterion.calc_measurement_error`
**docstring:**
formula for measurement error
`sqrt ( (sum(1, n, (k_i - <k>)**2) / (n*(n-1)))`
**path:** yandextank/plugins/Autostop/cumulative_criterions.py
**code:**

```python
def calc_measurement_error(self, tangents):
'''
formula for measurement error
sqrt ( (sum(1, n, (k_i - <k>)**2) / (n*(n-1)))
'''
if len(tangents) < 2:
return 0.0
avg_tan = float(sum(tangents) / len(tangents))
numerator = float()
for i in tangents:
numerator += (i - avg_tan) * (i - avg_tan)
return math.sqrt(numerator / len(tangents) / (len(tangents) - 1))
```

**docstring_tokens:** ["formula", "for", "measurement", "error", "sqrt", "(", "(", "sum", "(", "1", "n", "(", "k_i", "-", "<k", ">", ")", "**", "2", ")", "/", "(", "n", "*", "(", "n", "-", "1", ")))"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Autostop/cumulative_criterions.py#L652-L666
**code_tokens:**

```json
[
"def",
"calc_measurement_error",
"(",
"self",
",",
"tangents",
")",
":",
"if",
"len",
"(",
"tangents",
")",
"<",
"2",
":",
"return",
"0.0",
"avg_tan",
"=",
"float",
"(",
"sum",
"(",
"tangents",
")",
"/",
"len",
"(",
"tangents",
")",
")",
"numerator",
"=",
"float",
"(",
")",
"for",
"i",
"in",
"tangents",
":",
"numerator",
"+=",
"(",
"i",
"-",
"avg_tan",
")",
"*",
"(",
"i",
"-",
"avg_tan",
")",
"return",
"math",
".",
"sqrt",
"(",
"numerator",
"/",
"len",
"(",
"tangents",
")",
"/",
"(",
"len",
"(",
"tangents",
")",
"-",
"1",
")",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
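
Written out in standard notation, the quantity `calc_measurement_error` returns is the standard error of the mean of the tangents:

```latex
\Delta k = \sqrt{\frac{\sum_{i=1}^{n} \left(k_i - \langle k \rangle\right)^2}{n\,(n-1)}}
```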

---

**partition:** test
**func_name:** `Plugin.add_info_widget`
**docstring:** add right panel widget
**path:** yandextank/plugins/Console/plugin.py
**code:**

```python
def add_info_widget(self, widget):
''' add right panel widget '''
if not self.screen:
self.log.debug("No screen instance to add widget")
else:
self.screen.add_info_widget(widget)
```

**docstring_tokens:** ["add", "right", "panel", "widget"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Console/plugin.py#L119-L124
**code_tokens:**

```json
[
"def",
"add_info_widget",
"(",
"self",
",",
"widget",
")",
":",
"if",
"not",
"self",
".",
"screen",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"No screen instance to add widget\"",
")",
"else",
":",
"self",
".",
"screen",
".",
"add_info_widget",
"(",
"widget",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `RealConsoleMarkup.clean_markup`
**docstring:** clean markup from string
**path:** yandextank/plugins/Console/plugin.py
**code:**

```python
def clean_markup(self, orig_str):
''' clean markup from string '''
for val in self.get_markup_vars():
orig_str = orig_str.replace(val, '')
return orig_str
```

**docstring_tokens:** ["clean", "markup", "from", "string"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Console/plugin.py#L159-L163
**code_tokens:**

```json
[
"def",
"clean_markup",
"(",
"self",
",",
"orig_str",
")",
":",
"for",
"val",
"in",
"self",
".",
"get_markup_vars",
"(",
")",
":",
"orig_str",
"=",
"orig_str",
".",
"replace",
"(",
"val",
",",
"''",
")",
"return",
"orig_str"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `APIClient.__make_writer_request`
**docstring:** Send request to writer service.
**path:** yandextank/plugins/DataUploader/client.py
**code:**

```python
def __make_writer_request(
self,
params=None,
json=None,
http_method="POST",
trace=False):
'''
Send request to writer service.
'''
request = requests.Request(
http_method,
self.writer_url,
params=params,
json=json,
headers={
'User-Agent': self.user_agent})
ids = id_gen(str(uuid.uuid4()))
network_timeouts = self.network_timeouts()
maintenance_timeouts = self.maintenance_timeouts()
while True:
try:
response = self.__send_single_request(request, ids.next(), trace=trace)
return response
except (Timeout, ConnectionError, ProtocolError):
logger.warn(traceback.format_exc())
try:
timeout = next(network_timeouts)
logger.warn(
"Network error, will retry in %ss..." %
timeout)
time.sleep(timeout)
continue
except StopIteration:
raise self.NetworkError()
except self.UnderMaintenance as e:
try:
timeout = next(maintenance_timeouts)
logger.warn(
"Writer is under maintenance, will retry in %ss..." %
timeout)
time.sleep(timeout)
continue
except StopIteration:
raise e
```

**docstring_tokens:** ["Send", "request", "to", "writer", "service", "."]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/DataUploader/client.py#L221-L264
**code_tokens:**

```json
[
"def",
"__make_writer_request",
"(",
"self",
",",
"params",
"=",
"None",
",",
"json",
"=",
"None",
",",
"http_method",
"=",
"\"POST\"",
",",
"trace",
"=",
"False",
")",
":",
"request",
"=",
"requests",
".",
"Request",
"(",
"http_method",
",",
"self",
".",
"writer_url",
",",
"params",
"=",
"params",
",",
"json",
"=",
"json",
",",
"headers",
"=",
"{",
"'User-Agent'",
":",
"self",
".",
"user_agent",
"}",
")",
"ids",
"=",
"id_gen",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"network_timeouts",
"=",
"self",
".",
"network_timeouts",
"(",
")",
"maintenance_timeouts",
"=",
"self",
".",
"maintenance_timeouts",
"(",
")",
"while",
"True",
":",
"try",
":",
"response",
"=",
"self",
".",
"__send_single_request",
"(",
"request",
",",
"ids",
".",
"next",
"(",
")",
",",
"trace",
"=",
"trace",
")",
"return",
"response",
"except",
"(",
"Timeout",
",",
"ConnectionError",
",",
"ProtocolError",
")",
":",
"logger",
".",
"warn",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"try",
":",
"timeout",
"=",
"next",
"(",
"network_timeouts",
")",
"logger",
".",
"warn",
"(",
"\"Network error, will retry in %ss...\"",
"%",
"timeout",
")",
"time",
".",
"sleep",
"(",
"timeout",
")",
"continue",
"except",
"StopIteration",
":",
"raise",
"self",
".",
"NetworkError",
"(",
")",
"except",
"self",
".",
"UnderMaintenance",
"as",
"e",
":",
"try",
":",
"timeout",
"=",
"next",
"(",
"maintenance_timeouts",
")",
"logger",
".",
"warn",
"(",
"\"Writer is under maintenance, will retry in %ss...\"",
"%",
"timeout",
")",
"time",
".",
"sleep",
"(",
"timeout",
")",
"continue",
"except",
"StopIteration",
":",
"raise",
"e"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
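
The retry loop above draws successive delays from a generator and escalates once the generator is exhausted, because `next()` then raises `StopIteration`. A minimal sketch of that pattern (the delay values are invented for illustration):

```python
import time

def network_timeouts():
    # Invented schedule; the real generator's delays are not shown here.
    for t in (0.1, 0.2, 0.4):
        yield t

timeouts = network_timeouts()
while True:
    try:
        raise ConnectionError("simulated network failure")
    except ConnectionError:
        try:
            delay = next(timeouts)  # StopIteration once delays run out
        except StopIteration:
            print("giving up")
            break
        print("retrying in %ss..." % delay)
        time.sleep(delay)
```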

---

**partition:** test
**func_name:** `APIClient.new_job`
**docstring:**
:return: job_nr, upload_token
:rtype: tuple
**path:** yandextank/plugins/DataUploader/client.py
**code:**

```python
def new_job(
self,
task,
person,
tank,
target_host,
target_port,
loadscheme=None,
detailed_time=None,
notify_list=None,
trace=False):
"""
:return: job_nr, upload_token
:rtype: tuple
"""
if not notify_list:
notify_list = []
data = {
'task': task,
'person': person,
'tank': tank,
'host': target_host,
'port': target_port,
'loadscheme': loadscheme,
'detailed_time': detailed_time,
'notify': notify_list
}
logger.debug("Job create request: %s", data)
api_timeouts = self.api_timeouts()
while True:
try:
response = self.__post(
"api/job/create.json", data, trace=trace)[0]
# [{"upload_token": "1864a3b2547d40f19b5012eb038be6f6", "job": 904317}]
return response['job'], response['upload_token']
except (self.NotAvailable, self.StoppedFromOnline) as e:
try:
timeout = next(api_timeouts)
logger.warn("API error, will retry in %ss..." % timeout)
time.sleep(timeout)
continue
except StopIteration:
logger.warn('Failed to create job on lunapark')
raise self.JobNotCreated(e.message)
except requests.HTTPError as e:
raise self.JobNotCreated('Failed to create job on lunapark\n{}'.format(e.response.content))
except Exception as e:
logger.warn('Failed to create job on lunapark')
logger.warn(repr(e), )
raise self.JobNotCreated()
```

**docstring_tokens:** [":", "return", ":", "job_nr", "upload_token", ":", "rtype", ":", "tuple"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/DataUploader/client.py#L308-L358
**code_tokens:**

```json
[
"def",
"new_job",
"(",
"self",
",",
"task",
",",
"person",
",",
"tank",
",",
"target_host",
",",
"target_port",
",",
"loadscheme",
"=",
"None",
",",
"detailed_time",
"=",
"None",
",",
"notify_list",
"=",
"None",
",",
"trace",
"=",
"False",
")",
":",
"if",
"not",
"notify_list",
":",
"notify_list",
"=",
"[",
"]",
"data",
"=",
"{",
"'task'",
":",
"task",
",",
"'person'",
":",
"person",
",",
"'tank'",
":",
"tank",
",",
"'host'",
":",
"target_host",
",",
"'port'",
":",
"target_port",
",",
"'loadscheme'",
":",
"loadscheme",
",",
"'detailed_time'",
":",
"detailed_time",
",",
"'notify'",
":",
"notify_list",
"}",
"logger",
".",
"debug",
"(",
"\"Job create request: %s\"",
",",
"data",
")",
"api_timeouts",
"=",
"self",
".",
"api_timeouts",
"(",
")",
"while",
"True",
":",
"try",
":",
"response",
"=",
"self",
".",
"__post",
"(",
"\"api/job/create.json\"",
",",
"data",
",",
"trace",
"=",
"trace",
")",
"[",
"0",
"]",
"# [{\"upload_token\": \"1864a3b2547d40f19b5012eb038be6f6\", \"job\": 904317}]",
"return",
"response",
"[",
"'job'",
"]",
",",
"response",
"[",
"'upload_token'",
"]",
"except",
"(",
"self",
".",
"NotAvailable",
",",
"self",
".",
"StoppedFromOnline",
")",
"as",
"e",
":",
"try",
":",
"timeout",
"=",
"next",
"(",
"api_timeouts",
")",
"logger",
".",
"warn",
"(",
"\"API error, will retry in %ss...\"",
"%",
"timeout",
")",
"time",
".",
"sleep",
"(",
"timeout",
")",
"continue",
"except",
"StopIteration",
":",
"logger",
".",
"warn",
"(",
"'Failed to create job on lunapark'",
")",
"raise",
"self",
".",
"JobNotCreated",
"(",
"e",
".",
"message",
")",
"except",
"requests",
".",
"HTTPError",
"as",
"e",
":",
"raise",
"self",
".",
"JobNotCreated",
"(",
"'Failed to create job on lunapark\\n{}'",
".",
"format",
"(",
"e",
".",
"response",
".",
"content",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"warn",
"(",
"'Failed to create job on lunapark'",
")",
"logger",
".",
"warn",
"(",
"repr",
"(",
"e",
")",
",",
")",
"raise",
"self",
".",
"JobNotCreated",
"(",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.plugins`
**docstring:**
:returns: {plugin_name: plugin_class, ...}
:rtype: dict
**path:** yandextank/core/tankcore.py
**code:**

```python
def plugins(self):
"""
:returns: {plugin_name: plugin_class, ...}
:rtype: dict
"""
if self._plugins is None:
self.load_plugins()
if self._plugins is None:
self._plugins = {}
return self._plugins
```

**docstring_tokens:** [":", "returns", ":", "{", "plugin_name", ":", "plugin_class", "...", "}", ":", "rtype", ":", "dict"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L162-L171
**code_tokens:**

```json
[
"def",
"plugins",
"(",
"self",
")",
":",
"if",
"self",
".",
"_plugins",
"is",
"None",
":",
"self",
".",
"load_plugins",
"(",
")",
"if",
"self",
".",
"_plugins",
"is",
"None",
":",
"self",
".",
"_plugins",
"=",
"{",
"}",
"return",
"self",
".",
"_plugins"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.load_plugins`
**docstring:** Tells core to take plugin options and instantiate plugin classes
**path:** yandextank/core/tankcore.py
**code:**

```python
def load_plugins(self):
"""
Tells core to take plugin options and instantiate plugin classes
"""
logger.info("Loading plugins...")
for (plugin_name, plugin_path, plugin_cfg) in self.config.plugins:
logger.debug("Loading plugin %s from %s", plugin_name, plugin_path)
if plugin_path == "yandextank.plugins.Overload":
logger.warning(
"Deprecated plugin name: 'yandextank.plugins.Overload'\n"
"There is a new generic plugin now.\n"
"Correcting to 'yandextank.plugins.DataUploader overload'")
plugin_path = "yandextank.plugins.DataUploader overload"
try:
plugin = il.import_module(plugin_path)
except ImportError:
logger.warning('Plugin name %s path %s import error', plugin_name, plugin_path)
logger.debug('Plugin name %s path %s import error', plugin_name, plugin_path, exc_info=True)
raise
try:
instance = getattr(plugin, 'Plugin')(self, cfg=plugin_cfg, name=plugin_name)
except AttributeError:
logger.warning('Plugin %s classname should be `Plugin`', plugin_name)
raise
else:
self.register_plugin(self.PLUGIN_PREFIX + plugin_name, instance)
logger.debug("Plugin instances: %s", self._plugins)
```

**docstring_tokens:** ["Tells", "core", "to", "take", "plugin", "options", "and", "instantiate", "plugin", "classes"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L186-L212
**code_tokens:**

```json
[
"def",
"load_plugins",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Loading plugins...\"",
")",
"for",
"(",
"plugin_name",
",",
"plugin_path",
",",
"plugin_cfg",
")",
"in",
"self",
".",
"config",
".",
"plugins",
":",
"logger",
".",
"debug",
"(",
"\"Loading plugin %s from %s\"",
",",
"plugin_name",
",",
"plugin_path",
")",
"if",
"plugin_path",
"==",
"\"yandextank.plugins.Overload\"",
":",
"logger",
".",
"warning",
"(",
"\"Deprecated plugin name: 'yandextank.plugins.Overload'\\n\"",
"\"There is a new generic plugin now.\\n\"",
"\"Correcting to 'yandextank.plugins.DataUploader overload'\"",
")",
"plugin_path",
"=",
"\"yandextank.plugins.DataUploader overload\"",
"try",
":",
"plugin",
"=",
"il",
".",
"import_module",
"(",
"plugin_path",
")",
"except",
"ImportError",
":",
"logger",
".",
"warning",
"(",
"'Plugin name %s path %s import error'",
",",
"plugin_name",
",",
"plugin_path",
")",
"logger",
".",
"debug",
"(",
"'Plugin name %s path %s import error'",
",",
"plugin_name",
",",
"plugin_path",
",",
"exc_info",
"=",
"True",
")",
"raise",
"try",
":",
"instance",
"=",
"getattr",
"(",
"plugin",
",",
"'Plugin'",
")",
"(",
"self",
",",
"cfg",
"=",
"plugin_cfg",
",",
"name",
"=",
"plugin_name",
")",
"except",
"AttributeError",
":",
"logger",
".",
"warning",
"(",
"'Plugin %s classname should be `Plugin`'",
",",
"plugin_name",
")",
"raise",
"else",
":",
"self",
".",
"register_plugin",
"(",
"self",
".",
"PLUGIN_PREFIX",
"+",
"plugin_name",
",",
"instance",
")",
"logger",
".",
"debug",
"(",
"\"Plugin instances: %s\"",
",",
"self",
".",
"_plugins",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.plugins_configure`
**docstring:** Call configure() on all plugins
**path:** yandextank/core/tankcore.py
**code:**

```python
def plugins_configure(self):
""" Call configure() on all plugins """
self.publish("core", "stage", "configure")
logger.info("Configuring plugins...")
self.taskset_affinity = self.get_option(self.SECTION, 'affinity')
if self.taskset_affinity:
self.__setup_taskset(self.taskset_affinity, pid=os.getpid())
for plugin in self.plugins.values():
if not self.interrupted.is_set():
logger.debug("Configuring %s", plugin)
plugin.configure()
```

**docstring_tokens:** ["Call", "configure", "()", "on", "all", "plugins"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L233-L245
**code_tokens:**

```json
[
"def",
"plugins_configure",
"(",
"self",
")",
":",
"self",
".",
"publish",
"(",
"\"core\"",
",",
"\"stage\"",
",",
"\"configure\"",
")",
"logger",
".",
"info",
"(",
"\"Configuring plugins...\"",
")",
"self",
".",
"taskset_affinity",
"=",
"self",
".",
"get_option",
"(",
"self",
".",
"SECTION",
",",
"'affinity'",
")",
"if",
"self",
".",
"taskset_affinity",
":",
"self",
".",
"__setup_taskset",
"(",
"self",
".",
"taskset_affinity",
",",
"pid",
"=",
"os",
".",
"getpid",
"(",
")",
")",
"for",
"plugin",
"in",
"self",
".",
"plugins",
".",
"values",
"(",
")",
":",
"if",
"not",
"self",
".",
"interrupted",
".",
"is_set",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Configuring %s\"",
",",
"plugin",
")",
"plugin",
".",
"configure",
"(",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.wait_for_finish`
**docstring:** Call is_test_finished() on all plugins 'till one of them initiates exit
**path:** yandextank/core/tankcore.py
**code:**

```python
def wait_for_finish(self):
"""
Call is_test_finished() on all plugins 'till one of them initiates exit
"""
if not self.interrupted.is_set():
logger.info("Waiting for test to finish...")
logger.info('Artifacts dir: {dir}'.format(dir=self.artifacts_dir))
self.publish("core", "stage", "shoot")
if not self.plugins:
raise RuntimeError("It's strange: we have no plugins loaded...")
while not self.interrupted.is_set():
begin_time = time.time()
aggr_retcode = self.job.aggregator.is_test_finished()
if aggr_retcode >= 0:
return aggr_retcode
for plugin in self.plugins.values():
logger.debug("Polling %s", plugin)
retcode = plugin.is_test_finished()
if retcode >= 0:
return retcode
end_time = time.time()
diff = end_time - begin_time
logger.debug("Polling took %s", diff)
logger.debug("Tank status: %s", json.dumps(self.status))
# screen refresh every 0.5 s
if diff < 0.5:
time.sleep(0.5 - diff)
return 1
```

**docstring_tokens:** ["Call", "is_test_finished", "()", "on", "all", "plugins", "till", "one", "of", "them", "initiates", "exit"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L269-L297
**code_tokens:**

```json
[
"def",
"wait_for_finish",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"interrupted",
".",
"is_set",
"(",
")",
":",
"logger",
".",
"info",
"(",
"\"Waiting for test to finish...\"",
")",
"logger",
".",
"info",
"(",
"'Artifacts dir: {dir}'",
".",
"format",
"(",
"dir",
"=",
"self",
".",
"artifacts_dir",
")",
")",
"self",
".",
"publish",
"(",
"\"core\"",
",",
"\"stage\"",
",",
"\"shoot\"",
")",
"if",
"not",
"self",
".",
"plugins",
":",
"raise",
"RuntimeError",
"(",
"\"It's strange: we have no plugins loaded...\"",
")",
"while",
"not",
"self",
".",
"interrupted",
".",
"is_set",
"(",
")",
":",
"begin_time",
"=",
"time",
".",
"time",
"(",
")",
"aggr_retcode",
"=",
"self",
".",
"job",
".",
"aggregator",
".",
"is_test_finished",
"(",
")",
"if",
"aggr_retcode",
">=",
"0",
":",
"return",
"aggr_retcode",
"for",
"plugin",
"in",
"self",
".",
"plugins",
".",
"values",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Polling %s\"",
",",
"plugin",
")",
"retcode",
"=",
"plugin",
".",
"is_test_finished",
"(",
")",
"if",
"retcode",
">=",
"0",
":",
"return",
"retcode",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"diff",
"=",
"end_time",
"-",
"begin_time",
"logger",
".",
"debug",
"(",
"\"Polling took %s\"",
",",
"diff",
")",
"logger",
".",
"debug",
"(",
"\"Tank status: %s\"",
",",
"json",
".",
"dumps",
"(",
"self",
".",
"status",
")",
")",
"# screen refresh every 0.5 s",
"if",
"diff",
"<",
"0.5",
":",
"time",
".",
"sleep",
"(",
"0.5",
"-",
"diff",
")",
"return",
"1"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.plugins_post_process`
**docstring:** Call post_process() on all plugins
**path:** yandextank/core/tankcore.py
**code:**

```python
def plugins_post_process(self, retcode):
"""
Call post_process() on all plugins
"""
logger.info("Post-processing test...")
self.publish("core", "stage", "post_process")
for plugin in self.plugins.values():
logger.debug("Post-process %s", plugin)
try:
logger.debug("RC before: %s", retcode)
retcode = plugin.post_process(retcode)
logger.debug("RC after: %s", retcode)
except Exception: # FIXME too broad exception clause
logger.error("Failed post-processing plugin %s", plugin, exc_info=True)
if not retcode:
retcode = 1
return retcode
```

**docstring_tokens:** ["Call", "post_process", "()", "on", "all", "plugins"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L326-L342
**code_tokens:**

```json
[
"def",
"plugins_post_process",
"(",
"self",
",",
"retcode",
")",
":",
"logger",
".",
"info",
"(",
"\"Post-processing test...\"",
")",
"self",
".",
"publish",
"(",
"\"core\"",
",",
"\"stage\"",
",",
"\"post_process\"",
")",
"for",
"plugin",
"in",
"self",
".",
"plugins",
".",
"values",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Post-process %s\"",
",",
"plugin",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"\"RC before: %s\"",
",",
"retcode",
")",
"retcode",
"=",
"plugin",
".",
"post_process",
"(",
"retcode",
")",
"logger",
".",
"debug",
"(",
"\"RC after: %s\"",
",",
"retcode",
")",
"except",
"Exception",
":",
"# FIXME too broad exception clause",
"logger",
".",
"error",
"(",
"\"Failed post-processing plugin %s\"",
",",
"plugin",
",",
"exc_info",
"=",
"True",
")",
"if",
"not",
"retcode",
":",
"retcode",
"=",
"1",
"return",
"retcode"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.__setup_taskset`
**docstring:**
if pid specified: set process w/ pid `pid` CPU affinity to specified `affinity` core(s)
if args specified: modify list of args for Popen to start w/ taskset w/ affinity `affinity`
**path:** yandextank/core/tankcore.py
**code:**

```python
def __setup_taskset(self, affinity, pid=None, args=None):
""" if pid specified: set process w/ pid `pid` CPU affinity to specified `affinity` core(s)
if args specified: modify list of args for Popen to start w/ taskset w/ affinity `affinity`
"""
self.taskset_path = self.get_option(self.SECTION, 'taskset_path')
if args:
return [self.taskset_path, '-c', affinity] + args
if pid:
args = "%s -pc %s %s" % (self.taskset_path, affinity, pid)
retcode, stdout, stderr = execute(args, shell=True, poll_period=0.1, catch_out=True)
logger.debug('taskset for pid %s stdout: %s', pid, stdout)
if retcode == 0:
logger.info("Enabled taskset for pid %s with affinity %s", str(pid), affinity)
else:
logger.debug('Taskset setup failed w/ retcode :%s', retcode)
raise KeyError(stderr)
```

**docstring_tokens:** ["if", "pid", "specified", ":", "set", "process", "w", "/", "pid", "pid", "CPU", "affinity", "to", "specified", "affinity", "core", "(", "s", ")", "if", "args", "specified", ":", "modify", "list", "of", "args", "for", "Popen", "to", "start", "w", "/", "taskset", "w", "/", "affinity", "affinity"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L347-L364
**code_tokens:**

```json
[
"def",
"__setup_taskset",
"(",
"self",
",",
"affinity",
",",
"pid",
"=",
"None",
",",
"args",
"=",
"None",
")",
":",
"self",
".",
"taskset_path",
"=",
"self",
".",
"get_option",
"(",
"self",
".",
"SECTION",
",",
"'taskset_path'",
")",
"if",
"args",
":",
"return",
"[",
"self",
".",
"taskset_path",
",",
"'-c'",
",",
"affinity",
"]",
"+",
"args",
"if",
"pid",
":",
"args",
"=",
"\"%s -pc %s %s\"",
"%",
"(",
"self",
".",
"taskset_path",
",",
"affinity",
",",
"pid",
")",
"retcode",
",",
"stdout",
",",
"stderr",
"=",
"execute",
"(",
"args",
",",
"shell",
"=",
"True",
",",
"poll_period",
"=",
"0.1",
",",
"catch_out",
"=",
"True",
")",
"logger",
".",
"debug",
"(",
"'taskset for pid %s stdout: %s'",
",",
"pid",
",",
"stdout",
")",
"if",
"retcode",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"\"Enabled taskset for pid %s with affinity %s\"",
",",
"str",
"(",
"pid",
")",
",",
"affinity",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Taskset setup failed w/ retcode :%s'",
",",
"retcode",
")",
"raise",
"KeyError",
"(",
"stderr",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
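
For the `pid` branch above, the command string handed to `execute` is built with a plain format string. A toy illustration (the `taskset` path, affinity mask, and pid are made-up values):

```python
taskset_path, affinity, pid = '/usr/bin/taskset', '0-3', 12345

# Same format string as in __setup_taskset above.
args = "%s -pc %s %s" % (taskset_path, affinity, pid)
print(args)  # /usr/bin/taskset -pc 0-3 12345
```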

---

**partition:** test
**func_name:** `TankCore.get_plugin_of_type`
**docstring:** Retrieve a plugin of desired class, KeyError raised otherwise
**path:** yandextank/core/tankcore.py
**code:**

```python
def get_plugin_of_type(self, plugin_class):
"""
Retrieve a plugin of desired class, KeyError raised otherwise
"""
logger.debug("Searching for plugin: %s", plugin_class)
matches = [plugin for plugin in self.plugins.values() if isinstance(plugin, plugin_class)]
if matches:
if len(matches) > 1:
logger.debug(
"More then one plugin of type %s found. Using first one.",
plugin_class)
return matches[-1]
else:
raise KeyError("Requested plugin type not found: %s" % plugin_class)
```

**docstring_tokens:** ["Retrieve", "a", "plugin", "of", "desired", "class", "KeyError", "raised", "otherwise"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L387-L400
**code_tokens:**

```json
[
"def",
"get_plugin_of_type",
"(",
"self",
",",
"plugin_class",
")",
":",
"logger",
".",
"debug",
"(",
"\"Searching for plugin: %s\"",
",",
"plugin_class",
")",
"matches",
"=",
"[",
"plugin",
"for",
"plugin",
"in",
"self",
".",
"plugins",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"plugin",
",",
"plugin_class",
")",
"]",
"if",
"matches",
":",
"if",
"len",
"(",
"matches",
")",
">",
"1",
":",
"logger",
".",
"debug",
"(",
"\"More then one plugin of type %s found. Using first one.\"",
",",
"plugin_class",
")",
"return",
"matches",
"[",
"-",
"1",
"]",
"else",
":",
"raise",
"KeyError",
"(",
"\"Requested plugin type not found: %s\"",
"%",
"plugin_class",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.get_plugins_of_type`
**docstring:** Retrieve a list of plugins of desired class, KeyError raised otherwise
**path:** yandextank/core/tankcore.py
**code:**

```python
def get_plugins_of_type(self, plugin_class):
"""
Retrieve a list of plugins of desired class, KeyError raised otherwise
"""
logger.debug("Searching for plugins: %s", plugin_class)
matches = [plugin for plugin in self.plugins.values() if isinstance(plugin, plugin_class)]
if matches:
return matches
else:
raise KeyError("Requested plugin type not found: %s" % plugin_class)
```

**docstring_tokens:** ["Retrieve", "a", "list", "of", "plugins", "of", "desired", "class", "KeyError", "raised", "otherwise"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L402-L411
**code_tokens:**

```json
[
"def",
"get_plugins_of_type",
"(",
"self",
",",
"plugin_class",
")",
":",
"logger",
".",
"debug",
"(",
"\"Searching for plugins: %s\"",
",",
"plugin_class",
")",
"matches",
"=",
"[",
"plugin",
"for",
"plugin",
"in",
"self",
".",
"plugins",
".",
"values",
"(",
")",
"if",
"isinstance",
"(",
"plugin",
",",
"plugin_class",
")",
"]",
"if",
"matches",
":",
"return",
"matches",
"else",
":",
"raise",
"KeyError",
"(",
"\"Requested plugin type not found: %s\"",
"%",
"plugin_class",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.__collect_file`
**docstring:** Move or copy single file to artifacts dir
**path:** yandextank/core/tankcore.py
**code:**

```python
def __collect_file(self, filename, keep_original=False):
"""
Move or copy single file to artifacts dir
"""
dest = self.artifacts_dir + '/' + os.path.basename(filename)
logger.debug("Collecting file: %s to %s", filename, dest)
if not filename or not os.path.exists(filename):
logger.warning("File not found to collect: %s", filename)
return
if os.path.exists(dest):
# FIXME: 3 find a way to store artifacts anyway
logger.warning("File already exists: %s", dest)
return
if keep_original:
shutil.copy(filename, self.artifacts_dir)
else:
shutil.move(filename, self.artifacts_dir)
os.chmod(dest, 0o644)
```

**docstring_tokens:** ["Move", "or", "copy", "single", "file", "to", "artifacts", "dir"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L417-L437
**code_tokens:**

```json
[
"def",
"__collect_file",
"(",
"self",
",",
"filename",
",",
"keep_original",
"=",
"False",
")",
":",
"dest",
"=",
"self",
".",
"artifacts_dir",
"+",
"'/'",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"filename",
")",
"logger",
".",
"debug",
"(",
"\"Collecting file: %s to %s\"",
",",
"filename",
",",
"dest",
")",
"if",
"not",
"filename",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"logger",
".",
"warning",
"(",
"\"File not found to collect: %s\"",
",",
"filename",
")",
"return",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"# FIXME: 3 find a way to store artifacts anyway",
"logger",
".",
"warning",
"(",
"\"File already exists: %s\"",
",",
"dest",
")",
"return",
"if",
"keep_original",
":",
"shutil",
".",
"copy",
"(",
"filename",
",",
"self",
".",
"artifacts_dir",
")",
"else",
":",
"shutil",
".",
"move",
"(",
"filename",
",",
"self",
".",
"artifacts_dir",
")",
"os",
".",
"chmod",
"(",
"dest",
",",
"0o644",
")"
]
```

**sha:** d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b

---

**partition:** test
**func_name:** `TankCore.add_artifact_file`
**docstring:** Add file to be stored as result artifact on post-process phase
**path:** yandextank/core/tankcore.py
**code:**

```python
def add_artifact_file(self, filename, keep_original=False):
"""
Add file to be stored as result artifact on post-process phase
"""
if filename:
logger.debug(
"Adding artifact file to collect (keep=%s): %s", keep_original,
filename)
self.artifact_files[filename] = keep_original
```

**docstring_tokens:** ["Add", "file", "to", "be", "stored", "as", "result", "artifact", "on", "post", "-", "process", "phase"]
**repo:** yandex/yandex-tank
**language:** python
**url:** https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L439-L447
**code_tokens:**

```json
[
"def",
"add_artifact_file",
"(",
"self",
",",
"filename",
",",
"keep_original",
"=",
"False",
")",
":",
"if",
"filename",
":",
"logger",
".",
"debug",
"(",
"\"Adding artifact file to collect (keep=%s): %s\"",
",",
"keep_original",
",",
"filename",
")",
"self",
".",
"artifact_files",
"[",
"filename",
"]",
"=",
"keep_original"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
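A minimal usage sketch for the record above: a stand-in core object showing how plugins register result files and how keep_original selects copy vs. move at post-process time. DummyCore and the filenames are invented for illustration.

# Hypothetical sketch: how a plugin hands result files to the core.
# 'DummyCore' stands in for a real TankCore; the filenames are invented.
class DummyCore(object):
    def __init__(self):
        self.artifact_files = {}

    def add_artifact_file(self, filename, keep_original=False):
        if filename:
            self.artifact_files[filename] = keep_original

core = DummyCore()
core.add_artifact_file('phantom.conf')                    # moved on post-process
core.add_artifact_file('load.ammo', keep_original=True)   # copied, source kept
print(core.artifact_files)  # {'phantom.conf': False, 'load.ammo': True}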
test
|
TankCore.mkstemp
|
Generate temp file name in artifacts base dir
and close temp file handle
|
yandextank/core/tankcore.py
|
def mkstemp(self, suffix, prefix, directory=None):
"""
Generate temp file name in artifacts base dir
and close temp file handle
"""
if not directory:
directory = self.artifacts_dir
fd, fname = tempfile.mkstemp(suffix, prefix, directory)
os.close(fd)
os.chmod(fname, 0o644) # FIXME: chmod to parent dir's mode?
return fname
|
def mkstemp(self, suffix, prefix, directory=None):
"""
Generate temp file name in artifacts base dir
and close temp file handle
"""
if not directory:
directory = self.artifacts_dir
fd, fname = tempfile.mkstemp(suffix, prefix, directory)
os.close(fd)
os.chmod(fname, 0o644) # FIXME: chmod to parent dir's mode?
return fname
|
[
"Generate",
"temp",
"file",
"name",
"in",
"artifacts",
"base",
"dir",
"and",
"close",
"temp",
"file",
"handle"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L465-L475
|
[
"def",
"mkstemp",
"(",
"self",
",",
"suffix",
",",
"prefix",
",",
"directory",
"=",
"None",
")",
":",
"if",
"not",
"directory",
":",
"directory",
"=",
"self",
".",
"artifacts_dir",
"fd",
",",
"fname",
"=",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
",",
"prefix",
",",
"directory",
")",
"os",
".",
"close",
"(",
"fd",
")",
"os",
".",
"chmod",
"(",
"fname",
",",
"0o644",
")",
"# FIXME: chmod to parent dir's mode?",
"return",
"fname"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
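A self-contained sketch of the create-then-close pattern used by TankCore.mkstemp above; mkstemp_name and the suffix/prefix values are hypothetical stand-ins.

# Reserve a uniquely named file, close the OS handle, and hand the path to
# code that reopens it by name later -- the same pattern as the record above.
import os
import tempfile

def mkstemp_name(suffix, prefix, directory=None):
    fd, fname = tempfile.mkstemp(suffix, prefix, directory)
    os.close(fd)            # caller only needs the name, not the handle
    os.chmod(fname, 0o644)
    return fname

log_name = mkstemp_name('.log', 'tank_')
with open(log_name, 'w') as handle:   # reopened by name, as plugins do
    handle.write('started\n')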
test
|
TankCore.close
|
Call close() for all plugins
|
yandextank/core/tankcore.py
|
def close(self):
"""
Call close() for all plugins
"""
logger.info("Close allocated resources...")
for plugin in self.plugins.values():
logger.debug("Close %s", plugin)
try:
plugin.close()
except Exception as ex:
logger.error("Failed closing plugin %s: %s", plugin, ex)
logger.debug(
"Failed closing plugin: %s", traceback.format_exc(ex))
|
def close(self):
"""
Call close() for all plugins
"""
logger.info("Close allocated resources...")
for plugin in self.plugins.values():
logger.debug("Close %s", plugin)
try:
plugin.close()
except Exception as ex:
logger.error("Failed closing plugin %s: %s", plugin, ex)
logger.debug(
"Failed closing plugin: %s", traceback.format_exc(ex))
|
[
"Call",
"close",
"()",
"for",
"all",
"plugins"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L480-L492
|
[
"def",
"close",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Close allocated resources...\"",
")",
"for",
"plugin",
"in",
"self",
".",
"plugins",
".",
"values",
"(",
")",
":",
"logger",
".",
"debug",
"(",
"\"Close %s\"",
",",
"plugin",
")",
"try",
":",
"plugin",
".",
"close",
"(",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"\"Failed closing plugin %s: %s\"",
",",
"plugin",
",",
"ex",
")",
"logger",
".",
"debug",
"(",
"\"Failed closing plugin: %s\"",
",",
"traceback",
".",
"format_exc",
"(",
"ex",
")",
")"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
ConfigManager.load_files
|
Read configs set into storage
|
yandextank/core/tankcore.py
|
def load_files(self, configs):
""" Read configs set into storage """
logger.debug("Reading configs: %s", configs)
config_filenames = [resource.resource_filename(config) for config in configs]
try:
self.config.read(config_filenames)
except Exception as ex:
logger.error("Can't load configs: %s", ex)
raise ex
|
def load_files(self, configs):
""" Read configs set into storage """
logger.debug("Reading configs: %s", configs)
config_filenames = [resource.resource_filename(config) for config in configs]
try:
self.config.read(config_filenames)
except Exception as ex:
logger.error("Can't load configs: %s", ex)
raise ex
|
[
"Read",
"configs",
"set",
"into",
"storage"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L618-L626
|
[
"def",
"load_files",
"(",
"self",
",",
"configs",
")",
":",
"logger",
".",
"debug",
"(",
"\"Reading configs: %s\"",
",",
"configs",
")",
"config_filenames",
"=",
"[",
"resource",
".",
"resource_filename",
"(",
"config",
")",
"for",
"config",
"in",
"configs",
"]",
"try",
":",
"self",
".",
"config",
".",
"read",
"(",
"config_filenames",
")",
"except",
"Exception",
"as",
"ex",
":",
"logger",
".",
"error",
"(",
"\"Can't load configs: %s\"",
",",
"ex",
")",
"raise",
"ex"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
ConfigManager.flush
|
Flush current stat to file
|
yandextank/core/tankcore.py
|
def flush(self, filename=None):
""" Flush current stat to file """
if not filename:
filename = self.file
if filename:
with open(filename, 'w') as handle:
self.config.write(handle)
|
def flush(self, filename=None):
""" Flush current stat to file """
if not filename:
filename = self.file
if filename:
with open(filename, 'w') as handle:
self.config.write(handle)
|
[
"Flush",
"current",
"stat",
"to",
"file"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L628-L635
|
[
"def",
"flush",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"not",
"filename",
":",
"filename",
"=",
"self",
".",
"file",
"if",
"filename",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"handle",
":",
"self",
".",
"config",
".",
"write",
"(",
"handle",
")"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
ConfigManager.get_options
|
Get options list with requested prefix
|
yandextank/core/tankcore.py
|
def get_options(self, section, prefix=''):
""" Get options list with requested prefix """
res = []
try:
for option in self.config.options(section):
if not prefix or option.find(prefix) == 0:
res += [(
option[len(prefix):], self.config.get(section, option))]
except ConfigParser.NoSectionError as ex:
logger.warning("No section: %s", ex)
logger.debug(
"Section: [%s] prefix: '%s' options:\n%s", section, prefix, res)
return res
|
def get_options(self, section, prefix=''):
""" Get options list with requested prefix """
res = []
try:
for option in self.config.options(section):
if not prefix or option.find(prefix) == 0:
res += [(
option[len(prefix):], self.config.get(section, option))]
except ConfigParser.NoSectionError as ex:
logger.warning("No section: %s", ex)
logger.debug(
"Section: [%s] prefix: '%s' options:\n%s", section, prefix, res)
return res
|
[
"Get",
"options",
"list",
"with",
"requested",
"prefix"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L637-L650
|
[
"def",
"get_options",
"(",
"self",
",",
"section",
",",
"prefix",
"=",
"''",
")",
":",
"res",
"=",
"[",
"]",
"try",
":",
"for",
"option",
"in",
"self",
".",
"config",
".",
"options",
"(",
"section",
")",
":",
"if",
"not",
"prefix",
"or",
"option",
".",
"find",
"(",
"prefix",
")",
"==",
"0",
":",
"res",
"+=",
"[",
"(",
"option",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
",",
"self",
".",
"config",
".",
"get",
"(",
"section",
",",
"option",
")",
")",
"]",
"except",
"ConfigParser",
".",
"NoSectionError",
"as",
"ex",
":",
"logger",
".",
"warning",
"(",
"\"No section: %s\"",
",",
"ex",
")",
"logger",
".",
"debug",
"(",
"\"Section: [%s] prefix: '%s' options:\\n%s\"",
",",
"section",
",",
"prefix",
",",
"res",
")",
"return",
"res"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
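An illustrative run of the prefix convention from get_options above, using the standard library ConfigParser directly; the section and option names are invented.

# Options sharing a prefix are returned with the prefix stripped.
try:
    import configparser as ConfigParser  # Python 3
except ImportError:
    import ConfigParser                  # Python 2

config = ConfigParser.RawConfigParser()
config.add_section('phantom')
config.set('phantom', 'header_host', 'example.org')
config.set('phantom', 'header_connection', 'close')

prefix = 'header_'
res = [(opt[len(prefix):], config.get('phantom', opt))
       for opt in config.options('phantom') if opt.find(prefix) == 0]
print(res)  # [('host', 'example.org'), ('connection', 'close')]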
test
|
ConfigManager.find_sections
|
return sections with specified prefix
|
yandextank/core/tankcore.py
|
def find_sections(self, prefix):
""" return sections with specified prefix """
res = []
for section in self.config.sections():
if section.startswith(prefix):
res.append(section)
return res
|
def find_sections(self, prefix):
""" return sections with specified prefix """
res = []
for section in self.config.sections():
if section.startswith(prefix):
res.append(section)
return res
|
[
"return",
"sections",
"with",
"specified",
"prefix"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/core/tankcore.py#L652-L658
|
[
"def",
"find_sections",
"(",
"self",
",",
"prefix",
")",
":",
"res",
"=",
"[",
"]",
"for",
"section",
"in",
"self",
".",
"config",
".",
"sections",
"(",
")",
":",
"if",
"section",
".",
"startswith",
"(",
"prefix",
")",
":",
"res",
".",
"append",
"(",
"section",
")",
"return",
"res"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
PhantomStatsReader._decode_stat_data
|
Return all items found in this chunk
|
yandextank/plugins/Phantom/reader.py
|
def _decode_stat_data(self, chunk):
"""
Return all items found in this chunk
"""
for date_str, statistics in chunk.iteritems():
date_obj = datetime.datetime.strptime(
date_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
chunk_date = int(time.mktime(date_obj.timetuple()))
instances = 0
for benchmark_name, benchmark in statistics.iteritems():
if not benchmark_name.startswith("benchmark_io"):
continue
for method, meth_obj in benchmark.iteritems():
if "mmtasks" in meth_obj:
instances += meth_obj["mmtasks"][2]
offset = chunk_date - 1 - self.start_time
reqps = 0
if 0 <= offset < len(self.phantom_info.steps):
reqps = self.phantom_info.steps[offset][0]
yield self.stats_item(chunk_date - 1, instances, reqps)
|
def _decode_stat_data(self, chunk):
"""
Return all items found in this chunk
"""
for date_str, statistics in chunk.iteritems():
date_obj = datetime.datetime.strptime(
date_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
chunk_date = int(time.mktime(date_obj.timetuple()))
instances = 0
for benchmark_name, benchmark in statistics.iteritems():
if not benchmark_name.startswith("benchmark_io"):
continue
for method, meth_obj in benchmark.iteritems():
if "mmtasks" in meth_obj:
instances += meth_obj["mmtasks"][2]
offset = chunk_date - 1 - self.start_time
reqps = 0
if 0 <= offset < len(self.phantom_info.steps):
reqps = self.phantom_info.steps[offset][0]
yield self.stats_item(chunk_date - 1, instances, reqps)
|
[
"Return",
"all",
"items",
"found",
"in",
"this",
"chunk"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Phantom/reader.py#L115-L135
|
[
"def",
"_decode_stat_data",
"(",
"self",
",",
"chunk",
")",
":",
"for",
"date_str",
",",
"statistics",
"in",
"chunk",
".",
"iteritems",
"(",
")",
":",
"date_obj",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"date_str",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
",",
"'%Y-%m-%d %H:%M:%S'",
")",
"chunk_date",
"=",
"int",
"(",
"time",
".",
"mktime",
"(",
"date_obj",
".",
"timetuple",
"(",
")",
")",
")",
"instances",
"=",
"0",
"for",
"benchmark_name",
",",
"benchmark",
"in",
"statistics",
".",
"iteritems",
"(",
")",
":",
"if",
"not",
"benchmark_name",
".",
"startswith",
"(",
"\"benchmark_io\"",
")",
":",
"continue",
"for",
"method",
",",
"meth_obj",
"in",
"benchmark",
".",
"iteritems",
"(",
")",
":",
"if",
"\"mmtasks\"",
"in",
"meth_obj",
":",
"instances",
"+=",
"meth_obj",
"[",
"\"mmtasks\"",
"]",
"[",
"2",
"]",
"offset",
"=",
"chunk_date",
"-",
"1",
"-",
"self",
".",
"start_time",
"reqps",
"=",
"0",
"if",
"0",
"<=",
"offset",
"<",
"len",
"(",
"self",
".",
"phantom_info",
".",
"steps",
")",
":",
"reqps",
"=",
"self",
".",
"phantom_info",
".",
"steps",
"[",
"offset",
"]",
"[",
"0",
"]",
"yield",
"self",
".",
"stats_item",
"(",
"chunk_date",
"-",
"1",
",",
"instances",
",",
"reqps",
")"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
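A hypothetical chunk in the shape _decode_stat_data expects, showing how instances are summed from mmtasks[2] of every "benchmark_io*" benchmark; all values are invented.

chunk = {
    "2019-01-01 12:00:00.123": {
        "benchmark_io0": {"method_get": {"mmtasks": [0, 0, 7]}},
        "benchmark_io1": {"method_get": {"mmtasks": [0, 0, 5]}},
        "system": {"cpu": {"user": 10}},   # skipped: no benchmark_io prefix
    }
}
statistics = next(iter(chunk.values()))
instances = sum(
    meth["mmtasks"][2]
    for name, bench in statistics.items() if name.startswith("benchmark_io")
    for meth in bench.values() if "mmtasks" in meth
)
print(instances)  # 12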
test
|
Plugin.phantom
|
:rtype: PhantomConfig
|
yandextank/plugins/Phantom/plugin.py
|
def phantom(self):
"""
:rtype: PhantomConfig
"""
if not self._phantom:
self._phantom = PhantomConfig(self.core, self.cfg, self.stat_log)
self._phantom.read_config()
return self._phantom
|
def phantom(self):
"""
:rtype: PhantomConfig
"""
if not self._phantom:
self._phantom = PhantomConfig(self.core, self.cfg, self.stat_log)
self._phantom.read_config()
return self._phantom
|
[
":",
"rtype",
":",
"PhantomConfig"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Phantom/plugin.py#L75-L82
|
[
"def",
"phantom",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_phantom",
":",
"self",
".",
"_phantom",
"=",
"PhantomConfig",
"(",
"self",
".",
"core",
",",
"self",
".",
"cfg",
",",
"self",
".",
"stat_log",
")",
"self",
".",
"_phantom",
".",
"read_config",
"(",
")",
"return",
"self",
".",
"_phantom"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
Plugin.get_info
|
returns info object
|
yandextank/plugins/Phantom/plugin.py
|
def get_info(self):
""" returns info object """
if not self.cached_info:
if not self.phantom:
return None
self.cached_info = self.phantom.get_info()
return self.cached_info
|
def get_info(self):
""" returns info object """
if not self.cached_info:
if not self.phantom:
return None
self.cached_info = self.phantom.get_info()
return self.cached_info
|
[
"returns",
"info",
"object"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Phantom/plugin.py#L195-L201
|
[
"def",
"get_info",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"cached_info",
":",
"if",
"not",
"self",
".",
"phantom",
":",
"return",
"None",
"self",
".",
"cached_info",
"=",
"self",
".",
"phantom",
".",
"get_info",
"(",
")",
"return",
"self",
".",
"cached_info"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
MonitoringCollector.prepare
|
Prepare for monitoring - install agents etc
|
yandextank/plugins/Telegraf/collector.py
|
def prepare(self):
"""Prepare for monitoring - install agents etc"""
# Parse config
agent_configs = []
if self.config:
agent_configs = self.config_manager.getconfig(
self.config, self.default_target)
# Creating agent for hosts
for config in agent_configs:
if config['host'] in ['localhost', '127.0.0.1', '::1']:
client = self.clients['localhost'](
config, self.old_style_configs, kill_old=self.kill_old)
else:
client = self.clients['ssh'](
config, self.old_style_configs, timeout=5, kill_old=self.kill_old)
logger.debug('Installing monitoring agent. Host: %s', client.host)
agent_config, startup_config, customs_script = client.install()
if agent_config:
self.agents.append(client)
self.artifact_files.append(agent_config)
if startup_config:
self.artifact_files.append(startup_config)
if customs_script:
self.artifact_files.append(customs_script)
|
def prepare(self):
"""Prepare for monitoring - install agents etc"""
# Parse config
agent_configs = []
if self.config:
agent_configs = self.config_manager.getconfig(
self.config, self.default_target)
# Creating agent for hosts
for config in agent_configs:
if config['host'] in ['localhost', '127.0.0.1', '::1']:
client = self.clients['localhost'](
config, self.old_style_configs, kill_old=self.kill_old)
else:
client = self.clients['ssh'](
config, self.old_style_configs, timeout=5, kill_old=self.kill_old)
logger.debug('Installing monitoring agent. Host: %s', client.host)
agent_config, startup_config, customs_script = client.install()
if agent_config:
self.agents.append(client)
self.artifact_files.append(agent_config)
if startup_config:
self.artifact_files.append(startup_config)
if customs_script:
self.artifact_files.append(customs_script)
|
[
"Prepare",
"for",
"monitoring",
"-",
"install",
"agents",
"etc"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Telegraf/collector.py#L51-L76
|
[
"def",
"prepare",
"(",
"self",
")",
":",
"# Parse config",
"agent_configs",
"=",
"[",
"]",
"if",
"self",
".",
"config",
":",
"agent_configs",
"=",
"self",
".",
"config_manager",
".",
"getconfig",
"(",
"self",
".",
"config",
",",
"self",
".",
"default_target",
")",
"# Creating agent for hosts",
"for",
"config",
"in",
"agent_configs",
":",
"if",
"config",
"[",
"'host'",
"]",
"in",
"[",
"'localhost'",
",",
"'127.0.0.1'",
",",
"'::1'",
"]",
":",
"client",
"=",
"self",
".",
"clients",
"[",
"'localhost'",
"]",
"(",
"config",
",",
"self",
".",
"old_style_configs",
",",
"kill_old",
"=",
"self",
".",
"kill_old",
")",
"else",
":",
"client",
"=",
"self",
".",
"clients",
"[",
"'ssh'",
"]",
"(",
"config",
",",
"self",
".",
"old_style_configs",
",",
"timeout",
"=",
"5",
",",
"kill_old",
"=",
"self",
".",
"kill_old",
")",
"logger",
".",
"debug",
"(",
"'Installing monitoring agent. Host: %s'",
",",
"client",
".",
"host",
")",
"agent_config",
",",
"startup_config",
",",
"customs_script",
"=",
"client",
".",
"install",
"(",
")",
"if",
"agent_config",
":",
"self",
".",
"agents",
".",
"append",
"(",
"client",
")",
"self",
".",
"artifact_files",
".",
"append",
"(",
"agent_config",
")",
"if",
"startup_config",
":",
"self",
".",
"artifact_files",
".",
"append",
"(",
"startup_config",
")",
"if",
"customs_script",
":",
"self",
".",
"artifact_files",
".",
"append",
"(",
"customs_script",
")"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
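A sketch of the host-based client dispatch in prepare() above: loopback addresses get the local client, anything else goes over SSH. The stub classes stand in for the real Telegraf agent clients.

class LocalhostClient(object):
    def __init__(self, config, **kw):
        self.host = config['host']

class SSHClient(object):
    def __init__(self, config, timeout=5, **kw):
        self.host = config['host']

clients = {'localhost': LocalhostClient, 'ssh': SSHClient}

def make_client(config):
    if config['host'] in ('localhost', '127.0.0.1', '::1'):
        return clients['localhost'](config)
    return clients['ssh'](config, timeout=5)

print(type(make_client({'host': '::1'})).__name__)             # LocalhostClient
print(type(make_client({'host': 'target.example'})).__name__)  # SSHClient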
test
|
MonitoringCollector.start
|
Start agents
execute popen of agent.py on the target and start the output reader thread.
|
yandextank/plugins/Telegraf/collector.py
|
def start(self):
""" Start agents
        execute popen of agent.py on the target and start the output reader thread.
"""
[agent.start() for agent in self.agents]
[agent.reader_thread.start() for agent in self.agents]
|
def start(self):
""" Start agents
        execute popen of agent.py on the target and start the output reader thread.
"""
[agent.start() for agent in self.agents]
[agent.reader_thread.start() for agent in self.agents]
|
[
"Start",
"agents"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Telegraf/collector.py#L78-L84
|
[
"def",
"start",
"(",
"self",
")",
":",
"[",
"agent",
".",
"start",
"(",
")",
"for",
"agent",
"in",
"self",
".",
"agents",
"]",
"[",
"agent",
".",
"reader_thread",
".",
"start",
"(",
")",
"for",
"agent",
"in",
"self",
".",
"agents",
"]"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
MonitoringCollector.poll
|
Poll agents for data
|
yandextank/plugins/Telegraf/collector.py
|
def poll(self):
""" Poll agents for data
"""
start_time = time.time()
for agent in self.agents:
for collect in agent.reader:
                # don't crash if trash or traceback came from agent to stdout
if not collect:
return 0
for chunk in collect:
ts, prepared_results = chunk
if self.load_start_time and int(
ts) >= self.load_start_time:
ready_to_send = {
"timestamp": int(ts),
"data": {
self.hash_hostname(agent.host): {
"comment": agent.config.comment,
"metrics": prepared_results
}
}
}
self.__collected_data.append(ready_to_send)
logger.debug(
'Polling/decoding agents data took: %.2fms',
(time.time() - start_time) * 1000)
collected_data_length = len(self.__collected_data)
if not self.first_data_received and self.__collected_data:
self.first_data_received = True
logger.info("Monitoring received first data.")
else:
self.send_collected_data()
return collected_data_length
|
def poll(self):
""" Poll agents for data
"""
start_time = time.time()
for agent in self.agents:
for collect in agent.reader:
                # don't crash if trash or traceback came from agent to stdout
if not collect:
return 0
for chunk in collect:
ts, prepared_results = chunk
if self.load_start_time and int(
ts) >= self.load_start_time:
ready_to_send = {
"timestamp": int(ts),
"data": {
self.hash_hostname(agent.host): {
"comment": agent.config.comment,
"metrics": prepared_results
}
}
}
self.__collected_data.append(ready_to_send)
logger.debug(
'Polling/decoding agents data took: %.2fms',
(time.time() - start_time) * 1000)
collected_data_length = len(self.__collected_data)
if not self.first_data_received and self.__collected_data:
self.first_data_received = True
logger.info("Monitoring received first data.")
else:
self.send_collected_data()
return collected_data_length
|
[
"Poll",
"agents",
"for",
"data"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Telegraf/collector.py#L86-L121
|
[
"def",
"poll",
"(",
"self",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"for",
"agent",
"in",
"self",
".",
"agents",
":",
"for",
"collect",
"in",
"agent",
".",
"reader",
":",
"# don't crush if trash or traceback came from agent to stdout",
"if",
"not",
"collect",
":",
"return",
"0",
"for",
"chunk",
"in",
"collect",
":",
"ts",
",",
"prepared_results",
"=",
"chunk",
"if",
"self",
".",
"load_start_time",
"and",
"int",
"(",
"ts",
")",
">=",
"self",
".",
"load_start_time",
":",
"ready_to_send",
"=",
"{",
"\"timestamp\"",
":",
"int",
"(",
"ts",
")",
",",
"\"data\"",
":",
"{",
"self",
".",
"hash_hostname",
"(",
"agent",
".",
"host",
")",
":",
"{",
"\"comment\"",
":",
"agent",
".",
"config",
".",
"comment",
",",
"\"metrics\"",
":",
"prepared_results",
"}",
"}",
"}",
"self",
".",
"__collected_data",
".",
"append",
"(",
"ready_to_send",
")",
"logger",
".",
"debug",
"(",
"'Polling/decoding agents data took: %.2fms'",
",",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
")",
"*",
"1000",
")",
"collected_data_length",
"=",
"len",
"(",
"self",
".",
"__collected_data",
")",
"if",
"not",
"self",
".",
"first_data_received",
"and",
"self",
".",
"__collected_data",
":",
"self",
".",
"first_data_received",
"=",
"True",
"logger",
".",
"info",
"(",
"\"Monitoring received first data.\"",
")",
"else",
":",
"self",
".",
"send_collected_data",
"(",
")",
"return",
"collected_data_length"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
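The envelope shape poll() appends per chunk, with invented values; the literal host key stands in for hash_hostname(agent.host).

ts = 1546300800
prepared_results = {'cpu-cpu-total_usage_user': 12.5}
ready_to_send = {
    "timestamp": int(ts),
    "data": {
        "host_a1b2c3": {            # stands in for hash_hostname(agent.host)
            "comment": "test target",
            "metrics": prepared_results,
        }
    }
}
print(ready_to_send["data"]["host_a1b2c3"]["metrics"])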
test
|
MonitoringCollector.stop
|
Shutdown agents
|
yandextank/plugins/Telegraf/collector.py
|
def stop(self):
"""Shutdown agents"""
logger.debug("Uninstalling monitoring agents")
for agent in self.agents:
log_filename, data_filename = agent.uninstall()
self.artifact_files.append(log_filename)
self.artifact_files.append(data_filename)
for agent in self.agents:
try:
logger.debug(
'Waiting for agent %s reader thread to finish.', agent)
agent.reader_thread.join(10)
except BaseException:
logger.error('Monitoring reader thread stuck!', exc_info=True)
|
def stop(self):
"""Shutdown agents"""
logger.debug("Uninstalling monitoring agents")
for agent in self.agents:
log_filename, data_filename = agent.uninstall()
self.artifact_files.append(log_filename)
self.artifact_files.append(data_filename)
for agent in self.agents:
try:
logger.debug(
'Waiting for agent %s reader thread to finish.', agent)
agent.reader_thread.join(10)
except BaseException:
logger.error('Monitoring reader thread stuck!', exc_info=True)
|
[
"Shutdown",
"agents"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Telegraf/collector.py#L123-L136
|
[
"def",
"stop",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"\"Uninstalling monitoring agents\"",
")",
"for",
"agent",
"in",
"self",
".",
"agents",
":",
"log_filename",
",",
"data_filename",
"=",
"agent",
".",
"uninstall",
"(",
")",
"self",
".",
"artifact_files",
".",
"append",
"(",
"log_filename",
")",
"self",
".",
"artifact_files",
".",
"append",
"(",
"data_filename",
")",
"for",
"agent",
"in",
"self",
".",
"agents",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"'Waiting for agent %s reader thread to finish.'",
",",
"agent",
")",
"agent",
".",
"reader_thread",
".",
"join",
"(",
"10",
")",
"except",
"BaseException",
":",
"logger",
".",
"error",
"(",
"'Monitoring reader thread stuck!'",
",",
"exc_info",
"=",
"True",
")"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
MonitoringCollector.send_collected_data
|
sends pending data set to listeners
|
yandextank/plugins/Telegraf/collector.py
|
def send_collected_data(self):
"""sends pending data set to listeners"""
data = self.__collected_data
self.__collected_data = []
for listener in self.listeners:
            # deep copy to ensure each listener gets its own copy
listener.monitoring_data(copy.deepcopy(data))
|
def send_collected_data(self):
"""sends pending data set to listeners"""
data = self.__collected_data
self.__collected_data = []
for listener in self.listeners:
            # deep copy to ensure each listener gets its own copy
listener.monitoring_data(copy.deepcopy(data))
|
[
"sends",
"pending",
"data",
"set",
"to",
"listeners"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Telegraf/collector.py#L138-L144
|
[
"def",
"send_collected_data",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"__collected_data",
"self",
".",
"__collected_data",
"=",
"[",
"]",
"for",
"listener",
"in",
"self",
".",
"listeners",
":",
"# deep copy to ensure each listener gets it's own copy",
"listener",
".",
"monitoring_data",
"(",
"copy",
".",
"deepcopy",
"(",
"data",
")",
")"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
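A small demo of why send_collected_data() deep-copies: a listener that mutates its batch must not affect what the next listener sees. The toy listeners are invented.

import copy

data = [{"timestamp": 1, "data": {"host": {"metrics": {"cpu": 1}}}}]

class MutatingListener(object):
    def monitoring_data(self, batch):
        batch[0]["data"].clear()    # destructive consumer

class PrintingListener(object):
    def monitoring_data(self, batch):
        print(batch[0]["data"])     # still sees the full payload

for listener in (MutatingListener(), PrintingListener()):
    listener.monitoring_data(copy.deepcopy(data))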
test
|
Plugin.__detect_configuration
|
we need to be flexible in order to determine which plugin's configuration
is specified and make appropriate configs for the metrics collector
:return: SECTION name or None for defaults
|
yandextank/plugins/Telegraf/plugin.py
|
def __detect_configuration(self):
"""
        we need to be flexible in order to determine which plugin's configuration
        is specified and make appropriate configs for the metrics collector
:return: SECTION name or None for defaults
"""
try:
is_telegraf = self.core.get_option('telegraf', "config")
except KeyError:
is_telegraf = None
try:
is_monitoring = self.core.get_option('monitoring', "config")
except KeyError:
is_monitoring = None
if is_telegraf and is_monitoring:
raise ValueError(
'Both telegraf and monitoring configs specified. '
'Clean up your config and delete one of them')
if is_telegraf and not is_monitoring:
return 'telegraf'
if not is_telegraf and is_monitoring:
return 'monitoring'
if not is_telegraf and not is_monitoring:
# defaults target logic
try:
is_telegraf_dt = self.core.get_option('telegraf')
except NoOptionError:
is_telegraf_dt = None
try:
is_monitoring_dt = self.core.get_option('monitoring')
except BaseException:
is_monitoring_dt = None
if is_telegraf_dt and is_monitoring_dt:
raise ValueError(
'Both telegraf and monitoring default targets specified. '
'Clean up your config and delete one of them')
if is_telegraf_dt and not is_monitoring_dt:
return
if not is_telegraf_dt and is_monitoring_dt:
self.core.set_option(
"telegraf", "default_target", is_monitoring_dt)
if not is_telegraf_dt and not is_monitoring_dt:
return
|
def __detect_configuration(self):
"""
        we need to be flexible in order to determine which plugin's configuration
        is specified and make appropriate configs for the metrics collector
:return: SECTION name or None for defaults
"""
try:
is_telegraf = self.core.get_option('telegraf', "config")
except KeyError:
is_telegraf = None
try:
is_monitoring = self.core.get_option('monitoring', "config")
except KeyError:
is_monitoring = None
if is_telegraf and is_monitoring:
raise ValueError(
'Both telegraf and monitoring configs specified. '
'Clean up your config and delete one of them')
if is_telegraf and not is_monitoring:
return 'telegraf'
if not is_telegraf and is_monitoring:
return 'monitoring'
if not is_telegraf and not is_monitoring:
# defaults target logic
try:
is_telegraf_dt = self.core.get_option('telegraf')
except NoOptionError:
is_telegraf_dt = None
try:
is_monitoring_dt = self.core.get_option('monitoring')
except BaseException:
is_monitoring_dt = None
if is_telegraf_dt and is_monitoring_dt:
raise ValueError(
'Both telegraf and monitoring default targets specified. '
'Clean up your config and delete one of them')
if is_telegraf_dt and not is_monitoring_dt:
return
if not is_telegraf_dt and is_monitoring_dt:
self.core.set_option(
"telegraf", "default_target", is_monitoring_dt)
if not is_telegraf_dt and not is_monitoring_dt:
return
|
[
"we",
"need",
"to",
"be",
"flexible",
"in",
"order",
"to",
"determine",
"which",
"plugin",
"s",
"configuration",
"specified",
"and",
"make",
"appropriate",
"configs",
"to",
"metrics",
"collector"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Telegraf/plugin.py#L72-L116
|
[
"def",
"__detect_configuration",
"(",
"self",
")",
":",
"try",
":",
"is_telegraf",
"=",
"self",
".",
"core",
".",
"get_option",
"(",
"'telegraf'",
",",
"\"config\"",
")",
"except",
"KeyError",
":",
"is_telegraf",
"=",
"None",
"try",
":",
"is_monitoring",
"=",
"self",
".",
"core",
".",
"get_option",
"(",
"'monitoring'",
",",
"\"config\"",
")",
"except",
"KeyError",
":",
"is_monitoring",
"=",
"None",
"if",
"is_telegraf",
"and",
"is_monitoring",
":",
"raise",
"ValueError",
"(",
"'Both telegraf and monitoring configs specified. '",
"'Clean up your config and delete one of them'",
")",
"if",
"is_telegraf",
"and",
"not",
"is_monitoring",
":",
"return",
"'telegraf'",
"if",
"not",
"is_telegraf",
"and",
"is_monitoring",
":",
"return",
"'monitoring'",
"if",
"not",
"is_telegraf",
"and",
"not",
"is_monitoring",
":",
"# defaults target logic",
"try",
":",
"is_telegraf_dt",
"=",
"self",
".",
"core",
".",
"get_option",
"(",
"'telegraf'",
")",
"except",
"NoOptionError",
":",
"is_telegraf_dt",
"=",
"None",
"try",
":",
"is_monitoring_dt",
"=",
"self",
".",
"core",
".",
"get_option",
"(",
"'monitoring'",
")",
"except",
"BaseException",
":",
"is_monitoring_dt",
"=",
"None",
"if",
"is_telegraf_dt",
"and",
"is_monitoring_dt",
":",
"raise",
"ValueError",
"(",
"'Both telegraf and monitoring default targets specified. '",
"'Clean up your config and delete one of them'",
")",
"if",
"is_telegraf_dt",
"and",
"not",
"is_monitoring_dt",
":",
"return",
"if",
"not",
"is_telegraf_dt",
"and",
"is_monitoring_dt",
":",
"self",
".",
"core",
".",
"set_option",
"(",
"\"telegraf\"",
",",
"\"default_target\"",
",",
"is_monitoring_dt",
")",
"if",
"not",
"is_telegraf_dt",
"and",
"not",
"is_monitoring_dt",
":",
"return"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
test
|
MonitoringWidget.__handle_data_items
|
store metric in data tree and calc offset signs
sign < 0 is CYAN, means metric value is lower than previous,
sign > 0 is YELLOW, means metric value is higher than previous or initial,
sign == 0 is WHITE, means equal metric value
|
yandextank/plugins/Telegraf/plugin.py
|
def __handle_data_items(self, host, data):
""" store metric in data tree and calc offset signs
        sign < 0 is CYAN, means metric value is lower than previous,
        sign > 0 is YELLOW, means metric value is higher than previous or initial,
        sign == 0 is WHITE, means equal metric value
"""
for metric, value in data.iteritems():
if value == '':
self.sign[host][metric] = -1
self.data[host][metric] = value
else:
if not self.data[host].get(metric, None):
self.sign[host][metric] = 1
elif float(value) > float(self.data[host][metric]):
self.sign[host][metric] = 1
elif float(value) < float(self.data[host][metric]):
self.sign[host][metric] = -1
else:
self.sign[host][metric] = 0
self.data[host][metric] = "%.2f" % float(value)
|
def __handle_data_items(self, host, data):
""" store metric in data tree and calc offset signs
        sign < 0 is CYAN, means metric value is lower than previous,
        sign > 0 is YELLOW, means metric value is higher than previous or initial,
        sign == 0 is WHITE, means equal metric value
"""
for metric, value in data.iteritems():
if value == '':
self.sign[host][metric] = -1
self.data[host][metric] = value
else:
if not self.data[host].get(metric, None):
self.sign[host][metric] = 1
elif float(value) > float(self.data[host][metric]):
self.sign[host][metric] = 1
elif float(value) < float(self.data[host][metric]):
self.sign[host][metric] = -1
else:
self.sign[host][metric] = 0
self.data[host][metric] = "%.2f" % float(value)
|
[
"store",
"metric",
"in",
"data",
"tree",
"and",
"calc",
"offset",
"signs"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Telegraf/plugin.py#L284-L304
|
[
"def",
"__handle_data_items",
"(",
"self",
",",
"host",
",",
"data",
")",
":",
"for",
"metric",
",",
"value",
"in",
"data",
".",
"iteritems",
"(",
")",
":",
"if",
"value",
"==",
"''",
":",
"self",
".",
"sign",
"[",
"host",
"]",
"[",
"metric",
"]",
"=",
"-",
"1",
"self",
".",
"data",
"[",
"host",
"]",
"[",
"metric",
"]",
"=",
"value",
"else",
":",
"if",
"not",
"self",
".",
"data",
"[",
"host",
"]",
".",
"get",
"(",
"metric",
",",
"None",
")",
":",
"self",
".",
"sign",
"[",
"host",
"]",
"[",
"metric",
"]",
"=",
"1",
"elif",
"float",
"(",
"value",
")",
">",
"float",
"(",
"self",
".",
"data",
"[",
"host",
"]",
"[",
"metric",
"]",
")",
":",
"self",
".",
"sign",
"[",
"host",
"]",
"[",
"metric",
"]",
"=",
"1",
"elif",
"float",
"(",
"value",
")",
"<",
"float",
"(",
"self",
".",
"data",
"[",
"host",
"]",
"[",
"metric",
"]",
")",
":",
"self",
".",
"sign",
"[",
"host",
"]",
"[",
"metric",
"]",
"=",
"-",
"1",
"else",
":",
"self",
".",
"sign",
"[",
"host",
"]",
"[",
"metric",
"]",
"=",
"0",
"self",
".",
"data",
"[",
"host",
"]",
"[",
"metric",
"]",
"=",
"\"%.2f\"",
"%",
"float",
"(",
"value",
")"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
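A standalone restatement of the sign rules from __handle_data_items, with invented sample values.

# -1 (CYAN) for a drop or an empty value, 1 (YELLOW) for a rise or a new
# metric, 0 (WHITE) for no change.
def calc_sign(prev, value):
    if value == '':
        return -1
    if prev is None:
        return 1
    return (float(value) > float(prev)) - (float(value) < float(prev))

print(calc_sign(None, '5.0'))   # 1  -> first sample
print(calc_sign('5.0', '7.2'))  # 1  -> higher than previous
print(calc_sign('7.2', '3.1'))  # -1 -> lower than previous
print(calc_sign('3.1', '3.1'))  # 0  -> unchanged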
test
|
MonitoringReader._decode_agents_data
|
decode agents jsons, count diffs
|
yandextank/plugins/Telegraf/reader.py
|
def _decode_agents_data(self, block):
"""
decode agents jsons, count diffs
"""
collect = []
if block:
for chunk in block.split('\n'):
try:
if chunk:
prepared_results = {}
jsn = json.loads(chunk)
for ts, values in jsn.iteritems():
for key, value in values.iteritems():
# key sample: diskio-sda1_io_time
# key_group sample: diskio
# key_name sample: io_time
try:
key_group, key_name = key.split('_')[0].split('-')[0], '_'.join(key.split('_')[1:])
except: # noqa: E722
key_group, key_name = key.split('_')[0], '_'.join(key.split('_')[1:])
if key_group in decoder.diff_metrics.keys():
if key_name in decoder.diff_metrics[key_group]:
decoded_key = decoder.find_common_names(
key)
if self.prev_check:
try:
value = jsn[ts][key] - \
self.prev_check[key]
except KeyError:
logger.debug(
'There is no diff value for metric %s.\n'
'Timestamp: %s. Is it initial data?', key, ts, exc_info=True)
value = 0
prepared_results[decoded_key] = value
else:
decoded_key = decoder.find_common_names(
key)
prepared_results[decoded_key] = value
else:
decoded_key = decoder.find_common_names(
key)
prepared_results[decoded_key] = value
self.prev_check = jsn[ts]
collect.append((ts, prepared_results))
except ValueError:
logger.error(
                    'Telegraf agent sent trash to output: %s', chunk)
logger.debug(
'Telegraf agent data block w/ trash: %s',
exc_info=True)
return []
except BaseException:
logger.error(
'Exception trying to parse agent data: %s',
chunk,
exc_info=True)
return []
if collect:
return collect
|
def _decode_agents_data(self, block):
"""
decode agents jsons, count diffs
"""
collect = []
if block:
for chunk in block.split('\n'):
try:
if chunk:
prepared_results = {}
jsn = json.loads(chunk)
for ts, values in jsn.iteritems():
for key, value in values.iteritems():
# key sample: diskio-sda1_io_time
# key_group sample: diskio
# key_name sample: io_time
try:
key_group, key_name = key.split('_')[0].split('-')[0], '_'.join(key.split('_')[1:])
except: # noqa: E722
key_group, key_name = key.split('_')[0], '_'.join(key.split('_')[1:])
if key_group in decoder.diff_metrics.keys():
if key_name in decoder.diff_metrics[key_group]:
decoded_key = decoder.find_common_names(
key)
if self.prev_check:
try:
value = jsn[ts][key] - \
self.prev_check[key]
except KeyError:
logger.debug(
'There is no diff value for metric %s.\n'
'Timestamp: %s. Is it initial data?', key, ts, exc_info=True)
value = 0
prepared_results[decoded_key] = value
else:
decoded_key = decoder.find_common_names(
key)
prepared_results[decoded_key] = value
else:
decoded_key = decoder.find_common_names(
key)
prepared_results[decoded_key] = value
self.prev_check = jsn[ts]
collect.append((ts, prepared_results))
except ValueError:
logger.error(
                    'Telegraf agent sent trash to output: %s', chunk)
logger.debug(
'Telegraf agent data block w/ trash: %s',
exc_info=True)
return []
except BaseException:
logger.error(
'Exception trying to parse agent data: %s',
chunk,
exc_info=True)
return []
if collect:
return collect
|
[
"decode",
"agents",
"jsons",
"count",
"diffs"
] |
yandex/yandex-tank
|
python
|
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/plugins/Telegraf/reader.py#L27-L85
|
[
"def",
"_decode_agents_data",
"(",
"self",
",",
"block",
")",
":",
"collect",
"=",
"[",
"]",
"if",
"block",
":",
"for",
"chunk",
"in",
"block",
".",
"split",
"(",
"'\\n'",
")",
":",
"try",
":",
"if",
"chunk",
":",
"prepared_results",
"=",
"{",
"}",
"jsn",
"=",
"json",
".",
"loads",
"(",
"chunk",
")",
"for",
"ts",
",",
"values",
"in",
"jsn",
".",
"iteritems",
"(",
")",
":",
"for",
"key",
",",
"value",
"in",
"values",
".",
"iteritems",
"(",
")",
":",
"# key sample: diskio-sda1_io_time",
"# key_group sample: diskio",
"# key_name sample: io_time",
"try",
":",
"key_group",
",",
"key_name",
"=",
"key",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
",",
"'_'",
".",
"join",
"(",
"key",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
":",
"]",
")",
"except",
":",
"# noqa: E722",
"key_group",
",",
"key_name",
"=",
"key",
".",
"split",
"(",
"'_'",
")",
"[",
"0",
"]",
",",
"'_'",
".",
"join",
"(",
"key",
".",
"split",
"(",
"'_'",
")",
"[",
"1",
":",
"]",
")",
"if",
"key_group",
"in",
"decoder",
".",
"diff_metrics",
".",
"keys",
"(",
")",
":",
"if",
"key_name",
"in",
"decoder",
".",
"diff_metrics",
"[",
"key_group",
"]",
":",
"decoded_key",
"=",
"decoder",
".",
"find_common_names",
"(",
"key",
")",
"if",
"self",
".",
"prev_check",
":",
"try",
":",
"value",
"=",
"jsn",
"[",
"ts",
"]",
"[",
"key",
"]",
"-",
"self",
".",
"prev_check",
"[",
"key",
"]",
"except",
"KeyError",
":",
"logger",
".",
"debug",
"(",
"'There is no diff value for metric %s.\\n'",
"'Timestamp: %s. Is it initial data?'",
",",
"key",
",",
"ts",
",",
"exc_info",
"=",
"True",
")",
"value",
"=",
"0",
"prepared_results",
"[",
"decoded_key",
"]",
"=",
"value",
"else",
":",
"decoded_key",
"=",
"decoder",
".",
"find_common_names",
"(",
"key",
")",
"prepared_results",
"[",
"decoded_key",
"]",
"=",
"value",
"else",
":",
"decoded_key",
"=",
"decoder",
".",
"find_common_names",
"(",
"key",
")",
"prepared_results",
"[",
"decoded_key",
"]",
"=",
"value",
"self",
".",
"prev_check",
"=",
"jsn",
"[",
"ts",
"]",
"collect",
".",
"append",
"(",
"(",
"ts",
",",
"prepared_results",
")",
")",
"except",
"ValueError",
":",
"logger",
".",
"error",
"(",
"'Telegraf agent send trash to output: %s'",
",",
"chunk",
")",
"logger",
".",
"debug",
"(",
"'Telegraf agent data block w/ trash: %s'",
",",
"exc_info",
"=",
"True",
")",
"return",
"[",
"]",
"except",
"BaseException",
":",
"logger",
".",
"error",
"(",
"'Exception trying to parse agent data: %s'",
",",
"chunk",
",",
"exc_info",
"=",
"True",
")",
"return",
"[",
"]",
"if",
"collect",
":",
"return",
"collect"
] |
d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b
|
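The key-splitting convention from the reader's inline comments, shown on sample keys (the second key is invented).

# The group is the part before the first '_' (minus any '-suffix'); the name
# is everything after the first '_'.
def split_key(key):
    key_group = key.split('_')[0].split('-')[0]
    key_name = '_'.join(key.split('_')[1:])
    return key_group, key_name

print(split_key('diskio-sda1_io_time'))  # ('diskio', 'io_time')
print(split_key('net-eth0_bytes_sent'))  # ('net', 'bytes_sent')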
test
|
StreamConn.subscribe
|
Start subscribing channels.
If the necessary connection isn't open yet, it opens now.
|
alpaca_trade_api/stream2.py
|
async def subscribe(self, channels):
'''Start subscribing channels.
If the necessary connection isn't open yet, it opens now.
'''
ws_channels = []
nats_channels = []
for c in channels:
if c.startswith(('Q.', 'T.', 'A.', 'AM.',)):
nats_channels.append(c)
else:
ws_channels.append(c)
if len(ws_channels) > 0:
await self._ensure_ws()
await self._ws.send(json.dumps({
'action': 'listen',
'data': {
'streams': ws_channels,
}
}))
if len(nats_channels) > 0:
await self._ensure_nats()
await self.polygon.subscribe(nats_channels)
|
async def subscribe(self, channels):
'''Start subscribing channels.
If the necessary connection isn't open yet, it opens now.
'''
ws_channels = []
nats_channels = []
for c in channels:
if c.startswith(('Q.', 'T.', 'A.', 'AM.',)):
nats_channels.append(c)
else:
ws_channels.append(c)
if len(ws_channels) > 0:
await self._ensure_ws()
await self._ws.send(json.dumps({
'action': 'listen',
'data': {
'streams': ws_channels,
}
}))
if len(nats_channels) > 0:
await self._ensure_nats()
await self.polygon.subscribe(nats_channels)
|
[
"Start",
"subscribing",
"channels",
".",
"If",
"the",
"necessary",
"connection",
"isn",
"t",
"open",
"yet",
"it",
"opens",
"now",
"."
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/stream2.py#L76-L99
|
[
"async",
"def",
"subscribe",
"(",
"self",
",",
"channels",
")",
":",
"ws_channels",
"=",
"[",
"]",
"nats_channels",
"=",
"[",
"]",
"for",
"c",
"in",
"channels",
":",
"if",
"c",
".",
"startswith",
"(",
"(",
"'Q.'",
",",
"'T.'",
",",
"'A.'",
",",
"'AM.'",
",",
")",
")",
":",
"nats_channels",
".",
"append",
"(",
"c",
")",
"else",
":",
"ws_channels",
".",
"append",
"(",
"c",
")",
"if",
"len",
"(",
"ws_channels",
")",
">",
"0",
":",
"await",
"self",
".",
"_ensure_ws",
"(",
")",
"await",
"self",
".",
"_ws",
".",
"send",
"(",
"json",
".",
"dumps",
"(",
"{",
"'action'",
":",
"'listen'",
",",
"'data'",
":",
"{",
"'streams'",
":",
"ws_channels",
",",
"}",
"}",
")",
")",
"if",
"len",
"(",
"nats_channels",
")",
">",
"0",
":",
"await",
"self",
".",
"_ensure_nats",
"(",
")",
"await",
"self",
".",
"polygon",
".",
"subscribe",
"(",
"nats_channels",
")"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
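The routing rule from subscribe() in isolation: Polygon quote/trade/aggregate prefixes go to NATS, everything else to the websocket. The channel names are illustrative.

channels = ['trade_updates', 'account_updates', 'Q.AAPL', 'AM.TSLA']
nats_channels = [c for c in channels if c.startswith(('Q.', 'T.', 'A.', 'AM.'))]
ws_channels = [c for c in channels if not c.startswith(('Q.', 'T.', 'A.', 'AM.'))]
print(ws_channels)    # ['trade_updates', 'account_updates']
print(nats_channels)  # ['Q.AAPL', 'AM.TSLA']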
test
|
StreamConn.run
|
Run forever and block until exception is raised.
initial_channels are the channels to start with.
|
alpaca_trade_api/stream2.py
|
def run(self, initial_channels=[]):
        '''Run forever and block until exception is raised.
        initial_channels are the channels to start with.
'''
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(self.subscribe(initial_channels))
loop.run_forever()
finally:
loop.run_until_complete(self.close())
|
def run(self, initial_channels=[]):
        '''Run forever and block until exception is raised.
        initial_channels are the channels to start with.
'''
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(self.subscribe(initial_channels))
loop.run_forever()
finally:
loop.run_until_complete(self.close())
|
[
"Run",
"forever",
"and",
"block",
"until",
"exception",
"is",
"rasised",
".",
"initial_channels",
"is",
"the",
"channels",
"to",
"start",
"with",
"."
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/stream2.py#L101-L110
|
[
"def",
"run",
"(",
"self",
",",
"initial_channels",
"=",
"[",
"]",
")",
":",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"try",
":",
"loop",
".",
"run_until_complete",
"(",
"self",
".",
"subscribe",
"(",
"initial_channels",
")",
")",
"loop",
".",
"run_forever",
"(",
")",
"finally",
":",
"loop",
".",
"run_until_complete",
"(",
"self",
".",
"close",
"(",
")",
")"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
test
|
StreamConn.close
|
Close any open connections
|
alpaca_trade_api/stream2.py
|
async def close(self):
        '''Close any open connections'''
if self._ws is not None:
await self._ws.close()
if self.polygon is not None:
await self.polygon.close()
|
async def close(self):
        '''Close any open connections'''
if self._ws is not None:
await self._ws.close()
if self.polygon is not None:
await self.polygon.close()
|
[
"Close",
"any",
"of",
"open",
"connections"
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/stream2.py#L112-L117
|
[
"async",
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ws",
"is",
"not",
"None",
":",
"await",
"self",
".",
"_ws",
".",
"close",
"(",
")",
"if",
"self",
".",
"polygon",
"is",
"not",
"None",
":",
"await",
"self",
".",
"polygon",
".",
"close",
"(",
")"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
test
|
BarSet.df
|
## Experimental
|
alpaca_trade_api/entity.py
|
def df(self):
'''## Experimental '''
if not hasattr(self, '_df'):
dfs = []
for symbol, bars in self.items():
df = bars.df.copy()
df.columns = pd.MultiIndex.from_product(
[[symbol, ], df.columns])
dfs.append(df)
if len(dfs) == 0:
self._df = pd.DataFrame()
else:
self._df = pd.concat(dfs, axis=1)
return self._df
|
def df(self):
'''## Experimental '''
if not hasattr(self, '_df'):
dfs = []
for symbol, bars in self.items():
df = bars.df.copy()
df.columns = pd.MultiIndex.from_product(
[[symbol, ], df.columns])
dfs.append(df)
if len(dfs) == 0:
self._df = pd.DataFrame()
else:
self._df = pd.concat(dfs, axis=1)
return self._df
|
[
"##",
"Experimental"
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/entity.py#L102-L115
|
[
"def",
"df",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_df'",
")",
":",
"dfs",
"=",
"[",
"]",
"for",
"symbol",
",",
"bars",
"in",
"self",
".",
"items",
"(",
")",
":",
"df",
"=",
"bars",
".",
"df",
".",
"copy",
"(",
")",
"df",
".",
"columns",
"=",
"pd",
".",
"MultiIndex",
".",
"from_product",
"(",
"[",
"[",
"symbol",
",",
"]",
",",
"df",
".",
"columns",
"]",
")",
"dfs",
".",
"append",
"(",
"df",
")",
"if",
"len",
"(",
"dfs",
")",
"==",
"0",
":",
"self",
".",
"_df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"else",
":",
"self",
".",
"_df",
"=",
"pd",
".",
"concat",
"(",
"dfs",
",",
"axis",
"=",
"1",
")",
"return",
"self",
".",
"_df"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
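The same MultiIndex construction BarSet.df performs, demonstrated on toy frames with invented prices.

import pandas as pd

dfs = []
for symbol, closes in [('AAPL', [170.0, 171.2]), ('TSLA', [250.0, 248.5])]:
    df = pd.DataFrame({'close': closes})
    # lift each symbol's columns under a (symbol, column) pair
    df.columns = pd.MultiIndex.from_product([[symbol], df.columns])
    dfs.append(df)
combined = pd.concat(dfs, axis=1)   # per-symbol frames side by side
print(combined[('AAPL', 'close')].tolist())  # [170.0, 171.2]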
test
|
REST._one_request
|
Perform one request, possibly raising RetryException in the case
the response is 429. Otherwise, if the error text contains the "code" string,
then it decodes to a json object and raises APIError.
Returns the body json on a 200 status.
|
alpaca_trade_api/rest.py
|
def _one_request(self, method, url, opts, retry):
'''
Perform one request, possibly raising RetryException in the case
        the response is 429. Otherwise, if the error text contains the "code" string,
        then it decodes to a json object and raises APIError.
        Returns the body json on a 200 status.
'''
retry_codes = self._retry_codes
resp = self._session.request(method, url, **opts)
try:
resp.raise_for_status()
except HTTPError as http_error:
# retry if we hit Rate Limit
if resp.status_code in retry_codes and retry > 0:
raise RetryException()
if 'code' in resp.text:
error = resp.json()
if 'code' in error:
raise APIError(error, http_error)
else:
raise
if resp.text != '':
return resp.json()
return None
|
def _one_request(self, method, url, opts, retry):
'''
Perform one request, possibly raising RetryException in the case
        the response is 429. Otherwise, if the error text contains the "code" string,
        then it decodes to a json object and raises APIError.
        Returns the body json on a 200 status.
'''
retry_codes = self._retry_codes
resp = self._session.request(method, url, **opts)
try:
resp.raise_for_status()
except HTTPError as http_error:
# retry if we hit Rate Limit
if resp.status_code in retry_codes and retry > 0:
raise RetryException()
if 'code' in resp.text:
error = resp.json()
if 'code' in error:
raise APIError(error, http_error)
else:
raise
if resp.text != '':
return resp.json()
return None
|
[
"Perform",
"one",
"request",
"possibly",
"raising",
"RetryException",
"in",
"the",
"case",
"the",
"response",
"is",
"429",
".",
"Otherwise",
"if",
"error",
"text",
"contain",
"code",
"string",
"then",
"it",
"decodes",
"to",
"json",
"object",
"and",
"returns",
"APIError",
".",
"Returns",
"the",
"body",
"json",
"in",
"the",
"200",
"status",
"."
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L111-L134
|
[
"def",
"_one_request",
"(",
"self",
",",
"method",
",",
"url",
",",
"opts",
",",
"retry",
")",
":",
"retry_codes",
"=",
"self",
".",
"_retry_codes",
"resp",
"=",
"self",
".",
"_session",
".",
"request",
"(",
"method",
",",
"url",
",",
"*",
"*",
"opts",
")",
"try",
":",
"resp",
".",
"raise_for_status",
"(",
")",
"except",
"HTTPError",
"as",
"http_error",
":",
"# retry if we hit Rate Limit",
"if",
"resp",
".",
"status_code",
"in",
"retry_codes",
"and",
"retry",
">",
"0",
":",
"raise",
"RetryException",
"(",
")",
"if",
"'code'",
"in",
"resp",
".",
"text",
":",
"error",
"=",
"resp",
".",
"json",
"(",
")",
"if",
"'code'",
"in",
"error",
":",
"raise",
"APIError",
"(",
"error",
",",
"http_error",
")",
"else",
":",
"raise",
"if",
"resp",
".",
"text",
"!=",
"''",
":",
"return",
"resp",
".",
"json",
"(",
")",
"return",
"None"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
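A sketch of the retry loop implied by _one_request's retry counter; the names mirror the record above, but the loop itself is illustrative, not the library's actual _request implementation.

import time

class RetryException(Exception):
    pass

def request_with_retries(one_request, retry=3, retry_wait=1):
    # RetryException means back off and try again while retries remain
    while retry >= 0:
        try:
            return one_request(retry)
        except RetryException:
            time.sleep(retry_wait)
            retry -= 1
    return None

calls = []
def flaky(retry):
    calls.append(retry)
    if len(calls) < 3:
        raise RetryException()   # e.g. a rate-limited (429) response
    return {'ok': True}

print(request_with_retries(flaky))  # {'ok': True} after two retries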
test
|
REST.list_orders
|
Get a list of orders
https://docs.alpaca.markets/web-api/orders/#get-a-list-of-orders
|
alpaca_trade_api/rest.py
|
def list_orders(self, status=None, limit=None, after=None, until=None,
direction=None, params=None):
'''
Get a list of orders
https://docs.alpaca.markets/web-api/orders/#get-a-list-of-orders
'''
if params is None:
params = dict()
if limit is not None:
params['limit'] = limit
if after is not None:
params['after'] = after
if until is not None:
params['until'] = until
if direction is not None:
params['direction'] = direction
if status is not None:
params['status'] = status
resp = self.get('/orders', params)
return [Order(o) for o in resp]
|
def list_orders(self, status=None, limit=None, after=None, until=None,
direction=None, params=None):
'''
Get a list of orders
https://docs.alpaca.markets/web-api/orders/#get-a-list-of-orders
'''
if params is None:
params = dict()
if limit is not None:
params['limit'] = limit
if after is not None:
params['after'] = after
if until is not None:
params['until'] = until
if direction is not None:
params['direction'] = direction
if status is not None:
params['status'] = status
resp = self.get('/orders', params)
return [Order(o) for o in resp]
|
[
"Get",
"a",
"list",
"of",
"orders",
"https",
":",
"//",
"docs",
".",
"alpaca",
".",
"markets",
"/",
"web",
"-",
"api",
"/",
"orders",
"/",
"#get",
"-",
"a",
"-",
"list",
"-",
"of",
"-",
"orders"
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L154-L173
|
[
"def",
"list_orders",
"(",
"self",
",",
"status",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"after",
"=",
"None",
",",
"until",
"=",
"None",
",",
"direction",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"dict",
"(",
")",
"if",
"limit",
"is",
"not",
"None",
":",
"params",
"[",
"'limit'",
"]",
"=",
"limit",
"if",
"after",
"is",
"not",
"None",
":",
"params",
"[",
"'after'",
"]",
"=",
"after",
"if",
"until",
"is",
"not",
"None",
":",
"params",
"[",
"'until'",
"]",
"=",
"until",
"if",
"direction",
"is",
"not",
"None",
":",
"params",
"[",
"'direction'",
"]",
"=",
"direction",
"if",
"status",
"is",
"not",
"None",
":",
"params",
"[",
"'status'",
"]",
"=",
"status",
"resp",
"=",
"self",
".",
"get",
"(",
"'/orders'",
",",
"params",
")",
"return",
"[",
"Order",
"(",
"o",
")",
"for",
"o",
"in",
"resp",
"]"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
test
|
REST.submit_order
|
Request a new order
|
alpaca_trade_api/rest.py
|
def submit_order(self, symbol, qty, side, type, time_in_force,
limit_price=None, stop_price=None, client_order_id=None):
'''Request a new order'''
params = {
'symbol': symbol,
'qty': qty,
'side': side,
'type': type,
'time_in_force': time_in_force,
}
if limit_price is not None:
params['limit_price'] = limit_price
if stop_price is not None:
params['stop_price'] = stop_price
if client_order_id is not None:
params['client_order_id'] = client_order_id
resp = self.post('/orders', params)
return Order(resp)
|
def submit_order(self, symbol, qty, side, type, time_in_force,
limit_price=None, stop_price=None, client_order_id=None):
'''Request a new order'''
params = {
'symbol': symbol,
'qty': qty,
'side': side,
'type': type,
'time_in_force': time_in_force,
}
if limit_price is not None:
params['limit_price'] = limit_price
if stop_price is not None:
params['stop_price'] = stop_price
if client_order_id is not None:
params['client_order_id'] = client_order_id
resp = self.post('/orders', params)
return Order(resp)
|
[
"Request",
"a",
"new",
"order"
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L175-L192
|
[
"def",
"submit_order",
"(",
"self",
",",
"symbol",
",",
"qty",
",",
"side",
",",
"type",
",",
"time_in_force",
",",
"limit_price",
"=",
"None",
",",
"stop_price",
"=",
"None",
",",
"client_order_id",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'symbol'",
":",
"symbol",
",",
"'qty'",
":",
"qty",
",",
"'side'",
":",
"side",
",",
"'type'",
":",
"type",
",",
"'time_in_force'",
":",
"time_in_force",
",",
"}",
"if",
"limit_price",
"is",
"not",
"None",
":",
"params",
"[",
"'limit_price'",
"]",
"=",
"limit_price",
"if",
"stop_price",
"is",
"not",
"None",
":",
"params",
"[",
"'stop_price'",
"]",
"=",
"stop_price",
"if",
"client_order_id",
"is",
"not",
"None",
":",
"params",
"[",
"'client_order_id'",
"]",
"=",
"client_order_id",
"resp",
"=",
"self",
".",
"post",
"(",
"'/orders'",
",",
"params",
")",
"return",
"Order",
"(",
"resp",
")"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
test
|
REST.get_order
|
Get an order
|
alpaca_trade_api/rest.py
|
def get_order(self, order_id):
'''Get an order'''
resp = self.get('/orders/{}'.format(order_id))
return Order(resp)
|
def get_order(self, order_id):
'''Get an order'''
resp = self.get('/orders/{}'.format(order_id))
return Order(resp)
|
[
"Get",
"an",
"order"
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L201-L204
|
[
"def",
"get_order",
"(",
"self",
",",
"order_id",
")",
":",
"resp",
"=",
"self",
".",
"get",
"(",
"'/orders/{}'",
".",
"format",
"(",
"order_id",
")",
")",
"return",
"Order",
"(",
"resp",
")"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
test
|
REST.get_position
|
Get an open position
|
alpaca_trade_api/rest.py
|
def get_position(self, symbol):
'''Get an open position'''
resp = self.get('/positions/{}'.format(symbol))
return Position(resp)
|
def get_position(self, symbol):
'''Get an open position'''
resp = self.get('/positions/{}'.format(symbol))
return Position(resp)
|
[
"Get",
"an",
"open",
"position"
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L215-L218
|
[
"def",
"get_position",
"(",
"self",
",",
"symbol",
")",
":",
"resp",
"=",
"self",
".",
"get",
"(",
"'/positions/{}'",
".",
"format",
"(",
"symbol",
")",
")",
"return",
"Position",
"(",
"resp",
")"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
test
|
REST.list_assets
|
Get a list of assets
|
alpaca_trade_api/rest.py
|
def list_assets(self, status=None, asset_class=None):
'''Get a list of assets'''
params = {
'status': status,
            'asset_class': asset_class,
}
resp = self.get('/assets', params)
return [Asset(o) for o in resp]
|
def list_assets(self, status=None, asset_class=None):
'''Get a list of assets'''
params = {
'status': status,
            'asset_class': asset_class,
}
resp = self.get('/assets', params)
return [Asset(o) for o in resp]
|
[
"Get",
"a",
"list",
"of",
"assets"
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L220-L227
|
[
"def",
"list_assets",
"(",
"self",
",",
"status",
"=",
"None",
",",
"asset_class",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'status'",
":",
"status",
",",
"'assert_class'",
":",
"asset_class",
",",
"}",
"resp",
"=",
"self",
".",
"get",
"(",
"'/assets'",
",",
"params",
")",
"return",
"[",
"Asset",
"(",
"o",
")",
"for",
"o",
"in",
"resp",
"]"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
test
|
REST.get_asset
|
Get an asset
|
alpaca_trade_api/rest.py
|
def get_asset(self, symbol):
'''Get an asset'''
resp = self.get('/assets/{}'.format(symbol))
return Asset(resp)
|
def get_asset(self, symbol):
'''Get an asset'''
resp = self.get('/assets/{}'.format(symbol))
return Asset(resp)
|
[
"Get",
"an",
"asset"
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L229-L232
|
[
"def",
"get_asset",
"(",
"self",
",",
"symbol",
")",
":",
"resp",
"=",
"self",
".",
"get",
"(",
"'/assets/{}'",
".",
"format",
"(",
"symbol",
")",
")",
"return",
"Asset",
"(",
"resp",
")"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
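The two single-entity getters above (get_asset, get_position) follow the same GET-then-wrap pattern as get_order. A hedged sketch, with `api` as an assumed client instance:

asset = api.get_asset('AAPL')        # GET /assets/AAPL    -> Asset
position = api.get_position('AAPL')  # GET /positions/AAPL -> Position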
test
|
REST.get_barset
|
Get BarSet(dict[str]->list[Bar])
The parameter symbols can be either a comma-separated string
or a list of strings. Each symbol becomes the key of
the returned value.
|
alpaca_trade_api/rest.py
|
def get_barset(self,
symbols,
timeframe,
limit=None,
start=None,
end=None,
after=None,
until=None):
'''Get BarSet(dict[str]->list[Bar])
    The parameter symbols can be either a comma-separated string
    or a list of strings. Each symbol becomes the key of
the returned value.
'''
if not isinstance(symbols, str):
symbols = ','.join(symbols)
params = {
'symbols': symbols,
}
if limit is not None:
params['limit'] = limit
if start is not None:
params['start'] = start
if end is not None:
params['end'] = end
if after is not None:
params['after'] = after
if until is not None:
params['until'] = until
resp = self.data_get('/bars/{}'.format(timeframe), params)
return BarSet(resp)
|
def get_barset(self,
symbols,
timeframe,
limit=None,
start=None,
end=None,
after=None,
until=None):
'''Get BarSet(dict[str]->list[Bar])
    The parameter symbols can be either a comma-separated string
    or a list of strings. Each symbol becomes the key of
the returned value.
'''
if not isinstance(symbols, str):
symbols = ','.join(symbols)
params = {
'symbols': symbols,
}
if limit is not None:
params['limit'] = limit
if start is not None:
params['start'] = start
if end is not None:
params['end'] = end
if after is not None:
params['after'] = after
if until is not None:
params['until'] = until
resp = self.data_get('/bars/{}'.format(timeframe), params)
return BarSet(resp)
|
[
"Get",
"BarSet",
"(",
"dict",
"[",
"str",
"]",
"-",
">",
"list",
"[",
"Bar",
"]",
")",
"The",
"parameter",
"symbols",
"can",
"be",
"either",
"a",
"comma",
"-",
"split",
"string",
"or",
"a",
"list",
"of",
"string",
".",
"Each",
"symbol",
"becomes",
"the",
"key",
"of",
"the",
"returned",
"value",
"."
] |
alpacahq/alpaca-trade-api-python
|
python
|
https://github.com/alpacahq/alpaca-trade-api-python/blob/9c9dea3b4a37c909f88391b202e86ff356a8b4d7/alpaca_trade_api/rest.py#L234-L263
|
[
"def",
"get_barset",
"(",
"self",
",",
"symbols",
",",
"timeframe",
",",
"limit",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"after",
"=",
"None",
",",
"until",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"symbols",
",",
"str",
")",
":",
"symbols",
"=",
"','",
".",
"join",
"(",
"symbols",
")",
"params",
"=",
"{",
"'symbols'",
":",
"symbols",
",",
"}",
"if",
"limit",
"is",
"not",
"None",
":",
"params",
"[",
"'limit'",
"]",
"=",
"limit",
"if",
"start",
"is",
"not",
"None",
":",
"params",
"[",
"'start'",
"]",
"=",
"start",
"if",
"end",
"is",
"not",
"None",
":",
"params",
"[",
"'end'",
"]",
"=",
"end",
"if",
"after",
"is",
"not",
"None",
":",
"params",
"[",
"'after'",
"]",
"=",
"after",
"if",
"until",
"is",
"not",
"None",
":",
"params",
"[",
"'until'",
"]",
"=",
"until",
"resp",
"=",
"self",
".",
"data_get",
"(",
"'/bars/{}'",
".",
"format",
"(",
"timeframe",
")",
",",
"params",
")",
"return",
"BarSet",
"(",
"resp",
")"
] |
9c9dea3b4a37c909f88391b202e86ff356a8b4d7
|
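Because get_barset joins a symbol list into a comma-separated string and keys the returned BarSet by symbol, the two call styles below are equivalent. A hedged sketch (`api` is an assumed client instance):

bars = api.get_barset(['AAPL', 'TSLA'], '1D', limit=10)
same = api.get_barset('AAPL,TSLA', '1D', limit=10)  # equivalent call
aapl_bars = bars['AAPL']  # BarSet is keyed by symbol: dict[str] -> list[Bar]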
test
|
lambda_solid
|
(decorator) Create a simple solid.
This shortcut allows the creation of simple solids that do not require
configuration and whose implementations do not require a context.
Lambda solids take inputs and produce a single output. The body of the function
should return a single value.
Args:
name (str): Name of solid.
inputs (list[InputDefinition]): List of inputs.
output (OutputDefinition): The output of the solid. Defaults to ``OutputDefinition()``.
description (str): Solid description.
Examples:
.. code-block:: python
@lambda_solid
def hello_world():
return 'hello'
@lambda_solid(inputs=[InputDefinition(name='foo')])
def hello_world(foo):
return foo
|
python_modules/dagster/dagster/core/definitions/decorators.py
|
def lambda_solid(name=None, inputs=None, output=None, description=None):
'''(decorator) Create a simple solid.
This shortcut allows the creation of simple solids that do not require
configuration and whose implementations do not require a context.
Lambda solids take inputs and produce a single output. The body of the function
should return a single value.
Args:
name (str): Name of solid.
inputs (list[InputDefinition]): List of inputs.
output (OutputDefinition): The output of the solid. Defaults to ``OutputDefinition()``.
description (str): Solid description.
Examples:
.. code-block:: python
@lambda_solid
def hello_world():
return 'hello'
@lambda_solid(inputs=[InputDefinition(name='foo')])
def hello_world(foo):
return foo
'''
output = output or OutputDefinition()
if callable(name):
check.invariant(inputs is None)
check.invariant(description is None)
return _LambdaSolid(output=output)(name)
return _LambdaSolid(name=name, inputs=inputs, output=output, description=description)
|
def lambda_solid(name=None, inputs=None, output=None, description=None):
'''(decorator) Create a simple solid.
This shortcut allows the creation of simple solids that do not require
configuration and whose implementations do not require a context.
Lambda solids take inputs and produce a single output. The body of the function
should return a single value.
Args:
name (str): Name of solid.
inputs (list[InputDefinition]): List of inputs.
output (OutputDefinition): The output of the solid. Defaults to ``OutputDefinition()``.
description (str): Solid description.
Examples:
.. code-block:: python
@lambda_solid
def hello_world():
return 'hello'
@lambda_solid(inputs=[InputDefinition(name='foo')])
def hello_world(foo):
return foo
'''
output = output or OutputDefinition()
if callable(name):
check.invariant(inputs is None)
check.invariant(description is None)
return _LambdaSolid(output=output)(name)
return _LambdaSolid(name=name, inputs=inputs, output=output, description=description)
|
[
"(",
"decorator",
")",
"Create",
"a",
"simple",
"solid",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/decorators.py#L135-L170
|
[
"def",
"lambda_solid",
"(",
"name",
"=",
"None",
",",
"inputs",
"=",
"None",
",",
"output",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"output",
"=",
"output",
"or",
"OutputDefinition",
"(",
")",
"if",
"callable",
"(",
"name",
")",
":",
"check",
".",
"invariant",
"(",
"inputs",
"is",
"None",
")",
"check",
".",
"invariant",
"(",
"description",
"is",
"None",
")",
"return",
"_LambdaSolid",
"(",
"output",
"=",
"output",
")",
"(",
"name",
")",
"return",
"_LambdaSolid",
"(",
"name",
"=",
"name",
",",
"inputs",
"=",
"inputs",
",",
"output",
"=",
"output",
",",
"description",
"=",
"description",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
solid
|
(decorator) Create a solid with specified parameters.
This shortcut simplifies the core solid API by exploding arguments into kwargs of the
transform function and omitting additional parameters when they are not needed.
Parameters are otherwise as in the core API, :py:class:`SolidDefinition`.
The decorated function will be used as the solid's transform function. Unlike in the core API,
the transform function does not have to yield :py:class:`Result` object directly. Several
simpler alternatives are available:
1. Return a value. This is returned as a :py:class:`Result` for a single output solid.
2. Return a :py:class:`Result`. Works like yielding result.
3. Return an instance of :py:class:`MultipleResults`. Works like yielding several results for
multiple outputs. Useful for solids that have multiple outputs.
4. Yield :py:class:`Result`. Same as default transform behaviour.
Args:
name (str): Name of solid.
inputs (list[InputDefinition]): List of inputs.
outputs (list[OutputDefinition]): List of outputs.
config_field (Field):
The configuration for this solid.
description (str): Description of this solid.
Examples:
.. code-block:: python
@solid
def hello_world(_context):
print('hello')
@solid()
def hello_world(_context):
print('hello')
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
return {'foo': 'bar'}
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
return Result(value={'foo': 'bar'})
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
yield Result(value={'foo': 'bar'})
@solid(outputs=[
OutputDefinition(name="left"),
OutputDefinition(name="right"),
])
def hello_world(_context):
return MultipleResults.from_dict({
'left': {'foo': 'left'},
'right': {'foo': 'right'},
})
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()]
)
def hello_world(_context, foo):
return foo
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()],
)
def hello_world(context, foo):
context.log.info('log something')
return foo
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()],
config_field=Field(types.Dict({'str_value' : Field(types.String)})),
)
def hello_world(context, foo):
# context.solid_config is a dictionary with 'str_value' key
return foo + context.solid_config['str_value']
|
python_modules/dagster/dagster/core/definitions/decorators.py
|
def solid(name=None, inputs=None, outputs=None, config_field=None, description=None):
'''(decorator) Create a solid with specified parameters.
This shortcut simplifies the core solid API by exploding arguments into kwargs of the
transform function and omitting additional parameters when they are not needed.
Parameters are otherwise as in the core API, :py:class:`SolidDefinition`.
The decorated function will be used as the solid's transform function. Unlike in the core API,
the transform function does not have to yield :py:class:`Result` object directly. Several
simpler alternatives are available:
1. Return a value. This is returned as a :py:class:`Result` for a single output solid.
2. Return a :py:class:`Result`. Works like yielding result.
3. Return an instance of :py:class:`MultipleResults`. Works like yielding several results for
multiple outputs. Useful for solids that have multiple outputs.
4. Yield :py:class:`Result`. Same as default transform behaviour.
Args:
name (str): Name of solid.
inputs (list[InputDefinition]): List of inputs.
outputs (list[OutputDefinition]): List of outputs.
config_field (Field):
The configuration for this solid.
description (str): Description of this solid.
Examples:
.. code-block:: python
@solid
def hello_world(_context):
print('hello')
@solid()
def hello_world(_context):
print('hello')
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
return {'foo': 'bar'}
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
return Result(value={'foo': 'bar'})
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
yield Result(value={'foo': 'bar'})
@solid(outputs=[
OutputDefinition(name="left"),
OutputDefinition(name="right"),
])
def hello_world(_context):
return MultipleResults.from_dict({
'left': {'foo': 'left'},
'right': {'foo': 'right'},
})
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()]
)
def hello_world(_context, foo):
return foo
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()],
)
def hello_world(context, foo):
context.log.info('log something')
return foo
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()],
config_field=Field(types.Dict({'str_value' : Field(types.String)})),
)
def hello_world(context, foo):
# context.solid_config is a dictionary with 'str_value' key
return foo + context.solid_config['str_value']
'''
# This case is for when decorator is used bare, without arguments. e.g. @solid versus @solid()
if callable(name):
check.invariant(inputs is None)
check.invariant(outputs is None)
check.invariant(description is None)
check.invariant(config_field is None)
return _Solid()(name)
return _Solid(
name=name,
inputs=inputs,
outputs=outputs,
config_field=config_field,
description=description,
)
|
def solid(name=None, inputs=None, outputs=None, config_field=None, description=None):
'''(decorator) Create a solid with specified parameters.
This shortcut simplifies the core solid API by exploding arguments into kwargs of the
transform function and omitting additional parameters when they are not needed.
Parameters are otherwise as in the core API, :py:class:`SolidDefinition`.
The decorated function will be used as the solid's transform function. Unlike in the core API,
the transform function does not have to yield :py:class:`Result` object directly. Several
simpler alternatives are available:
1. Return a value. This is returned as a :py:class:`Result` for a single output solid.
2. Return a :py:class:`Result`. Works like yielding result.
3. Return an instance of :py:class:`MultipleResults`. Works like yielding several results for
multiple outputs. Useful for solids that have multiple outputs.
4. Yield :py:class:`Result`. Same as default transform behaviour.
Args:
name (str): Name of solid.
inputs (list[InputDefinition]): List of inputs.
outputs (list[OutputDefinition]): List of outputs.
config_field (Field):
The configuration for this solid.
description (str): Description of this solid.
Examples:
.. code-block:: python
@solid
def hello_world(_context):
print('hello')
@solid()
def hello_world(_context):
print('hello')
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
return {'foo': 'bar'}
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
return Result(value={'foo': 'bar'})
@solid(outputs=[OutputDefinition()])
def hello_world(_context):
yield Result(value={'foo': 'bar'})
@solid(outputs=[
OutputDefinition(name="left"),
OutputDefinition(name="right"),
])
def hello_world(_context):
return MultipleResults.from_dict({
'left': {'foo': 'left'},
'right': {'foo': 'right'},
})
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()]
)
def hello_world(_context, foo):
return foo
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()],
)
def hello_world(context, foo):
context.log.info('log something')
return foo
@solid(
inputs=[InputDefinition(name="foo")],
outputs=[OutputDefinition()],
config_field=Field(types.Dict({'str_value' : Field(types.String)})),
)
def hello_world(context, foo):
# context.solid_config is a dictionary with 'str_value' key
return foo + context.solid_config['str_value']
'''
# This case is for when decorator is used bare, without arguments. e.g. @solid versus @solid()
if callable(name):
check.invariant(inputs is None)
check.invariant(outputs is None)
check.invariant(description is None)
check.invariant(config_field is None)
return _Solid()(name)
return _Solid(
name=name,
inputs=inputs,
outputs=outputs,
config_field=config_field,
description=description,
)
|
[
"(",
"decorator",
")",
"Create",
"a",
"solid",
"with",
"specified",
"parameters",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/decorators.py#L173-L271
|
[
"def",
"solid",
"(",
"name",
"=",
"None",
",",
"inputs",
"=",
"None",
",",
"outputs",
"=",
"None",
",",
"config_field",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"# This case is for when decorator is used bare, without arguments. e.g. @solid versus @solid()",
"if",
"callable",
"(",
"name",
")",
":",
"check",
".",
"invariant",
"(",
"inputs",
"is",
"None",
")",
"check",
".",
"invariant",
"(",
"outputs",
"is",
"None",
")",
"check",
".",
"invariant",
"(",
"description",
"is",
"None",
")",
"check",
".",
"invariant",
"(",
"config_field",
"is",
"None",
")",
"return",
"_Solid",
"(",
")",
"(",
"name",
")",
"return",
"_Solid",
"(",
"name",
"=",
"name",
",",
"inputs",
"=",
"inputs",
",",
"outputs",
"=",
"outputs",
",",
"config_field",
"=",
"config_field",
",",
"description",
"=",
"description",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
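To see a @solid run end to end, it can be wired into a pipeline and executed. A minimal sketch, assuming PipelineDefinition and execute_pipeline are the dagster entry points for this version of the library:

from dagster import PipelineDefinition, execute_pipeline, solid

@solid
def hello_world(context):
    context.log.info('hello')

# Assumed construction and result API for this era of dagster.
pipeline = PipelineDefinition(name='hello_pipeline', solids=[hello_world])
result = execute_pipeline(pipeline)
assert result.success  # assumed result attribute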
test
|
MultipleResults.from_dict
|
Create a new ``MultipleResults`` object from a dictionary.
Keys of the dictionary are unpacked into result names.
Args:
result_dict (dict) - The dictionary to unpack.
Returns:
(:py:class:`MultipleResults <dagster.MultipleResults>`) A new ``MultipleResults`` object
|
python_modules/dagster/dagster/core/definitions/decorators.py
|
def from_dict(result_dict):
'''Create a new ``MultipleResults`` object from a dictionary.
Keys of the dictionary are unpacked into result names.
Args:
result_dict (dict) - The dictionary to unpack.
Returns:
(:py:class:`MultipleResults <dagster.MultipleResults>`) A new ``MultipleResults`` object
'''
check.dict_param(result_dict, 'result_dict', key_type=str)
results = []
for name, value in result_dict.items():
results.append(Result(value, name))
return MultipleResults(*results)
|
def from_dict(result_dict):
'''Create a new ``MultipleResults`` object from a dictionary.
Keys of the dictionary are unpacked into result names.
Args:
result_dict (dict) - The dictionary to unpack.
Returns:
(:py:class:`MultipleResults <dagster.MultipleResults>`) A new ``MultipleResults`` object
'''
check.dict_param(result_dict, 'result_dict', key_type=str)
results = []
for name, value in result_dict.items():
results.append(Result(value, name))
return MultipleResults(*results)
|
[
"Create",
"a",
"new",
"MultipleResults",
"object",
"from",
"a",
"dictionary",
".",
"Keys",
"of",
"the",
"dictionary",
"are",
"unpacked",
"into",
"result",
"names",
".",
"Args",
":",
"result_dict",
"(",
"dict",
")",
"-",
"The",
"dictionary",
"to",
"unpack",
".",
"Returns",
":",
"(",
":",
"py",
":",
"class",
":",
"MultipleResults",
"<dagster",
".",
"MultipleResults",
">",
")",
"A",
"new",
"MultipleResults",
"object"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/decorators.py#L64-L80
|
[
"def",
"from_dict",
"(",
"result_dict",
")",
":",
"check",
".",
"dict_param",
"(",
"result_dict",
",",
"'result_dict'",
",",
"key_type",
"=",
"str",
")",
"results",
"=",
"[",
"]",
"for",
"name",
",",
"value",
"in",
"result_dict",
".",
"items",
"(",
")",
":",
"results",
".",
"append",
"(",
"Result",
"(",
"value",
",",
"name",
")",
")",
"return",
"MultipleResults",
"(",
"*",
"results",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
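A hedged usage sketch of the unpacking shown above: each dict key becomes the name of a Result carried by the returned MultipleResults instance.

results = MultipleResults.from_dict({'left': 1, 'right': 2})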
test
|
create_joining_subplan
|
This captures a common pattern of fanning out a single value to N steps,
where each step has similar structure. The strict requirement here is that each step
must provide an output named by the parameter parallel_step_output.
This takes those steps and then uses a join node to coalesce them so that downstream
steps can depend on a single output.
Currently the join step just does a passthrough with no computation. It remains
to be seen if there should be any work or verification done in this step, especially
in multi-process environments that require persistence between steps.
|
python_modules/dagster/dagster/core/execution_plan/utility.py
|
def create_joining_subplan(
pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output
):
'''
This captures a common pattern of fanning out a single value to N steps,
where each step has similar structure. The strict requirement here is that each step
    must provide an output named by the parameter parallel_step_output.
This takes those steps and then uses a join node to coalesce them so that downstream
steps can depend on a single output.
Currently the join step just does a passthrough with no computation. It remains
to be seen if there should be any work or verification done in this step, especially
in multi-process environments that require persistence between steps.
'''
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.inst_param(solid, 'solid', Solid)
check.str_param(join_step_key, 'join_step_key')
check.list_param(parallel_steps, 'parallel_steps', of_type=ExecutionStep)
check.str_param(parallel_step_output, 'parallel_step_output')
for parallel_step in parallel_steps:
check.invariant(parallel_step.has_step_output(parallel_step_output))
join_step = create_join_step(
pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output
)
output_name = join_step.step_outputs[0].name
return ExecutionValueSubplan(
parallel_steps + [join_step], StepOutputHandle.from_step(join_step, output_name)
)
|
def create_joining_subplan(
pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output
):
'''
This captures a common pattern of fanning out a single value to N steps,
where each step has similar structure. The strict requirement here is that each step
    must provide an output named by the parameter parallel_step_output.
This takes those steps and then uses a join node to coalesce them so that downstream
steps can depend on a single output.
Currently the join step just does a passthrough with no computation. It remains
to be seen if there should be any work or verification done in this step, especially
in multi-process environments that require persistence between steps.
'''
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.inst_param(solid, 'solid', Solid)
check.str_param(join_step_key, 'join_step_key')
check.list_param(parallel_steps, 'parallel_steps', of_type=ExecutionStep)
check.str_param(parallel_step_output, 'parallel_step_output')
for parallel_step in parallel_steps:
check.invariant(parallel_step.has_step_output(parallel_step_output))
join_step = create_join_step(
pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output
)
output_name = join_step.step_outputs[0].name
return ExecutionValueSubplan(
parallel_steps + [join_step], StepOutputHandle.from_step(join_step, output_name)
)
|
[
"This",
"captures",
"a",
"common",
"pattern",
"of",
"fanning",
"out",
"a",
"single",
"value",
"to",
"N",
"steps",
"where",
"each",
"step",
"has",
"similar",
"structure",
".",
"The",
"strict",
"requirement",
"here",
"is",
"that",
"each",
"step",
"must",
"provide",
"an",
"output",
"named",
"the",
"parameters",
"parallel_step_output",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution_plan/utility.py#L60-L91
|
[
"def",
"create_joining_subplan",
"(",
"pipeline_def",
",",
"solid",
",",
"join_step_key",
",",
"parallel_steps",
",",
"parallel_step_output",
")",
":",
"check",
".",
"inst_param",
"(",
"pipeline_def",
",",
"'pipeline_def'",
",",
"PipelineDefinition",
")",
"check",
".",
"inst_param",
"(",
"solid",
",",
"'solid'",
",",
"Solid",
")",
"check",
".",
"str_param",
"(",
"join_step_key",
",",
"'join_step_key'",
")",
"check",
".",
"list_param",
"(",
"parallel_steps",
",",
"'parallel_steps'",
",",
"of_type",
"=",
"ExecutionStep",
")",
"check",
".",
"str_param",
"(",
"parallel_step_output",
",",
"'parallel_step_output'",
")",
"for",
"parallel_step",
"in",
"parallel_steps",
":",
"check",
".",
"invariant",
"(",
"parallel_step",
".",
"has_step_output",
"(",
"parallel_step_output",
")",
")",
"join_step",
"=",
"create_join_step",
"(",
"pipeline_def",
",",
"solid",
",",
"join_step_key",
",",
"parallel_steps",
",",
"parallel_step_output",
")",
"output_name",
"=",
"join_step",
".",
"step_outputs",
"[",
"0",
"]",
".",
"name",
"return",
"ExecutionValueSubplan",
"(",
"parallel_steps",
"+",
"[",
"join_step",
"]",
",",
"StepOutputHandle",
".",
"from_step",
"(",
"join_step",
",",
"output_name",
")",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
gunzipper
|
gunzips /path/to/foo.gz to /path/to/raw/2019/01/01/data.json
|
examples/event-pipeline-demo/event_pipeline_demo/pipelines.py
|
def gunzipper(gzip_file):
'''gunzips /path/to/foo.gz to /path/to/raw/2019/01/01/data.json
'''
# TODO: take date as an input
path_prefix = os.path.dirname(gzip_file)
output_folder = os.path.join(path_prefix, 'raw/2019/01/01')
outfile = os.path.join(output_folder, 'data.json')
if not safe_isfile(outfile):
mkdir_p(output_folder)
with gzip.open(gzip_file, 'rb') as f_in, open(outfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return [path_prefix]
|
def gunzipper(gzip_file):
'''gunzips /path/to/foo.gz to /path/to/raw/2019/01/01/data.json
'''
# TODO: take date as an input
path_prefix = os.path.dirname(gzip_file)
output_folder = os.path.join(path_prefix, 'raw/2019/01/01')
outfile = os.path.join(output_folder, 'data.json')
if not safe_isfile(outfile):
mkdir_p(output_folder)
with gzip.open(gzip_file, 'rb') as f_in, open(outfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return [path_prefix]
|
[
"gunzips",
"/",
"path",
"/",
"to",
"/",
"foo",
".",
"gz",
"to",
"/",
"path",
"/",
"to",
"/",
"raw",
"/",
"2019",
"/",
"01",
"/",
"01",
"/",
"data",
".",
"json"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/examples/event-pipeline-demo/event_pipeline_demo/pipelines.py#L31-L46
|
[
"def",
"gunzipper",
"(",
"gzip_file",
")",
":",
"# TODO: take date as an input",
"path_prefix",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"gzip_file",
")",
"output_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_prefix",
",",
"'raw/2019/01/01'",
")",
"outfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_folder",
",",
"'data.json'",
")",
"if",
"not",
"safe_isfile",
"(",
"outfile",
")",
":",
"mkdir_p",
"(",
"output_folder",
")",
"with",
"gzip",
".",
"open",
"(",
"gzip_file",
",",
"'rb'",
")",
"as",
"f_in",
",",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"f_out",
":",
"shutil",
".",
"copyfileobj",
"(",
"f_in",
",",
"f_out",
")",
"return",
"[",
"path_prefix",
"]"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
_check_key_value_types
|
Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types
specified by key_type, value_type.
|
python_modules/dagster/dagster/check/__init__.py
|
def _check_key_value_types(obj, key_type, value_type, key_check=isinstance, value_check=isinstance):
'''Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types
specified by key_type, value_type.
'''
if not isinstance(obj, dict):
raise_with_traceback(_type_mismatch_error(obj, dict))
if key_type is str:
key_type = string_types
if value_type is str:
value_type = string_types
for key, value in obj.items():
if key_type and not key_check(key, key_type):
raise_with_traceback(
CheckError(
'Key in dictionary mismatches type. Expected {key_type}. Got {obj_repr}'.format(
key_type=repr(key_type), obj_repr=repr(key)
)
)
)
if value_type and not value_check(value, value_type):
raise_with_traceback(
CheckError(
'Value in dictionary mismatches expected type for key {key}. Expected value '
'of type {vtype}. Got value {value} of type {obj_type}.'.format(
vtype=repr(value_type), obj_type=type(value), key=key, value=value
)
)
)
return obj
|
def _check_key_value_types(obj, key_type, value_type, key_check=isinstance, value_check=isinstance):
'''Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types
specified by key_type, value_type.
'''
if not isinstance(obj, dict):
raise_with_traceback(_type_mismatch_error(obj, dict))
if key_type is str:
key_type = string_types
if value_type is str:
value_type = string_types
for key, value in obj.items():
if key_type and not key_check(key, key_type):
raise_with_traceback(
CheckError(
'Key in dictionary mismatches type. Expected {key_type}. Got {obj_repr}'.format(
key_type=repr(key_type), obj_repr=repr(key)
)
)
)
if value_type and not value_check(value, value_type):
raise_with_traceback(
CheckError(
'Value in dictionary mismatches expected type for key {key}. Expected value '
'of type {vtype}. Got value {value} of type {obj_type}.'.format(
vtype=repr(value_type), obj_type=type(value), key=key, value=value
)
)
)
return obj
|
[
"Ensures",
"argument",
"obj",
"is",
"a",
"dictionary",
"and",
"enforces",
"that",
"the",
"keys",
"/",
"values",
"conform",
"to",
"the",
"types",
"specified",
"by",
"key_type",
"value_type",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/check/__init__.py#L328-L359
|
[
"def",
"_check_key_value_types",
"(",
"obj",
",",
"key_type",
",",
"value_type",
",",
"key_check",
"=",
"isinstance",
",",
"value_check",
"=",
"isinstance",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"raise_with_traceback",
"(",
"_type_mismatch_error",
"(",
"obj",
",",
"dict",
")",
")",
"if",
"key_type",
"is",
"str",
":",
"key_type",
"=",
"string_types",
"if",
"value_type",
"is",
"str",
":",
"value_type",
"=",
"string_types",
"for",
"key",
",",
"value",
"in",
"obj",
".",
"items",
"(",
")",
":",
"if",
"key_type",
"and",
"not",
"key_check",
"(",
"key",
",",
"key_type",
")",
":",
"raise_with_traceback",
"(",
"CheckError",
"(",
"'Key in dictionary mismatches type. Expected {key_type}. Got {obj_repr}'",
".",
"format",
"(",
"key_type",
"=",
"repr",
"(",
"key_type",
")",
",",
"obj_repr",
"=",
"repr",
"(",
"key",
")",
")",
")",
")",
"if",
"value_type",
"and",
"not",
"value_check",
"(",
"value",
",",
"value_type",
")",
":",
"raise_with_traceback",
"(",
"CheckError",
"(",
"'Value in dictionary mismatches expected type for key {key}. Expected value '",
"'of type {vtype}. Got value {value} of type {obj_type}.'",
".",
"format",
"(",
"vtype",
"=",
"repr",
"(",
"value_type",
")",
",",
"obj_type",
"=",
"type",
"(",
"value",
")",
",",
"key",
"=",
"key",
",",
"value",
"=",
"value",
")",
")",
")",
"return",
"obj"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
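A hedged demonstration of the checks above: str key/value types are widened to string_types, and a mismatch raises CheckError via raise_with_traceback. The import path is an assumption based on the file path in the record.

from dagster.check import CheckError, _check_key_value_types  # assumed path

_check_key_value_types({'a': 1}, key_type=str, value_type=int)  # returns the dict
try:
    _check_key_value_types({'a': 'x'}, key_type=str, value_type=int)
except CheckError as err:
    print(err)  # 'x' is not an int, so the value check raises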
test
|
dict_param
|
Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise
returns obj.
|
python_modules/dagster/dagster/check/__init__.py
|
def dict_param(obj, param_name, key_type=None, value_type=None):
'''Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise
returns obj.
'''
if not isinstance(obj, dict):
raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name))
if not (key_type or value_type):
return obj
return _check_key_value_types(obj, key_type, value_type)
|
def dict_param(obj, param_name, key_type=None, value_type=None):
'''Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise
returns obj.
'''
if not isinstance(obj, dict):
raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name))
if not (key_type or value_type):
return obj
return _check_key_value_types(obj, key_type, value_type)
|
[
"Ensures",
"argument",
"obj",
"is",
"a",
"native",
"Python",
"dictionary",
"raises",
"an",
"exception",
"if",
"not",
"and",
"otherwise",
"returns",
"obj",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/check/__init__.py#L362-L372
|
[
"def",
"dict_param",
"(",
"obj",
",",
"param_name",
",",
"key_type",
"=",
"None",
",",
"value_type",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"raise_with_traceback",
"(",
"_param_type_mismatch_exception",
"(",
"obj",
",",
"dict",
",",
"param_name",
")",
")",
"if",
"not",
"(",
"key_type",
"or",
"value_type",
")",
":",
"return",
"obj",
"return",
"_check_key_value_types",
"(",
"obj",
",",
"key_type",
",",
"value_type",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
opt_dict_param
|
Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty
dictionary.
|
python_modules/dagster/dagster/check/__init__.py
|
def opt_dict_param(obj, param_name, key_type=None, value_type=None, value_class=None):
'''Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty
dictionary.
'''
if obj is not None and not isinstance(obj, dict):
raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name))
if not obj:
return {}
if value_class:
return _check_key_value_types(obj, key_type, value_type=value_class, value_check=issubclass)
return _check_key_value_types(obj, key_type, value_type)
|
def opt_dict_param(obj, param_name, key_type=None, value_type=None, value_class=None):
'''Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty
dictionary.
'''
if obj is not None and not isinstance(obj, dict):
raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name))
if not obj:
return {}
if value_class:
return _check_key_value_types(obj, key_type, value_type=value_class, value_check=issubclass)
return _check_key_value_types(obj, key_type, value_type)
|
[
"Ensures",
"argument",
"obj",
"is",
"either",
"a",
"dictionary",
"or",
"None",
";",
"if",
"the",
"latter",
"instantiates",
"an",
"empty",
"dictionary",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/check/__init__.py#L375-L387
|
[
"def",
"opt_dict_param",
"(",
"obj",
",",
"param_name",
",",
"key_type",
"=",
"None",
",",
"value_type",
"=",
"None",
",",
"value_class",
"=",
"None",
")",
":",
"if",
"obj",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"raise_with_traceback",
"(",
"_param_type_mismatch_exception",
"(",
"obj",
",",
"dict",
",",
"param_name",
")",
")",
"if",
"not",
"obj",
":",
"return",
"{",
"}",
"if",
"value_class",
":",
"return",
"_check_key_value_types",
"(",
"obj",
",",
"key_type",
",",
"value_type",
"=",
"value_class",
",",
"value_check",
"=",
"issubclass",
")",
"return",
"_check_key_value_types",
"(",
"obj",
",",
"key_type",
",",
"value_type",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
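A hedged sketch of opt_dict_param's two special behaviors documented above: None (or an empty dict) is normalized to {}, and passing value_class switches the value check from isinstance to issubclass.

from dagster import check  # assumed import style

assert check.opt_dict_param(None, 'maybe_config') == {}
check.opt_dict_param({'handler': ValueError}, 'handlers',
                     key_type=str, value_class=Exception)  # issubclass passes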
test
|
construct_event_logger
|
Callback receives a stream of event_records
|
python_modules/dagster/dagster/core/events/logging.py
|
def construct_event_logger(event_record_callback):
'''
Callback receives a stream of event_records
'''
check.callable_param(event_record_callback, 'event_record_callback')
return construct_single_handler_logger(
'event-logger',
DEBUG,
StructuredLoggerHandler(
lambda logger_message: event_record_callback(construct_event_record(logger_message))
),
)
|
def construct_event_logger(event_record_callback):
'''
Callback receives a stream of event_records
'''
check.callable_param(event_record_callback, 'event_record_callback')
return construct_single_handler_logger(
'event-logger',
DEBUG,
StructuredLoggerHandler(
lambda logger_message: event_record_callback(construct_event_record(logger_message))
),
)
|
[
"Callback",
"receives",
"a",
"stream",
"of",
"event_records"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/events/logging.py#L134-L146
|
[
"def",
"construct_event_logger",
"(",
"event_record_callback",
")",
":",
"check",
".",
"callable_param",
"(",
"event_record_callback",
",",
"'event_record_callback'",
")",
"return",
"construct_single_handler_logger",
"(",
"'event-logger'",
",",
"DEBUG",
",",
"StructuredLoggerHandler",
"(",
"lambda",
"logger_message",
":",
"event_record_callback",
"(",
"construct_event_record",
"(",
"logger_message",
")",
")",
")",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
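A hedged usage sketch: the callback passed to construct_event_logger receives each constructed event record as the structured log messages arrive.

event_logger = construct_event_logger(lambda record: print(record))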
test
|
construct_json_event_logger
|
Record a stream of event records to json
|
python_modules/dagster/dagster/core/events/logging.py
|
def construct_json_event_logger(json_path):
'''Record a stream of event records to json'''
check.str_param(json_path, 'json_path')
return construct_single_handler_logger(
"json-event-record-logger",
DEBUG,
JsonEventLoggerHandler(
json_path,
lambda record: construct_event_record(
StructuredLoggerMessage(
name=record.name,
message=record.msg,
level=record.levelno,
meta=record.dagster_meta,
record=record,
)
),
),
)
|
def construct_json_event_logger(json_path):
'''Record a stream of event records to json'''
check.str_param(json_path, 'json_path')
return construct_single_handler_logger(
"json-event-record-logger",
DEBUG,
JsonEventLoggerHandler(
json_path,
lambda record: construct_event_record(
StructuredLoggerMessage(
name=record.name,
message=record.msg,
level=record.levelno,
meta=record.dagster_meta,
record=record,
)
),
),
)
|
[
"Record",
"a",
"stream",
"of",
"event",
"records",
"to",
"json"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/events/logging.py#L149-L167
|
[
"def",
"construct_json_event_logger",
"(",
"json_path",
")",
":",
"check",
".",
"str_param",
"(",
"json_path",
",",
"'json_path'",
")",
"return",
"construct_single_handler_logger",
"(",
"\"json-event-record-logger\"",
",",
"DEBUG",
",",
"JsonEventLoggerHandler",
"(",
"json_path",
",",
"lambda",
"record",
":",
"construct_event_record",
"(",
"StructuredLoggerMessage",
"(",
"name",
"=",
"record",
".",
"name",
",",
"message",
"=",
"record",
".",
"msg",
",",
"level",
"=",
"record",
".",
"levelno",
",",
"meta",
"=",
"record",
".",
"dagster_meta",
",",
"record",
"=",
"record",
",",
")",
")",
",",
")",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
RCParser.from_file
|
Read a config file and instantiate the RCParser.
Create new :class:`configparser.ConfigParser` for the given **path**
and instantiate the :class:`RCParser` with the ConfigParser as
:attr:`config` attribute.
If the **path** doesn't exist, raise :exc:`ConfigFileError`.
Otherwise return a new :class:`RCParser` instance.
:param path:
Optional path to the config file to parse.
If not given, use ``'~/.pypirc'``.
|
bin/pypirc.py
|
def from_file(cls, path=None):
"""Read a config file and instantiate the RCParser.
Create new :class:`configparser.ConfigParser` for the given **path**
and instantiate the :class:`RCParser` with the ConfigParser as
:attr:`config` attribute.
If the **path** doesn't exist, raise :exc:`ConfigFileError`.
Otherwise return a new :class:`RCParser` instance.
:param path:
Optional path to the config file to parse.
If not given, use ``'~/.pypirc'``.
"""
path = path or cls.CONFIG_PATH
if not os.path.exists(path):
error = 'Config file not found: {0!r}'.format(path)
raise ConfigFileError(error)
config = read_config(path)
return cls(config)
|
def from_file(cls, path=None):
"""Read a config file and instantiate the RCParser.
Create new :class:`configparser.ConfigParser` for the given **path**
and instantiate the :class:`RCParser` with the ConfigParser as
:attr:`config` attribute.
If the **path** doesn't exist, raise :exc:`ConfigFileError`.
Otherwise return a new :class:`RCParser` instance.
:param path:
Optional path to the config file to parse.
If not given, use ``'~/.pypirc'``.
"""
path = path or cls.CONFIG_PATH
if not os.path.exists(path):
error = 'Config file not found: {0!r}'.format(path)
raise ConfigFileError(error)
config = read_config(path)
return cls(config)
|
[
"Read",
"a",
"config",
"file",
"and",
"instantiate",
"the",
"RCParser",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/bin/pypirc.py#L49-L69
|
[
"def",
"from_file",
"(",
"cls",
",",
"path",
"=",
"None",
")",
":",
"path",
"=",
"path",
"or",
"cls",
".",
"CONFIG_PATH",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"error",
"=",
"'Config file not found: {0!r}'",
".",
"format",
"(",
"path",
")",
"raise",
"ConfigFileError",
"(",
"error",
")",
"config",
"=",
"read_config",
"(",
"path",
")",
"return",
"cls",
"(",
"config",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
RCParser.get_repository_config
|
Get config dictionary for the given repository.
If the repository section is not found in the config file,
return ``None``. If the file is invalid, raise
:exc:`configparser.Error`.
Otherwise return a dictionary with:
* ``'repository'`` -- the repository URL
* ``'username'`` -- username for authentication
* ``'password'`` -- password for authentication
:param repository:
Name or URL of the repository to find in the ``.pypirc`` file.
The repository section must be defined in the config file.
|
bin/pypirc.py
|
def get_repository_config(self, repository):
"""Get config dictionary for the given repository.
If the repository section is not found in the config file,
return ``None``. If the file is invalid, raise
:exc:`configparser.Error`.
Otherwise return a dictionary with:
* ``'repository'`` -- the repository URL
* ``'username'`` -- username for authentication
* ``'password'`` -- password for authentication
:param repository:
Name or URL of the repository to find in the ``.pypirc`` file.
The repository section must be defined in the config file.
"""
servers = self._read_index_servers()
repo_config = self._find_repo_config(servers, repository)
return repo_config
|
def get_repository_config(self, repository):
"""Get config dictionary for the given repository.
If the repository section is not found in the config file,
return ``None``. If the file is invalid, raise
:exc:`configparser.Error`.
Otherwise return a dictionary with:
* ``'repository'`` -- the repository URL
* ``'username'`` -- username for authentication
* ``'password'`` -- password for authentication
:param repository:
Name or URL of the repository to find in the ``.pypirc`` file.
The repository section must be defined in the config file.
"""
servers = self._read_index_servers()
repo_config = self._find_repo_config(servers, repository)
return repo_config
|
[
"Get",
"config",
"dictionary",
"for",
"the",
"given",
"repository",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/bin/pypirc.py#L71-L91
|
[
"def",
"get_repository_config",
"(",
"self",
",",
"repository",
")",
":",
"servers",
"=",
"self",
".",
"_read_index_servers",
"(",
")",
"repo_config",
"=",
"self",
".",
"_find_repo_config",
"(",
"servers",
",",
"repository",
")",
"return",
"repo_config"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
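Putting the two RCParser rows together, a hedged usage sketch ('pypi' is an assumed section name in the config file):

parser = RCParser.from_file()                # reads ~/.pypirc by default
repo = parser.get_repository_config('pypi')  # None if the section is absent
if repo is not None:
    print(repo['repository'], repo['username'])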
test
|
replace_parameters
|
Assigns parameters into the appropriate place in the input notebook
Args:
nb (NotebookNode): Executable notebook object
parameters (dict): Arbitrary keyword arguments to pass to the notebook parameters.
|
python_modules/dagstermill/dagstermill/__init__.py
|
def replace_parameters(context, nb, parameters):
# Uma: This is a copy-paste from papermill papermill/execute.py:104 (execute_parameters).
# Typically, papermill injects the injected-parameters cell *below* the parameters cell
# but we want to *replace* the parameters cell, which is what this function does.
    '''Assigns parameters into the appropriate place in the input notebook
Args:
nb (NotebookNode): Executable notebook object
parameters (dict): Arbitrary keyword arguments to pass to the notebook parameters.
'''
# Copy the nb object to avoid polluting the input
nb = copy.deepcopy(nb)
# Generate parameter content based on the kernel_name
param_content = DagsterTranslator.codify(parameters)
    # the papermill method chooses a translator based on kernel_name and language,
# but we just call the DagsterTranslator
# translate_parameters(kernel_name, language, parameters)
newcell = nbformat.v4.new_code_cell(source=param_content)
newcell.metadata['tags'] = ['injected-parameters']
param_cell_index = _find_first_tagged_cell_index(nb, 'parameters')
injected_cell_index = _find_first_tagged_cell_index(nb, 'injected-parameters')
if injected_cell_index >= 0:
# Replace the injected cell with a new version
before = nb.cells[:injected_cell_index]
after = nb.cells[injected_cell_index + 1 :]
check.int_value_param(param_cell_index, -1, 'param_cell_index')
# We should have blown away the parameters cell if there is an injected-parameters cell
elif param_cell_index >= 0:
# Replace the parameter cell with the injected-parameters cell
before = nb.cells[:param_cell_index]
after = nb.cells[param_cell_index + 1 :]
else:
# Inject to the top of the notebook, presumably first cell includes dagstermill import
context.log.debug(
(
'Warning notebook has no parameters cell, '
'so first cell must import dagstermill and call dm.register_repo()'
)
)
before = nb.cells[:1]
after = nb.cells[1:]
nb.cells = before + [newcell] + after
nb.metadata.papermill['parameters'] = parameters
return nb
|
def replace_parameters(context, nb, parameters):
# Uma: This is a copy-paste from papermill papermill/execute.py:104 (execute_parameters).
# Typically, papermill injects the injected-parameters cell *below* the parameters cell
# but we want to *replace* the parameters cell, which is what this function does.
    '''Assigns parameters into the appropriate place in the input notebook
Args:
nb (NotebookNode): Executable notebook object
parameters (dict): Arbitrary keyword arguments to pass to the notebook parameters.
'''
# Copy the nb object to avoid polluting the input
nb = copy.deepcopy(nb)
# Generate parameter content based on the kernel_name
param_content = DagsterTranslator.codify(parameters)
    # the papermill method chooses a translator based on kernel_name and language,
# but we just call the DagsterTranslator
# translate_parameters(kernel_name, language, parameters)
newcell = nbformat.v4.new_code_cell(source=param_content)
newcell.metadata['tags'] = ['injected-parameters']
param_cell_index = _find_first_tagged_cell_index(nb, 'parameters')
injected_cell_index = _find_first_tagged_cell_index(nb, 'injected-parameters')
if injected_cell_index >= 0:
# Replace the injected cell with a new version
before = nb.cells[:injected_cell_index]
after = nb.cells[injected_cell_index + 1 :]
check.int_value_param(param_cell_index, -1, 'param_cell_index')
# We should have blown away the parameters cell if there is an injected-parameters cell
elif param_cell_index >= 0:
# Replace the parameter cell with the injected-parameters cell
before = nb.cells[:param_cell_index]
after = nb.cells[param_cell_index + 1 :]
else:
# Inject to the top of the notebook, presumably first cell includes dagstermill import
context.log.debug(
(
'Warning notebook has no parameters cell, '
'so first cell must import dagstermill and call dm.register_repo()'
)
)
before = nb.cells[:1]
after = nb.cells[1:]
nb.cells = before + [newcell] + after
nb.metadata.papermill['parameters'] = parameters
return nb
|
[
"Assigned",
"parameters",
"into",
"the",
"appropiate",
"place",
"in",
"the",
"input",
"notebook",
"Args",
":",
"nb",
"(",
"NotebookNode",
")",
":",
"Executable",
"notebook",
"object",
"parameters",
"(",
"dict",
")",
":",
"Arbitrary",
"keyword",
"arguments",
"to",
"pass",
"to",
"the",
"notebook",
"parameters",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagstermill/dagstermill/__init__.py#L478-L526
|
[
"def",
"replace_parameters",
"(",
"context",
",",
"nb",
",",
"parameters",
")",
":",
"# Uma: This is a copy-paste from papermill papermill/execute.py:104 (execute_parameters).",
"# Typically, papermill injects the injected-parameters cell *below* the parameters cell",
"# but we want to *replace* the parameters cell, which is what this function does.",
"# Copy the nb object to avoid polluting the input",
"nb",
"=",
"copy",
".",
"deepcopy",
"(",
"nb",
")",
"# Generate parameter content based on the kernel_name",
"param_content",
"=",
"DagsterTranslator",
".",
"codify",
"(",
"parameters",
")",
"# papermill method choosed translator based on kernel_name and language,",
"# but we just call the DagsterTranslator",
"# translate_parameters(kernel_name, language, parameters)",
"newcell",
"=",
"nbformat",
".",
"v4",
".",
"new_code_cell",
"(",
"source",
"=",
"param_content",
")",
"newcell",
".",
"metadata",
"[",
"'tags'",
"]",
"=",
"[",
"'injected-parameters'",
"]",
"param_cell_index",
"=",
"_find_first_tagged_cell_index",
"(",
"nb",
",",
"'parameters'",
")",
"injected_cell_index",
"=",
"_find_first_tagged_cell_index",
"(",
"nb",
",",
"'injected-parameters'",
")",
"if",
"injected_cell_index",
">=",
"0",
":",
"# Replace the injected cell with a new version",
"before",
"=",
"nb",
".",
"cells",
"[",
":",
"injected_cell_index",
"]",
"after",
"=",
"nb",
".",
"cells",
"[",
"injected_cell_index",
"+",
"1",
":",
"]",
"check",
".",
"int_value_param",
"(",
"param_cell_index",
",",
"-",
"1",
",",
"'param_cell_index'",
")",
"# We should have blown away the parameters cell if there is an injected-parameters cell",
"elif",
"param_cell_index",
">=",
"0",
":",
"# Replace the parameter cell with the injected-parameters cell",
"before",
"=",
"nb",
".",
"cells",
"[",
":",
"param_cell_index",
"]",
"after",
"=",
"nb",
".",
"cells",
"[",
"param_cell_index",
"+",
"1",
":",
"]",
"else",
":",
"# Inject to the top of the notebook, presumably first cell includes dagstermill import",
"context",
".",
"log",
".",
"debug",
"(",
"(",
"'Warning notebook has no parameters cell, '",
"'so first cell must import dagstermill and call dm.register_repo()'",
")",
")",
"before",
"=",
"nb",
".",
"cells",
"[",
":",
"1",
"]",
"after",
"=",
"nb",
".",
"cells",
"[",
"1",
":",
"]",
"nb",
".",
"cells",
"=",
"before",
"+",
"[",
"newcell",
"]",
"+",
"after",
"nb",
".",
"metadata",
".",
"papermill",
"[",
"'parameters'",
"]",
"=",
"parameters",
"return",
"nb"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
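A hedged sketch of calling the cell-replacement helper above. Reading the notebook with nbformat mirrors how papermill loads it; `context` is assumed to be a dagster context exposing context.log, and the file names are hypothetical.

import nbformat

nb = nbformat.read('notebook.ipynb', as_version=4)
new_nb = replace_parameters(context, nb, {'alpha': 0.5})
nbformat.write(new_nb, 'notebook-injected.ipynb')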
test
|
nonce_solid
|
Creates a solid with the given number of (meaningless) inputs and outputs.
Config controls the behavior of the nonce solid.
|
examples/toys/log_spew.py
|
def nonce_solid(name, n_inputs, n_outputs):
"""Creates a solid with the given number of (meaningless) inputs and outputs.
Config controls the behavior of the nonce solid."""
@solid(
name=name,
inputs=[
InputDefinition(name='input_{}'.format(i)) for i in range(n_inputs)
],
outputs=[
OutputDefinition(name='output_{}'.format(i))
for i in range(n_outputs)
],
)
def solid_fn(context, **_kwargs):
for i in range(200):
time.sleep(0.02)
if i % 1000 == 420:
context.log.error(
'Error message seq={i} from solid {name}'.format(
i=i, name=name
)
)
elif i % 100 == 0:
context.log.warning(
'Warning message seq={i} from solid {name}'.format(
i=i, name=name
)
)
elif i % 10 == 0:
context.log.info(
'Info message seq={i} from solid {name}'.format(
i=i, name=name
)
)
else:
context.log.debug(
'Debug message seq={i} from solid {name}'.format(
i=i, name=name
)
)
return MultipleResults.from_dict(
{'output_{}'.format(i): 'foo' for i in range(n_outputs)}
)
return solid_fn
|
def nonce_solid(name, n_inputs, n_outputs):
"""Creates a solid with the given number of (meaningless) inputs and outputs.
Config controls the behavior of the nonce solid."""
@solid(
name=name,
inputs=[
InputDefinition(name='input_{}'.format(i)) for i in range(n_inputs)
],
outputs=[
OutputDefinition(name='output_{}'.format(i))
for i in range(n_outputs)
],
)
def solid_fn(context, **_kwargs):
for i in range(200):
time.sleep(0.02)
if i % 1000 == 420:
context.log.error(
'Error message seq={i} from solid {name}'.format(
i=i, name=name
)
)
elif i % 100 == 0:
context.log.warning(
'Warning message seq={i} from solid {name}'.format(
i=i, name=name
)
)
elif i % 10 == 0:
context.log.info(
'Info message seq={i} from solid {name}'.format(
i=i, name=name
)
)
else:
context.log.debug(
'Debug message seq={i} from solid {name}'.format(
i=i, name=name
)
)
return MultipleResults.from_dict(
{'output_{}'.format(i): 'foo' for i in range(n_outputs)}
)
return solid_fn
|
[
"Creates",
"a",
"solid",
"with",
"the",
"given",
"number",
"of",
"(",
"meaningless",
")",
"inputs",
"and",
"outputs",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/examples/toys/log_spew.py#L14-L60
|
[
"def",
"nonce_solid",
"(",
"name",
",",
"n_inputs",
",",
"n_outputs",
")",
":",
"@",
"solid",
"(",
"name",
"=",
"name",
",",
"inputs",
"=",
"[",
"InputDefinition",
"(",
"name",
"=",
"'input_{}'",
".",
"format",
"(",
"i",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n_inputs",
")",
"]",
",",
"outputs",
"=",
"[",
"OutputDefinition",
"(",
"name",
"=",
"'output_{}'",
".",
"format",
"(",
"i",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n_outputs",
")",
"]",
",",
")",
"def",
"solid_fn",
"(",
"context",
",",
"*",
"*",
"_kwargs",
")",
":",
"for",
"i",
"in",
"range",
"(",
"200",
")",
":",
"time",
".",
"sleep",
"(",
"0.02",
")",
"if",
"i",
"%",
"1000",
"==",
"420",
":",
"context",
".",
"log",
".",
"error",
"(",
"'Error message seq={i} from solid {name}'",
".",
"format",
"(",
"i",
"=",
"i",
",",
"name",
"=",
"name",
")",
")",
"elif",
"i",
"%",
"100",
"==",
"0",
":",
"context",
".",
"log",
".",
"warning",
"(",
"'Warning message seq={i} from solid {name}'",
".",
"format",
"(",
"i",
"=",
"i",
",",
"name",
"=",
"name",
")",
")",
"elif",
"i",
"%",
"10",
"==",
"0",
":",
"context",
".",
"log",
".",
"info",
"(",
"'Info message seq={i} from solid {name}'",
".",
"format",
"(",
"i",
"=",
"i",
",",
"name",
"=",
"name",
")",
")",
"else",
":",
"context",
".",
"log",
".",
"debug",
"(",
"'Debug message seq={i} from solid {name}'",
".",
"format",
"(",
"i",
"=",
"i",
",",
"name",
"=",
"name",
")",
")",
"return",
"MultipleResults",
".",
"from_dict",
"(",
"{",
"'output_{}'",
".",
"format",
"(",
"i",
")",
":",
"'foo'",
"for",
"i",
"in",
"range",
"(",
"n_outputs",
")",
"}",
")",
"return",
"solid_fn"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
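A hedged sketch: nonce_solid is a factory, so calling it yields a solid definition with the requested fan-in/fan-out; the generated solid logs as it runs and returns 'foo' on every output.

spew = nonce_solid('spew_1', n_inputs=2, n_outputs=3)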
test
|
format_config_for_graphql
|
This recursive descent thing formats a config dict for GraphQL.
|
python_modules/dagster-airflow/dagster_airflow/format.py
|
def format_config_for_graphql(config):
'''This recursive descent thing formats a config dict for GraphQL.'''
def _format_config_subdict(config, current_indent=0):
check.dict_param(config, 'config', key_type=str)
printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)
printer.line('{')
n_elements = len(config)
for i, key in enumerate(sorted(config, key=lambda x: x[0])):
value = config[key]
with printer.with_indent():
formatted_value = (
_format_config_item(value, current_indent=printer.current_indent)
.lstrip(' ')
.rstrip('\n')
)
printer.line(
'{key}: {formatted_value}{comma}'.format(
key=key,
formatted_value=formatted_value,
comma=',' if i != n_elements - 1 else '',
)
)
printer.line('}')
return printer.read()
def _format_config_sublist(config, current_indent=0):
printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)
printer.line('[')
n_elements = len(config)
for i, value in enumerate(config):
with printer.with_indent():
formatted_value = (
_format_config_item(value, current_indent=printer.current_indent)
.lstrip(' ')
.rstrip('\n')
)
printer.line(
'{formatted_value}{comma}'.format(
formatted_value=formatted_value, comma=',' if i != n_elements - 1 else ''
)
)
printer.line(']')
return printer.read()
def _format_config_item(config, current_indent=0):
printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)
if isinstance(config, dict):
return _format_config_subdict(config, printer.current_indent)
elif isinstance(config, list):
return _format_config_sublist(config, printer.current_indent)
elif isinstance(config, bool):
return repr(config).lower()
else:
return repr(config).replace('\'', '"')
check.dict_param(config, 'config', key_type=str)
if not isinstance(config, dict):
check.failed('Expected a dict to format as config, got: {item}'.format(item=repr(config)))
return _format_config_subdict(config)
|
def format_config_for_graphql(config):
'''This recursive descent thing formats a config dict for GraphQL.'''
def _format_config_subdict(config, current_indent=0):
check.dict_param(config, 'config', key_type=str)
printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)
printer.line('{')
n_elements = len(config)
for i, key in enumerate(sorted(config, key=lambda x: x[0])):
value = config[key]
with printer.with_indent():
formatted_value = (
_format_config_item(value, current_indent=printer.current_indent)
.lstrip(' ')
.rstrip('\n')
)
printer.line(
'{key}: {formatted_value}{comma}'.format(
key=key,
formatted_value=formatted_value,
comma=',' if i != n_elements - 1 else '',
)
)
printer.line('}')
return printer.read()
def _format_config_sublist(config, current_indent=0):
printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)
printer.line('[')
n_elements = len(config)
for i, value in enumerate(config):
with printer.with_indent():
formatted_value = (
_format_config_item(value, current_indent=printer.current_indent)
.lstrip(' ')
.rstrip('\n')
)
printer.line(
'{formatted_value}{comma}'.format(
formatted_value=formatted_value, comma=',' if i != n_elements - 1 else ''
)
)
printer.line(']')
return printer.read()
def _format_config_item(config, current_indent=0):
printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)
if isinstance(config, dict):
return _format_config_subdict(config, printer.current_indent)
elif isinstance(config, list):
return _format_config_sublist(config, printer.current_indent)
elif isinstance(config, bool):
return repr(config).lower()
else:
return repr(config).replace('\'', '"')
check.dict_param(config, 'config', key_type=str)
if not isinstance(config, dict):
check.failed('Expected a dict to format as config, got: {item}'.format(item=repr(config)))
return _format_config_subdict(config)
|
[
"This",
"recursive",
"descent",
"thing",
"formats",
"a",
"config",
"dict",
"for",
"GraphQL",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster-airflow/dagster_airflow/format.py#L5-L71
|
[
"def",
"format_config_for_graphql",
"(",
"config",
")",
":",
"def",
"_format_config_subdict",
"(",
"config",
",",
"current_indent",
"=",
"0",
")",
":",
"check",
".",
"dict_param",
"(",
"config",
",",
"'config'",
",",
"key_type",
"=",
"str",
")",
"printer",
"=",
"IndentingStringIoPrinter",
"(",
"indent_level",
"=",
"2",
",",
"current_indent",
"=",
"current_indent",
")",
"printer",
".",
"line",
"(",
"'{'",
")",
"n_elements",
"=",
"len",
"(",
"config",
")",
"for",
"i",
",",
"key",
"in",
"enumerate",
"(",
"sorted",
"(",
"config",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
")",
":",
"value",
"=",
"config",
"[",
"key",
"]",
"with",
"printer",
".",
"with_indent",
"(",
")",
":",
"formatted_value",
"=",
"(",
"_format_config_item",
"(",
"value",
",",
"current_indent",
"=",
"printer",
".",
"current_indent",
")",
".",
"lstrip",
"(",
"' '",
")",
".",
"rstrip",
"(",
"'\\n'",
")",
")",
"printer",
".",
"line",
"(",
"'{key}: {formatted_value}{comma}'",
".",
"format",
"(",
"key",
"=",
"key",
",",
"formatted_value",
"=",
"formatted_value",
",",
"comma",
"=",
"','",
"if",
"i",
"!=",
"n_elements",
"-",
"1",
"else",
"''",
",",
")",
")",
"printer",
".",
"line",
"(",
"'}'",
")",
"return",
"printer",
".",
"read",
"(",
")",
"def",
"_format_config_sublist",
"(",
"config",
",",
"current_indent",
"=",
"0",
")",
":",
"printer",
"=",
"IndentingStringIoPrinter",
"(",
"indent_level",
"=",
"2",
",",
"current_indent",
"=",
"current_indent",
")",
"printer",
".",
"line",
"(",
"'['",
")",
"n_elements",
"=",
"len",
"(",
"config",
")",
"for",
"i",
",",
"value",
"in",
"enumerate",
"(",
"config",
")",
":",
"with",
"printer",
".",
"with_indent",
"(",
")",
":",
"formatted_value",
"=",
"(",
"_format_config_item",
"(",
"value",
",",
"current_indent",
"=",
"printer",
".",
"current_indent",
")",
".",
"lstrip",
"(",
"' '",
")",
".",
"rstrip",
"(",
"'\\n'",
")",
")",
"printer",
".",
"line",
"(",
"'{formatted_value}{comma}'",
".",
"format",
"(",
"formatted_value",
"=",
"formatted_value",
",",
"comma",
"=",
"','",
"if",
"i",
"!=",
"n_elements",
"-",
"1",
"else",
"''",
")",
")",
"printer",
".",
"line",
"(",
"']'",
")",
"return",
"printer",
".",
"read",
"(",
")",
"def",
"_format_config_item",
"(",
"config",
",",
"current_indent",
"=",
"0",
")",
":",
"printer",
"=",
"IndentingStringIoPrinter",
"(",
"indent_level",
"=",
"2",
",",
"current_indent",
"=",
"current_indent",
")",
"if",
"isinstance",
"(",
"config",
",",
"dict",
")",
":",
"return",
"_format_config_subdict",
"(",
"config",
",",
"printer",
".",
"current_indent",
")",
"elif",
"isinstance",
"(",
"config",
",",
"list",
")",
":",
"return",
"_format_config_sublist",
"(",
"config",
",",
"printer",
".",
"current_indent",
")",
"elif",
"isinstance",
"(",
"config",
",",
"bool",
")",
":",
"return",
"repr",
"(",
"config",
")",
".",
"lower",
"(",
")",
"else",
":",
"return",
"repr",
"(",
"config",
")",
".",
"replace",
"(",
"'\\''",
",",
"'\"'",
")",
"check",
".",
"dict_param",
"(",
"config",
",",
"'config'",
",",
"key_type",
"=",
"str",
")",
"if",
"not",
"isinstance",
"(",
"config",
",",
"dict",
")",
":",
"check",
".",
"failed",
"(",
"'Expected a dict to format as config, got: {item}'",
".",
"format",
"(",
"item",
"=",
"repr",
"(",
"config",
")",
")",
")",
"return",
"_format_config_subdict",
"(",
"config",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
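For reference, the same recursive-descent idea can be sketched without the IndentingStringIoPrinter helper. The to_graphql_literal function below is a hypothetical stand-in, not part of dagster; it applies the identical type dispatch (dict, list, bool, scalar) to produce a GraphQL-style literal with unquoted keys, double-quoted strings, and lowercase booleans.

def to_graphql_literal(value):
    # Dispatch on type, exactly as _format_config_item does above.
    if isinstance(value, dict):
        inner = ', '.join(
            '{key}: {val}'.format(key=key, val=to_graphql_literal(value[key]))
            for key in sorted(value)
        )
        return '{' + inner + '}'
    elif isinstance(value, list):
        return '[' + ', '.join(to_graphql_literal(item) for item in value) + ']'
    elif isinstance(value, bool):
        # GraphQL booleans are lowercase.
        return repr(value).lower()
    else:
        # Scalars: swap single quotes for the double quotes GraphQL expects.
        return repr(value).replace("'", '"')

assert to_graphql_literal({'solids': {'count': 3, 'flag': True}}) == \
    '{solids: {count: 3, flag: true}}'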
test
|
RepositoryDefinition.get_pipeline
|
Get a pipeline by name. Only constructs that pipeline and caches it.
Args:
name (str): Name of the pipeline to retrieve
Returns:
PipelineDefinition: Instance of PipelineDefinition with that name.
|
python_modules/dagster/dagster/core/definitions/repository.py
|
def get_pipeline(self, name):
'''Get a pipeline by name. Only constructs that pipeline and caches it.
Args:
        name (str): Name of the pipeline to retrieve
Returns:
PipelineDefinition: Instance of PipelineDefinition with that name.
'''
check.str_param(name, 'name')
if name in self._pipeline_cache:
return self._pipeline_cache[name]
try:
pipeline = self.pipeline_dict[name]()
except KeyError:
raise DagsterInvariantViolationError(
'Could not find pipeline "{name}". Found: {pipeline_names}.'.format(
name=name,
pipeline_names=', '.join(
[
'"{pipeline_name}"'.format(pipeline_name=pipeline_name)
for pipeline_name in self.pipeline_dict.keys()
]
),
)
)
check.invariant(
pipeline.name == name,
'Name does not match. Name in dict {name}. Name in pipeline {pipeline.name}'.format(
name=name, pipeline=pipeline
),
)
self._pipeline_cache[name] = check.inst(
pipeline,
PipelineDefinition,
(
'Function passed into pipeline_dict with key {key} must return a '
'PipelineDefinition'
).format(key=name),
)
return pipeline
|
def get_pipeline(self, name):
'''Get a pipeline by name. Only constructs that pipeline and caches it.
Args:
        name (str): Name of the pipeline to retrieve
Returns:
PipelineDefinition: Instance of PipelineDefinition with that name.
'''
check.str_param(name, 'name')
if name in self._pipeline_cache:
return self._pipeline_cache[name]
try:
pipeline = self.pipeline_dict[name]()
except KeyError:
raise DagsterInvariantViolationError(
'Could not find pipeline "{name}". Found: {pipeline_names}.'.format(
name=name,
pipeline_names=', '.join(
[
'"{pipeline_name}"'.format(pipeline_name=pipeline_name)
for pipeline_name in self.pipeline_dict.keys()
]
),
)
)
check.invariant(
pipeline.name == name,
'Name does not match. Name in dict {name}. Name in pipeline {pipeline.name}'.format(
name=name, pipeline=pipeline
),
)
self._pipeline_cache[name] = check.inst(
pipeline,
PipelineDefinition,
(
'Function passed into pipeline_dict with key {key} must return a '
'PipelineDefinition'
).format(key=name),
)
return pipeline
|
[
"Get",
"a",
"pipeline",
"by",
"name",
".",
"Only",
"constructs",
"that",
"pipeline",
"and",
"caches",
"it",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/repository.py#L56-L100
|
[
"def",
"get_pipeline",
"(",
"self",
",",
"name",
")",
":",
"check",
".",
"str_param",
"(",
"name",
",",
"'name'",
")",
"if",
"name",
"in",
"self",
".",
"_pipeline_cache",
":",
"return",
"self",
".",
"_pipeline_cache",
"[",
"name",
"]",
"try",
":",
"pipeline",
"=",
"self",
".",
"pipeline_dict",
"[",
"name",
"]",
"(",
")",
"except",
"KeyError",
":",
"raise",
"DagsterInvariantViolationError",
"(",
"'Could not find pipeline \"{name}\". Found: {pipeline_names}.'",
".",
"format",
"(",
"name",
"=",
"name",
",",
"pipeline_names",
"=",
"', '",
".",
"join",
"(",
"[",
"'\"{pipeline_name}\"'",
".",
"format",
"(",
"pipeline_name",
"=",
"pipeline_name",
")",
"for",
"pipeline_name",
"in",
"self",
".",
"pipeline_dict",
".",
"keys",
"(",
")",
"]",
")",
",",
")",
")",
"check",
".",
"invariant",
"(",
"pipeline",
".",
"name",
"==",
"name",
",",
"'Name does not match. Name in dict {name}. Name in pipeline {pipeline.name}'",
".",
"format",
"(",
"name",
"=",
"name",
",",
"pipeline",
"=",
"pipeline",
")",
",",
")",
"self",
".",
"_pipeline_cache",
"[",
"name",
"]",
"=",
"check",
".",
"inst",
"(",
"pipeline",
",",
"PipelineDefinition",
",",
"(",
"'Function passed into pipeline_dict with key {key} must return a '",
"'PipelineDefinition'",
")",
".",
"format",
"(",
"key",
"=",
"name",
")",
",",
")",
"return",
"pipeline"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
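A minimal sketch of the same construct-on-demand-and-cache pattern in plain Python; the Registry class and its internals are hypothetical stand-ins for RepositoryDefinition, not dagster API:

class Registry(object):
    def __init__(self, factories):
        # factories maps a name to a zero-argument callable, like pipeline_dict.
        self._factories = factories
        self._cache = {}

    def get(self, name):
        if name in self._cache:
            return self._cache[name]
        if name not in self._factories:
            raise KeyError(
                'Could not find "{name}". Found: {names}.'.format(
                    name=name, names=', '.join(sorted(self._factories))
                )
            )
        # Construct lazily, then cache so each factory runs at most once.
        self._cache[name] = self._factories[name]()
        return self._cache[name]

registry = Registry({'hello_pipeline': lambda: object()})
assert registry.get('hello_pipeline') is registry.get('hello_pipeline')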
test
|
RepositoryDefinition.get_all_pipelines
|
Return all pipelines as a list
Returns:
List[PipelineDefinition]:
|
python_modules/dagster/dagster/core/definitions/repository.py
|
def get_all_pipelines(self):
'''Return all pipelines as a list
Returns:
List[PipelineDefinition]:
'''
pipelines = list(map(self.get_pipeline, self.pipeline_dict.keys()))
    # This does a uniqueness check
self._construct_solid_defs(pipelines)
return pipelines
|
def get_all_pipelines(self):
'''Return all pipelines as a list
Returns:
List[PipelineDefinition]:
'''
pipelines = list(map(self.get_pipeline, self.pipeline_dict.keys()))
    # This does a uniqueness check
self._construct_solid_defs(pipelines)
return pipelines
|
[
"Return",
"all",
"pipelines",
"as",
"a",
"list"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/repository.py#L102-L112
|
[
"def",
"get_all_pipelines",
"(",
"self",
")",
":",
"pipelines",
"=",
"list",
"(",
"map",
"(",
"self",
".",
"get_pipeline",
",",
"self",
".",
"pipeline_dict",
".",
"keys",
"(",
")",
")",
")",
"# This does uniqueness check",
"self",
".",
"_construct_solid_defs",
"(",
"pipelines",
")",
"return",
"pipelines"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
define_spark_config
|
Spark configuration.
See the Spark documentation for reference:
https://spark.apache.org/docs/latest/submitting-applications.html
|
python_modules/libraries/dagster-spark/dagster_spark/configs.py
|
def define_spark_config():
'''Spark configuration.
See the Spark documentation for reference:
https://spark.apache.org/docs/latest/submitting-applications.html
'''
master_url = Field(
String,
description='The master URL for the cluster (e.g. spark://23.195.26.187:7077)',
is_optional=False,
)
deploy_mode = Field(
SparkDeployMode,
description='''Whether to deploy your driver on the worker nodes (cluster) or locally as an
external client (client) (default: client). A common deployment strategy is to submit your
application from a gateway machine that is physically co-located with your worker machines
(e.g. Master node in a standalone EC2 cluster). In this setup, client mode is appropriate.
In client mode, the driver is launched directly within the spark-submit process which acts
as a client to the cluster. The input and output of the application is attached to the
console. Thus, this mode is especially suitable for applications that involve the REPL (e.g.
Spark shell).''',
is_optional=True,
)
application_jar = Field(
Path,
description='''Path to a bundled jar including your application and all
dependencies. The URL must be globally visible inside of your cluster, for
instance, an hdfs:// path or a file:// path that is present on all nodes.
''',
is_optional=False,
)
application_arguments = Field(
String,
description='Arguments passed to the main method of your main class, if any',
is_optional=True,
)
spark_home = Field(
String,
description='The path to your spark installation. Defaults to $SPARK_HOME at runtime if not provided.',
is_optional=True,
)
spark_outputs = Field(List(String), description='The outputs that this Spark job will produce')
return Field(
Dict(
fields={
'master_url': master_url,
'deploy_mode': deploy_mode,
'application_jar': application_jar,
'spark_conf': spark_config(),
'spark_home': spark_home,
'application_arguments': application_arguments,
'spark_outputs': spark_outputs,
}
)
)
|
def define_spark_config():
'''Spark configuration.
See the Spark documentation for reference:
https://spark.apache.org/docs/latest/submitting-applications.html
'''
master_url = Field(
String,
description='The master URL for the cluster (e.g. spark://23.195.26.187:7077)',
is_optional=False,
)
deploy_mode = Field(
SparkDeployMode,
description='''Whether to deploy your driver on the worker nodes (cluster) or locally as an
external client (client) (default: client). A common deployment strategy is to submit your
application from a gateway machine that is physically co-located with your worker machines
(e.g. Master node in a standalone EC2 cluster). In this setup, client mode is appropriate.
In client mode, the driver is launched directly within the spark-submit process which acts
as a client to the cluster. The input and output of the application is attached to the
console. Thus, this mode is especially suitable for applications that involve the REPL (e.g.
Spark shell).''',
is_optional=True,
)
application_jar = Field(
Path,
description='''Path to a bundled jar including your application and all
dependencies. The URL must be globally visible inside of your cluster, for
instance, an hdfs:// path or a file:// path that is present on all nodes.
''',
is_optional=False,
)
application_arguments = Field(
String,
description='Arguments passed to the main method of your main class, if any',
is_optional=True,
)
spark_home = Field(
String,
description='The path to your spark installation. Defaults to $SPARK_HOME at runtime if not provided.',
is_optional=True,
)
spark_outputs = Field(List(String), description='The outputs that this Spark job will produce')
return Field(
Dict(
fields={
'master_url': master_url,
'deploy_mode': deploy_mode,
'application_jar': application_jar,
'spark_conf': spark_config(),
'spark_home': spark_home,
'application_arguments': application_arguments,
'spark_outputs': spark_outputs,
}
)
)
|
[
"Spark",
"configuration",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-spark/dagster_spark/configs.py#L14-L75
|
[
"def",
"define_spark_config",
"(",
")",
":",
"master_url",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'The master URL for the cluster (e.g. spark://23.195.26.187:7077)'",
",",
"is_optional",
"=",
"False",
",",
")",
"deploy_mode",
"=",
"Field",
"(",
"SparkDeployMode",
",",
"description",
"=",
"'''Whether to deploy your driver on the worker nodes (cluster) or locally as an\n external client (client) (default: client). A common deployment strategy is to submit your\n application from a gateway machine that is physically co-located with your worker machines\n (e.g. Master node in a standalone EC2 cluster). In this setup, client mode is appropriate. \n In client mode, the driver is launched directly within the spark-submit process which acts \n as a client to the cluster. The input and output of the application is attached to the \n console. Thus, this mode is especially suitable for applications that involve the REPL (e.g.\n Spark shell).'''",
",",
"is_optional",
"=",
"True",
",",
")",
"application_jar",
"=",
"Field",
"(",
"Path",
",",
"description",
"=",
"'''Path to a bundled jar including your application and all\n dependencies. The URL must be globally visible inside of your cluster, for\n instance, an hdfs:// path or a file:// path that is present on all nodes.\n '''",
",",
"is_optional",
"=",
"False",
",",
")",
"application_arguments",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'Arguments passed to the main method of your main class, if any'",
",",
"is_optional",
"=",
"True",
",",
")",
"spark_home",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'The path to your spark installation. Defaults to $SPARK_HOME at runtime if not provided.'",
",",
"is_optional",
"=",
"True",
",",
")",
"spark_outputs",
"=",
"Field",
"(",
"List",
"(",
"String",
")",
",",
"description",
"=",
"'The outputs that this Spark job will produce'",
")",
"return",
"Field",
"(",
"Dict",
"(",
"fields",
"=",
"{",
"'master_url'",
":",
"master_url",
",",
"'deploy_mode'",
":",
"deploy_mode",
",",
"'application_jar'",
":",
"application_jar",
",",
"'spark_conf'",
":",
"spark_config",
"(",
")",
",",
"'spark_home'",
":",
"spark_home",
",",
"'application_arguments'",
":",
"application_arguments",
",",
"'spark_outputs'",
":",
"spark_outputs",
",",
"}",
")",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
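Assuming the schema above, an environment config that satisfies it might look like the following plain dict; the concrete values are illustrative only, not taken from the dagster docs:

spark_environment_example = {
    'master_url': 'spark://23.195.26.187:7077',     # required
    'deploy_mode': 'client',                        # optional
    'application_jar': '/path/to/app.jar',          # required
    'spark_conf': {'spark.executor.memory': '4g'},
    'spark_home': '/opt/spark',                     # optional; else $SPARK_HOME
    'application_arguments': '--input data.csv',
    'spark_outputs': ['hdfs://out/part-0'],
}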
test
|
get_next_event
|
This function polls the process until it returns a valid
item or returns PROCESS_DEAD_AND_QUEUE_EMPTY if it is in
a state where the process has terminated and the queue is empty.
Warning: if the child process is in an infinite loop, this will also loop
infinitely.
|
python_modules/dagster/dagster/core/execution_plan/child_process_executor.py
|
def get_next_event(process, queue):
'''
This function polls the process until it returns a valid
item or returns PROCESS_DEAD_AND_QUEUE_EMPTY if it is in
    a state where the process has terminated and the queue is empty.
    Warning: if the child process is in an infinite loop, this will also loop
    infinitely.
'''
while True:
try:
return queue.get(block=True, timeout=TICK)
except multiprocessing.queues.Empty:
if not process.is_alive():
# There is a possibility that after the last queue.get the
# process created another event and then died. In that case
# we want to continue draining the queue.
try:
return queue.get(block=False)
except multiprocessing.queues.Empty:
                    # If the queue is empty we know that there are no more events
# and that the process has died.
return PROCESS_DEAD_AND_QUEUE_EMPTY
check.failed('unreachable')
|
def get_next_event(process, queue):
'''
This function polls the process until it returns a valid
item or returns PROCESS_DEAD_AND_QUEUE_EMPTY if it is in
    a state where the process has terminated and the queue is empty.
    Warning: if the child process is in an infinite loop, this will also loop
    infinitely.
'''
while True:
try:
return queue.get(block=True, timeout=TICK)
except multiprocessing.queues.Empty:
if not process.is_alive():
# There is a possibility that after the last queue.get the
# process created another event and then died. In that case
# we want to continue draining the queue.
try:
return queue.get(block=False)
except multiprocessing.queues.Empty:
                    # If the queue is empty we know that there are no more events
# and that the process has died.
return PROCESS_DEAD_AND_QUEUE_EMPTY
check.failed('unreachable')
|
[
"This",
"function",
"polls",
"the",
"process",
"until",
"it",
"returns",
"a",
"valid",
"item",
"or",
"returns",
"PROCESS_DEAD_AND_QUEUE_EMPTY",
"if",
"it",
"is",
"in",
"a",
"state",
"where",
"the",
"process",
"has",
"terminated",
"and",
"the",
"queue",
"is",
"empty"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution_plan/child_process_executor.py#L65-L89
|
[
"def",
"get_next_event",
"(",
"process",
",",
"queue",
")",
":",
"while",
"True",
":",
"try",
":",
"return",
"queue",
".",
"get",
"(",
"block",
"=",
"True",
",",
"timeout",
"=",
"TICK",
")",
"except",
"multiprocessing",
".",
"queues",
".",
"Empty",
":",
"if",
"not",
"process",
".",
"is_alive",
"(",
")",
":",
"# There is a possibility that after the last queue.get the",
"# process created another event and then died. In that case",
"# we want to continue draining the queue.",
"try",
":",
"return",
"queue",
".",
"get",
"(",
"block",
"=",
"False",
")",
"except",
"multiprocessing",
".",
"queues",
".",
"Empty",
":",
"# If the queue empty we know that there are no more events",
"# and that the process has died.",
"return",
"PROCESS_DEAD_AND_QUEUE_EMPTY",
"check",
".",
"failed",
"(",
"'unreachable'",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
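The drain-until-dead loop above can be exercised end to end with only the standard library. In this sketch TICK, the sentinel, and poll are local stand-ins for dagster's module-level values:

import multiprocessing
import queue

TICK = 0.1
PROCESS_DEAD_AND_QUEUE_EMPTY = 'PROCESS_DEAD_AND_QUEUE_EMPTY'

def poll(process, q):
    while True:
        try:
            return q.get(block=True, timeout=TICK)
        except queue.Empty:
            if not process.is_alive():
                # Drain any event emitted between the last get and death.
                try:
                    return q.get(block=False)
                except queue.Empty:
                    return PROCESS_DEAD_AND_QUEUE_EMPTY

def worker(q):
    q.put('event-1')

if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    print(poll(p, q))  # event-1
    p.join()
    print(poll(p, q))  # PROCESS_DEAD_AND_QUEUE_EMPTY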
test
|
execute_pipeline_through_queue
|
Execute pipeline using message queue as a transport
|
python_modules/dagster-graphql/dagster_graphql/implementation/pipeline_execution_manager.py
|
def execute_pipeline_through_queue(
repository_info,
pipeline_name,
solid_subset,
environment_dict,
run_id,
message_queue,
reexecution_config,
step_keys_to_execute,
):
"""
Execute pipeline using message queue as a transport
"""
message_queue.put(ProcessStartedSentinel(os.getpid()))
run_config = RunConfig(
run_id,
event_callback=message_queue.put,
executor_config=InProcessExecutorConfig(raise_on_error=False),
reexecution_config=reexecution_config,
step_keys_to_execute=step_keys_to_execute,
)
repository_container = RepositoryContainer(repository_info)
if repository_container.repo_error:
message_queue.put(
MultiprocessingError(
serializable_error_info_from_exc_info(repository_container.repo_error)
)
)
return
try:
result = execute_pipeline(
repository_container.repository.get_pipeline(pipeline_name).build_sub_pipeline(
solid_subset
),
environment_dict,
run_config=run_config,
)
return result
except: # pylint: disable=W0702
error_info = serializable_error_info_from_exc_info(sys.exc_info())
message_queue.put(MultiprocessingError(error_info))
finally:
message_queue.put(MultiprocessingDone())
message_queue.close()
|
def execute_pipeline_through_queue(
repository_info,
pipeline_name,
solid_subset,
environment_dict,
run_id,
message_queue,
reexecution_config,
step_keys_to_execute,
):
"""
Execute pipeline using message queue as a transport
"""
message_queue.put(ProcessStartedSentinel(os.getpid()))
run_config = RunConfig(
run_id,
event_callback=message_queue.put,
executor_config=InProcessExecutorConfig(raise_on_error=False),
reexecution_config=reexecution_config,
step_keys_to_execute=step_keys_to_execute,
)
repository_container = RepositoryContainer(repository_info)
if repository_container.repo_error:
message_queue.put(
MultiprocessingError(
serializable_error_info_from_exc_info(repository_container.repo_error)
)
)
return
try:
result = execute_pipeline(
repository_container.repository.get_pipeline(pipeline_name).build_sub_pipeline(
solid_subset
),
environment_dict,
run_config=run_config,
)
return result
except: # pylint: disable=W0702
error_info = serializable_error_info_from_exc_info(sys.exc_info())
message_queue.put(MultiprocessingError(error_info))
finally:
message_queue.put(MultiprocessingDone())
message_queue.close()
|
[
"Execute",
"pipeline",
"using",
"message",
"queue",
"as",
"a",
"transport"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster-graphql/dagster_graphql/implementation/pipeline_execution_manager.py#L268-L315
|
[
"def",
"execute_pipeline_through_queue",
"(",
"repository_info",
",",
"pipeline_name",
",",
"solid_subset",
",",
"environment_dict",
",",
"run_id",
",",
"message_queue",
",",
"reexecution_config",
",",
"step_keys_to_execute",
",",
")",
":",
"message_queue",
".",
"put",
"(",
"ProcessStartedSentinel",
"(",
"os",
".",
"getpid",
"(",
")",
")",
")",
"run_config",
"=",
"RunConfig",
"(",
"run_id",
",",
"event_callback",
"=",
"message_queue",
".",
"put",
",",
"executor_config",
"=",
"InProcessExecutorConfig",
"(",
"raise_on_error",
"=",
"False",
")",
",",
"reexecution_config",
"=",
"reexecution_config",
",",
"step_keys_to_execute",
"=",
"step_keys_to_execute",
",",
")",
"repository_container",
"=",
"RepositoryContainer",
"(",
"repository_info",
")",
"if",
"repository_container",
".",
"repo_error",
":",
"message_queue",
".",
"put",
"(",
"MultiprocessingError",
"(",
"serializable_error_info_from_exc_info",
"(",
"repository_container",
".",
"repo_error",
")",
")",
")",
"return",
"try",
":",
"result",
"=",
"execute_pipeline",
"(",
"repository_container",
".",
"repository",
".",
"get_pipeline",
"(",
"pipeline_name",
")",
".",
"build_sub_pipeline",
"(",
"solid_subset",
")",
",",
"environment_dict",
",",
"run_config",
"=",
"run_config",
",",
")",
"return",
"result",
"except",
":",
"# pylint: disable=W0702",
"error_info",
"=",
"serializable_error_info_from_exc_info",
"(",
"sys",
".",
"exc_info",
"(",
")",
")",
"message_queue",
".",
"put",
"(",
"MultiprocessingError",
"(",
"error_info",
")",
")",
"finally",
":",
"message_queue",
".",
"put",
"(",
"MultiprocessingDone",
"(",
")",
")",
"message_queue",
".",
"close",
"(",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
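The producer-side protocol above (start sentinel, streamed events, error capture, unconditional done sentinel) reduces to a small pattern. Everything in this sketch, including run_through_queue and ListQueue, is a hypothetical stand-in:

import os
import traceback

def run_through_queue(work_fn, message_queue):
    message_queue.put(('started', os.getpid()))
    try:
        # The queue's put doubles as the event callback, as with RunConfig above.
        return work_fn(event_callback=message_queue.put)
    except Exception:
        message_queue.put(('error', traceback.format_exc()))
    finally:
        # Consumers rely on this sentinel to know the stream is finished.
        message_queue.put(('done', None))

class ListQueue(object):
    def __init__(self):
        self.items = []

    def put(self, item):
        self.items.append(item)

q = ListQueue()
run_through_queue(lambda event_callback: event_callback(('event', 1)), q)
assert [kind for kind, _ in q.items] == ['started', 'event', 'done']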
test
|
MultiprocessingExecutionManager.join
|
Waits until there are no processes enqueued.
|
python_modules/dagster-graphql/dagster_graphql/implementation/pipeline_execution_manager.py
|
def join(self):
    '''Waits until there are no processes enqueued.'''
while True:
with self._processes_lock:
if not self._processes and self._processing_semaphore.locked():
return True
gevent.sleep(0.1)
|
def join(self):
    '''Waits until there are no processes enqueued.'''
while True:
with self._processes_lock:
if not self._processes and self._processing_semaphore.locked():
return True
gevent.sleep(0.1)
|
[
"Waits",
"until",
"all",
"there",
"are",
"no",
"processes",
"enqueued",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster-graphql/dagster_graphql/implementation/pipeline_execution_manager.py#L223-L229
|
[
"def",
"join",
"(",
"self",
")",
":",
"while",
"True",
":",
"with",
"self",
".",
"_processes_lock",
":",
"if",
"not",
"self",
".",
"_processes",
"and",
"self",
".",
"_processing_semaphore",
".",
"locked",
"(",
")",
":",
"return",
"True",
"gevent",
".",
"sleep",
"(",
"0.1",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
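The same poll-until-idle join can be written with threading primitives instead of gevent; the Manager class here is a simplified stand-in for MultiprocessingExecutionManager's internals:

import threading
import time

class Manager(object):
    def __init__(self):
        self._processes = []
        self._processes_lock = threading.Lock()

    def join(self, poll_interval=0.1):
        while True:
            with self._processes_lock:
                # Idle once the process list is empty.
                if not self._processes:
                    return True
            time.sleep(poll_interval)

assert Manager().join() is True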
test
|
Field
|
The schema for configuration data that describes the type, optionality, defaults, and description.
Args:
dagster_type (DagsterType):
A ``DagsterType`` describing the schema of this field, i.e. `Dict({'example': Field(String)})`
default_value (Any):
A default value to use that respects the schema provided via dagster_type
is_optional (bool): Whether the presence of this field is optional
description (str):
|
python_modules/dagster/dagster/core/types/field.py
|
def Field(
dagster_type,
default_value=FIELD_NO_DEFAULT_PROVIDED,
is_optional=INFER_OPTIONAL_COMPOSITE_FIELD,
is_secret=False,
description=None,
):
'''
The schema for configuration data that describes the type, optionality, defaults, and description.
Args:
dagster_type (DagsterType):
        A ``DagsterType`` describing the schema of this field, i.e. `Dict({'example': Field(String)})`
default_value (Any):
A default value to use that respects the schema provided via dagster_type
is_optional (bool): Whether the presence of this field is optional
    description (str):
'''
config_type = resolve_to_config_type(dagster_type)
if not config_type:
raise DagsterInvalidDefinitionError(
(
'Attempted to pass {value_repr} to a Field that expects a valid '
'dagster type usable in config (e.g. Dict, NamedDict, Int, String et al).'
).format(value_repr=repr(dagster_type))
)
return FieldImpl(
config_type=resolve_to_config_type(dagster_type),
default_value=default_value,
is_optional=is_optional,
is_secret=is_secret,
description=description,
)
|
def Field(
dagster_type,
default_value=FIELD_NO_DEFAULT_PROVIDED,
is_optional=INFER_OPTIONAL_COMPOSITE_FIELD,
is_secret=False,
description=None,
):
'''
The schema for configuration data that describes the type, optionality, defaults, and description.
Args:
dagster_type (DagsterType):
        A ``DagsterType`` describing the schema of this field, i.e. `Dict({'example': Field(String)})`
default_value (Any):
A default value to use that respects the schema provided via dagster_type
is_optional (bool): Whether the presence of this field is optional
    description (str):
'''
config_type = resolve_to_config_type(dagster_type)
if not config_type:
raise DagsterInvalidDefinitionError(
(
'Attempted to pass {value_repr} to a Field that expects a valid '
'dagster type usable in config (e.g. Dict, NamedDict, Int, String et al).'
).format(value_repr=repr(dagster_type))
)
return FieldImpl(
config_type=resolve_to_config_type(dagster_type),
default_value=default_value,
is_optional=is_optional,
is_secret=is_secret,
description=description,
)
|
[
"The",
"schema",
"for",
"configuration",
"data",
"that",
"describes",
"the",
"type",
"optionality",
"defaults",
"and",
"description",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/field.py#L34-L66
|
[
"def",
"Field",
"(",
"dagster_type",
",",
"default_value",
"=",
"FIELD_NO_DEFAULT_PROVIDED",
",",
"is_optional",
"=",
"INFER_OPTIONAL_COMPOSITE_FIELD",
",",
"is_secret",
"=",
"False",
",",
"description",
"=",
"None",
",",
")",
":",
"config_type",
"=",
"resolve_to_config_type",
"(",
"dagster_type",
")",
"if",
"not",
"config_type",
":",
"raise",
"DagsterInvalidDefinitionError",
"(",
"(",
"'Attempted to pass {value_repr} to a Field that expects a valid '",
"'dagster type usable in config (e.g. Dict, NamedDict, Int, String et al).'",
")",
".",
"format",
"(",
"value_repr",
"=",
"repr",
"(",
"dagster_type",
")",
")",
")",
"return",
"FieldImpl",
"(",
"config_type",
"=",
"resolve_to_config_type",
"(",
"dagster_type",
")",
",",
"default_value",
"=",
"default_value",
",",
"is_optional",
"=",
"is_optional",
",",
"is_secret",
"=",
"is_secret",
",",
"description",
"=",
"description",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
define_snowflake_config
|
Snowflake configuration.
See the Snowflake documentation for reference:
https://docs.snowflake.net/manuals/user-guide/python-connector-api.html
|
python_modules/libraries/dagster-snowflake/dagster_snowflake/configs.py
|
def define_snowflake_config():
'''Snowflake configuration.
See the Snowflake documentation for reference:
https://docs.snowflake.net/manuals/user-guide/python-connector-api.html
'''
account = Field(
String,
description='Your Snowflake account name. For more details, see https://bit.ly/2FBL320.',
is_optional=True,
)
user = Field(String, description='User login name.', is_optional=False)
password = Field(String, description='User password.', is_optional=False)
database = Field(
String,
description='''Name of the default database to use. After login, you can use USE DATABASE
to change the database.''',
is_optional=True,
)
schema = Field(
String,
description='''Name of the default schema to use. After login, you can use USE SCHEMA to
change the schema.''',
is_optional=True,
)
role = Field(
String,
description='''Name of the default role to use. After login, you can use USE ROLE to change
the role.''',
is_optional=True,
)
warehouse = Field(
String,
description='''Name of the default warehouse to use. After login, you can use USE WAREHOUSE
        to change the warehouse.''',
is_optional=True,
)
autocommit = Field(
Bool,
description='''None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True
or False to enable or disable autocommit mode in the session, respectively.''',
is_optional=True,
)
client_prefetch_threads = Field(
Int,
description='''Number of threads used to download the results sets (4 by default).
Increasing the value improves fetch performance but requires more memory.''',
is_optional=True,
)
client_session_keep_alive = Field(
String,
description='''False by default. Set this to True to keep the session active indefinitely,
even if there is no activity from the user. Make certain to call the close method to
terminate the thread properly or the process may hang.''',
is_optional=True,
)
login_timeout = Field(
Int,
description='''Timeout in seconds for login. By default, 60 seconds. The login request gives
up after the timeout length if the HTTP response is "success".''',
is_optional=True,
)
network_timeout = Field(
Int,
description='''Timeout in seconds for all other operations. By default, none/infinite. A
general request gives up after the timeout length if the HTTP response is not "success"''',
is_optional=True,
)
ocsp_response_cache_filename = Field(
Path,
description='''URI for the OCSP response cache file.
By default, the OCSP response cache file is created in the cache directory.''',
is_optional=True,
)
validate_default_parameters = Field(
Bool,
        description='''False by default. If True, raise an exception if any of the specified
        database, schema, or warehouse does not exist.''',
is_optional=True,
)
paramstyle = Field(
# TODO should validate only against permissible values for this
String,
description='''pyformat by default for client side binding. Specify qmark or numeric to
change bind variable formats for server side binding.''',
is_optional=True,
)
timezone = Field(
String,
description='''None by default, which honors the Snowflake parameter TIMEZONE. Set to a
valid time zone (e.g. America/Los_Angeles) to set the session time zone.''',
is_optional=True,
)
return Field(
Dict(
fields={
'account': account,
'user': user,
'password': password,
'database': database,
'schema': schema,
'role': role,
'warehouse': warehouse,
'autocommit': autocommit,
'client_prefetch_threads': client_prefetch_threads,
'client_session_keep_alive': client_session_keep_alive,
'login_timeout': login_timeout,
'network_timeout': network_timeout,
'ocsp_response_cache_filename': ocsp_response_cache_filename,
'validate_default_parameters': validate_default_parameters,
'paramstyle': paramstyle,
'timezone': timezone,
}
),
description='Snowflake configuration',
)
|
def define_snowflake_config():
'''Snowflake configuration.
See the Snowflake documentation for reference:
https://docs.snowflake.net/manuals/user-guide/python-connector-api.html
'''
account = Field(
String,
description='Your Snowflake account name. For more details, see https://bit.ly/2FBL320.',
is_optional=True,
)
user = Field(String, description='User login name.', is_optional=False)
password = Field(String, description='User password.', is_optional=False)
database = Field(
String,
description='''Name of the default database to use. After login, you can use USE DATABASE
to change the database.''',
is_optional=True,
)
schema = Field(
String,
description='''Name of the default schema to use. After login, you can use USE SCHEMA to
change the schema.''',
is_optional=True,
)
role = Field(
String,
description='''Name of the default role to use. After login, you can use USE ROLE to change
the role.''',
is_optional=True,
)
warehouse = Field(
String,
description='''Name of the default warehouse to use. After login, you can use USE WAREHOUSE
        to change the warehouse.''',
is_optional=True,
)
autocommit = Field(
Bool,
description='''None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True
or False to enable or disable autocommit mode in the session, respectively.''',
is_optional=True,
)
client_prefetch_threads = Field(
Int,
description='''Number of threads used to download the results sets (4 by default).
Increasing the value improves fetch performance but requires more memory.''',
is_optional=True,
)
client_session_keep_alive = Field(
String,
description='''False by default. Set this to True to keep the session active indefinitely,
even if there is no activity from the user. Make certain to call the close method to
terminate the thread properly or the process may hang.''',
is_optional=True,
)
login_timeout = Field(
Int,
description='''Timeout in seconds for login. By default, 60 seconds. The login request gives
up after the timeout length if the HTTP response is "success".''',
is_optional=True,
)
network_timeout = Field(
Int,
description='''Timeout in seconds for all other operations. By default, none/infinite. A
general request gives up after the timeout length if the HTTP response is not "success"''',
is_optional=True,
)
ocsp_response_cache_filename = Field(
Path,
description='''URI for the OCSP response cache file.
By default, the OCSP response cache file is created in the cache directory.''',
is_optional=True,
)
validate_default_parameters = Field(
Bool,
        description='''False by default. If True, raise an exception if any of the specified
        database, schema, or warehouse does not exist.''',
is_optional=True,
)
paramstyle = Field(
# TODO should validate only against permissible values for this
String,
description='''pyformat by default for client side binding. Specify qmark or numeric to
change bind variable formats for server side binding.''',
is_optional=True,
)
timezone = Field(
String,
description='''None by default, which honors the Snowflake parameter TIMEZONE. Set to a
valid time zone (e.g. America/Los_Angeles) to set the session time zone.''',
is_optional=True,
)
return Field(
Dict(
fields={
'account': account,
'user': user,
'password': password,
'database': database,
'schema': schema,
'role': role,
'warehouse': warehouse,
'autocommit': autocommit,
'client_prefetch_threads': client_prefetch_threads,
'client_session_keep_alive': client_session_keep_alive,
'login_timeout': login_timeout,
'network_timeout': network_timeout,
'ocsp_response_cache_filename': ocsp_response_cache_filename,
'validate_default_parameters': validate_default_parameters,
'paramstyle': paramstyle,
'timezone': timezone,
}
),
description='Snowflake configuration',
)
|
[
"Snowflake",
"configuration",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-snowflake/dagster_snowflake/configs.py#L4-L136
|
[
"def",
"define_snowflake_config",
"(",
")",
":",
"account",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'Your Snowflake account name. For more details, see https://bit.ly/2FBL320.'",
",",
"is_optional",
"=",
"True",
",",
")",
"user",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'User login name.'",
",",
"is_optional",
"=",
"False",
")",
"password",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'User password.'",
",",
"is_optional",
"=",
"False",
")",
"database",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Name of the default database to use. After login, you can use USE DATABASE\n to change the database.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"schema",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Name of the default schema to use. After login, you can use USE SCHEMA to \n change the schema.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"role",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Name of the default role to use. After login, you can use USE ROLE to change\n the role.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"warehouse",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Name of the default warehouse to use. After login, you can use USE WAREHOUSE\n to change the role.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"autocommit",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True\n or False to enable or disable autocommit mode in the session, respectively.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"client_prefetch_threads",
"=",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Number of threads used to download the results sets (4 by default).\n Increasing the value improves fetch performance but requires more memory.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"client_session_keep_alive",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''False by default. Set this to True to keep the session active indefinitely,\n even if there is no activity from the user. Make certain to call the close method to\n terminate the thread properly or the process may hang.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"login_timeout",
"=",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Timeout in seconds for login. By default, 60 seconds. The login request gives\n up after the timeout length if the HTTP response is \"success\".'''",
",",
"is_optional",
"=",
"True",
",",
")",
"network_timeout",
"=",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Timeout in seconds for all other operations. By default, none/infinite. A\n general request gives up after the timeout length if the HTTP response is not \"success\"'''",
",",
"is_optional",
"=",
"True",
",",
")",
"ocsp_response_cache_filename",
"=",
"Field",
"(",
"Path",
",",
"description",
"=",
"'''URI for the OCSP response cache file.\n By default, the OCSP response cache file is created in the cache directory.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"validate_default_parameters",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''False by default. Raise an exception if either one of specified database,\n schema or warehouse doesn't exists if True.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"paramstyle",
"=",
"Field",
"(",
"# TODO should validate only against permissible values for this",
"String",
",",
"description",
"=",
"'''pyformat by default for client side binding. Specify qmark or numeric to\n change bind variable formats for server side binding.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"timezone",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''None by default, which honors the Snowflake parameter TIMEZONE. Set to a\n valid time zone (e.g. America/Los_Angeles) to set the session time zone.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"return",
"Field",
"(",
"Dict",
"(",
"fields",
"=",
"{",
"'account'",
":",
"account",
",",
"'user'",
":",
"user",
",",
"'password'",
":",
"password",
",",
"'database'",
":",
"database",
",",
"'schema'",
":",
"schema",
",",
"'role'",
":",
"role",
",",
"'warehouse'",
":",
"warehouse",
",",
"'autocommit'",
":",
"autocommit",
",",
"'client_prefetch_threads'",
":",
"client_prefetch_threads",
",",
"'client_session_keep_alive'",
":",
"client_session_keep_alive",
",",
"'login_timeout'",
":",
"login_timeout",
",",
"'network_timeout'",
":",
"network_timeout",
",",
"'ocsp_response_cache_filename'",
":",
"ocsp_response_cache_filename",
",",
"'validate_default_parameters'",
":",
"validate_default_parameters",
",",
"'paramstyle'",
":",
"paramstyle",
",",
"'timezone'",
":",
"timezone",
",",
"}",
")",
",",
"description",
"=",
"'Snowflake configuration'",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
_PlanBuilder.build
|
Builds the execution plan.
|
python_modules/dagster/dagster/core/execution_plan/plan.py
|
def build(self, pipeline_def, artifacts_persisted):
'''Builds the execution plan.
'''
# Construct dependency dictionary
deps = {step.key: set() for step in self.steps}
for step in self.steps:
for step_input in step.step_inputs:
deps[step.key].add(step_input.prev_output_handle.step_key)
step_dict = {step.key: step for step in self.steps}
return ExecutionPlan(pipeline_def, step_dict, deps, artifacts_persisted)
|
def build(self, pipeline_def, artifacts_persisted):
'''Builds the execution plan.
'''
# Construct dependency dictionary
deps = {step.key: set() for step in self.steps}
for step in self.steps:
for step_input in step.step_inputs:
deps[step.key].add(step_input.prev_output_handle.step_key)
step_dict = {step.key: step for step in self.steps}
return ExecutionPlan(pipeline_def, step_dict, deps, artifacts_persisted)
|
[
"Builds",
"the",
"execution",
"plan",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution_plan/plan.py#L72-L85
|
[
"def",
"build",
"(",
"self",
",",
"pipeline_def",
",",
"artifacts_persisted",
")",
":",
"# Construct dependency dictionary",
"deps",
"=",
"{",
"step",
".",
"key",
":",
"set",
"(",
")",
"for",
"step",
"in",
"self",
".",
"steps",
"}",
"for",
"step",
"in",
"self",
".",
"steps",
":",
"for",
"step_input",
"in",
"step",
".",
"step_inputs",
":",
"deps",
"[",
"step",
".",
"key",
"]",
".",
"add",
"(",
"step_input",
".",
"prev_output_handle",
".",
"step_key",
")",
"step_dict",
"=",
"{",
"step",
".",
"key",
":",
"step",
"for",
"step",
"in",
"self",
".",
"steps",
"}",
"return",
"ExecutionPlan",
"(",
"pipeline_def",
",",
"step_dict",
",",
"deps",
",",
"artifacts_persisted",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
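The dependency-dictionary construction in build above can be replayed on plain tuples; Step and StepInput here are simplified stand-ins for dagster's step objects:

import collections

Step = collections.namedtuple('Step', 'key step_inputs')
StepInput = collections.namedtuple('StepInput', 'prev_step_key')

steps = [
    Step('load', []),
    Step('transform', [StepInput('load')]),
    Step('store', [StepInput('transform')]),
]

# Same shape as in build: every step key maps to the set of upstream step keys.
deps = {step.key: set() for step in steps}
for step in steps:
    for step_input in step.step_inputs:
        deps[step.key].add(step_input.prev_step_key)

assert deps == {'load': set(), 'transform': {'load'}, 'store': {'transform'}}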
test
|
ExecutionPlan.build
|
Here we build a new ExecutionPlan from a pipeline definition and the environment config.
To do this, we iterate through the pipeline's solids in topological order, and hand off the
execution steps for each solid to a companion _PlanBuilder object.
Once we've processed the entire pipeline, we invoke _PlanBuilder.build() to construct the
ExecutionPlan object.
|
python_modules/dagster/dagster/core/execution_plan/plan.py
|
def build(pipeline_def, environment_config):
'''Here we build a new ExecutionPlan from a pipeline definition and the environment config.
To do this, we iterate through the pipeline's solids in topological order, and hand off the
execution steps for each solid to a companion _PlanBuilder object.
Once we've processed the entire pipeline, we invoke _PlanBuilder.build() to construct the
ExecutionPlan object.
'''
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.inst_param(environment_config, 'environment_config', EnvironmentConfig)
plan_builder = _PlanBuilder()
for solid in solids_in_topological_order(pipeline_def):
### 1. INPUTS
# Create and add execution plan steps for solid inputs
step_inputs = []
for input_def in solid.definition.input_defs:
prev_step_output_handle = get_input_source_step_handle(
pipeline_def, environment_config, plan_builder, solid, input_def
)
# We return None for the handle (see above in get_input_source_step_handle) when the
# input def runtime type is "Nothing"
if not prev_step_output_handle:
continue
subplan = create_subplan_for_input(
pipeline_def, environment_config, solid, prev_step_output_handle, input_def
)
plan_builder.add_steps(subplan.steps)
step_inputs.append(
StepInput(
input_def.name, input_def.runtime_type, subplan.terminal_step_output_handle
)
)
### 2. TRANSFORM FUNCTION
# Create and add execution plan step for the solid transform function
solid_transform_step = create_transform_step(
pipeline_def, environment_config, solid, step_inputs
)
plan_builder.add_step(solid_transform_step)
### 3. OUTPUTS
# Create and add execution plan steps (and output handles) for solid outputs
for output_def in solid.definition.output_defs:
subplan = create_subplan_for_output(
pipeline_def, environment_config, solid, solid_transform_step, output_def
)
plan_builder.add_steps(subplan.steps)
output_handle = solid.output_handle(output_def.name)
plan_builder.set_output_handle(output_handle, subplan.terminal_step_output_handle)
# Finally, we build and return the execution plan
return plan_builder.build(
pipeline_def=pipeline_def,
artifacts_persisted=environment_config.storage.construct_run_storage().is_persistent,
)
|
def build(pipeline_def, environment_config):
'''Here we build a new ExecutionPlan from a pipeline definition and the environment config.
To do this, we iterate through the pipeline's solids in topological order, and hand off the
execution steps for each solid to a companion _PlanBuilder object.
Once we've processed the entire pipeline, we invoke _PlanBuilder.build() to construct the
ExecutionPlan object.
'''
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.inst_param(environment_config, 'environment_config', EnvironmentConfig)
plan_builder = _PlanBuilder()
for solid in solids_in_topological_order(pipeline_def):
### 1. INPUTS
# Create and add execution plan steps for solid inputs
step_inputs = []
for input_def in solid.definition.input_defs:
prev_step_output_handle = get_input_source_step_handle(
pipeline_def, environment_config, plan_builder, solid, input_def
)
# We return None for the handle (see above in get_input_source_step_handle) when the
# input def runtime type is "Nothing"
if not prev_step_output_handle:
continue
subplan = create_subplan_for_input(
pipeline_def, environment_config, solid, prev_step_output_handle, input_def
)
plan_builder.add_steps(subplan.steps)
step_inputs.append(
StepInput(
input_def.name, input_def.runtime_type, subplan.terminal_step_output_handle
)
)
### 2. TRANSFORM FUNCTION
# Create and add execution plan step for the solid transform function
solid_transform_step = create_transform_step(
pipeline_def, environment_config, solid, step_inputs
)
plan_builder.add_step(solid_transform_step)
### 3. OUTPUTS
# Create and add execution plan steps (and output handles) for solid outputs
for output_def in solid.definition.output_defs:
subplan = create_subplan_for_output(
pipeline_def, environment_config, solid, solid_transform_step, output_def
)
plan_builder.add_steps(subplan.steps)
output_handle = solid.output_handle(output_def.name)
plan_builder.set_output_handle(output_handle, subplan.terminal_step_output_handle)
# Finally, we build and return the execution plan
return plan_builder.build(
pipeline_def=pipeline_def,
artifacts_persisted=environment_config.storage.construct_run_storage().is_persistent,
)
|
[
"Here",
"we",
"build",
"a",
"new",
"ExecutionPlan",
"from",
"a",
"pipeline",
"definition",
"and",
"the",
"environment",
"config",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution_plan/plan.py#L193-L255
|
[
"def",
"build",
"(",
"pipeline_def",
",",
"environment_config",
")",
":",
"check",
".",
"inst_param",
"(",
"pipeline_def",
",",
"'pipeline_def'",
",",
"PipelineDefinition",
")",
"check",
".",
"inst_param",
"(",
"environment_config",
",",
"'environment_config'",
",",
"EnvironmentConfig",
")",
"plan_builder",
"=",
"_PlanBuilder",
"(",
")",
"for",
"solid",
"in",
"solids_in_topological_order",
"(",
"pipeline_def",
")",
":",
"### 1. INPUTS",
"# Create and add execution plan steps for solid inputs",
"step_inputs",
"=",
"[",
"]",
"for",
"input_def",
"in",
"solid",
".",
"definition",
".",
"input_defs",
":",
"prev_step_output_handle",
"=",
"get_input_source_step_handle",
"(",
"pipeline_def",
",",
"environment_config",
",",
"plan_builder",
",",
"solid",
",",
"input_def",
")",
"# We return None for the handle (see above in get_input_source_step_handle) when the",
"# input def runtime type is \"Nothing\"",
"if",
"not",
"prev_step_output_handle",
":",
"continue",
"subplan",
"=",
"create_subplan_for_input",
"(",
"pipeline_def",
",",
"environment_config",
",",
"solid",
",",
"prev_step_output_handle",
",",
"input_def",
")",
"plan_builder",
".",
"add_steps",
"(",
"subplan",
".",
"steps",
")",
"step_inputs",
".",
"append",
"(",
"StepInput",
"(",
"input_def",
".",
"name",
",",
"input_def",
".",
"runtime_type",
",",
"subplan",
".",
"terminal_step_output_handle",
")",
")",
"### 2. TRANSFORM FUNCTION",
"# Create and add execution plan step for the solid transform function",
"solid_transform_step",
"=",
"create_transform_step",
"(",
"pipeline_def",
",",
"environment_config",
",",
"solid",
",",
"step_inputs",
")",
"plan_builder",
".",
"add_step",
"(",
"solid_transform_step",
")",
"### 3. OUTPUTS",
"# Create and add execution plan steps (and output handles) for solid outputs",
"for",
"output_def",
"in",
"solid",
".",
"definition",
".",
"output_defs",
":",
"subplan",
"=",
"create_subplan_for_output",
"(",
"pipeline_def",
",",
"environment_config",
",",
"solid",
",",
"solid_transform_step",
",",
"output_def",
")",
"plan_builder",
".",
"add_steps",
"(",
"subplan",
".",
"steps",
")",
"output_handle",
"=",
"solid",
".",
"output_handle",
"(",
"output_def",
".",
"name",
")",
"plan_builder",
".",
"set_output_handle",
"(",
"output_handle",
",",
"subplan",
".",
"terminal_step_output_handle",
")",
"# Finally, we build and return the execution plan",
"return",
"plan_builder",
".",
"build",
"(",
"pipeline_def",
"=",
"pipeline_def",
",",
"artifacts_persisted",
"=",
"environment_config",
".",
"storage",
".",
"construct_run_storage",
"(",
")",
".",
"is_persistent",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
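The topological iteration that solids_in_topological_order provides can be sketched with Kahn's algorithm over a deps mapping like the one built above; topological_order is a local stand-in, not the dagster helper:

def topological_order(deps):
    indegree = {node: len(parents) for node, parents in deps.items()}
    ready = sorted(node for node, count in indegree.items() if count == 0)
    order = []
    while ready:
        node = ready.pop(0)
        order.append(node)
        # Releasing a node may unblock its downstream dependents.
        for child, parents in sorted(deps.items()):
            if node in parents:
                indegree[child] -= 1
                if indegree[child] == 0:
                    ready.append(child)
    return order

assert topological_order(
    {'load': set(), 'transform': {'load'}, 'store': {'transform'}}
) == ['load', 'transform', 'store']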
test
|
_build_sub_pipeline
|
Build a pipeline which is a subset of another pipeline.
Only includes the solids which are in solid_names.
|
python_modules/dagster/dagster/core/definitions/pipeline.py
|
def _build_sub_pipeline(pipeline_def, solid_names):
'''
Build a pipeline which is a subset of another pipeline.
Only includes the solids which are in solid_names.
'''
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.list_param(solid_names, 'solid_names', of_type=str)
solid_name_set = set(solid_names)
solids = list(map(pipeline_def.solid_named, solid_names))
deps = {_dep_key_of(solid): {} for solid in solids}
def _out_handle_of_inp(input_handle):
if pipeline_def.dependency_structure.has_dep(input_handle):
output_handle = pipeline_def.dependency_structure.get_dep(input_handle)
if output_handle.solid.name in solid_name_set:
return output_handle
return None
for solid in solids:
for input_handle in solid.input_handles():
output_handle = _out_handle_of_inp(input_handle)
if output_handle:
deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
return PipelineDefinition(
name=pipeline_def.name,
solids=list({solid.definition for solid in solids}),
context_definitions=pipeline_def.context_definitions,
dependencies=deps,
)
|
def _build_sub_pipeline(pipeline_def, solid_names):
'''
Build a pipeline which is a subset of another pipeline.
Only includes the solids which are in solid_names.
'''
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
check.list_param(solid_names, 'solid_names', of_type=str)
solid_name_set = set(solid_names)
solids = list(map(pipeline_def.solid_named, solid_names))
deps = {_dep_key_of(solid): {} for solid in solids}
def _out_handle_of_inp(input_handle):
if pipeline_def.dependency_structure.has_dep(input_handle):
output_handle = pipeline_def.dependency_structure.get_dep(input_handle)
if output_handle.solid.name in solid_name_set:
return output_handle
return None
for solid in solids:
for input_handle in solid.input_handles():
output_handle = _out_handle_of_inp(input_handle)
if output_handle:
deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
solid=output_handle.solid.name, output=output_handle.output_def.name
)
return PipelineDefinition(
name=pipeline_def.name,
solids=list({solid.definition for solid in solids}),
context_definitions=pipeline_def.context_definitions,
dependencies=deps,
)
|
[
"Build",
"a",
"pipeline",
"which",
"is",
"a",
"subset",
"of",
"another",
"pipeline",
".",
"Only",
"includes",
"the",
"solids",
"which",
"are",
"in",
"solid_names",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/pipeline.py#L302-L335
|
[
"def",
"_build_sub_pipeline",
"(",
"pipeline_def",
",",
"solid_names",
")",
":",
"check",
".",
"inst_param",
"(",
"pipeline_def",
",",
"'pipeline_def'",
",",
"PipelineDefinition",
")",
"check",
".",
"list_param",
"(",
"solid_names",
",",
"'solid_names'",
",",
"of_type",
"=",
"str",
")",
"solid_name_set",
"=",
"set",
"(",
"solid_names",
")",
"solids",
"=",
"list",
"(",
"map",
"(",
"pipeline_def",
".",
"solid_named",
",",
"solid_names",
")",
")",
"deps",
"=",
"{",
"_dep_key_of",
"(",
"solid",
")",
":",
"{",
"}",
"for",
"solid",
"in",
"solids",
"}",
"def",
"_out_handle_of_inp",
"(",
"input_handle",
")",
":",
"if",
"pipeline_def",
".",
"dependency_structure",
".",
"has_dep",
"(",
"input_handle",
")",
":",
"output_handle",
"=",
"pipeline_def",
".",
"dependency_structure",
".",
"get_dep",
"(",
"input_handle",
")",
"if",
"output_handle",
".",
"solid",
".",
"name",
"in",
"solid_name_set",
":",
"return",
"output_handle",
"return",
"None",
"for",
"solid",
"in",
"solids",
":",
"for",
"input_handle",
"in",
"solid",
".",
"input_handles",
"(",
")",
":",
"output_handle",
"=",
"_out_handle_of_inp",
"(",
"input_handle",
")",
"if",
"output_handle",
":",
"deps",
"[",
"_dep_key_of",
"(",
"solid",
")",
"]",
"[",
"input_handle",
".",
"input_def",
".",
"name",
"]",
"=",
"DependencyDefinition",
"(",
"solid",
"=",
"output_handle",
".",
"solid",
".",
"name",
",",
"output",
"=",
"output_handle",
".",
"output_def",
".",
"name",
")",
"return",
"PipelineDefinition",
"(",
"name",
"=",
"pipeline_def",
".",
"name",
",",
"solids",
"=",
"list",
"(",
"{",
"solid",
".",
"definition",
"for",
"solid",
"in",
"solids",
"}",
")",
",",
"context_definitions",
"=",
"pipeline_def",
".",
"context_definitions",
",",
"dependencies",
"=",
"deps",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
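Reduced to plain dicts, the subset logic in _build_sub_pipeline keeps only the selected nodes and drops edges whose upstream side falls outside the subset; sub_deps is a hypothetical helper:

def sub_deps(deps, keep):
    keep = set(keep)
    return {
        node: {parent for parent in parents if parent in keep}
        for node, parents in deps.items()
        if node in keep
    }

full = {'load': set(), 'transform': {'load'}, 'store': {'transform'}}
# 'transform' loses its edge to the excluded 'load' solid.
assert sub_deps(full, ['transform', 'store']) == {
    'transform': set(),
    'store': {'transform'},
}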
test
|
PipelineDefinition.solid_named
|
Return the solid named "name". Throws if it does not exist.
Args:
name (str): Name of solid
Returns:
SolidDefinition: SolidDefinition with correct name.
|
python_modules/dagster/dagster/core/definitions/pipeline.py
|
def solid_named(self, name):
'''Return the solid named "name". Throws if it does not exist.
Args:
name (str): Name of solid
Returns:
SolidDefinition: SolidDefinition with correct name.
'''
check.str_param(name, 'name')
if name not in self._solid_dict:
raise DagsterInvariantViolationError(
'Pipeline {pipeline_name} has no solid named {name}.'.format(
pipeline_name=self.name, name=name
)
)
return self._solid_dict[name]
|
def solid_named(self, name):
'''Return the solid named "name". Throws if it does not exist.
Args:
name (str): Name of solid
Returns:
SolidDefinition: SolidDefinition with correct name.
'''
check.str_param(name, 'name')
if name not in self._solid_dict:
raise DagsterInvariantViolationError(
'Pipeline {pipeline_name} has no solid named {name}.'.format(
pipeline_name=self.name, name=name
)
)
return self._solid_dict[name]
|
[
"Return",
"the",
"solid",
"named",
"name",
".",
"Throws",
"if",
"it",
"does",
"not",
"exist",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/pipeline.py#L185-L201
|
[
"def",
"solid_named",
"(",
"self",
",",
"name",
")",
":",
"check",
".",
"str_param",
"(",
"name",
",",
"'name'",
")",
"if",
"name",
"not",
"in",
"self",
".",
"_solid_dict",
":",
"raise",
"DagsterInvariantViolationError",
"(",
"'Pipeline {pipeline_name} has no solid named {name}.'",
".",
"format",
"(",
"pipeline_name",
"=",
"self",
".",
"name",
",",
"name",
"=",
"name",
")",
")",
"return",
"self",
".",
"_solid_dict",
"[",
"name",
"]"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
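
A minimal lookup sketch for solid_named, assuming DagsterInvariantViolationError is importable from the top-level dagster package and `pipeline` is any PipelineDefinition; the solid name is invented:

from dagster import DagsterInvariantViolationError  # assumed import path

try:
    ingest = pipeline.solid_named('ingest')  # 'ingest' is a made-up name
except DagsterInvariantViolationError as exc:
    # Raised when the pipeline has no solid with the requested name.
    print('lookup failed:', exc)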
|
test
|
construct_publish_comands
|
Get the shell commands we'll use to actually build and publish a package to PyPI.
|
bin/publish.py
|
def construct_publish_comands(additional_steps=None, nightly=False):
'''Get the shell commands we'll use to actually build and publish a package to PyPI.'''
publish_commands = (
['rm -rf dist']
+ (additional_steps if additional_steps else [])
+ [
'python setup.py sdist bdist_wheel{nightly}'.format(
nightly=' --nightly' if nightly else ''
),
'twine upload dist/*',
]
)
return publish_commands
|
def construct_publish_comands(additional_steps=None, nightly=False):
'''Get the shell commands we'll use to actually build and publish a package to PyPI.'''
publish_commands = (
['rm -rf dist']
+ (additional_steps if additional_steps else [])
+ [
'python setup.py sdist bdist_wheel{nightly}'.format(
nightly=' --nightly' if nightly else ''
),
'twine upload dist/*',
]
)
return publish_commands
|
[
"Get",
"the",
"shell",
"commands",
"we",
"ll",
"use",
"to",
"actually",
"build",
"and",
"publish",
"a",
"package",
"to",
"PyPI",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/bin/publish.py#L50-L63
|
[
"def",
"construct_publish_comands",
"(",
"additional_steps",
"=",
"None",
",",
"nightly",
"=",
"False",
")",
":",
"publish_commands",
"=",
"(",
"[",
"'rm -rf dist'",
"]",
"+",
"(",
"additional_steps",
"if",
"additional_steps",
"else",
"[",
"]",
")",
"+",
"[",
"'python setup.py sdist bdist_wheel{nightly}'",
".",
"format",
"(",
"nightly",
"=",
"' --nightly'",
"if",
"nightly",
"else",
"''",
")",
",",
"'twine upload dist/*'",
",",
"]",
")",
"return",
"publish_commands"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
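
Because construct_publish_comands is pure, its behavior is easy to demonstrate; this self-contained sketch repeats the definition above and prints the command list for a nightly build with one extra (invented) step:

def construct_publish_comands(additional_steps=None, nightly=False):
    # Same logic as above, repeated so this snippet runs on its own.
    return (
        ['rm -rf dist']
        + (additional_steps if additional_steps else [])
        + [
            'python setup.py sdist bdist_wheel{nightly}'.format(
                nightly=' --nightly' if nightly else ''
            ),
            'twine upload dist/*',
        ]
    )

print(construct_publish_comands(['make build_assets'], nightly=True))
# ['rm -rf dist', 'make build_assets',
#  'python setup.py sdist bdist_wheel --nightly', 'twine upload dist/*']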
|
test
|
publish
|
Publishes (uploads) all submodules to PyPI.
Appropriate credentials must be available to twine, e.g. in a ~/.pypirc file, and users must
be permissioned as maintainers on the PyPI projects. Publishing will fail if versions (git
tags and Python versions) are not in lockstep, if the current commit is not tagged, or if
there are untracked changes.
|
bin/publish.py
|
def publish(nightly):
"""Publishes (uploads) all submodules to PyPI.
Appropriate credentials must be available to twine, e.g. in a ~/.pypirc file, and users must
be permissioned as maintainers on the PyPI projects. Publishing will fail if versions (git
tags and Python versions) are not in lockstep, if the current commit is not tagged, or if
there are untracked changes.
"""
try:
RCParser.from_file()
except ConfigFileError:
raise ConfigFileError(PYPIRC_EXCEPTION_MESSAGE)
assert '\nwheel' in subprocess.check_output(['pip', 'list']).decode('utf-8'), (
'You must have wheel installed in order to build packages for release -- run '
'`pip install wheel`.'
)
assert which_('twine'), (
        'You must have twine installed in order to upload packages to PyPI -- run '
'`pip install twine`.'
)
assert which_('yarn'), (
'You must have yarn installed in order to build dagit for release -- see '
'https://yarnpkg.com/lang/en/docs/install/'
)
print('Checking that module versions are in lockstep')
check_versions(nightly=nightly)
if not nightly:
print('... and match git tag on most recent commit...')
check_git_status()
print('Publishing packages to PyPI...')
if nightly:
new_version = increment_nightly_versions()
commit_new_version('nightly: {nightly}'.format(nightly=new_version['__nightly__']))
set_git_tag('{nightly}'.format(nightly=new_version['__nightly__']))
git_push()
git_push(tags=True)
publish_all(nightly)
|
def publish(nightly):
"""Publishes (uploads) all submodules to PyPI.
Appropriate credentials must be available to twine, e.g. in a ~/.pypirc file, and users must
be permissioned as maintainers on the PyPI projects. Publishing will fail if versions (git
tags and Python versions) are not in lockstep, if the current commit is not tagged, or if
there are untracked changes.
"""
try:
RCParser.from_file()
except ConfigFileError:
raise ConfigFileError(PYPIRC_EXCEPTION_MESSAGE)
assert '\nwheel' in subprocess.check_output(['pip', 'list']).decode('utf-8'), (
'You must have wheel installed in order to build packages for release -- run '
'`pip install wheel`.'
)
assert which_('twine'), (
        'You must have twine installed in order to upload packages to PyPI -- run '
'`pip install twine`.'
)
assert which_('yarn'), (
'You must have yarn installed in order to build dagit for release -- see '
'https://yarnpkg.com/lang/en/docs/install/'
)
print('Checking that module versions are in lockstep')
check_versions(nightly=nightly)
if not nightly:
print('... and match git tag on most recent commit...')
check_git_status()
print('Publishing packages to PyPI...')
if nightly:
new_version = increment_nightly_versions()
commit_new_version('nightly: {nightly}'.format(nightly=new_version['__nightly__']))
set_git_tag('{nightly}'.format(nightly=new_version['__nightly__']))
git_push()
git_push(tags=True)
publish_all(nightly)
|
[
"Publishes",
"(",
"uploads",
")",
"all",
"submodules",
"to",
"PyPI",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/bin/publish.py#L416-L459
|
[
"def",
"publish",
"(",
"nightly",
")",
":",
"try",
":",
"RCParser",
".",
"from_file",
"(",
")",
"except",
"ConfigFileError",
":",
"raise",
"ConfigFileError",
"(",
"PYPIRC_EXCEPTION_MESSAGE",
")",
"assert",
"'\\nwheel'",
"in",
"subprocess",
".",
"check_output",
"(",
"[",
"'pip'",
",",
"'list'",
"]",
")",
".",
"decode",
"(",
"'utf-8'",
")",
",",
"(",
"'You must have wheel installed in order to build packages for release -- run '",
"'`pip install wheel`.'",
")",
"assert",
"which_",
"(",
"'twine'",
")",
",",
"(",
"'You must have twin installed in order to upload packages to PyPI -- run '",
"'`pip install twine`.'",
")",
"assert",
"which_",
"(",
"'yarn'",
")",
",",
"(",
"'You must have yarn installed in order to build dagit for release -- see '",
"'https://yarnpkg.com/lang/en/docs/install/'",
")",
"print",
"(",
"'Checking that module versions are in lockstep'",
")",
"check_versions",
"(",
"nightly",
"=",
"nightly",
")",
"if",
"not",
"nightly",
":",
"print",
"(",
"'... and match git tag on most recent commit...'",
")",
"check_git_status",
"(",
")",
"print",
"(",
"'Publishing packages to PyPI...'",
")",
"if",
"nightly",
":",
"new_version",
"=",
"increment_nightly_versions",
"(",
")",
"commit_new_version",
"(",
"'nightly: {nightly}'",
".",
"format",
"(",
"nightly",
"=",
"new_version",
"[",
"'__nightly__'",
"]",
")",
")",
"set_git_tag",
"(",
"'{nightly}'",
".",
"format",
"(",
"nightly",
"=",
"new_version",
"[",
"'__nightly__'",
"]",
")",
")",
"git_push",
"(",
")",
"git_push",
"(",
"tags",
"=",
"True",
")",
"publish_all",
"(",
"nightly",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
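
publish refuses to run unless wheel, twine, and yarn are available; a stdlib-only sketch of the same precondition pattern, with shutil.which standing in for the repo's which_ helper:

import shutil
import subprocess

# Precondition sketch: the same checks publish() performs, stdlib only.
installed = subprocess.check_output(['pip', 'list']).decode('utf-8')
assert '\nwheel' in installed, 'run `pip install wheel` first'
for tool in ('twine', 'yarn'):
    # shutil.which stands in for the repo's which_ helper
    assert shutil.which(tool), '{} must be on the PATH'.format(tool)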
|
test
|
release
|
Tags all submodules for a new release.
Ensures that git tags, as well as the version.py files in each submodule, agree and that the
new version is strictly greater than the current version. Will fail if the new version
is not an increment (following PEP 440). Creates a new git tag and commit.
|
bin/publish.py
|
def release(version):
"""Tags all submodules for a new release.
Ensures that git tags, as well as the version.py files in each submodule, agree and that the
new version is strictly greater than the current version. Will fail if the new version
is not an increment (following PEP 440). Creates a new git tag and commit.
"""
check_new_version(version)
set_new_version(version)
commit_new_version(version)
set_git_tag(version)
|
def release(version):
"""Tags all submodules for a new release.
Ensures that git tags, as well as the version.py files in each submodule, agree and that the
new version is strictly greater than the current version. Will fail if the new version
is not an increment (following PEP 440). Creates a new git tag and commit.
"""
check_new_version(version)
set_new_version(version)
commit_new_version(version)
set_git_tag(version)
|
[
"Tags",
"all",
"submodules",
"for",
"a",
"new",
"release",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/bin/publish.py#L464-L474
|
[
"def",
"release",
"(",
"version",
")",
":",
"check_new_version",
"(",
"version",
")",
"set_new_version",
"(",
"version",
")",
"commit_new_version",
"(",
"version",
")",
"set_git_tag",
"(",
"version",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
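
The "strictly greater, following PEP 440" rule that release enforces through check_new_version can be sketched with the third-party packaging library; the version strings are illustrative:

from packaging.version import parse  # pip install packaging

current, proposed = parse('0.4.0'), parse('0.4.1')  # illustrative versions
# PEP 440-aware comparison: '0.4.1' > '0.4.0', and '0.4.0' > '0.4.0rc1' too.
assert proposed > current, 'new version must strictly increment the old one'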
|
test
|
PipelineContextDefinition.passthrough_context_definition
|
Create a context definition from a pre-existing context. This can be useful
in testing contexts where you may want to create a context manually and then
pass it into a one-off PipelineDefinition
Args:
context (ExecutionContext): The context that will be provided to the pipeline.
Returns:
PipelineContextDefinition: The passthrough context definition.
|
python_modules/dagster/dagster/core/definitions/context.py
|
def passthrough_context_definition(context_params):
'''Create a context definition from a pre-existing context. This can be useful
in testing contexts where you may want to create a context manually and then
pass it into a one-off PipelineDefinition
Args:
    context (ExecutionContext): The context that will be provided to the pipeline.
Returns:
PipelineContextDefinition: The passthrough context definition.
'''
check.inst_param(context_params, 'context', ExecutionContext)
context_definition = PipelineContextDefinition(context_fn=lambda *_args: context_params)
return {DEFAULT_CONTEXT_NAME: context_definition}
|
def passthrough_context_definition(context_params):
'''Create a context definition from a pre-existing context. This can be useful
in testing contexts where you may want to create a context manually and then
pass it into a one-off PipelineDefinition
Args:
    context (ExecutionContext): The context that will be provided to the pipeline.
Returns:
PipelineContextDefinition: The passthrough context definition.
'''
check.inst_param(context_params, 'context', ExecutionContext)
context_definition = PipelineContextDefinition(context_fn=lambda *_args: context_params)
return {DEFAULT_CONTEXT_NAME: context_definition}
|
[
"Create",
"a",
"context",
"definition",
"from",
"a",
"pre",
"-",
"existing",
"context",
".",
"This",
"can",
"be",
"useful",
"in",
"testing",
"contexts",
"where",
"you",
"may",
"want",
"to",
"create",
"a",
"context",
"manually",
"and",
"then",
"pass",
"it",
"into",
"a",
"one",
"-",
"off",
"PipelineDefinition"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/context.py#L58-L71
|
[
"def",
"passthrough_context_definition",
"(",
"context_params",
")",
":",
"check",
".",
"inst_param",
"(",
"context_params",
",",
"'context'",
",",
"ExecutionContext",
")",
"context_definition",
"=",
"PipelineContextDefinition",
"(",
"context_fn",
"=",
"lambda",
"*",
"_args",
":",
"context_params",
")",
"return",
"{",
"DEFAULT_CONTEXT_NAME",
":",
"context_definition",
"}"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
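
A test-setup sketch for passthrough_context_definition; it assumes ExecutionContext has a no-argument constructor and that `my_solids` is a list of solid definitions defined elsewhere, so everything here besides the method itself is illustrative:

# Hypothetical one-off pipeline for a test; my_solids is defined elsewhere.
context = ExecutionContext()  # assumes a no-argument constructor
pipeline = PipelineDefinition(
    name='one_off_test_pipeline',
    solids=my_solids,
    context_definitions=PipelineContextDefinition.passthrough_context_definition(context),
)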
|
test
|
input_selector_schema
|
A decorator for annotating a function that can take the selected properties
from a ``config_value`` into an instance of a custom type.
Args:
config_cls (Selector)
|
python_modules/dagster/dagster/core/types/config_schema.py
|
def input_selector_schema(config_cls):
'''
A decorator for annotating a function that can take the selected properties
    from a ``config_value`` into an instance of a custom type.
Args:
config_cls (Selector)
'''
config_type = resolve_config_cls_arg(config_cls)
check.param_invariant(config_type.is_selector, 'config_cls')
def _wrap(func):
def _selector(context, config_value):
selector_key, selector_value = single_item(config_value)
return func(context, selector_key, selector_value)
return _create_input_schema(config_type, _selector)
return _wrap
|
def input_selector_schema(config_cls):
'''
A decorator for annotating a function that can take the selected properties
    from a ``config_value`` into an instance of a custom type.
Args:
config_cls (Selector)
'''
config_type = resolve_config_cls_arg(config_cls)
check.param_invariant(config_type.is_selector, 'config_cls')
def _wrap(func):
def _selector(context, config_value):
selector_key, selector_value = single_item(config_value)
return func(context, selector_key, selector_value)
return _create_input_schema(config_type, _selector)
return _wrap
|
[
"A",
"decorator",
"for",
"annotating",
"a",
"function",
"that",
"can",
"take",
"the",
"selected",
"properties",
"from",
"a",
"config_value",
"in",
"to",
"an",
"instance",
"of",
"a",
"custom",
"type",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/config_schema.py#L85-L103
|
[
"def",
"input_selector_schema",
"(",
"config_cls",
")",
":",
"config_type",
"=",
"resolve_config_cls_arg",
"(",
"config_cls",
")",
"check",
".",
"param_invariant",
"(",
"config_type",
".",
"is_selector",
",",
"'config_cls'",
")",
"def",
"_wrap",
"(",
"func",
")",
":",
"def",
"_selector",
"(",
"context",
",",
"config_value",
")",
":",
"selector_key",
",",
"selector_value",
"=",
"single_item",
"(",
"config_value",
")",
"return",
"func",
"(",
"context",
",",
"selector_key",
",",
"selector_value",
")",
"return",
"_create_input_schema",
"(",
"config_type",
",",
"_selector",
")",
"return",
"_wrap"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
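
The core mechanic of input_selector_schema is splitting a one-item selector config into its key and value via single_item; a stdlib re-creation of that step (the config value is made up):

def single_item(d):
    # Mirrors the helper used above: a selector config has exactly one key.
    (key, value), = d.items()
    return key, value

config_value = {'csv': {'path': '/tmp/input.csv'}}  # illustrative selector
selector_key, selector_value = single_item(config_value)
print(selector_key, selector_value)  # csv {'path': '/tmp/input.csv'}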
|
test
|
output_selector_schema
|
A decorator for annotating a function that can take the selected properties
of a ``config_value`` and an instance of a custom type and materialize it.
Args:
config_cls (Selector):
|
python_modules/dagster/dagster/core/types/config_schema.py
|
def output_selector_schema(config_cls):
'''
    A decorator for annotating a function that can take the selected properties
of a ``config_value`` and an instance of a custom type and materialize it.
Args:
config_cls (Selector):
'''
config_type = resolve_config_cls_arg(config_cls)
check.param_invariant(config_type.is_selector, 'config_cls')
def _wrap(func):
def _selector(context, config_value, runtime_value):
selector_key, selector_value = single_item(config_value)
return func(context, selector_key, selector_value, runtime_value)
return _create_output_schema(config_type, _selector)
return _wrap
|
def output_selector_schema(config_cls):
'''
    A decorator for annotating a function that can take the selected properties
of a ``config_value`` and an instance of a custom type and materialize it.
Args:
config_cls (Selector):
'''
config_type = resolve_config_cls_arg(config_cls)
check.param_invariant(config_type.is_selector, 'config_cls')
def _wrap(func):
def _selector(context, config_value, runtime_value):
selector_key, selector_value = single_item(config_value)
return func(context, selector_key, selector_value, runtime_value)
return _create_output_schema(config_type, _selector)
return _wrap
|
[
"A",
"decorator",
"for",
"a",
"annotating",
"a",
"function",
"that",
"can",
"take",
"the",
"selected",
"properties",
"of",
"a",
"config_value",
"and",
"an",
"instance",
"of",
"a",
"custom",
"type",
"and",
"materialize",
"it",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/types/config_schema.py#L130-L148
|
[
"def",
"output_selector_schema",
"(",
"config_cls",
")",
":",
"config_type",
"=",
"resolve_config_cls_arg",
"(",
"config_cls",
")",
"check",
".",
"param_invariant",
"(",
"config_type",
".",
"is_selector",
",",
"'config_cls'",
")",
"def",
"_wrap",
"(",
"func",
")",
":",
"def",
"_selector",
"(",
"context",
",",
"config_value",
",",
"runtime_value",
")",
":",
"selector_key",
",",
"selector_value",
"=",
"single_item",
"(",
"config_value",
")",
"return",
"func",
"(",
"context",
",",
"selector_key",
",",
"selector_value",
",",
"runtime_value",
")",
"return",
"_create_output_schema",
"(",
"config_type",
",",
"_selector",
")",
"return",
"_wrap"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
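
output_selector_schema differs from the input variant only in threading the runtime value through to the wrapped function; a plain-Python sketch of the call shape the decorator produces (all names and values invented):

def materialize_output(context, selector_key, selector_value, runtime_value):
    # The selected branch decides where and how the runtime value is written.
    print('writing', runtime_value, 'as', selector_key, 'to', selector_value)

materialize_output(None, 'csv', {'path': '/tmp/out.csv'}, [1, 2, 3])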
|
test
|
IndentingPrinter.block
|
Automagically wrap a block of text.
|
python_modules/dagster/dagster/utils/indenting_printer.py
|
def block(self, text, prefix=''):
'''Automagically wrap a block of text.'''
wrapper = TextWrapper(
width=self.line_length - len(self.current_indent_str),
initial_indent=prefix,
subsequent_indent=prefix,
break_long_words=False,
break_on_hyphens=False,
)
for line in wrapper.wrap(text):
self.line(line)
|
def block(self, text, prefix=''):
'''Automagically wrap a block of text.'''
wrapper = TextWrapper(
width=self.line_length - len(self.current_indent_str),
initial_indent=prefix,
subsequent_indent=prefix,
break_long_words=False,
break_on_hyphens=False,
)
for line in wrapper.wrap(text):
self.line(line)
|
[
"Automagically",
"wrap",
"a",
"block",
"of",
"text",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/utils/indenting_printer.py#L32-L42
|
[
"def",
"block",
"(",
"self",
",",
"text",
",",
"prefix",
"=",
"''",
")",
":",
"wrapper",
"=",
"TextWrapper",
"(",
"width",
"=",
"self",
".",
"line_length",
"-",
"len",
"(",
"self",
".",
"current_indent_str",
")",
",",
"initial_indent",
"=",
"prefix",
",",
"subsequent_indent",
"=",
"prefix",
",",
"break_long_words",
"=",
"False",
",",
"break_on_hyphens",
"=",
"False",
",",
")",
"for",
"line",
"in",
"wrapper",
".",
"wrap",
"(",
"text",
")",
":",
"self",
".",
"line",
"(",
"line",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
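
block delegates entirely to textwrap.TextWrapper; this stdlib demonstration uses the same wrapper options with an illustrative width and prefix:

from textwrap import TextWrapper

wrapper = TextWrapper(
    width=40,                 # illustrative; block() derives this from line_length
    initial_indent='# ',      # stands in for the prefix argument
    subsequent_indent='# ',
    break_long_words=False,
    break_on_hyphens=False,
)
for line in wrapper.wrap('Automagically wrap a block of text so no line exceeds the width.'):
    print(line)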
|
test
|
_define_shared_fields
|
The following fields are shared between both QueryJobConfig and LoadJobConfig.
|
python_modules/libraries/dagster-gcp/dagster_gcp/configs.py
|
def _define_shared_fields():
'''The following fields are shared between both QueryJobConfig and LoadJobConfig.
'''
clustering_fields = Field(
List(String),
description='''Fields defining clustering for the table
(Defaults to None).
Clustering fields are immutable after table creation.
''',
is_optional=True,
)
create_disposition = Field(
BQCreateDisposition,
description='''Specifies behavior for creating tables.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.createDisposition
''',
is_optional=True,
)
destination_encryption_configuration = Field(
String,
description='''Custom encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or None if using default encryption.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationEncryptionConfiguration
''',
is_optional=True,
)
schema_update_options = Field(
List(BQSchemaUpdateOption),
description='''Specifies updates to the destination table schema to allow as a side effect
of the query job.''',
is_optional=True,
)
time_partitioning = Field(
Dict(
fields={
'expiration_ms': Field(
Int,
description='''Number of milliseconds for which to keep the storage for a
partition.''',
is_optional=True,
),
'field': Field(
String,
description='''If set, the table is partitioned by this field. If not set, the
table is partitioned by pseudo column _PARTITIONTIME. The field must be a
top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.''',
is_optional=True,
),
'require_partition_filter': Field(
Bool,
description='''If set to true, queries over the partitioned table require a
partition filter that can be used for partition elimination to be specified.''',
is_optional=True,
),
}
),
description='Specifies time-based partitioning for the destination table.',
is_optional=True,
)
write_disposition = Field(
BQWriteDisposition,
description='''
Action that occurs if the destination table already exists.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.writeDisposition
''',
is_optional=True,
)
return {
'clustering_fields': clustering_fields,
'create_disposition': create_disposition,
'destination_encryption_configuration': destination_encryption_configuration,
'schema_update_options': schema_update_options,
'time_partitioning': time_partitioning,
'write_disposition': write_disposition,
}
|
def _define_shared_fields():
'''The following fields are shared between both QueryJobConfig and LoadJobConfig.
'''
clustering_fields = Field(
List(String),
description='''Fields defining clustering for the table
(Defaults to None).
Clustering fields are immutable after table creation.
''',
is_optional=True,
)
create_disposition = Field(
BQCreateDisposition,
description='''Specifies behavior for creating tables.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.createDisposition
''',
is_optional=True,
)
destination_encryption_configuration = Field(
String,
description='''Custom encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or None if using default encryption.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationEncryptionConfiguration
''',
is_optional=True,
)
schema_update_options = Field(
List(BQSchemaUpdateOption),
description='''Specifies updates to the destination table schema to allow as a side effect
of the query job.''',
is_optional=True,
)
time_partitioning = Field(
Dict(
fields={
'expiration_ms': Field(
Int,
description='''Number of milliseconds for which to keep the storage for a
partition.''',
is_optional=True,
),
'field': Field(
String,
description='''If set, the table is partitioned by this field. If not set, the
table is partitioned by pseudo column _PARTITIONTIME. The field must be a
top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.''',
is_optional=True,
),
'require_partition_filter': Field(
Bool,
description='''If set to true, queries over the partitioned table require a
partition filter that can be used for partition elimination to be specified.''',
is_optional=True,
),
}
),
description='Specifies time-based partitioning for the destination table.',
is_optional=True,
)
write_disposition = Field(
BQWriteDisposition,
description='''
Action that occurs if the destination table already exists.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.writeDisposition
''',
is_optional=True,
)
return {
'clustering_fields': clustering_fields,
'create_disposition': create_disposition,
'destination_encryption_configuration': destination_encryption_configuration,
'schema_update_options': schema_update_options,
'time_partitioning': time_partitioning,
'write_disposition': write_disposition,
}
|
[
"The",
"following",
"fields",
"are",
"shared",
"between",
"both",
"QueryJobConfig",
"and",
"LoadJobConfig",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-gcp/dagster_gcp/configs.py#L40-L122
|
[
"def",
"_define_shared_fields",
"(",
")",
":",
"clustering_fields",
"=",
"Field",
"(",
"List",
"(",
"String",
")",
",",
"description",
"=",
"'''Fields defining clustering for the table\n\n (Defaults to None).\n\n Clustering fields are immutable after table creation.\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"create_disposition",
"=",
"Field",
"(",
"BQCreateDisposition",
",",
"description",
"=",
"'''Specifies behavior for creating tables.\n See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.createDisposition\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"destination_encryption_configuration",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Custom encryption configuration for the destination table.\n Custom encryption configuration (e.g., Cloud KMS keys) or None if using default encryption.\n See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationEncryptionConfiguration\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"schema_update_options",
"=",
"Field",
"(",
"List",
"(",
"BQSchemaUpdateOption",
")",
",",
"description",
"=",
"'''Specifies updates to the destination table schema to allow as a side effect\n of the query job.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"time_partitioning",
"=",
"Field",
"(",
"Dict",
"(",
"fields",
"=",
"{",
"'expiration_ms'",
":",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Number of milliseconds for which to keep the storage for a\n partition.'''",
",",
"is_optional",
"=",
"True",
",",
")",
",",
"'field'",
":",
"Field",
"(",
"String",
",",
"description",
"=",
"'''If set, the table is partitioned by this field. If not set, the\n table is partitioned by pseudo column _PARTITIONTIME. The field must be a\n top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.'''",
",",
"is_optional",
"=",
"True",
",",
")",
",",
"'require_partition_filter'",
":",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''If set to true, queries over the partitioned table require a\n partition filter that can be used for partition elimination to be specified.'''",
",",
"is_optional",
"=",
"True",
",",
")",
",",
"}",
")",
",",
"description",
"=",
"'Specifies time-based partitioning for the destination table.'",
",",
"is_optional",
"=",
"True",
",",
")",
"write_disposition",
"=",
"Field",
"(",
"BQWriteDisposition",
",",
"description",
"=",
"'''\n Action that occurs if the destination table already exists.\n See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.writeDisposition\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"return",
"{",
"'clustering_fields'",
":",
"clustering_fields",
",",
"'create_disposition'",
":",
"create_disposition",
",",
"'destination_encryption_configuration'",
":",
"destination_encryption_configuration",
",",
"'schema_update_options'",
":",
"schema_update_options",
",",
"'time_partitioning'",
":",
"time_partitioning",
",",
"'write_disposition'",
":",
"write_disposition",
",",
"}"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
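
An illustrative config fragment for the time_partitioning field defined above; the keys and nesting follow the Dict schema, while the values are invented:

time_partitioning_config = {
    'expiration_ms': 86400000,         # keep each partition's storage one day
    'field': 'event_date',             # a hypothetical DATE column
    'require_partition_filter': True,  # force partition pruning in queries
}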
|
test
|
define_bigquery_query_config
|
See:
https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html
|
python_modules/libraries/dagster-gcp/dagster_gcp/configs.py
|
def define_bigquery_query_config():
'''See:
https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html
'''
sf = _define_shared_fields()
allow_large_results = Field(
Bool,
description='''Allow large query results tables (legacy SQL, only)
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.allowLargeResults
''',
is_optional=True,
)
default_dataset = Field(
Dataset,
description='''the default dataset to use for unqualified table names in the query or None
if not set. The default_dataset setter accepts a str of the fully-qualified dataset ID in
        standard SQL format. The value must include a project ID and dataset ID separated by ".".
For example: your-project.your_dataset.
See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset
''',
is_optional=True,
)
destination = Field(
Table,
description='''table where results are written or None if not set. The destination setter
accepts a str of the fully-qualified table ID in standard SQL format. The value must
        include a project ID, dataset ID, and table ID, each separated by ".". For example:
your-project.your_dataset.your_table.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable
''',
is_optional=True,
)
dry_run = Field(
Bool,
description='''True if this query should be a dry run to estimate costs.
See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.dryRun
''',
is_optional=True,
)
flatten_results = Field(
Bool,
description='''Flatten nested/repeated fields in results. (Legacy SQL only)
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.flattenResults
''',
is_optional=True,
)
maximum_billing_tier = Field(
Int,
description='''Deprecated. Changes the billing tier to allow high-compute queries.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBillingTier
''',
is_optional=True,
)
maximum_bytes_billed = Field(
Int,
description='''Maximum bytes to be billed for this job or None if not set.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBytesBilled
''',
is_optional=True,
)
priority = Field(
BQPriority,
description='''Priority of the query.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.priority
''',
is_optional=True,
)
query_parameters = Field(
List(String),
description='''list of parameters for parameterized query (empty by default)
See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.queryParameters
''',
is_optional=True,
)
# TODO:
# Type: Dict[str, google.cloud.bigquery.external_config.ExternalConfig]
# table_definitions = Field(
# PermissiveDict(),
# description='''Definitions for external tables or None if not set.
# See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions
# ''',
# is_optional=True,
# )
# TODO: Need to add this
# Type: List[google.cloud.bigquery.query.UDFResource]
# udf_resources = Field(
# String,
# description='''user defined function resources (empty by default)
# See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.userDefinedFunctionResources
# ''',
# is_optional=True
# )
use_legacy_sql = Field(
Bool,
description='''Use legacy SQL syntax.
See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.useLegacySql
''',
is_optional=True,
)
use_query_cache = Field(
Bool,
description='''Look for the query result in the cache.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.useQueryCache
''',
is_optional=True,
)
return Field(
Dict(
fields={
'query_job_config': Field(
Dict(
fields={
'allow_large_results': allow_large_results,
'clustering_fields': sf['clustering_fields'],
'create_disposition': sf['create_disposition'],
'default_dataset': default_dataset,
'destination': destination,
'destination_encryption_configuration': sf[
'destination_encryption_configuration'
],
'dry_run': dry_run,
'flatten_results': flatten_results,
# TODO: labels
'maximum_billing_tier': maximum_billing_tier,
'maximum_bytes_billed': maximum_bytes_billed,
'priority': priority,
'query_parameters': query_parameters,
# TODO: table_definitions
'schema_update_options': sf['schema_update_options'],
'time_partitioning': sf['time_partitioning'],
# TODO: udf_resources
'use_legacy_sql': use_legacy_sql,
'use_query_cache': use_query_cache,
'write_disposition': sf['write_disposition'],
}
)
)
}
),
description='BigQuery query configuration',
)
|
def define_bigquery_query_config():
'''See:
https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html
'''
sf = _define_shared_fields()
allow_large_results = Field(
Bool,
description='''Allow large query results tables (legacy SQL, only)
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.allowLargeResults
''',
is_optional=True,
)
default_dataset = Field(
Dataset,
description='''the default dataset to use for unqualified table names in the query or None
if not set. The default_dataset setter accepts a str of the fully-qualified dataset ID in
        standard SQL format. The value must include a project ID and dataset ID separated by ".".
For example: your-project.your_dataset.
See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset
''',
is_optional=True,
)
destination = Field(
Table,
description='''table where results are written or None if not set. The destination setter
accepts a str of the fully-qualified table ID in standard SQL format. The value must
        include a project ID, dataset ID, and table ID, each separated by ".". For example:
your-project.your_dataset.your_table.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable
''',
is_optional=True,
)
dry_run = Field(
Bool,
description='''True if this query should be a dry run to estimate costs.
See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.dryRun
''',
is_optional=True,
)
flatten_results = Field(
Bool,
description='''Flatten nested/repeated fields in results. (Legacy SQL only)
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.flattenResults
''',
is_optional=True,
)
maximum_billing_tier = Field(
Int,
description='''Deprecated. Changes the billing tier to allow high-compute queries.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBillingTier
''',
is_optional=True,
)
maximum_bytes_billed = Field(
Int,
description='''Maximum bytes to be billed for this job or None if not set.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBytesBilled
''',
is_optional=True,
)
priority = Field(
BQPriority,
description='''Priority of the query.
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.priority
''',
is_optional=True,
)
query_parameters = Field(
List(String),
description='''list of parameters for parameterized query (empty by default)
See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.queryParameters
''',
is_optional=True,
)
# TODO:
# Type: Dict[str, google.cloud.bigquery.external_config.ExternalConfig]
# table_definitions = Field(
# PermissiveDict(),
# description='''Definitions for external tables or None if not set.
# See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions
# ''',
# is_optional=True,
# )
# TODO: Need to add this
# Type: List[google.cloud.bigquery.query.UDFResource]
# udf_resources = Field(
# String,
# description='''user defined function resources (empty by default)
# See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.userDefinedFunctionResources
# ''',
# is_optional=True
# )
use_legacy_sql = Field(
Bool,
description='''Use legacy SQL syntax.
See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.useLegacySql
''',
is_optional=True,
)
use_query_cache = Field(
Bool,
description='''Look for the query result in the cache.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.useQueryCache
''',
is_optional=True,
)
return Field(
Dict(
fields={
'query_job_config': Field(
Dict(
fields={
'allow_large_results': allow_large_results,
'clustering_fields': sf['clustering_fields'],
'create_disposition': sf['create_disposition'],
'default_dataset': default_dataset,
'destination': destination,
'destination_encryption_configuration': sf[
'destination_encryption_configuration'
],
'dry_run': dry_run,
'flatten_results': flatten_results,
# TODO: labels
'maximum_billing_tier': maximum_billing_tier,
'maximum_bytes_billed': maximum_bytes_billed,
'priority': priority,
'query_parameters': query_parameters,
# TODO: table_definitions
'schema_update_options': sf['schema_update_options'],
'time_partitioning': sf['time_partitioning'],
# TODO: udf_resources
'use_legacy_sql': use_legacy_sql,
'use_query_cache': use_query_cache,
'write_disposition': sf['write_disposition'],
}
)
)
}
),
description='BigQuery query configuration',
)
|
[
"See",
":",
"https",
":",
"//",
"googleapis",
".",
"github",
".",
"io",
"/",
"google",
"-",
"cloud",
"-",
"python",
"/",
"latest",
"/",
"bigquery",
"/",
"generated",
"/",
"google",
".",
"cloud",
".",
"bigquery",
".",
"job",
".",
"QueryJobConfig",
".",
"html"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/libraries/dagster-gcp/dagster_gcp/configs.py#L125-L280
|
[
"def",
"define_bigquery_query_config",
"(",
")",
":",
"sf",
"=",
"_define_shared_fields",
"(",
")",
"allow_large_results",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''Allow large query results tables (legacy SQL, only)\n See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.allowLargeResults\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"default_dataset",
"=",
"Field",
"(",
"Dataset",
",",
"description",
"=",
"'''the default dataset to use for unqualified table names in the query or None\n if not set. The default_dataset setter accepts a str of the fully-qualified dataset ID in\n standard SQL format. The value must included a project ID and dataset ID separated by \".\".\n For example: your-project.your_dataset.\n See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"destination",
"=",
"Field",
"(",
"Table",
",",
"description",
"=",
"'''table where results are written or None if not set. The destination setter\n accepts a str of the fully-qualified table ID in standard SQL format. The value must\n included a project ID, dataset ID, and table ID, each separated by \".\". For example:\n your-project.your_dataset.your_table.\n See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"dry_run",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''True if this query should be a dry run to estimate costs.\n See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.dryRun\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"flatten_results",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''Flatten nested/repeated fields in results. (Legacy SQL only)\n See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.flattenResults\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"maximum_billing_tier",
"=",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Deprecated. Changes the billing tier to allow high-compute queries.\n See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBillingTier\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"maximum_bytes_billed",
"=",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Maximum bytes to be billed for this job or None if not set.\n\n See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBytesBilled\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"priority",
"=",
"Field",
"(",
"BQPriority",
",",
"description",
"=",
"'''Priority of the query.\n See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.priority\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"query_parameters",
"=",
"Field",
"(",
"List",
"(",
"String",
")",
",",
"description",
"=",
"'''list of parameters for parameterized query (empty by default)\n See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.queryParameters\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"# TODO:",
"# Type:\tDict[str, google.cloud.bigquery.external_config.ExternalConfig]",
"# table_definitions = Field(",
"# PermissiveDict(),",
"# description='''Definitions for external tables or None if not set.",
"# See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions",
"# ''',",
"# is_optional=True,",
"# )",
"# TODO: Need to add this",
"# Type:\tList[google.cloud.bigquery.query.UDFResource]",
"# udf_resources = Field(",
"# String,",
"# description='''user defined function resources (empty by default)",
"# See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.userDefinedFunctionResources",
"# ''',",
"# is_optional=True",
"# )",
"use_legacy_sql",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''Use legacy SQL syntax.\n See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.useLegacySql\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"use_query_cache",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''Look for the query result in the cache.\n See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.useQueryCache\n '''",
",",
"is_optional",
"=",
"True",
",",
")",
"return",
"Field",
"(",
"Dict",
"(",
"fields",
"=",
"{",
"'query_job_config'",
":",
"Field",
"(",
"Dict",
"(",
"fields",
"=",
"{",
"'allow_large_results'",
":",
"allow_large_results",
",",
"'clustering_fields'",
":",
"sf",
"[",
"'clustering_fields'",
"]",
",",
"'create_disposition'",
":",
"sf",
"[",
"'create_disposition'",
"]",
",",
"'default_dataset'",
":",
"default_dataset",
",",
"'destination'",
":",
"destination",
",",
"'destination_encryption_configuration'",
":",
"sf",
"[",
"'destination_encryption_configuration'",
"]",
",",
"'dry_run'",
":",
"dry_run",
",",
"'flatten_results'",
":",
"flatten_results",
",",
"# TODO: labels",
"'maximum_billing_tier'",
":",
"maximum_billing_tier",
",",
"'maximum_bytes_billed'",
":",
"maximum_bytes_billed",
",",
"'priority'",
":",
"priority",
",",
"'query_parameters'",
":",
"query_parameters",
",",
"# TODO: table_definitions",
"'schema_update_options'",
":",
"sf",
"[",
"'schema_update_options'",
"]",
",",
"'time_partitioning'",
":",
"sf",
"[",
"'time_partitioning'",
"]",
",",
"# TODO: udf_resources",
"'use_legacy_sql'",
":",
"use_legacy_sql",
",",
"'use_query_cache'",
":",
"use_query_cache",
",",
"'write_disposition'",
":",
"sf",
"[",
"'write_disposition'",
"]",
",",
"}",
")",
")",
"}",
")",
",",
"description",
"=",
"'BigQuery query configuration'",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
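
An illustrative value matching the config schema above; the nesting mirrors the 'query_job_config' Dict, every field shown is optional, and the values are invented (the disposition strings are BigQuery's standard ones):

bigquery_solid_config = {
    'query_job_config': {
        'destination': 'your-project.your_dataset.your_table',
        'create_disposition': 'CREATE_IF_NEEDED',   # standard BQ value
        'write_disposition': 'WRITE_TRUNCATE',      # standard BQ value
        'use_legacy_sql': False,
        'maximum_bytes_billed': 10 * 1024 ** 3,     # cap the scan at 10 GiB
    }
}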
|
test
|
sql_solid
|
Return a new solid that executes and materializes a SQL select statement.
Args:
name (str): The name of the new solid.
select_statement (str): The select statement to execute.
materialization_strategy (str): Must be 'table', the only currently supported
materialization strategy. If 'table', the kwarg `table_name` must also be passed.
Kwargs:
        table_name (str): The name of the new table to create, if the materialization strategy
is 'table'. Default: None.
inputs (list[InputDefinition]): Inputs, if any, for the new solid. Default: None.
Returns:
        SolidDefinition:
The new SQL solid.
|
examples/airline-demo/airline_demo/solids.py
|
def sql_solid(name, select_statement, materialization_strategy, table_name=None, inputs=None):
'''Return a new solid that executes and materializes a SQL select statement.
Args:
name (str): The name of the new solid.
select_statement (str): The select statement to execute.
materialization_strategy (str): Must be 'table', the only currently supported
materialization strategy. If 'table', the kwarg `table_name` must also be passed.
Kwargs:
        table_name (str): The name of the new table to create, if the materialization strategy
is 'table'. Default: None.
inputs (list[InputDefinition]): Inputs, if any, for the new solid. Default: None.
Returns:
        SolidDefinition:
The new SQL solid.
'''
inputs = check.opt_list_param(inputs, 'inputs', InputDefinition)
materialization_strategy_output_types = { # pylint:disable=C0103
'table': SqlTableName,
# 'view': String,
# 'query': SqlAlchemyQueryType,
# 'subquery': SqlAlchemySubqueryType,
# 'result_proxy': SqlAlchemyResultProxyType,
# could also materialize as a Pandas table, as a Spark table, as an intermediate file, etc.
}
if materialization_strategy not in materialization_strategy_output_types:
raise Exception(
'Invalid materialization strategy {materialization_strategy}, must '
'be one of {materialization_strategies}'.format(
materialization_strategy=materialization_strategy,
materialization_strategies=str(list(materialization_strategy_output_types.keys())),
)
)
if materialization_strategy == 'table':
if table_name is None:
raise Exception('Missing table_name: required for materialization strategy \'table\'')
output_description = (
'The string name of the new table created by the solid'
if materialization_strategy == 'table'
else 'The materialized SQL statement. If the materialization_strategy is '
'\'table\', this is the string name of the new table created by the solid.'
)
description = '''This solid executes the following SQL statement:
{select_statement}'''.format(
select_statement=select_statement
)
# n.b., we will eventually want to make this resources key configurable
sql_statement = (
'drop table if exists {table_name};\n' 'create table {table_name} as {select_statement};'
).format(table_name=table_name, select_statement=select_statement)
def transform_fn(context, _inputs):
'''Inner function defining the new solid.
Args:
context (TransformExecutionContext): Must expose a `db` resource with an `execute` method,
like a SQLAlchemy engine, that can execute raw SQL against a database.
Returns:
str:
The table name of the newly materialized SQL select statement.
'''
context.log.info(
'Executing sql statement:\n{sql_statement}'.format(sql_statement=sql_statement)
)
context.resources.db_info.engine.execute(text(sql_statement))
yield Result(value=table_name, output_name='result')
return SolidDefinition(
name=name,
inputs=inputs,
outputs=[
OutputDefinition(
materialization_strategy_output_types[materialization_strategy],
description=output_description,
)
],
transform_fn=transform_fn,
description=description,
metadata={'kind': 'sql', 'sql': sql_statement},
)
|
def sql_solid(name, select_statement, materialization_strategy, table_name=None, inputs=None):
'''Return a new solid that executes and materializes a SQL select statement.
Args:
name (str): The name of the new solid.
select_statement (str): The select statement to execute.
materialization_strategy (str): Must be 'table', the only currently supported
materialization strategy. If 'table', the kwarg `table_name` must also be passed.
Kwargs:
        table_name (str): The name of the new table to create, if the materialization strategy
is 'table'. Default: None.
inputs (list[InputDefinition]): Inputs, if any, for the new solid. Default: None.
Returns:
        SolidDefinition:
The new SQL solid.
'''
inputs = check.opt_list_param(inputs, 'inputs', InputDefinition)
materialization_strategy_output_types = { # pylint:disable=C0103
'table': SqlTableName,
# 'view': String,
# 'query': SqlAlchemyQueryType,
# 'subquery': SqlAlchemySubqueryType,
# 'result_proxy': SqlAlchemyResultProxyType,
# could also materialize as a Pandas table, as a Spark table, as an intermediate file, etc.
}
if materialization_strategy not in materialization_strategy_output_types:
raise Exception(
'Invalid materialization strategy {materialization_strategy}, must '
'be one of {materialization_strategies}'.format(
materialization_strategy=materialization_strategy,
materialization_strategies=str(list(materialization_strategy_output_types.keys())),
)
)
if materialization_strategy == 'table':
if table_name is None:
raise Exception('Missing table_name: required for materialization strategy \'table\'')
output_description = (
'The string name of the new table created by the solid'
if materialization_strategy == 'table'
else 'The materialized SQL statement. If the materialization_strategy is '
'\'table\', this is the string name of the new table created by the solid.'
)
description = '''This solid executes the following SQL statement:
{select_statement}'''.format(
select_statement=select_statement
)
# n.b., we will eventually want to make this resources key configurable
sql_statement = (
'drop table if exists {table_name};\n' 'create table {table_name} as {select_statement};'
).format(table_name=table_name, select_statement=select_statement)
def transform_fn(context, _inputs):
'''Inner function defining the new solid.
Args:
context (TransformExecutionContext): Must expose a `db` resource with an `execute` method,
like a SQLAlchemy engine, that can execute raw SQL against a database.
Returns:
str:
The table name of the newly materialized SQL select statement.
'''
context.log.info(
'Executing sql statement:\n{sql_statement}'.format(sql_statement=sql_statement)
)
context.resources.db_info.engine.execute(text(sql_statement))
yield Result(value=table_name, output_name='result')
return SolidDefinition(
name=name,
inputs=inputs,
outputs=[
OutputDefinition(
materialization_strategy_output_types[materialization_strategy],
description=output_description,
)
],
transform_fn=transform_fn,
description=description,
metadata={'kind': 'sql', 'sql': sql_statement},
)
|
[
"Return",
"a",
"new",
"solid",
"that",
"executes",
"and",
"materializes",
"a",
"SQL",
"select",
"statement",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/examples/airline-demo/airline_demo/solids.py#L42-L130
|
[
"def",
"sql_solid",
"(",
"name",
",",
"select_statement",
",",
"materialization_strategy",
",",
"table_name",
"=",
"None",
",",
"inputs",
"=",
"None",
")",
":",
"inputs",
"=",
"check",
".",
"opt_list_param",
"(",
"inputs",
",",
"'inputs'",
",",
"InputDefinition",
")",
"materialization_strategy_output_types",
"=",
"{",
"# pylint:disable=C0103",
"'table'",
":",
"SqlTableName",
",",
"# 'view': String,",
"# 'query': SqlAlchemyQueryType,",
"# 'subquery': SqlAlchemySubqueryType,",
"# 'result_proxy': SqlAlchemyResultProxyType,",
"# could also materialize as a Pandas table, as a Spark table, as an intermediate file, etc.",
"}",
"if",
"materialization_strategy",
"not",
"in",
"materialization_strategy_output_types",
":",
"raise",
"Exception",
"(",
"'Invalid materialization strategy {materialization_strategy}, must '",
"'be one of {materialization_strategies}'",
".",
"format",
"(",
"materialization_strategy",
"=",
"materialization_strategy",
",",
"materialization_strategies",
"=",
"str",
"(",
"list",
"(",
"materialization_strategy_output_types",
".",
"keys",
"(",
")",
")",
")",
",",
")",
")",
"if",
"materialization_strategy",
"==",
"'table'",
":",
"if",
"table_name",
"is",
"None",
":",
"raise",
"Exception",
"(",
"'Missing table_name: required for materialization strategy \\'table\\''",
")",
"output_description",
"=",
"(",
"'The string name of the new table created by the solid'",
"if",
"materialization_strategy",
"==",
"'table'",
"else",
"'The materialized SQL statement. If the materialization_strategy is '",
"'\\'table\\', this is the string name of the new table created by the solid.'",
")",
"description",
"=",
"'''This solid executes the following SQL statement:\n {select_statement}'''",
".",
"format",
"(",
"select_statement",
"=",
"select_statement",
")",
"# n.b., we will eventually want to make this resources key configurable",
"sql_statement",
"=",
"(",
"'drop table if exists {table_name};\\n'",
"'create table {table_name} as {select_statement};'",
")",
".",
"format",
"(",
"table_name",
"=",
"table_name",
",",
"select_statement",
"=",
"select_statement",
")",
"def",
"transform_fn",
"(",
"context",
",",
"_inputs",
")",
":",
"'''Inner function defining the new solid.\n\n Args:\n context (TransformExecutionContext): Must expose a `db` resource with an `execute` method,\n like a SQLAlchemy engine, that can execute raw SQL against a database.\n\n Returns:\n str:\n The table name of the newly materialized SQL select statement.\n '''",
"context",
".",
"log",
".",
"info",
"(",
"'Executing sql statement:\\n{sql_statement}'",
".",
"format",
"(",
"sql_statement",
"=",
"sql_statement",
")",
")",
"context",
".",
"resources",
".",
"db_info",
".",
"engine",
".",
"execute",
"(",
"text",
"(",
"sql_statement",
")",
")",
"yield",
"Result",
"(",
"value",
"=",
"table_name",
",",
"output_name",
"=",
"'result'",
")",
"return",
"SolidDefinition",
"(",
"name",
"=",
"name",
",",
"inputs",
"=",
"inputs",
",",
"outputs",
"=",
"[",
"OutputDefinition",
"(",
"materialization_strategy_output_types",
"[",
"materialization_strategy",
"]",
",",
"description",
"=",
"output_description",
",",
")",
"]",
",",
"transform_fn",
"=",
"transform_fn",
",",
"description",
"=",
"description",
",",
"metadata",
"=",
"{",
"'kind'",
":",
"'sql'",
",",
"'sql'",
":",
"sql_statement",
"}",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
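
A construction sketch for sql_solid following the docstring's contract; the solid name, SQL, and table name are invented:

canonicalize_weather = sql_solid(
    'canonicalize_weather',
    'select * from raw_weather where temperature is not null',
    materialization_strategy='table',   # the only supported strategy
    table_name='canonical_weather',     # required when strategy is 'table'
)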
|
test
|
download_from_s3
|
Download an object from s3.
Args:
context (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
str:
The path to the downloaded object.
|
examples/airline-demo/airline_demo/solids.py
|
def download_from_s3(context):
'''Download an object from s3.
Args:
    context (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
str:
The path to the downloaded object.
'''
target_file = context.solid_config['target_file']
return context.resources.download_manager.download_file_contents(context, target_file)
|
def download_from_s3(context):
'''Download an object from s3.
Args:
    context (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
str:
The path to the downloaded object.
'''
target_file = context.solid_config['target_file']
return context.resources.download_manager.download_file_contents(context, target_file)
|
[
"Download",
"an",
"object",
"from",
"s3",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/examples/airline-demo/airline_demo/solids.py#L141-L152
|
[
"def",
"download_from_s3",
"(",
"context",
")",
":",
"target_file",
"=",
"context",
".",
"solid_config",
"[",
"'target_file'",
"]",
"return",
"context",
".",
"resources",
".",
"download_manager",
".",
"download_file_contents",
"(",
"context",
",",
"target_file",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
upload_to_s3
|
Upload a file to s3.
Args:
context (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
(str, str):
The bucket and key to which the file was uploaded.
|
examples/airline-demo/airline_demo/solids.py
|
def upload_to_s3(context, file_obj):
'''Upload a file to s3.
Args:
    context (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
(str, str):
The bucket and key to which the file was uploaded.
'''
bucket = context.solid_config['bucket']
key = context.solid_config['key']
context.resources.s3.put_object(
Bucket=bucket, Body=file_obj.read(), Key=key, **(context.solid_config.get('kwargs') or {})
)
yield Result(bucket, 'bucket')
yield Result(key, 'key')
|
def upload_to_s3(context, file_obj):
'''Upload a file to s3.
Args:
    context (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.
Returns:
(str, str):
The bucket and key to which the file was uploaded.
'''
bucket = context.solid_config['bucket']
key = context.solid_config['key']
context.resources.s3.put_object(
Bucket=bucket, Body=file_obj.read(), Key=key, **(context.solid_config.get('kwargs') or {})
)
yield Result(bucket, 'bucket')
yield Result(key, 'key')
|
[
"Upload",
"a",
"file",
"to",
"s3",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/examples/airline-demo/airline_demo/solids.py#L180-L197
|
[
"def",
"upload_to_s3",
"(",
"context",
",",
"file_obj",
")",
":",
"bucket",
"=",
"context",
".",
"solid_config",
"[",
"'bucket'",
"]",
"key",
"=",
"context",
".",
"solid_config",
"[",
"'key'",
"]",
"context",
".",
"resources",
".",
"s3",
".",
"put_object",
"(",
"Bucket",
"=",
"bucket",
",",
"Body",
"=",
"file_obj",
".",
"read",
"(",
")",
",",
"Key",
"=",
"key",
",",
"*",
"*",
"(",
"context",
".",
"solid_config",
".",
"get",
"(",
"'kwargs'",
")",
"or",
"{",
"}",
")",
")",
"yield",
"Result",
"(",
"bucket",
",",
"'bucket'",
")",
"yield",
"Result",
"(",
"key",
",",
"'key'",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
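
Illustrative solid_config shapes for the two s3 solids above; the bucket, key, and file names are invented, and the optional kwargs dict is forwarded verbatim to boto3's put_object:

download_config = {'target_file': 'source_data/coupon_codes.csv'}
upload_config = {
    'bucket': 'my-demo-bucket',
    'key': 'uploads/coupon_codes.csv',
    'kwargs': {'ACL': 'private'},  # any extra put_object arguments
}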
|
test
|
user_code_error_boundary
|
Wraps the execution of user-space code in an error boundary. This places a uniform
policy around any user code invoked by the framework. This ensures that all user
errors are wrapped in the DagsterUserCodeExecutionError, and that the original stack
trace of the user error is preserved, so that it can be reported without confusing
framework code in the stack trace, if a tool author wishes to do so. This has
been especially helpful in a notebooking context.
|
python_modules/dagster/dagster/core/errors.py
|
def user_code_error_boundary(error_cls, msg, **kwargs):
'''
Wraps the execution of user-space code in an error boundary. This places a uniform
    policy around any user code invoked by the framework. This ensures that all user
errors are wrapped in the DagsterUserCodeExecutionError, and that the original stack
trace of the user error is preserved, so that it can be reported without confusing
framework code in the stack trace, if a tool author wishes to do so. This has
    been especially helpful in a notebooking context.
'''
check.str_param(msg, 'msg')
check.subclass_param(error_cls, 'error_cls', DagsterUserCodeExecutionError)
try:
yield
except Exception as e: # pylint: disable=W0703
if isinstance(e, DagsterError):
# The system has thrown an error that is part of the user-framework contract
raise e
else:
# An exception has been thrown by user code and computation should cease
# with the error reported further up the stack
raise_from(
error_cls(msg, user_exception=e, original_exc_info=sys.exc_info(), **kwargs), e
)
|
def user_code_error_boundary(error_cls, msg, **kwargs):
'''
Wraps the execution of user-space code in an error boundary. This places a uniform
policy around user code invoked by the framework. This ensures that all user
errors are wrapped in the DagsterUserCodeExecutionError, and that the original stack
trace of the user error is preserved, so that it can be reported without confusing
framework code in the stack trace, if a tool author wishes to do so. This has
been especially helpful in a notebooking context.
'''
check.str_param(msg, 'msg')
check.subclass_param(error_cls, 'error_cls', DagsterUserCodeExecutionError)
try:
yield
except Exception as e: # pylint: disable=W0703
if isinstance(e, DagsterError):
# The system has thrown an error that is part of the user-framework contract
raise e
else:
# An exception has been thrown by user code and computation should cease
# with the error reported further up the stack
raise_from(
error_cls(msg, user_exception=e, original_exc_info=sys.exc_info(), **kwargs), e
)
|
[
"Wraps",
"the",
"execution",
"of",
"user",
"-",
"space",
"code",
"in",
"an",
"error",
"boundary",
".",
"This",
"places",
"a",
"uniform",
"policy",
"around",
"an",
"user",
"code",
"invoked",
"by",
"the",
"framework",
".",
"This",
"ensures",
"that",
"all",
"user",
"errors",
"are",
"wrapped",
"in",
"the",
"DagsterUserCodeExecutionError",
"and",
"that",
"the",
"original",
"stack",
"trace",
"of",
"the",
"user",
"error",
"is",
"preserved",
"so",
"that",
"it",
"can",
"be",
"reported",
"without",
"confusing",
"framework",
"code",
"in",
"the",
"stack",
"trace",
"if",
"a",
"tool",
"author",
"wishes",
"to",
"do",
"so",
".",
"This",
"has",
"been",
"especially",
"help",
"in",
"a",
"notebooking",
"context",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/errors.py#L164-L187
|
[
"def",
"user_code_error_boundary",
"(",
"error_cls",
",",
"msg",
",",
"*",
"*",
"kwargs",
")",
":",
"check",
".",
"str_param",
"(",
"msg",
",",
"'msg'",
")",
"check",
".",
"subclass_param",
"(",
"error_cls",
",",
"'error_cls'",
",",
"DagsterUserCodeExecutionError",
")",
"try",
":",
"yield",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=W0703",
"if",
"isinstance",
"(",
"e",
",",
"DagsterError",
")",
":",
"# The system has thrown an error that is part of the user-framework contract",
"raise",
"e",
"else",
":",
"# An exception has been thrown by user code and computation should cease",
"# with the error reported further up the stack",
"raise_from",
"(",
"error_cls",
"(",
"msg",
",",
"user_exception",
"=",
"e",
",",
"original_exc_info",
"=",
"sys",
".",
"exc_info",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
",",
"e",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
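A hedged usage sketch for user_code_error_boundary above. The generator is presumably decorated with contextlib.contextmanager in the source, which is what makes the with-form below work; SolidUserError and run_user_transform are hypothetical names.

# Any exception raised by the user code inside the block is re-raised as
# the given DagsterUserCodeExecutionError subclass, with the original
# traceback preserved via raise_from.
with user_code_error_boundary(
    SolidUserError,  # hypothetical error_cls; must subclass DagsterUserCodeExecutionError
    'Error occurred while executing the user transform',
):
    output = run_user_transform()  # failures are wrapped; DagsterErrors pass through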
test
|
mkdir_p
|
The missing mkdir -p functionality in os.
|
examples/airline-demo/airline_demo/utils.py
|
def mkdir_p(newdir, mode=0o777):
"""The missing mkdir -p functionality in os."""
try:
os.makedirs(newdir, mode)
except OSError as err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
|
def mkdir_p(newdir, mode=0o777):
"""The missing mkdir -p functionality in os."""
try:
os.makedirs(newdir, mode)
except OSError as err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
|
[
"The",
"missing",
"mkdir",
"-",
"p",
"functionality",
"in",
"os",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/examples/airline-demo/airline_demo/utils.py#L10-L17
|
[
"def",
"mkdir_p",
"(",
"newdir",
",",
"mode",
"=",
"0o777",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"newdir",
",",
"mode",
")",
"except",
"OSError",
"as",
"err",
":",
"# Reraise the error unless it's about an already existing directory",
"if",
"err",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
"or",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"newdir",
")",
":",
"raise"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
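Usage of mkdir_p is idempotent, mirroring mkdir -p: the second call below is a no-op rather than an error.

# Create nested directories, tolerating ones that already exist.
mkdir_p('data/raw/2019')
mkdir_p('data/raw/2019')  # EEXIST on an existing directory is swallowed
# On Python 3.2+, the standard library covers the same case:
# os.makedirs('data/raw/2019', exist_ok=True)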
test
|
user_code_context_manager
|
Wraps the output of a user provided function that may yield or return a value and
returns a generator that asserts it only yields a single value.
|
python_modules/dagster/dagster/core/execution.py
|
def user_code_context_manager(user_fn, error_cls, msg):
'''Wraps the output of a user provided function that may yield or return a value and
returns a generator that asserts it only yields a single value.
'''
check.callable_param(user_fn, 'user_fn')
check.subclass_param(error_cls, 'error_cls', DagsterUserCodeExecutionError)
with user_code_error_boundary(error_cls, msg):
thing_or_gen = user_fn()
gen = _ensure_gen(thing_or_gen)
try:
thing = next(gen)
except StopIteration:
check.failed('Must yield one item. You did not yield anything.')
yield thing
stopped = False
try:
next(gen)
except StopIteration:
stopped = True
check.invariant(stopped, 'Must yield one item. Yielded more than one item')
|
def user_code_context_manager(user_fn, error_cls, msg):
'''Wraps the output of a user provided function that may yield or return a value and
returns a generator that asserts it only yields a single value.
'''
check.callable_param(user_fn, 'user_fn')
check.subclass_param(error_cls, 'error_cls', DagsterUserCodeExecutionError)
with user_code_error_boundary(error_cls, msg):
thing_or_gen = user_fn()
gen = _ensure_gen(thing_or_gen)
try:
thing = next(gen)
except StopIteration:
check.failed('Must yield one item. You did not yield anything.')
yield thing
stopped = False
try:
next(gen)
except StopIteration:
stopped = True
check.invariant(stopped, 'Must yield one item. Yielded more than one item')
|
[
"Wraps",
"the",
"output",
"of",
"a",
"user",
"provided",
"function",
"that",
"may",
"yield",
"or",
"return",
"a",
"value",
"and",
"returns",
"a",
"generator",
"that",
"asserts",
"it",
"only",
"yields",
"a",
"single",
"value",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L346-L371
|
[
"def",
"user_code_context_manager",
"(",
"user_fn",
",",
"error_cls",
",",
"msg",
")",
":",
"check",
".",
"callable_param",
"(",
"user_fn",
",",
"'user_fn'",
")",
"check",
".",
"subclass_param",
"(",
"error_cls",
",",
"'error_cls'",
",",
"DagsterUserCodeExecutionError",
")",
"with",
"user_code_error_boundary",
"(",
"error_cls",
",",
"msg",
")",
":",
"thing_or_gen",
"=",
"user_fn",
"(",
")",
"gen",
"=",
"_ensure_gen",
"(",
"thing_or_gen",
")",
"try",
":",
"thing",
"=",
"next",
"(",
"gen",
")",
"except",
"StopIteration",
":",
"check",
".",
"failed",
"(",
"'Must yield one item. You did not yield anything.'",
")",
"yield",
"thing",
"stopped",
"=",
"False",
"try",
":",
"next",
"(",
"gen",
")",
"except",
"StopIteration",
":",
"stopped",
"=",
"True",
"check",
".",
"invariant",
"(",
"stopped",
",",
"'Must yield one item. Yielded more than one item'",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
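A hedged sketch of the two user-function shapes user_code_context_manager accepts, given that _ensure_gen wraps a plain return into a one-item generator; as with the error boundary, the source presumably applies contextlib.contextmanager. MyInitError is a hypothetical DagsterUserCodeExecutionError subclass.

def returns_value():
    return {'connection': 'ok'}  # plain return; _ensure_gen turns it into a generator

def yields_once():
    resource = {'connection': 'ok'}
    yield resource      # exactly one yield is allowed
    resource.clear()    # teardown: runs when the manager calls next(gen) again

with user_code_context_manager(returns_value, MyInitError, 'init failed') as ctx:
    assert ctx['connection'] == 'ok'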
test
|
construct_run_storage
|
Construct the run storage for this pipeline. Our rules are the following:
If the RunConfig has a storage_mode provided, we use that.
Then we fall back to environment config.
If there is no config, we default to in-memory storage. This is mostly so
that tests default to in-memory.
|
python_modules/dagster/dagster/core/execution.py
|
def construct_run_storage(run_config, environment_config):
'''
Construct the run storage for this pipeline. Our rules are the following:
If the RunConfig has a storage_mode provided, we use that.
Then we fall back to environment config.
If there is no config, we default to in-memory storage. This is mostly so
that tests default to in-memory.
'''
check.inst_param(run_config, 'run_config', RunConfig)
check.inst_param(environment_config, 'environment_config', EnvironmentConfig)
if run_config.storage_mode:
if run_config.storage_mode == RunStorageMode.FILESYSTEM:
return FileSystemRunStorage()
elif run_config.storage_mode == RunStorageMode.IN_MEMORY:
return InMemoryRunStorage()
elif run_config.storage_mode == RunStorageMode.S3:
# TODO: Revisit whether we want to use S3 run storage
return FileSystemRunStorage()
else:
check.failed('Unexpected enum {}'.format(run_config.storage_mode))
elif environment_config.storage.storage_mode == 'filesystem':
return FileSystemRunStorage()
elif environment_config.storage.storage_mode == 'in_memory':
return InMemoryRunStorage()
elif environment_config.storage.storage_mode == 's3':
# TODO: Revisit whether we want to use S3 run storage
return FileSystemRunStorage()
elif environment_config.storage.storage_mode is None:
return InMemoryRunStorage()
else:
raise DagsterInvariantViolationError(
'Invalid storage specified {}'.format(environment_config.storage.storage_mode)
)
|
def construct_run_storage(run_config, environment_config):
'''
Construct the run storage for this pipeline. Our rules are the following:
If the RunConfig has a storage_mode provided, we use that.
Then we fall back to environment config.
If there is no config, we default to in-memory storage. This is mostly so
that tests default to in-memory.
'''
check.inst_param(run_config, 'run_config', RunConfig)
check.inst_param(environment_config, 'environment_config', EnvironmentConfig)
if run_config.storage_mode:
if run_config.storage_mode == RunStorageMode.FILESYSTEM:
return FileSystemRunStorage()
elif run_config.storage_mode == RunStorageMode.IN_MEMORY:
return InMemoryRunStorage()
elif run_config.storage_mode == RunStorageMode.S3:
# TODO: Revisit whether we want to use S3 run storage
return FileSystemRunStorage()
else:
check.failed('Unexpected enum {}'.format(run_config.storage_mode))
elif environment_config.storage.storage_mode == 'filesystem':
return FileSystemRunStorage()
elif environment_config.storage.storage_mode == 'in_memory':
return InMemoryRunStorage()
elif environment_config.storage.storage_mode == 's3':
# TODO: Revisit whether we want to use S3 run storage
return FileSystemRunStorage()
elif environment_config.storage.storage_mode is None:
return InMemoryRunStorage()
else:
raise DagsterInvariantViolationError(
'Invalid storage specified {}'.format(environment_config.storage.storage_mode)
)
|
[
"Construct",
"the",
"run",
"storage",
"for",
"this",
"pipeline",
".",
"Our",
"rules",
"are",
"the",
"following",
":"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L374-L410
|
[
"def",
"construct_run_storage",
"(",
"run_config",
",",
"environment_config",
")",
":",
"check",
".",
"inst_param",
"(",
"run_config",
",",
"'run_config'",
",",
"RunConfig",
")",
"check",
".",
"inst_param",
"(",
"environment_config",
",",
"'environment_config'",
",",
"EnvironmentConfig",
")",
"if",
"run_config",
".",
"storage_mode",
":",
"if",
"run_config",
".",
"storage_mode",
"==",
"RunStorageMode",
".",
"FILESYSTEM",
":",
"return",
"FileSystemRunStorage",
"(",
")",
"elif",
"run_config",
".",
"storage_mode",
"==",
"RunStorageMode",
".",
"IN_MEMORY",
":",
"return",
"InMemoryRunStorage",
"(",
")",
"elif",
"run_config",
".",
"storage_mode",
"==",
"RunStorageMode",
".",
"S3",
":",
"# TODO: Revisit whether we want to use S3 run storage",
"return",
"FileSystemRunStorage",
"(",
")",
"else",
":",
"check",
".",
"failed",
"(",
"'Unexpected enum {}'",
".",
"format",
"(",
"run_config",
".",
"storage_mode",
")",
")",
"elif",
"environment_config",
".",
"storage",
".",
"storage_mode",
"==",
"'filesystem'",
":",
"return",
"FileSystemRunStorage",
"(",
")",
"elif",
"environment_config",
".",
"storage",
".",
"storage_mode",
"==",
"'in_memory'",
":",
"return",
"InMemoryRunStorage",
"(",
")",
"elif",
"environment_config",
".",
"storage",
".",
"storage_mode",
"==",
"'s3'",
":",
"# TODO: Revisit whether we want to use S3 run storage",
"return",
"FileSystemRunStorage",
"(",
")",
"elif",
"environment_config",
".",
"storage",
".",
"storage_mode",
"is",
"None",
":",
"return",
"InMemoryRunStorage",
"(",
")",
"else",
":",
"raise",
"DagsterInvariantViolationError",
"(",
"'Invalid storage specified {}'",
".",
"format",
"(",
"environment_config",
".",
"storage",
".",
"storage_mode",
")",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
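A comment-level sketch of the precedence construct_run_storage implements; the RunConfig constructor argument is an assumption inferred from the attribute read in the function.

# Explicit mode on the RunConfig wins over anything in the environment:
# construct_run_storage(RunConfig(storage_mode=RunStorageMode.IN_MEMORY), env)
#   -> InMemoryRunStorage()
# With run_config.storage_mode unset, the environment decides:
#   'filesystem' -> FileSystemRunStorage()
#   'in_memory'  -> InMemoryRunStorage()
#   's3'         -> FileSystemRunStorage()  (S3 run storage is still a TODO)
#   None         -> InMemoryRunStorage()    (the test-friendly default)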
test
|
_create_context_free_log
|
In the event of pipeline initialization failure, we want to be able to log the failure
without a dependency on the ExecutionContext to initialize DagsterLog
|
python_modules/dagster/dagster/core/execution.py
|
def _create_context_free_log(run_config, pipeline_def):
'''In the event of pipeline initialization failure, we want to be able to log the failure
without a dependency on the ExecutionContext to initialize DagsterLog
'''
check.inst_param(run_config, 'run_config', RunConfig)
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
# Use the default logger
loggers = [define_colored_console_logger('dagster')]
if run_config.event_callback:
loggers += [construct_event_logger(run_config.event_callback)]
elif run_config.loggers:
loggers += run_config.loggers
return DagsterLog(run_config.run_id, get_logging_tags(None, run_config, pipeline_def), loggers)
|
def _create_context_free_log(run_config, pipeline_def):
'''In the event of pipeline initialization failure, we want to be able to log the failure
without a dependency on the ExecutionContext to initialize DagsterLog
'''
check.inst_param(run_config, 'run_config', RunConfig)
check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
# Use the default logger
loggers = [define_colored_console_logger('dagster')]
if run_config.event_callback:
loggers += [construct_event_logger(run_config.event_callback)]
elif run_config.loggers:
loggers += run_config.loggers
return DagsterLog(run_config.run_id, get_logging_tags(None, run_config, pipeline_def), loggers)
|
[
"In",
"the",
"event",
"of",
"pipeline",
"initialization",
"failure",
"we",
"want",
"to",
"be",
"able",
"to",
"log",
"the",
"failure",
"without",
"a",
"dependency",
"on",
"the",
"ExecutionContext",
"to",
"initialize",
"DagsterLog"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L598-L612
|
[
"def",
"_create_context_free_log",
"(",
"run_config",
",",
"pipeline_def",
")",
":",
"check",
".",
"inst_param",
"(",
"run_config",
",",
"'run_config'",
",",
"RunConfig",
")",
"check",
".",
"inst_param",
"(",
"pipeline_def",
",",
"'pipeline_def'",
",",
"PipelineDefinition",
")",
"# Use the default logger",
"loggers",
"=",
"[",
"define_colored_console_logger",
"(",
"'dagster'",
")",
"]",
"if",
"run_config",
".",
"event_callback",
":",
"loggers",
"+=",
"[",
"construct_event_logger",
"(",
"run_config",
".",
"event_callback",
")",
"]",
"elif",
"run_config",
".",
"loggers",
":",
"loggers",
"+=",
"run_config",
".",
"loggers",
"return",
"DagsterLog",
"(",
"run_config",
".",
"run_id",
",",
"get_logging_tags",
"(",
"None",
",",
"run_config",
",",
"pipeline_def",
")",
",",
"loggers",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
execute_pipeline_iterator
|
Returns iterator that yields :py:class:`SolidExecutionResult` for each
solid executed in the pipeline.
This is intended to allow the caller to do things between each executed
node. For the 'synchronous' API, see :py:func:`execute_pipeline`.
Parameters:
pipeline (PipelineDefinition): Pipeline to run
environment_dict (dict): The environment configuration that parameterizes this run
run_config (RunConfig): Configuration for how this pipeline will be executed
Returns:
Iterator[DagsterEvent]
|
python_modules/dagster/dagster/core/execution.py
|
def execute_pipeline_iterator(pipeline, environment_dict=None, run_config=None):
'''Returns iterator that yields :py:class:`SolidExecutionResult` for each
solid executed in the pipeline.
This is intended to allow the caller to do things between each executed
node. For the 'synchronous' API, see :py:func:`execute_pipeline`.
Parameters:
pipeline (PipelineDefinition): Pipeline to run
environment_dict (dict): The environment configuration that parameterizes this run
run_config (RunConfig): Configuration for how this pipeline will be executed
Returns:
Iterator[DagsterEvent]
'''
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')
run_config = check_run_config_param(run_config)
environment_config = create_environment_config(pipeline, environment_dict)
intermediates_manager = construct_intermediates_manager(
run_config, environment_config, pipeline
)
with _pipeline_execution_context_manager(
pipeline, environment_config, run_config, intermediates_manager
) as pipeline_context:
return _execute_pipeline_iterator(pipeline_context)
|
def execute_pipeline_iterator(pipeline, environment_dict=None, run_config=None):
'''Returns iterator that yields :py:class:`SolidExecutionResult` for each
solid executed in the pipeline.
This is intended to allow the caller to do things between each executed
node. For the 'synchronous' API, see :py:func:`execute_pipeline`.
Parameters:
pipeline (PipelineDefinition): Pipeline to run
environment_dict (dict): The environment configuration that parameterizes this run
run_config (RunConfig): Configuration for how this pipeline will be executed
Returns:
Iterator[DagsterEvent]
'''
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')
run_config = check_run_config_param(run_config)
environment_config = create_environment_config(pipeline, environment_dict)
intermediates_manager = construct_intermediates_manager(
run_config, environment_config, pipeline
)
with _pipeline_execution_context_manager(
pipeline, environment_config, run_config, intermediates_manager
) as pipeline_context:
return _execute_pipeline_iterator(pipeline_context)
|
[
"Returns",
"iterator",
"that",
"yields",
":",
"py",
":",
"class",
":",
"SolidExecutionResult",
"for",
"each",
"solid",
"executed",
"in",
"the",
"pipeline",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L731-L757
|
[
"def",
"execute_pipeline_iterator",
"(",
"pipeline",
",",
"environment_dict",
"=",
"None",
",",
"run_config",
"=",
"None",
")",
":",
"check",
".",
"inst_param",
"(",
"pipeline",
",",
"'pipeline'",
",",
"PipelineDefinition",
")",
"environment_dict",
"=",
"check",
".",
"opt_dict_param",
"(",
"environment_dict",
",",
"'environment_dict'",
")",
"run_config",
"=",
"check_run_config_param",
"(",
"run_config",
")",
"environment_config",
"=",
"create_environment_config",
"(",
"pipeline",
",",
"environment_dict",
")",
"intermediates_manager",
"=",
"construct_intermediates_manager",
"(",
"run_config",
",",
"environment_config",
",",
"pipeline",
")",
"with",
"_pipeline_execution_context_manager",
"(",
"pipeline",
",",
"environment_config",
",",
"run_config",
",",
"intermediates_manager",
")",
"as",
"pipeline_context",
":",
"return",
"_execute_pipeline_iterator",
"(",
"pipeline_context",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
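A hedged usage sketch of the streaming form: my_pipeline and the empty config are hypothetical, and event_type is assumed from the step-event checks later in this file.

# React between executed nodes instead of waiting for the whole run.
for event in execute_pipeline_iterator(my_pipeline, environment_dict={'solids': {}}):
    print(event.event_type)  # e.g. STEP_SUCCESS, STEP_FAILURE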
test
|
execute_pipeline
|
"Synchronous" version of :py:func:`execute_pipeline_iterator`.
Note: raise_on_error is very useful in testing contexts when not testing for error
conditions
Parameters:
pipeline (PipelineDefinition): Pipeline to run
environment_dict (dict): The environment configuration that parameterizes this run
run_config (RunConfig): Configuration for how this pipeline will be executed
Returns:
:py:class:`PipelineExecutionResult`
|
python_modules/dagster/dagster/core/execution.py
|
def execute_pipeline(pipeline, environment_dict=None, run_config=None):
'''
"Synchronous" version of :py:func:`execute_pipeline_iterator`.
Note: raise_on_error is very useful in testing contexts when not testing for error
conditions
Parameters:
pipeline (PipelineDefinition): Pipeline to run
environment_dict (dict): The environment configuration that parameterizes this run
run_config (RunConfig): Configuration for how this pipeline will be executed
Returns:
:py:class:`PipelineExecutionResult`
'''
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')
run_config = check_run_config_param(run_config)
environment_config = create_environment_config(pipeline, environment_dict)
intermediates_manager = construct_intermediates_manager(
run_config, environment_config, pipeline
)
with _pipeline_execution_context_manager(
pipeline, environment_config, run_config, intermediates_manager
) as pipeline_context:
event_list = list(_execute_pipeline_iterator(pipeline_context))
return PipelineExecutionResult(
pipeline,
run_config.run_id,
event_list,
lambda: _pipeline_execution_context_manager(
pipeline, environment_config, run_config, intermediates_manager
),
)
|
def execute_pipeline(pipeline, environment_dict=None, run_config=None):
'''
"Synchronous" version of :py:func:`execute_pipeline_iterator`.
Note: raise_on_error is very useful in testing contexts when not testing for error
conditions
Parameters:
pipeline (PipelineDefinition): Pipeline to run
environment_dict (dict): The environment configuration that parameterizes this run
run_config (RunConfig): Configuration for how this pipeline will be executed
Returns:
:py:class:`PipelineExecutionResult`
'''
check.inst_param(pipeline, 'pipeline', PipelineDefinition)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')
run_config = check_run_config_param(run_config)
environment_config = create_environment_config(pipeline, environment_dict)
intermediates_manager = construct_intermediates_manager(
run_config, environment_config, pipeline
)
with _pipeline_execution_context_manager(
pipeline, environment_config, run_config, intermediates_manager
) as pipeline_context:
event_list = list(_execute_pipeline_iterator(pipeline_context))
return PipelineExecutionResult(
pipeline,
run_config.run_id,
event_list,
lambda: _pipeline_execution_context_manager(
pipeline, environment_config, run_config, intermediates_manager
),
)
|
[
"Synchronous",
"version",
"of",
":",
"py",
":",
"func",
":",
"execute_pipeline_iterator",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L760-L796
|
[
"def",
"execute_pipeline",
"(",
"pipeline",
",",
"environment_dict",
"=",
"None",
",",
"run_config",
"=",
"None",
")",
":",
"check",
".",
"inst_param",
"(",
"pipeline",
",",
"'pipeline'",
",",
"PipelineDefinition",
")",
"environment_dict",
"=",
"check",
".",
"opt_dict_param",
"(",
"environment_dict",
",",
"'environment_dict'",
")",
"run_config",
"=",
"check_run_config_param",
"(",
"run_config",
")",
"environment_config",
"=",
"create_environment_config",
"(",
"pipeline",
",",
"environment_dict",
")",
"intermediates_manager",
"=",
"construct_intermediates_manager",
"(",
"run_config",
",",
"environment_config",
",",
"pipeline",
")",
"with",
"_pipeline_execution_context_manager",
"(",
"pipeline",
",",
"environment_config",
",",
"run_config",
",",
"intermediates_manager",
")",
"as",
"pipeline_context",
":",
"event_list",
"=",
"list",
"(",
"_execute_pipeline_iterator",
"(",
"pipeline_context",
")",
")",
"return",
"PipelineExecutionResult",
"(",
"pipeline",
",",
"run_config",
".",
"run_id",
",",
"event_list",
",",
"lambda",
":",
"_pipeline_execution_context_manager",
"(",
"pipeline",
",",
"environment_config",
",",
"run_config",
",",
"intermediates_manager",
")",
",",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
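The synchronous counterpart in use, under the same hypothetical names; per-solid inspection relies on result_for_solid, shown next.

result = execute_pipeline(my_pipeline, environment_dict={'solids': {}})
solid_result = result.result_for_solid('upload_to_s3')  # solid name must exist
if solid_result.success:  # success/skipped are defined below
    print('solid finished without a STEP_FAILURE event')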
test
|
PipelineExecutionResult.result_for_solid
|
Get a :py:class:`SolidExecutionResult` for a given solid name.
|
python_modules/dagster/dagster/core/execution.py
|
def result_for_solid(self, name):
'''Get a :py:class:`SolidExecutionResult` for a given solid name.
'''
check.str_param(name, 'name')
if not self.pipeline.has_solid(name):
raise DagsterInvariantViolationError(
'Tried to get result for solid {name} in {pipeline}. No such solid.'.format(
name=name, pipeline=self.pipeline.display_name
)
)
if name not in self.solid_result_dict:
raise DagsterInvariantViolationError(
'Did not find result for solid {name} in pipeline execution result'.format(
name=name
)
)
return self.solid_result_dict[name]
|
def result_for_solid(self, name):
'''Get a :py:class:`SolidExecutionResult` for a given solid name.
'''
check.str_param(name, 'name')
if not self.pipeline.has_solid(name):
raise DagsterInvariantViolationError(
'Tried to get result for solid {name} in {pipeline}. No such solid.'.format(
name=name, pipeline=self.pipeline.display_name
)
)
if name not in self.solid_result_dict:
raise DagsterInvariantViolationError(
'Did not find result for solid {name} in pipeline execution result'.format(
name=name
)
)
return self.solid_result_dict[name]
|
[
"Get",
"a",
":",
"py",
":",
"class",
":",
"SolidExecutionResult",
"for",
"a",
"given",
"solid",
"name",
"."
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L141-L160
|
[
"def",
"result_for_solid",
"(",
"self",
",",
"name",
")",
":",
"check",
".",
"str_param",
"(",
"name",
",",
"'name'",
")",
"if",
"not",
"self",
".",
"pipeline",
".",
"has_solid",
"(",
"name",
")",
":",
"raise",
"DagsterInvariantViolationError",
"(",
"'Try to get result for solid {name} in {pipeline}. No such solid.'",
".",
"format",
"(",
"name",
"=",
"name",
",",
"pipeline",
"=",
"self",
".",
"pipeline",
".",
"display_name",
")",
")",
"if",
"name",
"not",
"in",
"self",
".",
"solid_result_dict",
":",
"raise",
"DagsterInvariantViolationError",
"(",
"'Did not find result for solid {name} in pipeline execution result'",
".",
"format",
"(",
"name",
"=",
"name",
")",
")",
"return",
"self",
".",
"solid_result_dict",
"[",
"name",
"]"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
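Both guard clauses in result_for_solid raise DagsterInvariantViolationError; a hedged sketch with hypothetical solid names:

result.result_for_solid('no_such_solid')   # raises: the pipeline has no such solid
result.result_for_solid('never_executed')  # raises: solid exists, but produced no result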
test
|
SolidExecutionResult.success
|
Whether the solid execution was successful
|
python_modules/dagster/dagster/core/execution.py
|
def success(self):
'''Whether the solid execution was successful'''
any_success = False
for step_event in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
):
if step_event.event_type == DagsterEventType.STEP_FAILURE:
return False
if step_event.event_type == DagsterEventType.STEP_SUCCESS:
any_success = True
return any_success
|
def success(self):
'''Whether the solid execution was successful'''
any_success = False
for step_event in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
):
if step_event.event_type == DagsterEventType.STEP_FAILURE:
return False
if step_event.event_type == DagsterEventType.STEP_SUCCESS:
any_success = True
return any_success
|
[
"Whether",
"the",
"solid",
"execution",
"was",
"successful"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L196-L207
|
[
"def",
"success",
"(",
"self",
")",
":",
"any_success",
"=",
"False",
"for",
"step_event",
"in",
"itertools",
".",
"chain",
"(",
"self",
".",
"input_expectations",
",",
"self",
".",
"output_expectations",
",",
"self",
".",
"transforms",
")",
":",
"if",
"step_event",
".",
"event_type",
"==",
"DagsterEventType",
".",
"STEP_FAILURE",
":",
"return",
"False",
"if",
"step_event",
".",
"event_type",
"==",
"DagsterEventType",
".",
"STEP_SUCCESS",
":",
"any_success",
"=",
"True",
"return",
"any_success"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
test
|
SolidExecutionResult.skipped
|
Whether the solid execution was skipped
|
python_modules/dagster/dagster/core/execution.py
|
def skipped(self):
'''Whether the solid execution was skipped'''
return all(
[
step_event.event_type == DagsterEventType.STEP_SKIPPED
for step_event in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
)
]
)
|
def skipped(self):
'''Whether the solid execution was skipped'''
return all(
[
step_event.event_type == DagsterEventType.STEP_SKIPPED
for step_event in itertools.chain(
self.input_expectations, self.output_expectations, self.transforms
)
]
)
|
[
"Whether",
"the",
"solid",
"execution",
"was",
"skipped"
] |
dagster-io/dagster
|
python
|
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/execution.py#L210-L219
|
[
"def",
"skipped",
"(",
"self",
")",
":",
"return",
"all",
"(",
"[",
"step_event",
".",
"event_type",
"==",
"DagsterEventType",
".",
"STEP_SKIPPED",
"for",
"step_event",
"in",
"itertools",
".",
"chain",
"(",
"self",
".",
"input_expectations",
",",
"self",
".",
"output_expectations",
",",
"self",
".",
"transforms",
")",
"]",
")"
] |
4119f8c773089de64831b1dfb9e168e353d401dc
|
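The two predicates above compose naturally when inspecting a SolidExecutionResult; a hedged sketch:

if solid_result.success:
    print('at least one step succeeded and none failed')
elif solid_result.skipped:
    print('every step event was STEP_SKIPPED')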