INSTRUCTION
RESPONSE
Render left blocks
def __render_left_panel(self): ''' Render left blocks ''' self.log.debug("Rendering left blocks") left_block = self.left_panel left_block.render() blank_space = self.left_panel_width - left_block.width lines = [] pre_space = ' ' * int(blank_space / 2) if not left_block.lines: lines = [(''), (self.markup.RED + 'BROKEN LEFT PANEL' + self.markup.RESET)] else: while self.left_panel.lines: src_line = self.left_panel.lines.pop(0) line = pre_space + self.__truncate(src_line, self.left_panel_width) post_space = ' ' * (self.left_panel_width - len(self.markup.clean_markup(line))) line += post_space + self.markup.RESET lines.append(line) return lines
Main method to render screen view
def render_screen(self): ''' Main method to render screen view ''' self.term_width, self.term_height = get_terminal_size() self.log.debug( "Terminal size: %sx%s", self.term_width, self.term_height) self.right_panel_width = int( (self.term_width - len(self.RIGHT_PANEL_SEPARATOR)) * (float(self.info_panel_percent) / 100)) - 1 if self.right_panel_width > 0: self.left_panel_width = self.term_width - \ self.right_panel_width - len(self.RIGHT_PANEL_SEPARATOR) - 2 else: self.right_panel_width = 0 self.left_panel_width = self.term_width - 1 self.log.debug( "Left/right panels width: %s/%s", self.left_panel_width, self.right_panel_width) widget_output = [] if self.right_panel_width: widget_output = [] self.log.debug("There are %d info widgets" % len(self.info_widgets)) for index, widget in sorted( self.info_widgets.iteritems(), key=lambda item: (item[1].get_index(), item[0])): self.log.debug("Rendering info widget #%s: %s", index, widget) widget_out = widget.render(self).strip() if widget_out: widget_output += widget_out.split("\n") widget_output += [""] left_lines = self.__render_left_panel() self.log.debug("Composing final screen output") output = [] for line_no in range(1, self.term_height): line = " " if line_no > 1 and left_lines: left_line = left_lines.pop(0) left_line_plain = self.markup.clean_markup(left_line) left_line += ( ' ' * (self.left_panel_width - len(left_line_plain))) line += left_line else: line += ' ' * self.left_panel_width if self.right_panel_width: line += self.markup.RESET line += self.markup.WHITE line += self.RIGHT_PANEL_SEPARATOR line += self.markup.RESET right_line = self.__get_right_line(widget_output) line += right_line output.append(line) return self.markup.new_line.join(output) + self.markup.new_line
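A small worked example of the panel-width arithmetic above, assuming a hypothetical 120-column terminal, a one-character separator and info_panel_percent set to 30 (none of these values come from the source):

term_width = 120
separator_len = 1            # assumed len(RIGHT_PANEL_SEPARATOR)
info_panel_percent = 30
right_panel_width = int((term_width - separator_len) * (info_panel_percent / 100.0)) - 1
left_panel_width = term_width - right_panel_width - separator_len - 2
print(right_panel_width, left_panel_width)   # 34 83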
Add widget string to right panel of the screen
def add_info_widget(self, widget): ''' Add widget string to right panel of the screen ''' index = widget.get_index() while index in self.info_widgets.keys(): index += 1 self.info_widgets[index] = widget
Right-pad lines of block to equal width
def fill_rectangle(self, prepared): ''' Right-pad lines of block to equal width ''' result = [] width = max([self.clean_len(line) for line in prepared]) for line in prepared: spacer = ' ' * (width - self.clean_len(line)) result.append(line + (self.screen.markup.RESET, spacer)) return (width, result)
Calculate visible length of string
def clean_len(self, line): ''' Calculate wisible length of string ''' if isinstance(line, basestring): return len(self.screen.markup.clean_markup(line)) elif isinstance(line, tuple) or isinstance(line, list): markups = self.screen.markup.get_markup_vars() length = 0 for i in line: if i not in markups: length += len(i) return length
Creates load plan timestamps generator
def create(instances_schedule): ''' Creates load plan timestamps generator >>> from util import take >>> take(7, LoadPlanBuilder().ramp(5, 4000).create()) [0, 1000, 2000, 3000, 4000, 0, 0] >>> take(7, create(['ramp(5, 4s)'])) [0, 1000, 2000, 3000, 4000, 0, 0] >>> take(12, create(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)'])) [0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0] >>> take(7, create(['wait(5s)', 'ramp(5, 0)'])) [5000, 5000, 5000, 5000, 5000, 0, 0] >>> take(7, create([])) [0, 0, 0, 0, 0, 0, 0] >>> take(12, create(['line(1, 9, 4s)'])) [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0] >>> take(12, create(['const(3, 5s)', 'line(7, 11, 2s)'])) [0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0] >>> take(12, create(['step(2, 10, 2, 3s)'])) [0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0] >>> take(12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps) [(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)] >>> take(12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps) [(100, 30), (200, 30), (300, 30), (400, 30), (500, 30), (600, 30), (700, 30), (800, 30), (900, 30), (950, 30)] >>> LoadPlanBuilder().stairway(100, 950, 100, 30000).instances 950 >>> LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).instances 10 >>> LoadPlanBuilder().line(1, 100, 60000).instances 100 ''' lpb = LoadPlanBuilder().add_all_steps(instances_schedule) lp = lpb.create() info.status.publish('duration', 0) # info.status.publish('steps', lpb.steps) info.status.publish('steps', []) info.status.publish('instances', lpb.instances) return lp
format level str
def get_level_str(self): ''' format level str ''' if self.is_relative: level_str = str(self.level) + "%" else: level_str = self.level return level_str
Formula for measurement error: sqrt(sum_{i=1..n}((k_i - <k>)**2) / (n * (n - 1)))
def calc_measurement_error(self, tangents): ''' formula for measurement error sqrt ( (sum(1, n, (k_i - <k>)**2) / (n*(n-1))) ''' if len(tangents) < 2: return 0.0 avg_tan = float(sum(tangents) / len(tangents)) numerator = float() for i in tangents: numerator += (i - avg_tan) * (i - avg_tan) return math.sqrt(numerator / len(tangents) / (len(tangents) - 1))
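In conventional notation, the quantity computed above is the standard error of the mean tangent:

\sigma_{\langle k \rangle} = \sqrt{\frac{\sum_{i=1}^{n} (k_i - \langle k \rangle)^2}{n\,(n-1)}}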
add right panel widget
def add_info_widget(self, widget): ''' add right panel widget ''' if not self.screen: self.log.debug("No screen instance to add widget") else: self.screen.add_info_widget(widget)
clean markup from string
def clean_markup(self, orig_str): ''' clean markup from string ''' for val in self.get_markup_vars(): orig_str = orig_str.replace(val, '') return orig_str
Send request to writer service.
def __make_writer_request( self, params=None, json=None, http_method="POST", trace=False): ''' Send request to writer service. ''' request = requests.Request( http_method, self.writer_url, params=params, json=json, headers={ 'User-Agent': self.user_agent}) ids = id_gen(str(uuid.uuid4())) network_timeouts = self.network_timeouts() maintenance_timeouts = self.maintenance_timeouts() while True: try: response = self.__send_single_request(request, ids.next(), trace=trace) return response except (Timeout, ConnectionError, ProtocolError): logger.warn(traceback.format_exc()) try: timeout = next(network_timeouts) logger.warn( "Network error, will retry in %ss..." % timeout) time.sleep(timeout) continue except StopIteration: raise self.NetworkError() except self.UnderMaintenance as e: try: timeout = next(maintenance_timeouts) logger.warn( "Writer is under maintenance, will retry in %ss..." % timeout) time.sleep(timeout) continue except StopIteration: raise e
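The network_timeouts() and maintenance_timeouts() calls above are used as finite iterators of delays: once they are exhausted, next() raises StopIteration and the retry loop gives up. A minimal sketch of such a generator (the name and default values are assumptions, not the uploader's actual configuration):

def retry_timeouts(initial=1, multiplier=2, limit=5):
    # Yield a finite, growing series of delays in seconds; when the
    # generator is exhausted, the caller's next() raises StopIteration.
    delay = initial
    for _ in range(limit):
        yield delay
        delay *= multiplier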
:return: (job_nr, upload_token) :rtype: tuple
def new_job( self, task, person, tank, target_host, target_port, loadscheme=None, detailed_time=None, notify_list=None, trace=False): """ :return: job_nr, upload_token :rtype: tuple """ if not notify_list: notify_list = [] data = { 'task': task, 'person': person, 'tank': tank, 'host': target_host, 'port': target_port, 'loadscheme': loadscheme, 'detailed_time': detailed_time, 'notify': notify_list } logger.debug("Job create request: %s", data) api_timeouts = self.api_timeouts() while True: try: response = self.__post( "api/job/create.json", data, trace=trace)[0] # [{"upload_token": "1864a3b2547d40f19b5012eb038be6f6", "job": 904317}] return response['job'], response['upload_token'] except (self.NotAvailable, self.StoppedFromOnline) as e: try: timeout = next(api_timeouts) logger.warn("API error, will retry in %ss..." % timeout) time.sleep(timeout) continue except StopIteration: logger.warn('Failed to create job on lunapark') raise self.JobNotCreated(e.message) except requests.HTTPError as e: raise self.JobNotCreated('Failed to create job on lunapark\n{}'.format(e.response.content)) except Exception as e: logger.warn('Failed to create job on lunapark') logger.warn(repr(e), ) raise self.JobNotCreated()
:returns: {plugin_name: plugin_class, ...} :rtype: dict
def plugins(self): """ :returns: {plugin_name: plugin_class, ...} :rtype: dict """ if self._plugins is None: self.load_plugins() if self._plugins is None: self._plugins = {} return self._plugins
Tells core to take plugin options and instantiate plugin classes
def load_plugins(self): """ Tells core to take plugin options and instantiate plugin classes """ logger.info("Loading plugins...") for (plugin_name, plugin_path, plugin_cfg) in self.config.plugins: logger.debug("Loading plugin %s from %s", plugin_name, plugin_path) if plugin_path == "yandextank.plugins.Overload": logger.warning( "Deprecated plugin name: 'yandextank.plugins.Overload'\n" "There is a new generic plugin now.\n" "Correcting to 'yandextank.plugins.DataUploader overload'") plugin_path = "yandextank.plugins.DataUploader overload" try: plugin = il.import_module(plugin_path) except ImportError: logger.warning('Plugin name %s path %s import error', plugin_name, plugin_path) logger.debug('Plugin name %s path %s import error', plugin_name, plugin_path, exc_info=True) raise try: instance = getattr(plugin, 'Plugin')(self, cfg=plugin_cfg, name=plugin_name) except AttributeError: logger.warning('Plugin %s classname should be `Plugin`', plugin_name) raise else: self.register_plugin(self.PLUGIN_PREFIX + plugin_name, instance) logger.debug("Plugin instances: %s", self._plugins)
Call configure() on all plugins
def plugins_configure(self): """ Call configure() on all plugins """ self.publish("core", "stage", "configure") logger.info("Configuring plugins...") self.taskset_affinity = self.get_option(self.SECTION, 'affinity') if self.taskset_affinity: self.__setup_taskset(self.taskset_affinity, pid=os.getpid()) for plugin in self.plugins.values(): if not self.interrupted.is_set(): logger.debug("Configuring %s", plugin) plugin.configure()
Call is_test_finished() on all plugins until one of them initiates exit
def wait_for_finish(self): """ Call is_test_finished() on all plugins 'till one of them initiates exit """ if not self.interrupted.is_set(): logger.info("Waiting for test to finish...") logger.info('Artifacts dir: {dir}'.format(dir=self.artifacts_dir)) self.publish("core", "stage", "shoot") if not self.plugins: raise RuntimeError("It's strange: we have no plugins loaded...") while not self.interrupted.is_set(): begin_time = time.time() aggr_retcode = self.job.aggregator.is_test_finished() if aggr_retcode >= 0: return aggr_retcode for plugin in self.plugins.values(): logger.debug("Polling %s", plugin) retcode = plugin.is_test_finished() if retcode >= 0: return retcode end_time = time.time() diff = end_time - begin_time logger.debug("Polling took %s", diff) logger.debug("Tank status: %s", json.dumps(self.status)) # screen refresh every 0.5 s if diff < 0.5: time.sleep(0.5 - diff) return 1
Call post_process() on all plugins
def plugins_post_process(self, retcode): """ Call post_process() on all plugins """ logger.info("Post-processing test...") self.publish("core", "stage", "post_process") for plugin in self.plugins.values(): logger.debug("Post-process %s", plugin) try: logger.debug("RC before: %s", retcode) retcode = plugin.post_process(retcode) logger.debug("RC after: %s", retcode) except Exception: # FIXME too broad exception clause logger.error("Failed post-processing plugin %s", plugin, exc_info=True) if not retcode: retcode = 1 return retcode
If pid is specified: set the CPU affinity of the process with pid `pid` to the specified `affinity` core(s). If args is specified: modify the list of args for Popen so the command starts under taskset with affinity `affinity`.
def __setup_taskset(self, affinity, pid=None, args=None): """ if pid specified: set process w/ pid `pid` CPU affinity to specified `affinity` core(s) if args specified: modify list of args for Popen to start w/ taskset w/ affinity `affinity` """ self.taskset_path = self.get_option(self.SECTION, 'taskset_path') if args: return [self.taskset_path, '-c', affinity] + args if pid: args = "%s -pc %s %s" % (self.taskset_path, affinity, pid) retcode, stdout, stderr = execute(args, shell=True, poll_period=0.1, catch_out=True) logger.debug('taskset for pid %s stdout: %s', pid, stdout) if retcode == 0: logger.info("Enabled taskset for pid %s with affinity %s", str(pid), affinity) else: logger.debug('Taskset setup failed w/ retcode :%s', retcode) raise KeyError(stderr)
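A small illustration of the two modes, with an assumed taskset_path, affinity and child command (none of these values come from the plugin's config):

taskset_path = '/usr/bin/taskset'                  # assumed binary location
affinity = '0-3'
child_args = ['phantom', 'run', 'phantom.conf']    # hypothetical command for Popen

# args mode: the command is wrapped so the child starts pinned to cores 0-3
print([taskset_path, '-c', affinity] + child_args)

# pid mode: an already-running process is re-pinned via a shell command
print('%s -pc %s %s' % (taskset_path, affinity, 12345))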
Retrieve a plugin of desired class, KeyError raised otherwise
def get_plugin_of_type(self, plugin_class): """ Retrieve a plugin of desired class, KeyError raised otherwise """ logger.debug("Searching for plugin: %s", plugin_class) matches = [plugin for plugin in self.plugins.values() if isinstance(plugin, plugin_class)] if matches: if len(matches) > 1: logger.debug( "More then one plugin of type %s found. Using first one.", plugin_class) return matches[-1] else: raise KeyError("Requested plugin type not found: %s" % plugin_class)
Retrieve a list of plugins of desired class, KeyError raised otherwise
def get_plugins_of_type(self, plugin_class): """ Retrieve a list of plugins of desired class, KeyError raised otherwise """ logger.debug("Searching for plugins: %s", plugin_class) matches = [plugin for plugin in self.plugins.values() if isinstance(plugin, plugin_class)] if matches: return matches else: raise KeyError("Requested plugin type not found: %s" % plugin_class)
Move or copy single file to artifacts dir
def __collect_file(self, filename, keep_original=False): """ Move or copy single file to artifacts dir """ dest = self.artifacts_dir + '/' + os.path.basename(filename) logger.debug("Collecting file: %s to %s", filename, dest) if not filename or not os.path.exists(filename): logger.warning("File not found to collect: %s", filename) return if os.path.exists(dest): # FIXME: 3 find a way to store artifacts anyway logger.warning("File already exists: %s", dest) return if keep_original: shutil.copy(filename, self.artifacts_dir) else: shutil.move(filename, self.artifacts_dir) os.chmod(dest, 0o644)
Add file to be stored as result artifact on post-process phase
def add_artifact_file(self, filename, keep_original=False): """ Add file to be stored as result artifact on post-process phase """ if filename: logger.debug( "Adding artifact file to collect (keep=%s): %s", keep_original, filename) self.artifact_files[filename] = keep_original
Generate temp file name in artifacts base dir and close temp file handle
def mkstemp(self, suffix, prefix, directory=None): """ Generate temp file name in artifacts base dir and close temp file handle """ if not directory: directory = self.artifacts_dir fd, fname = tempfile.mkstemp(suffix, prefix, directory) os.close(fd) os.chmod(fname, 0o644) # FIXME: chmod to parent dir's mode? return fname
Call close() for all plugins
def close(self): """ Call close() for all plugins """ logger.info("Close allocated resources...") for plugin in self.plugins.values(): logger.debug("Close %s", plugin) try: plugin.close() except Exception as ex: logger.error("Failed closing plugin %s: %s", plugin, ex) logger.debug( "Failed closing plugin: %s", traceback.format_exc(ex))
Read configs set into storage
def load_files(self, configs): """ Read configs set into storage """ logger.debug("Reading configs: %s", configs) config_filenames = [resource.resource_filename(config) for config in configs] try: self.config.read(config_filenames) except Exception as ex: logger.error("Can't load configs: %s", ex) raise ex
Flush current stat to file
def flush(self, filename=None): """ Flush current stat to file """ if not filename: filename = self.file if filename: with open(filename, 'w') as handle: self.config.write(handle)
Get options list with requested prefix
def get_options(self, section, prefix=''): """ Get options list with requested prefix """ res = [] try: for option in self.config.options(section): if not prefix or option.find(prefix) == 0: res += [( option[len(prefix):], self.config.get(section, option))] except ConfigParser.NoSectionError as ex: logger.warning("No section: %s", ex) logger.debug( "Section: [%s] prefix: '%s' options:\n%s", section, prefix, res) return res
return sections with specified prefix
def find_sections(self, prefix): """ return sections with specified prefix """ res = [] for section in self.config.sections(): if section.startswith(prefix): res.append(section) return res
Return all items found in this chunk
def _decode_stat_data(self, chunk): """ Return all items found in this chunk """ for date_str, statistics in chunk.iteritems(): date_obj = datetime.datetime.strptime( date_str.split(".")[0], '%Y-%m-%d %H:%M:%S') chunk_date = int(time.mktime(date_obj.timetuple())) instances = 0 for benchmark_name, benchmark in statistics.iteritems(): if not benchmark_name.startswith("benchmark_io"): continue for method, meth_obj in benchmark.iteritems(): if "mmtasks" in meth_obj: instances += meth_obj["mmtasks"][2] offset = chunk_date - 1 - self.start_time reqps = 0 if 0 <= offset < len(self.phantom_info.steps): reqps = self.phantom_info.steps[offset][0] yield self.stats_item(chunk_date - 1, instances, reqps)
:rtype: PhantomConfig
def phantom(self): """ :rtype: PhantomConfig """ if not self._phantom: self._phantom = PhantomConfig(self.core, self.cfg, self.stat_log) self._phantom.read_config() return self._phantom
returns info object
def get_info(self): """ returns info object """ if not self.cached_info: if not self.phantom: return None self.cached_info = self.phantom.get_info() return self.cached_info
Prepare for monitoring - install agents etc
def prepare(self): """Prepare for monitoring - install agents etc""" # Parse config agent_configs = [] if self.config: agent_configs = self.config_manager.getconfig( self.config, self.default_target) # Creating agent for hosts for config in agent_configs: if config['host'] in ['localhost', '127.0.0.1', '::1']: client = self.clients['localhost']( config, self.old_style_configs, kill_old=self.kill_old) else: client = self.clients['ssh']( config, self.old_style_configs, timeout=5, kill_old=self.kill_old) logger.debug('Installing monitoring agent. Host: %s', client.host) agent_config, startup_config, customs_script = client.install() if agent_config: self.agents.append(client) self.artifact_files.append(agent_config) if startup_config: self.artifact_files.append(startup_config) if customs_script: self.artifact_files.append(customs_script)
Start agents
def start(self): """ Start agents execute popen of agent.py on target and start output reader thread. """ [agent.start() for agent in self.agents] [agent.reader_thread.start() for agent in self.agents]
Poll agents for data
def poll(self): """ Poll agents for data """ start_time = time.time() for agent in self.agents: for collect in agent.reader: # don't crush if trash or traceback came from agent to stdout if not collect: return 0 for chunk in collect: ts, prepared_results = chunk if self.load_start_time and int( ts) >= self.load_start_time: ready_to_send = { "timestamp": int(ts), "data": { self.hash_hostname(agent.host): { "comment": agent.config.comment, "metrics": prepared_results } } } self.__collected_data.append(ready_to_send) logger.debug( 'Polling/decoding agents data took: %.2fms', (time.time() - start_time) * 1000) collected_data_length = len(self.__collected_data) if not self.first_data_received and self.__collected_data: self.first_data_received = True logger.info("Monitoring received first data.") else: self.send_collected_data() return collected_data_length
Shutdown agents
def stop(self): """Shutdown agents""" logger.debug("Uninstalling monitoring agents") for agent in self.agents: log_filename, data_filename = agent.uninstall() self.artifact_files.append(log_filename) self.artifact_files.append(data_filename) for agent in self.agents: try: logger.debug( 'Waiting for agent %s reader thread to finish.', agent) agent.reader_thread.join(10) except BaseException: logger.error('Monitoring reader thread stuck!', exc_info=True)
sends pending data set to listeners
def send_collected_data(self): """sends pending data set to listeners""" data = self.__collected_data self.__collected_data = [] for listener in self.listeners: # deep copy to ensure each listener gets it's own copy listener.monitoring_data(copy.deepcopy(data))
We need to be flexible in order to determine which plugin's configuration is specified and build the appropriate configs for the metrics collector
def __detect_configuration(self): """ we need to be flexible in order to determine which plugin's configuration specified and make appropriate configs to metrics collector :return: SECTION name or None for defaults """ try: is_telegraf = self.core.get_option('telegraf', "config") except KeyError: is_telegraf = None try: is_monitoring = self.core.get_option('monitoring', "config") except KeyError: is_monitoring = None if is_telegraf and is_monitoring: raise ValueError( 'Both telegraf and monitoring configs specified. ' 'Clean up your config and delete one of them') if is_telegraf and not is_monitoring: return 'telegraf' if not is_telegraf and is_monitoring: return 'monitoring' if not is_telegraf and not is_monitoring: # defaults target logic try: is_telegraf_dt = self.core.get_option('telegraf') except NoOptionError: is_telegraf_dt = None try: is_monitoring_dt = self.core.get_option('monitoring') except BaseException: is_monitoring_dt = None if is_telegraf_dt and is_monitoring_dt: raise ValueError( 'Both telegraf and monitoring default targets specified. ' 'Clean up your config and delete one of them') if is_telegraf_dt and not is_monitoring_dt: return if not is_telegraf_dt and is_monitoring_dt: self.core.set_option( "telegraf", "default_target", is_monitoring_dt) if not is_telegraf_dt and not is_monitoring_dt: return
store metric in data tree and calc offset signs
def __handle_data_items(self, host, data): """ store metric in data tree and calc offset signs sign < 0 is CYAN, means metric value is lower then previous, sign > 1 is YELLOW, means metric value is higher then prevoius, sign == 0 is WHITE, means initial or equal metric value """ for metric, value in data.iteritems(): if value == '': self.sign[host][metric] = -1 self.data[host][metric] = value else: if not self.data[host].get(metric, None): self.sign[host][metric] = 1 elif float(value) > float(self.data[host][metric]): self.sign[host][metric] = 1 elif float(value) < float(self.data[host][metric]): self.sign[host][metric] = -1 else: self.sign[host][metric] = 0 self.data[host][metric] = "%.2f" % float(value)
Decode agents' JSONs, count diffs
def _decode_agents_data(self, block): """ decode agents jsons, count diffs """ collect = [] if block: for chunk in block.split('\n'): try: if chunk: prepared_results = {} jsn = json.loads(chunk) for ts, values in jsn.iteritems(): for key, value in values.iteritems(): # key sample: diskio-sda1_io_time # key_group sample: diskio # key_name sample: io_time try: key_group, key_name = key.split('_')[0].split('-')[0], '_'.join(key.split('_')[1:]) except: # noqa: E722 key_group, key_name = key.split('_')[0], '_'.join(key.split('_')[1:]) if key_group in decoder.diff_metrics.keys(): if key_name in decoder.diff_metrics[key_group]: decoded_key = decoder.find_common_names( key) if self.prev_check: try: value = jsn[ts][key] - \ self.prev_check[key] except KeyError: logger.debug( 'There is no diff value for metric %s.\n' 'Timestamp: %s. Is it initial data?', key, ts, exc_info=True) value = 0 prepared_results[decoded_key] = value else: decoded_key = decoder.find_common_names( key) prepared_results[decoded_key] = value else: decoded_key = decoder.find_common_names( key) prepared_results[decoded_key] = value self.prev_check = jsn[ts] collect.append((ts, prepared_results)) except ValueError: logger.error( 'Telegraf agent send trash to output: %s', chunk) logger.debug( 'Telegraf agent data block w/ trash: %s', exc_info=True) return [] except BaseException: logger.error( 'Exception trying to parse agent data: %s', chunk, exc_info=True) return [] if collect: return collect
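The key-splitting convention described in the comments above can be shown in isolation (a sketch, not the plugin's actual decoder module):

def split_metric_key(key):
    # 'diskio-sda1_io_time' -> ('diskio', 'io_time')
    # 'mem_used'            -> ('mem', 'used')
    key_group = key.split('_')[0].split('-')[0]
    key_name = '_'.join(key.split('_')[1:])
    return key_group, key_name

print(split_metric_key('diskio-sda1_io_time'))   # ('diskio', 'io_time')
print(split_metric_key('mem_used'))              # ('mem', 'used')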
Start subscribing channels. If the necessary connection isn't open yet, it opens now.
async def subscribe(self, channels): '''Start subscribing channels. If the necessary connection isn't open yet, it opens now. ''' ws_channels = [] nats_channels = [] for c in channels: if c.startswith(('Q.', 'T.', 'A.', 'AM.',)): nats_channels.append(c) else: ws_channels.append(c) if len(ws_channels) > 0: await self._ensure_ws() await self._ws.send(json.dumps({ 'action': 'listen', 'data': { 'streams': ws_channels, } })) if len(nats_channels) > 0: await self._ensure_nats() await self.polygon.subscribe(nats_channels)
Run forever and block until an exception is raised. initial_channels is the list of channels to start with.
def run(self, initial_channels=[]): '''Run forever and block until exception is rasised. initial_channels is the channels to start with. ''' loop = asyncio.get_event_loop() try: loop.run_until_complete(self.subscribe(initial_channels)) loop.run_forever() finally: loop.run_until_complete(self.close())
Close any open connections
async def close(self): '''Close any of open connections''' if self._ws is not None: await self._ws.close() if self.polygon is not None: await self.polygon.close()
## Experimental
def df(self): '''## Experimental ''' if not hasattr(self, '_df'): dfs = [] for symbol, bars in self.items(): df = bars.df.copy() df.columns = pd.MultiIndex.from_product( [[symbol, ], df.columns]) dfs.append(df) if len(dfs) == 0: self._df = pd.DataFrame() else: self._df = pd.concat(dfs, axis=1) return self._df
Perform one request, possibly raising RetryException in the case the response is 429. Otherwise, if the error text contains the string "code", it is decoded as a JSON object and an APIError is raised. Returns the body JSON on a 200 status.
def _one_request(self, method, url, opts, retry): ''' Perform one request, possibly raising RetryException in the case the response is 429. Otherwise, if error text contain "code" string, then it decodes to json object and returns APIError. Returns the body json in the 200 status. ''' retry_codes = self._retry_codes resp = self._session.request(method, url, **opts) try: resp.raise_for_status() except HTTPError as http_error: # retry if we hit Rate Limit if resp.status_code in retry_codes and retry > 0: raise RetryException() if 'code' in resp.text: error = resp.json() if 'code' in error: raise APIError(error, http_error) else: raise if resp.text != '': return resp.json() return None
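A caller is expected to catch RetryException and re-enter _one_request with a smaller retry budget; a minimal sketch of such a loop (the wrapper name and the sleep interval are assumptions; RetryException is the exception referenced by _one_request above):

import time

def request_with_retry(api, method, url, opts, retries=3, retry_wait=1):
    # Re-issue the request while the server keeps answering 429; once the
    # budget hits zero, _one_request re-raises the underlying error instead.
    while True:
        try:
            return api._one_request(method, url, opts, retries)
        except RetryException:
            time.sleep(retry_wait)
            retries -= 1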
Get a list of orders. https://docs.alpaca.markets/web-api/orders/#get-a-list-of-orders
def list_orders(self, status=None, limit=None, after=None, until=None, direction=None, params=None): ''' Get a list of orders https://docs.alpaca.markets/web-api/orders/#get-a-list-of-orders ''' if params is None: params = dict() if limit is not None: params['limit'] = limit if after is not None: params['after'] = after if until is not None: params['until'] = until if direction is not None: params['direction'] = direction if status is not None: params['status'] = status resp = self.get('/orders', params) return [Order(o) for o in resp]
Request a new order
def submit_order(self, symbol, qty, side, type, time_in_force, limit_price=None, stop_price=None, client_order_id=None): '''Request a new order''' params = { 'symbol': symbol, 'qty': qty, 'side': side, 'type': type, 'time_in_force': time_in_force, } if limit_price is not None: params['limit_price'] = limit_price if stop_price is not None: params['stop_price'] = stop_price if client_order_id is not None: params['client_order_id'] = client_order_id resp = self.post('/orders', params) return Order(resp)
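A hedged usage sketch; the client variable api, the symbol and the prices are illustrative and not taken from the source:

order = api.submit_order(
    symbol='AAPL',
    qty=1,
    side='buy',
    type='limit',
    time_in_force='day',
    limit_price='180.00',
)
print(order)   # Order entity built from the POST /orders response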
Get an order
def get_order(self, order_id): '''Get an order''' resp = self.get('/orders/{}'.format(order_id)) return Order(resp)
Get an open position
def get_position(self, symbol): '''Get an open position''' resp = self.get('/positions/{}'.format(symbol)) return Position(resp)
Get a list of assets
def list_assets(self, status=None, asset_class=None): '''Get a list of assets''' params = { 'status': status, 'assert_class': asset_class, } resp = self.get('/assets', params) return [Asset(o) for o in resp]
Get an asset
def get_asset(self, symbol): '''Get an asset''' resp = self.get('/assets/{}'.format(symbol)) return Asset(resp)
Get BarSet (dict[str] -> list[Bar]). The parameter symbols can be either a comma-split string or a list of strings. Each symbol becomes a key of the returned value.
def get_barset(self, symbols, timeframe, limit=None, start=None, end=None, after=None, until=None): '''Get BarSet(dict[str]->list[Bar]) The parameter symbols can be either a comma-split string or a list of string. Each symbol becomes the key of the returned value. ''' if not isinstance(symbols, str): symbols = ','.join(symbols) params = { 'symbols': symbols, } if limit is not None: params['limit'] = limit if start is not None: params['start'] = start if end is not None: params['end'] = end if after is not None: params['after'] = after if until is not None: params['until'] = until resp = self.data_get('/bars/{}'.format(timeframe), params) return BarSet(resp)
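A usage sketch of the two accepted forms for symbols (assumes a configured REST client named api; the symbols and timeframe are illustrative):

barset = api.get_barset('AAPL,MSFT', 'day', limit=5)       # comma-split string
barset = api.get_barset(['AAPL', 'MSFT'], 'day', limit=5)  # list of strings

for symbol, bars in barset.items():   # keyed by symbol, as the docstring says
    print(symbol, len(bars))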
(decorator) Create a simple solid.
def lambda_solid(name=None, inputs=None, output=None, description=None): '''(decorator) Create a simple solid. This shortcut allows the creation of simple solids that do not require configuration and whose implementations do not require a context. Lambda solids take inputs and produce a single output. The body of the function should return a single value. Args: name (str): Name of solid. inputs (list[InputDefinition]): List of inputs. output (OutputDefinition): The output of the solid. Defaults to ``OutputDefinition()``. description (str): Solid description. Examples: .. code-block:: python @lambda_solid def hello_world(): return 'hello' @lambda_solid(inputs=[InputDefinition(name='foo')]) def hello_world(foo): return foo ''' output = output or OutputDefinition() if callable(name): check.invariant(inputs is None) check.invariant(description is None) return _LambdaSolid(output=output)(name) return _LambdaSolid(name=name, inputs=inputs, output=output, description=description)
(decorator) Create a solid with specified parameters.
def solid(name=None, inputs=None, outputs=None, config_field=None, description=None): '''(decorator) Create a solid with specified parameters. This shortcut simplifies the core solid API by exploding arguments into kwargs of the transform function and omitting additional parameters when they are not needed. Parameters are otherwise as in the core API, :py:class:`SolidDefinition`. The decorated function will be used as the solid's transform function. Unlike in the core API, the transform function does not have to yield :py:class:`Result` object directly. Several simpler alternatives are available: 1. Return a value. This is returned as a :py:class:`Result` for a single output solid. 2. Return a :py:class:`Result`. Works like yielding result. 3. Return an instance of :py:class:`MultipleResults`. Works like yielding several results for multiple outputs. Useful for solids that have multiple outputs. 4. Yield :py:class:`Result`. Same as default transform behaviour. Args: name (str): Name of solid. inputs (list[InputDefinition]): List of inputs. outputs (list[OutputDefinition]): List of outputs. config_field (Field): The configuration for this solid. description (str): Description of this solid. Examples: .. code-block:: python @solid def hello_world(_context): print('hello') @solid() def hello_world(_context): print('hello') @solid(outputs=[OutputDefinition()]) def hello_world(_context): return {'foo': 'bar'} @solid(outputs=[OutputDefinition()]) def hello_world(_context): return Result(value={'foo': 'bar'}) @solid(outputs=[OutputDefinition()]) def hello_world(_context): yield Result(value={'foo': 'bar'}) @solid(outputs=[ OutputDefinition(name="left"), OutputDefinition(name="right"), ]) def hello_world(_context): return MultipleResults.from_dict({ 'left': {'foo': 'left'}, 'right': {'foo': 'right'}, }) @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()] ) def hello_world(_context, foo): return foo @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()], ) def hello_world(context, foo): context.log.info('log something') return foo @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()], config_field=Field(types.Dict({'str_value' : Field(types.String)})), ) def hello_world(context, foo): # context.solid_config is a dictionary with 'str_value' key return foo + context.solid_config['str_value'] ''' # This case is for when decorator is used bare, without arguments. e.g. @solid versus @solid() if callable(name): check.invariant(inputs is None) check.invariant(outputs is None) check.invariant(description is None) check.invariant(config_field is None) return _Solid()(name) return _Solid( name=name, inputs=inputs, outputs=outputs, config_field=config_field, description=description, )
Create a new MultipleResults object from a dictionary. Keys of the dictionary are unpacked into result names. Args: result_dict (dict): The dictionary to unpack. Returns: (:py:class:`MultipleResults <dagster.MultipleResults>`) A new MultipleResults object.
def from_dict(result_dict): '''Create a new ``MultipleResults`` object from a dictionary. Keys of the dictionary are unpacked into result names. Args: result_dict (dict) - The dictionary to unpack. Returns: (:py:class:`MultipleResults <dagster.MultipleResults>`) A new ``MultipleResults`` object ''' check.dict_param(result_dict, 'result_dict', key_type=str) results = [] for name, value in result_dict.items(): results.append(Result(value, name)) return MultipleResults(*results)
This captures a common pattern of fanning out a single value to N steps, where each step has a similar structure. The strict requirement here is that each step must provide an output named by the parameter parallel_step_output.
def create_joining_subplan( pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output ): ''' This captures a common pattern of fanning out a single value to N steps, where each step has similar structure. The strict requirement here is that each step must provide an output named the parameters parallel_step_output. This takes those steps and then uses a join node to coalesce them so that downstream steps can depend on a single output. Currently the join step just does a passthrough with no computation. It remains to be seen if there should be any work or verification done in this step, especially in multi-process environments that require persistence between steps. ''' check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) check.inst_param(solid, 'solid', Solid) check.str_param(join_step_key, 'join_step_key') check.list_param(parallel_steps, 'parallel_steps', of_type=ExecutionStep) check.str_param(parallel_step_output, 'parallel_step_output') for parallel_step in parallel_steps: check.invariant(parallel_step.has_step_output(parallel_step_output)) join_step = create_join_step( pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output ) output_name = join_step.step_outputs[0].name return ExecutionValueSubplan( parallel_steps + [join_step], StepOutputHandle.from_step(join_step, output_name) )
Gunzips /path/to/foo.gz to /path/to/raw/2019/01/01/data.json
def gunzipper(gzip_file): '''gunzips /path/to/foo.gz to /path/to/raw/2019/01/01/data.json ''' # TODO: take date as an input path_prefix = os.path.dirname(gzip_file) output_folder = os.path.join(path_prefix, 'raw/2019/01/01') outfile = os.path.join(output_folder, 'data.json') if not safe_isfile(outfile): mkdir_p(output_folder) with gzip.open(gzip_file, 'rb') as f_in, open(outfile, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) return [path_prefix]
Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types specified by key_type, value_type.
def _check_key_value_types(obj, key_type, value_type, key_check=isinstance, value_check=isinstance): '''Ensures argument obj is a dictionary, and enforces that the keys/values conform to the types specified by key_type, value_type. ''' if not isinstance(obj, dict): raise_with_traceback(_type_mismatch_error(obj, dict)) if key_type is str: key_type = string_types if value_type is str: value_type = string_types for key, value in obj.items(): if key_type and not key_check(key, key_type): raise_with_traceback( CheckError( 'Key in dictionary mismatches type. Expected {key_type}. Got {obj_repr}'.format( key_type=repr(key_type), obj_repr=repr(key) ) ) ) if value_type and not value_check(value, value_type): raise_with_traceback( CheckError( 'Value in dictionary mismatches expected type for key {key}. Expected value ' 'of type {vtype}. Got value {value} of type {obj_type}.'.format( vtype=repr(value_type), obj_type=type(value), key=key, value=value ) ) ) return obj
Ensures argument obj is a native Python dictionary; raises an exception if not, and otherwise returns obj.
def dict_param(obj, param_name, key_type=None, value_type=None): '''Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise returns obj. ''' if not isinstance(obj, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) if not (key_type or value_type): return obj return _check_key_value_types(obj, key_type, value_type)
Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty dictionary.
def opt_dict_param(obj, param_name, key_type=None, value_type=None, value_class=None): '''Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty dictionary. ''' if obj is not None and not isinstance(obj, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) if not obj: return {} if value_class: return _check_key_value_types(obj, key_type, value_type=value_class, value_check=issubclass) return _check_key_value_types(obj, key_type, value_type)
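A sketch of how these helpers behave, assuming the dict_param and opt_dict_param functions above are in scope (the example values are illustrative):

config = dict_param({'retries': 3}, 'config', key_type=str, value_type=int)
print(config)                          # {'retries': 3} -- returned unchanged

print(opt_dict_param(None, 'config'))  # {} -- None is replaced by an empty dict

# dict_param(['not', 'a', 'dict'], 'config') would raise a check error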
Callback receives a stream of event_records
def construct_event_logger(event_record_callback): ''' Callback receives a stream of event_records ''' check.callable_param(event_record_callback, 'event_record_callback') return construct_single_handler_logger( 'event-logger', DEBUG, StructuredLoggerHandler( lambda logger_message: event_record_callback(construct_event_record(logger_message)) ), )
Record a stream of event records to json
def construct_json_event_logger(json_path): '''Record a stream of event records to json''' check.str_param(json_path, 'json_path') return construct_single_handler_logger( "json-event-record-logger", DEBUG, JsonEventLoggerHandler( json_path, lambda record: construct_event_record( StructuredLoggerMessage( name=record.name, message=record.msg, level=record.levelno, meta=record.dagster_meta, record=record, ) ), ), )
Read a config file and instantiate the RCParser.
def from_file(cls, path=None): """Read a config file and instantiate the RCParser. Create new :class:`configparser.ConfigParser` for the given **path** and instantiate the :class:`RCParser` with the ConfigParser as :attr:`config` attribute. If the **path** doesn't exist, raise :exc:`ConfigFileError`. Otherwise return a new :class:`RCParser` instance. :param path: Optional path to the config file to parse. If not given, use ``'~/.pypirc'``. """ path = path or cls.CONFIG_PATH if not os.path.exists(path): error = 'Config file not found: {0!r}'.format(path) raise ConfigFileError(error) config = read_config(path) return cls(config)
Get config dictionary for the given repository.
def get_repository_config(self, repository): """Get config dictionary for the given repository. If the repository section is not found in the config file, return ``None``. If the file is invalid, raise :exc:`configparser.Error`. Otherwise return a dictionary with: * ``'repository'`` -- the repository URL * ``'username'`` -- username for authentication * ``'password'`` -- password for authentication :param repository: Name or URL of the repository to find in the ``.pypirc`` file. The repository section must be defined in the config file. """ servers = self._read_index_servers() repo_config = self._find_repo_config(servers, repository) return repo_config
Assigns parameters into the appropriate place in the input notebook. Args: nb (NotebookNode): Executable notebook object. parameters (dict): Arbitrary keyword arguments to pass as notebook parameters.
def replace_parameters(context, nb, parameters): # Uma: This is a copy-paste from papermill papermill/execute.py:104 (execute_parameters). # Typically, papermill injects the injected-parameters cell *below* the parameters cell # but we want to *replace* the parameters cell, which is what this function does. '''Assigned parameters into the appropiate place in the input notebook Args: nb (NotebookNode): Executable notebook object parameters (dict): Arbitrary keyword arguments to pass to the notebook parameters. ''' # Copy the nb object to avoid polluting the input nb = copy.deepcopy(nb) # Generate parameter content based on the kernel_name param_content = DagsterTranslator.codify(parameters) # papermill method choosed translator based on kernel_name and language, # but we just call the DagsterTranslator # translate_parameters(kernel_name, language, parameters) newcell = nbformat.v4.new_code_cell(source=param_content) newcell.metadata['tags'] = ['injected-parameters'] param_cell_index = _find_first_tagged_cell_index(nb, 'parameters') injected_cell_index = _find_first_tagged_cell_index(nb, 'injected-parameters') if injected_cell_index >= 0: # Replace the injected cell with a new version before = nb.cells[:injected_cell_index] after = nb.cells[injected_cell_index + 1 :] check.int_value_param(param_cell_index, -1, 'param_cell_index') # We should have blown away the parameters cell if there is an injected-parameters cell elif param_cell_index >= 0: # Replace the parameter cell with the injected-parameters cell before = nb.cells[:param_cell_index] after = nb.cells[param_cell_index + 1 :] else: # Inject to the top of the notebook, presumably first cell includes dagstermill import context.log.debug( ( 'Warning notebook has no parameters cell, ' 'so first cell must import dagstermill and call dm.register_repo()' ) ) before = nb.cells[:1] after = nb.cells[1:] nb.cells = before + [newcell] + after nb.metadata.papermill['parameters'] = parameters return nb
Creates a solid with the given number of (meaningless) inputs and outputs.
def nonce_solid(name, n_inputs, n_outputs): """Creates a solid with the given number of (meaningless) inputs and outputs. Config controls the behavior of the nonce solid.""" @solid( name=name, inputs=[ InputDefinition(name='input_{}'.format(i)) for i in range(n_inputs) ], outputs=[ OutputDefinition(name='output_{}'.format(i)) for i in range(n_outputs) ], ) def solid_fn(context, **_kwargs): for i in range(200): time.sleep(0.02) if i % 1000 == 420: context.log.error( 'Error message seq={i} from solid {name}'.format( i=i, name=name ) ) elif i % 100 == 0: context.log.warning( 'Warning message seq={i} from solid {name}'.format( i=i, name=name ) ) elif i % 10 == 0: context.log.info( 'Info message seq={i} from solid {name}'.format( i=i, name=name ) ) else: context.log.debug( 'Debug message seq={i} from solid {name}'.format( i=i, name=name ) ) return MultipleResults.from_dict( {'output_{}'.format(i): 'foo' for i in range(n_outputs)} ) return solid_fn
This recursive descent thing formats a config dict for GraphQL.
def format_config_for_graphql(config): '''This recursive descent thing formats a config dict for GraphQL.''' def _format_config_subdict(config, current_indent=0): check.dict_param(config, 'config', key_type=str) printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent) printer.line('{') n_elements = len(config) for i, key in enumerate(sorted(config, key=lambda x: x[0])): value = config[key] with printer.with_indent(): formatted_value = ( _format_config_item(value, current_indent=printer.current_indent) .lstrip(' ') .rstrip('\n') ) printer.line( '{key}: {formatted_value}{comma}'.format( key=key, formatted_value=formatted_value, comma=',' if i != n_elements - 1 else '', ) ) printer.line('}') return printer.read() def _format_config_sublist(config, current_indent=0): printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent) printer.line('[') n_elements = len(config) for i, value in enumerate(config): with printer.with_indent(): formatted_value = ( _format_config_item(value, current_indent=printer.current_indent) .lstrip(' ') .rstrip('\n') ) printer.line( '{formatted_value}{comma}'.format( formatted_value=formatted_value, comma=',' if i != n_elements - 1 else '' ) ) printer.line(']') return printer.read() def _format_config_item(config, current_indent=0): printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent) if isinstance(config, dict): return _format_config_subdict(config, printer.current_indent) elif isinstance(config, list): return _format_config_sublist(config, printer.current_indent) elif isinstance(config, bool): return repr(config).lower() else: return repr(config).replace('\'', '"') check.dict_param(config, 'config', key_type=str) if not isinstance(config, dict): check.failed('Expected a dict to format as config, got: {item}'.format(item=repr(config))) return _format_config_subdict(config)
Get a pipeline by name. Only constructs that pipeline and caches it.
def get_pipeline(self, name): '''Get a pipeline by name. Only constructs that pipeline and caches it. Args: name (str): Name of the pipeline to retriever Returns: PipelineDefinition: Instance of PipelineDefinition with that name. ''' check.str_param(name, 'name') if name in self._pipeline_cache: return self._pipeline_cache[name] try: pipeline = self.pipeline_dict[name]() except KeyError: raise DagsterInvariantViolationError( 'Could not find pipeline "{name}". Found: {pipeline_names}.'.format( name=name, pipeline_names=', '.join( [ '"{pipeline_name}"'.format(pipeline_name=pipeline_name) for pipeline_name in self.pipeline_dict.keys() ] ), ) ) check.invariant( pipeline.name == name, 'Name does not match. Name in dict {name}. Name in pipeline {pipeline.name}'.format( name=name, pipeline=pipeline ), ) self._pipeline_cache[name] = check.inst( pipeline, PipelineDefinition, ( 'Function passed into pipeline_dict with key {key} must return a ' 'PipelineDefinition' ).format(key=name), ) return pipeline
Return all pipelines as a list
def get_all_pipelines(self): '''Return all pipelines as a list Returns: List[PipelineDefinition]: ''' pipelines = list(map(self.get_pipeline, self.pipeline_dict.keys())) # This does uniqueness check self._construct_solid_defs(pipelines) return pipelines
Spark configuration.
def define_spark_config(): '''Spark configuration. See the Spark documentation for reference: https://spark.apache.org/docs/latest/submitting-applications.html ''' master_url = Field( String, description='The master URL for the cluster (e.g. spark://23.195.26.187:7077)', is_optional=False, ) deploy_mode = Field( SparkDeployMode, description='''Whether to deploy your driver on the worker nodes (cluster) or locally as an external client (client) (default: client). A common deployment strategy is to submit your application from a gateway machine that is physically co-located with your worker machines (e.g. Master node in a standalone EC2 cluster). In this setup, client mode is appropriate. In client mode, the driver is launched directly within the spark-submit process which acts as a client to the cluster. The input and output of the application is attached to the console. Thus, this mode is especially suitable for applications that involve the REPL (e.g. Spark shell).''', is_optional=True, ) application_jar = Field( Path, description='''Path to a bundled jar including your application and all dependencies. The URL must be globally visible inside of your cluster, for instance, an hdfs:// path or a file:// path that is present on all nodes. ''', is_optional=False, ) application_arguments = Field( String, description='Arguments passed to the main method of your main class, if any', is_optional=True, ) spark_home = Field( String, description='The path to your spark installation. Defaults to $SPARK_HOME at runtime if not provided.', is_optional=True, ) spark_outputs = Field(List(String), description='The outputs that this Spark job will produce') return Field( Dict( fields={ 'master_url': master_url, 'deploy_mode': deploy_mode, 'application_jar': application_jar, 'spark_conf': spark_config(), 'spark_home': spark_home, 'application_arguments': application_arguments, 'spark_outputs': spark_outputs, } ) )
This function polls the process until it returns a valid item, or returns PROCESS_DEAD_AND_QUEUE_EMPTY if the process has terminated and the queue is empty.
def get_next_event(process, queue): ''' This function polls the process until it returns a valid item or returns PROCESS_DEAD_AND_QUEUE_EMPTY if it is in a state where the process has terminated and the queue is empty Warning: if the child process is in an infinite loop. This will also infinitely loop. ''' while True: try: return queue.get(block=True, timeout=TICK) except multiprocessing.queues.Empty: if not process.is_alive(): # There is a possibility that after the last queue.get the # process created another event and then died. In that case # we want to continue draining the queue. try: return queue.get(block=False) except multiprocessing.queues.Empty: # If the queue empty we know that there are no more events # and that the process has died. return PROCESS_DEAD_AND_QUEUE_EMPTY check.failed('unreachable')
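A sketch of the consumer loop a caller might run around get_next_event (drain_events and handle_event are hypothetical names):

def drain_events(process, queue, handle_event):
    # Keep pulling events until the child is dead and the queue is drained.
    while True:
        event = get_next_event(process, queue)
        if event is PROCESS_DEAD_AND_QUEUE_EMPTY:
            return
        handle_event(event)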
Execute pipeline using message queue as a transport
def execute_pipeline_through_queue( repository_info, pipeline_name, solid_subset, environment_dict, run_id, message_queue, reexecution_config, step_keys_to_execute, ): """ Execute pipeline using message queue as a transport """ message_queue.put(ProcessStartedSentinel(os.getpid())) run_config = RunConfig( run_id, event_callback=message_queue.put, executor_config=InProcessExecutorConfig(raise_on_error=False), reexecution_config=reexecution_config, step_keys_to_execute=step_keys_to_execute, ) repository_container = RepositoryContainer(repository_info) if repository_container.repo_error: message_queue.put( MultiprocessingError( serializable_error_info_from_exc_info(repository_container.repo_error) ) ) return try: result = execute_pipeline( repository_container.repository.get_pipeline(pipeline_name).build_sub_pipeline( solid_subset ), environment_dict, run_config=run_config, ) return result except: # pylint: disable=W0702 error_info = serializable_error_info_from_exc_info(sys.exc_info()) message_queue.put(MultiprocessingError(error_info)) finally: message_queue.put(MultiprocessingDone()) message_queue.close()
Waits until there are no processes enqueued.
def join(self): '''Waits until all there are no processes enqueued.''' while True: with self._processes_lock: if not self._processes and self._processing_semaphore.locked(): return True gevent.sleep(0.1)
The schema for configuration data that describes the type, optionality, defaults, and description.
def Field( dagster_type, default_value=FIELD_NO_DEFAULT_PROVIDED, is_optional=INFER_OPTIONAL_COMPOSITE_FIELD, is_secret=False, description=None, ): ''' The schema for configuration data that describes the type, optionality, defaults, and description. Args: dagster_type (DagsterType): A ``DagsterType`` describing the schema of this field, ie `Dict({'example': Field(String)})` default_value (Any): A default value to use that respects the schema provided via dagster_type is_optional (bool): Whether the presence of this field is optional despcription (str): ''' config_type = resolve_to_config_type(dagster_type) if not config_type: raise DagsterInvalidDefinitionError( ( 'Attempted to pass {value_repr} to a Field that expects a valid ' 'dagster type usable in config (e.g. Dict, NamedDict, Int, String et al).' ).format(value_repr=repr(dagster_type)) ) return FieldImpl( config_type=resolve_to_config_type(dagster_type), default_value=default_value, is_optional=is_optional, is_secret=is_secret, description=description, )
Snowflake configuration.
def define_snowflake_config(): '''Snowflake configuration. See the Snowflake documentation for reference: https://docs.snowflake.net/manuals/user-guide/python-connector-api.html ''' account = Field( String, description='Your Snowflake account name. For more details, see https://bit.ly/2FBL320.', is_optional=True, ) user = Field(String, description='User login name.', is_optional=False) password = Field(String, description='User password.', is_optional=False) database = Field( String, description='''Name of the default database to use. After login, you can use USE DATABASE to change the database.''', is_optional=True, ) schema = Field( String, description='''Name of the default schema to use. After login, you can use USE SCHEMA to change the schema.''', is_optional=True, ) role = Field( String, description='''Name of the default role to use. After login, you can use USE ROLE to change the role.''', is_optional=True, ) warehouse = Field( String, description='''Name of the default warehouse to use. After login, you can use USE WAREHOUSE to change the role.''', is_optional=True, ) autocommit = Field( Bool, description='''None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True or False to enable or disable autocommit mode in the session, respectively.''', is_optional=True, ) client_prefetch_threads = Field( Int, description='''Number of threads used to download the results sets (4 by default). Increasing the value improves fetch performance but requires more memory.''', is_optional=True, ) client_session_keep_alive = Field( String, description='''False by default. Set this to True to keep the session active indefinitely, even if there is no activity from the user. Make certain to call the close method to terminate the thread properly or the process may hang.''', is_optional=True, ) login_timeout = Field( Int, description='''Timeout in seconds for login. By default, 60 seconds. The login request gives up after the timeout length if the HTTP response is "success".''', is_optional=True, ) network_timeout = Field( Int, description='''Timeout in seconds for all other operations. By default, none/infinite. A general request gives up after the timeout length if the HTTP response is not "success"''', is_optional=True, ) ocsp_response_cache_filename = Field( Path, description='''URI for the OCSP response cache file. By default, the OCSP response cache file is created in the cache directory.''', is_optional=True, ) validate_default_parameters = Field( Bool, description='''False by default. Raise an exception if either one of specified database, schema or warehouse doesn't exists if True.''', is_optional=True, ) paramstyle = Field( # TODO should validate only against permissible values for this String, description='''pyformat by default for client side binding. Specify qmark or numeric to change bind variable formats for server side binding.''', is_optional=True, ) timezone = Field( String, description='''None by default, which honors the Snowflake parameter TIMEZONE. Set to a valid time zone (e.g. 
America/Los_Angeles) to set the session time zone.''', is_optional=True, ) return Field( Dict( fields={ 'account': account, 'user': user, 'password': password, 'database': database, 'schema': schema, 'role': role, 'warehouse': warehouse, 'autocommit': autocommit, 'client_prefetch_threads': client_prefetch_threads, 'client_session_keep_alive': client_session_keep_alive, 'login_timeout': login_timeout, 'network_timeout': network_timeout, 'ocsp_response_cache_filename': ocsp_response_cache_filename, 'validate_default_parameters': validate_default_parameters, 'paramstyle': paramstyle, 'timezone': timezone, } ), description='Snowflake configuration', )
Builds the execution plan.
def build(self, pipeline_def, artifacts_persisted): '''Builds the execution plan. ''' # Construct dependency dictionary deps = {step.key: set() for step in self.steps} for step in self.steps: for step_input in step.step_inputs: deps[step.key].add(step_input.prev_output_handle.step_key) step_dict = {step.key: step for step in self.steps} return ExecutionPlan(pipeline_def, step_dict, deps, artifacts_persisted)
Here we build a new ExecutionPlan from a pipeline definition and the environment config.
def build(pipeline_def, environment_config): '''Here we build a new ExecutionPlan from a pipeline definition and the environment config. To do this, we iterate through the pipeline's solids in topological order, and hand off the execution steps for each solid to a companion _PlanBuilder object. Once we've processed the entire pipeline, we invoke _PlanBuilder.build() to construct the ExecutionPlan object. ''' check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) check.inst_param(environment_config, 'environment_config', EnvironmentConfig) plan_builder = _PlanBuilder() for solid in solids_in_topological_order(pipeline_def): ### 1. INPUTS # Create and add execution plan steps for solid inputs step_inputs = [] for input_def in solid.definition.input_defs: prev_step_output_handle = get_input_source_step_handle( pipeline_def, environment_config, plan_builder, solid, input_def ) # We return None for the handle (see above in get_input_source_step_handle) when the # input def runtime type is "Nothing" if not prev_step_output_handle: continue subplan = create_subplan_for_input( pipeline_def, environment_config, solid, prev_step_output_handle, input_def ) plan_builder.add_steps(subplan.steps) step_inputs.append( StepInput( input_def.name, input_def.runtime_type, subplan.terminal_step_output_handle ) ) ### 2. TRANSFORM FUNCTION # Create and add execution plan step for the solid transform function solid_transform_step = create_transform_step( pipeline_def, environment_config, solid, step_inputs ) plan_builder.add_step(solid_transform_step) ### 3. OUTPUTS # Create and add execution plan steps (and output handles) for solid outputs for output_def in solid.definition.output_defs: subplan = create_subplan_for_output( pipeline_def, environment_config, solid, solid_transform_step, output_def ) plan_builder.add_steps(subplan.steps) output_handle = solid.output_handle(output_def.name) plan_builder.set_output_handle(output_handle, subplan.terminal_step_output_handle) # Finally, we build and return the execution plan return plan_builder.build( pipeline_def=pipeline_def, artifacts_persisted=environment_config.storage.construct_run_storage().is_persistent, )
Build a pipeline which is a subset of another pipeline. Only includes the solids which are in solid_names.
def _build_sub_pipeline(pipeline_def, solid_names): ''' Build a pipeline which is a subset of another pipeline. Only includes the solids which are in solid_names. ''' check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) check.list_param(solid_names, 'solid_names', of_type=str) solid_name_set = set(solid_names) solids = list(map(pipeline_def.solid_named, solid_names)) deps = {_dep_key_of(solid): {} for solid in solids} def _out_handle_of_inp(input_handle): if pipeline_def.dependency_structure.has_dep(input_handle): output_handle = pipeline_def.dependency_structure.get_dep(input_handle) if output_handle.solid.name in solid_name_set: return output_handle return None for solid in solids: for input_handle in solid.input_handles(): output_handle = _out_handle_of_inp(input_handle) if output_handle: deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition( solid=output_handle.solid.name, output=output_handle.output_def.name ) return PipelineDefinition( name=pipeline_def.name, solids=list({solid.definition for solid in solids}), context_definitions=pipeline_def.context_definitions, dependencies=deps, )
Return the solid named "name". Throws if it does not exist.
def solid_named(self, name): '''Return the solid named "name". Throws if it does not exist. Args: name (str): Name of solid Returns: SolidDefinition: SolidDefinition with correct name. ''' check.str_param(name, 'name') if name not in self._solid_dict: raise DagsterInvariantViolationError( 'Pipeline {pipeline_name} has no solid named {name}.'.format( pipeline_name=self.name, name=name ) ) return self._solid_dict[name]
Get the shell commands we'll use to actually build and publish a package to PyPI.
def construct_publish_comands(additional_steps=None, nightly=False): '''Get the shell commands we'll use to actually build and publish a package to PyPI.''' publish_commands = ( ['rm -rf dist'] + (additional_steps if additional_steps else []) + [ 'python setup.py sdist bdist_wheel{nightly}'.format( nightly=' --nightly' if nightly else '' ), 'twine upload dist/*', ] ) return publish_commands
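As a concrete illustration derived directly from the function above (the additional step name is invented), the returned list interleaves the extra steps between the cleanup and build commands:
commands = construct_publish_comands(additional_steps=['check_versions'], nightly=True)
# commands == ['rm -rf dist', 'check_versions',
#              'python setup.py sdist bdist_wheel --nightly', 'twine upload dist/*']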
Publishes (uploads) all submodules to PyPI.
def publish(nightly): """Publishes (uploads) all submodules to PyPI. Appropriate credentials must be available to twine, e.g. in a ~/.pypirc file, and users must be permissioned as maintainers on the PyPI projects. Publishing will fail if versions (git tags and Python versions) are not in lockstep, if the current commit is not tagged, or if there are untracked changes. """ try: RCParser.from_file() except ConfigFileError: raise ConfigFileError(PYPIRC_EXCEPTION_MESSAGE) assert '\nwheel' in subprocess.check_output(['pip', 'list']).decode('utf-8'), ( 'You must have wheel installed in order to build packages for release -- run ' '`pip install wheel`.' ) assert which_('twine'), ( 'You must have twine installed in order to upload packages to PyPI -- run ' '`pip install twine`.' ) assert which_('yarn'), ( 'You must have yarn installed in order to build dagit for release -- see ' 'https://yarnpkg.com/lang/en/docs/install/' ) print('Checking that module versions are in lockstep') check_versions(nightly=nightly) if not nightly: print('... and match git tag on most recent commit...') check_git_status() print('Publishing packages to PyPI...') if nightly: new_version = increment_nightly_versions() commit_new_version('nightly: {nightly}'.format(nightly=new_version['__nightly__'])) set_git_tag('{nightly}'.format(nightly=new_version['__nightly__'])) git_push() git_push(tags=True) publish_all(nightly)
Tags all submodules for a new release.
def release(version): """Tags all submodules for a new release. Ensures that git tags, as well as the version.py files in each submodule, agree and that the new version is strictly greater than the current version. Will fail if the new version is not an increment (following PEP 440). Creates a new git tag and commit. """ check_new_version(version) set_new_version(version) commit_new_version(version) set_git_tag(version)
Create a context definition from a pre-existing context. This can be useful in testing contexts where you may want to create a context manually and then pass it into a one-off PipelineDefinition.
def passthrough_context_definition(context_params): '''Create a context definition from a pre-existing context. This can be useful in testing contexts where you may want to create a context manually and then pass it into a one-off PipelineDefinition Args: context (ExecutionContext): The context that will provided to the pipeline. Returns: PipelineContextDefinition: The passthrough context definition. ''' check.inst_param(context_params, 'context', ExecutionContext) context_definition = PipelineContextDefinition(context_fn=lambda *_args: context_params) return {DEFAULT_CONTEXT_NAME: context_definition}
A decorator for annotating a function that can take the selected properties from a config_value into an instance of a custom type.
def input_selector_schema(config_cls): ''' A decorator for annotating a function that can take the selected properties from a ``config_value`` in to an instance of a custom type. Args: config_cls (Selector) ''' config_type = resolve_config_cls_arg(config_cls) check.param_invariant(config_type.is_selector, 'config_cls') def _wrap(func): def _selector(context, config_value): selector_key, selector_value = single_item(config_value) return func(context, selector_key, selector_value) return _create_input_schema(config_type, _selector) return _wrap
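A hedged usage sketch only: MyInputSelector, load_input, load_csv, and the 'csv' key are all invented names, and the Selector config class is assumed to be defined elsewhere; the point is that the decorated function receives the selected key and its value, as unpacked by single_item above.
@input_selector_schema(MyInputSelector)  # MyInputSelector: assumed Selector config class (not from the source)
def load_input(context, selector_key, selector_value):
    # selector_key / selector_value come from single_item(config_value) in the wrapper above
    if selector_key == 'csv':
        return load_csv(selector_value)  # load_csv is an invented helper for illustration
    raise Exception('Unhandled selector key: {key}'.format(key=selector_key))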
A decorator for annotating a function that can take the selected properties of a config_value and an instance of a custom type and materialize it.
def output_selector_schema(config_cls): ''' A decorator for a annotating a function that can take the selected properties of a ``config_value`` and an instance of a custom type and materialize it. Args: config_cls (Selector): ''' config_type = resolve_config_cls_arg(config_cls) check.param_invariant(config_type.is_selector, 'config_cls') def _wrap(func): def _selector(context, config_value, runtime_value): selector_key, selector_value = single_item(config_value) return func(context, selector_key, selector_value, runtime_value) return _create_output_schema(config_type, _selector) return _wrap
Automagically wrap a block of text.
def block(self, text, prefix=''): '''Automagically wrap a block of text.''' wrapper = TextWrapper( width=self.line_length - len(self.current_indent_str), initial_indent=prefix, subsequent_indent=prefix, break_long_words=False, break_on_hyphens=False, ) for line in wrapper.wrap(text): self.line(line)
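The wrapping behavior comes from the standard library; a self-contained sketch with an invented width and sample text shows how initial_indent and subsequent_indent keep the prefix on every wrapped line.
from textwrap import TextWrapper

wrapper = TextWrapper(
    width=40,  # illustrative line length
    initial_indent='> ',
    subsequent_indent='> ',
    break_long_words=False,
    break_on_hyphens=False,
)
for line in wrapper.wrap('An example sentence that is long enough to be wrapped onto several lines.'):
    print(line)  # every emitted line fits in 40 columns and starts with '> '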
The following fields are shared by both QueryJobConfig and LoadJobConfig.
def _define_shared_fields(): '''The following fields are shared between both QueryJobConfig and LoadJobConfig. ''' clustering_fields = Field( List(String), description='''Fields defining clustering for the table (Defaults to None). Clustering fields are immutable after table creation. ''', is_optional=True, ) create_disposition = Field( BQCreateDisposition, description='''Specifies behavior for creating tables. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.createDisposition ''', is_optional=True, ) destination_encryption_configuration = Field( String, description='''Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or None if using default encryption. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationEncryptionConfiguration ''', is_optional=True, ) schema_update_options = Field( List(BQSchemaUpdateOption), description='''Specifies updates to the destination table schema to allow as a side effect of the query job.''', is_optional=True, ) time_partitioning = Field( Dict( fields={ 'expiration_ms': Field( Int, description='''Number of milliseconds for which to keep the storage for a partition.''', is_optional=True, ), 'field': Field( String, description='''If set, the table is partitioned by this field. If not set, the table is partitioned by pseudo column _PARTITIONTIME. The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED.''', is_optional=True, ), 'require_partition_filter': Field( Bool, description='''If set to true, queries over the partitioned table require a partition filter that can be used for partition elimination to be specified.''', is_optional=True, ), } ), description='Specifies time-based partitioning for the destination table.', is_optional=True, ) write_disposition = Field( BQWriteDisposition, description=''' Action that occurs if the destination table already exists. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.writeDisposition ''', is_optional=True, ) return { 'clustering_fields': clustering_fields, 'create_disposition': create_disposition, 'destination_encryption_configuration': destination_encryption_configuration, 'schema_update_options': schema_update_options, 'time_partitioning': time_partitioning, 'write_disposition': write_disposition, }
See: https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html
def define_bigquery_query_config(): '''See: https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html ''' sf = _define_shared_fields() allow_large_results = Field( Bool, description='''Allow large query results tables (legacy SQL, only) See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.allowLargeResults ''', is_optional=True, ) default_dataset = Field( Dataset, description='''the default dataset to use for unqualified table names in the query or None if not set. The default_dataset setter accepts a str of the fully-qualified dataset ID in standard SQL format. The value must included a project ID and dataset ID separated by ".". For example: your-project.your_dataset. See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset ''', is_optional=True, ) destination = Field( Table, description='''table where results are written or None if not set. The destination setter accepts a str of the fully-qualified table ID in standard SQL format. The value must included a project ID, dataset ID, and table ID, each separated by ".". For example: your-project.your_dataset.your_table. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable ''', is_optional=True, ) dry_run = Field( Bool, description='''True if this query should be a dry run to estimate costs. See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.dryRun ''', is_optional=True, ) flatten_results = Field( Bool, description='''Flatten nested/repeated fields in results. (Legacy SQL only) See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.flattenResults ''', is_optional=True, ) maximum_billing_tier = Field( Int, description='''Deprecated. Changes the billing tier to allow high-compute queries. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBillingTier ''', is_optional=True, ) maximum_bytes_billed = Field( Int, description='''Maximum bytes to be billed for this job or None if not set. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBytesBilled ''', is_optional=True, ) priority = Field( BQPriority, description='''Priority of the query. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.priority ''', is_optional=True, ) query_parameters = Field( List(String), description='''list of parameters for parameterized query (empty by default) See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.queryParameters ''', is_optional=True, ) # TODO: # Type: Dict[str, google.cloud.bigquery.external_config.ExternalConfig] # table_definitions = Field( # PermissiveDict(), # description='''Definitions for external tables or None if not set. # See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions # ''', # is_optional=True, # ) # TODO: Need to add this # Type: List[google.cloud.bigquery.query.UDFResource] # udf_resources = Field( # String, # description='''user defined function resources (empty by default) # See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.userDefinedFunctionResources # ''', # is_optional=True # ) use_legacy_sql = Field( Bool, description='''Use legacy SQL syntax. 
See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.useLegacySql ''', is_optional=True, ) use_query_cache = Field( Bool, description='''Look for the query result in the cache. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.useQueryCache ''', is_optional=True, ) return Field( Dict( fields={ 'query_job_config': Field( Dict( fields={ 'allow_large_results': allow_large_results, 'clustering_fields': sf['clustering_fields'], 'create_disposition': sf['create_disposition'], 'default_dataset': default_dataset, 'destination': destination, 'destination_encryption_configuration': sf[ 'destination_encryption_configuration' ], 'dry_run': dry_run, 'flatten_results': flatten_results, # TODO: labels 'maximum_billing_tier': maximum_billing_tier, 'maximum_bytes_billed': maximum_bytes_billed, 'priority': priority, 'query_parameters': query_parameters, # TODO: table_definitions 'schema_update_options': sf['schema_update_options'], 'time_partitioning': sf['time_partitioning'], # TODO: udf_resources 'use_legacy_sql': use_legacy_sql, 'use_query_cache': use_query_cache, 'write_disposition': sf['write_disposition'], } ) ) } ), description='BigQuery query configuration', )
Return a new solid that executes and materializes a SQL select statement.
def sql_solid(name, select_statement, materialization_strategy, table_name=None, inputs=None): '''Return a new solid that executes and materializes a SQL select statement. Args: name (str): The name of the new solid. select_statement (str): The select statement to execute. materialization_strategy (str): Must be 'table', the only currently supported materialization strategy. If 'table', the kwarg `table_name` must also be passed. Kwargs: table_name (str): THe name of the new table to create, if the materialization strategy is 'table'. Default: None. inputs (list[InputDefinition]): Inputs, if any, for the new solid. Default: None. Returns: function: The new SQL solid. ''' inputs = check.opt_list_param(inputs, 'inputs', InputDefinition) materialization_strategy_output_types = { # pylint:disable=C0103 'table': SqlTableName, # 'view': String, # 'query': SqlAlchemyQueryType, # 'subquery': SqlAlchemySubqueryType, # 'result_proxy': SqlAlchemyResultProxyType, # could also materialize as a Pandas table, as a Spark table, as an intermediate file, etc. } if materialization_strategy not in materialization_strategy_output_types: raise Exception( 'Invalid materialization strategy {materialization_strategy}, must ' 'be one of {materialization_strategies}'.format( materialization_strategy=materialization_strategy, materialization_strategies=str(list(materialization_strategy_output_types.keys())), ) ) if materialization_strategy == 'table': if table_name is None: raise Exception('Missing table_name: required for materialization strategy \'table\'') output_description = ( 'The string name of the new table created by the solid' if materialization_strategy == 'table' else 'The materialized SQL statement. If the materialization_strategy is ' '\'table\', this is the string name of the new table created by the solid.' ) description = '''This solid executes the following SQL statement: {select_statement}'''.format( select_statement=select_statement ) # n.b., we will eventually want to make this resources key configurable sql_statement = ( 'drop table if exists {table_name};\n' 'create table {table_name} as {select_statement};' ).format(table_name=table_name, select_statement=select_statement) def transform_fn(context, _inputs): '''Inner function defining the new solid. Args: context (TransformExecutionContext): Must expose a `db` resource with an `execute` method, like a SQLAlchemy engine, that can execute raw SQL against a database. Returns: str: The table name of the newly materialized SQL select statement. ''' context.log.info( 'Executing sql statement:\n{sql_statement}'.format(sql_statement=sql_statement) ) context.resources.db_info.engine.execute(text(sql_statement)) yield Result(value=table_name, output_name='result') return SolidDefinition( name=name, inputs=inputs, outputs=[ OutputDefinition( materialization_strategy_output_types[materialization_strategy], description=output_description, ) ], transform_fn=transform_fn, description=description, metadata={'kind': 'sql', 'sql': sql_statement}, )
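For concreteness, the sql_statement assembled above expands as follows for an invented table name and select statement:
sql_statement = (
    'drop table if exists {table_name};\n'
    'create table {table_name} as {select_statement};'
).format(table_name='sum_table', select_statement='select * from num_table')
# -> 'drop table if exists sum_table;\ncreate table sum_table as select * from num_table;'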
Download an object from s3.
def download_from_s3(context): '''Download an object from s3. Args: info (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource. Returns: str: The path to the downloaded object. ''' target_file = context.solid_config['target_file'] return context.resources.download_manager.download_file_contents(context, target_file)
Upload a file to s3.
def upload_to_s3(context, file_obj): '''Upload a file to s3. Args: info (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource. Returns: (str, str): The bucket and key to which the file was uploaded. ''' bucket = context.solid_config['bucket'] key = context.solid_config['key'] context.resources.s3.put_object( Bucket=bucket, Body=file_obj.read(), Key=key, **(context.solid_config.get('kwargs') or {}) ) yield Result(bucket, 'bucket') yield Result(key, 'key')
Wraps the execution of user-space code in an error boundary. This places a uniform policy around all user code invoked by the framework. This ensures that all user errors are wrapped in the DagsterUserCodeExecutionError and that the original stack trace of the user error is preserved, so that it can be reported without confusing framework code in the stack trace, if a tool author wishes to do so. This has been especially helpful in a notebooking context.
def user_code_error_boundary(error_cls, msg, **kwargs): ''' Wraps the execution of user-space code in an error boundary. This places a uniform policy around all user code invoked by the framework. This ensures that all user errors are wrapped in the DagsterUserCodeExecutionError, and that the original stack trace of the user error is preserved, so that it can be reported without confusing framework code in the stack trace, if a tool author wishes to do so. This has been especially helpful in a notebooking context. ''' check.str_param(msg, 'msg') check.subclass_param(error_cls, 'error_cls', DagsterUserCodeExecutionError) try: yield except Exception as e: # pylint: disable=W0703 if isinstance(e, DagsterError): # The system has thrown an error that is part of the user-framework contract raise e else: # An exception has been thrown by user code and computation should cease # with the error reported further up the stack raise_from( error_cls(msg, user_exception=e, original_exc_info=sys.exc_info(), **kwargs), e )
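A hedged sketch of how this boundary is used; the error message and user_supplied_fn are placeholders, and the sketch assumes the elided contextmanager decorator that user_code_context_manager below relies on.
# Sketch only: re-raise any exception from user code as the framework error type,
# preserving the original traceback via raise_from above.
with user_code_error_boundary(
    DagsterUserCodeExecutionError,  # or a subclass, per the subclass_param check
    'Error occurred while executing a user-provided function',
):
    user_supplied_fn()  # user_supplied_fn is an invented placeholder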
The missing mkdir -p functionality in os.
def mkdir_p(newdir, mode=0o777): """The missing mkdir -p functionality in os.""" try: os.makedirs(newdir, mode) except OSError as err: # Reraise the error unless it's about an already existing directory if err.errno != errno.EEXIST or not os.path.isdir(newdir): raise
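A minimal usage note (the path is illustrative): because EEXIST on an existing directory is swallowed, repeated calls are safe.
mkdir_p('/tmp/example/nested/dirs')  # creates any missing parent directories
mkdir_p('/tmp/example/nested/dirs')  # second call is a no-op rather than an error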
Wraps the output of a user-provided function that may yield or return a value and returns a generator that asserts it only yields a single value.
def user_code_context_manager(user_fn, error_cls, msg): '''Wraps the output of a user provided function that may yield or return a value and returns a generator that asserts it only yields a single value. ''' check.callable_param(user_fn, 'user_fn') check.subclass_param(error_cls, 'error_cls', DagsterUserCodeExecutionError) with user_code_error_boundary(error_cls, msg): thing_or_gen = user_fn() gen = _ensure_gen(thing_or_gen) try: thing = next(gen) except StopIteration: check.failed('Must yield one item. You did not yield anything.') yield thing stopped = False try: next(gen) except StopIteration: stopped = True check.invariant(stopped, 'Must yield one item. Yielded more than one item')
Construct the run storage for this pipeline. Our rules are the following: if the RunConfig provides a storage_mode, use that; otherwise fall back to the environment config; if neither is set, default to in-memory storage (mostly so that tests default to in-memory).
def construct_run_storage(run_config, environment_config): ''' Construct the run storage for this pipeline. Our rules are the following: If the RunConfig has a storage_mode provided, we use that. Then we fallback to environment config. If there is no config, we default to in memory storage. This is mostly so that tests default to in-memory. ''' check.inst_param(run_config, 'run_config', RunConfig) check.inst_param(environment_config, 'environment_config', EnvironmentConfig) if run_config.storage_mode: if run_config.storage_mode == RunStorageMode.FILESYSTEM: return FileSystemRunStorage() elif run_config.storage_mode == RunStorageMode.IN_MEMORY: return InMemoryRunStorage() elif run_config.storage_mode == RunStorageMode.S3: # TODO: Revisit whether we want to use S3 run storage return FileSystemRunStorage() else: check.failed('Unexpected enum {}'.format(run_config.storage_mode)) elif environment_config.storage.storage_mode == 'filesystem': return FileSystemRunStorage() elif environment_config.storage.storage_mode == 'in_memory': return InMemoryRunStorage() elif environment_config.storage.storage_mode == 's3': # TODO: Revisit whether we want to use S3 run storage return FileSystemRunStorage() elif environment_config.storage.storage_mode is None: return InMemoryRunStorage() else: raise DagsterInvariantViolationError( 'Invalid storage specified {}'.format(environment_config.storage.storage_mode) )
In the event of pipeline initialization failure, we want to be able to log the failure without a dependency on the ExecutionContext to initialize DagsterLog.
def _create_context_free_log(run_config, pipeline_def): '''In the event of pipeline initialization failure, we want to be able to log the failure without a dependency on the ExecutionContext to initialize DagsterLog ''' check.inst_param(run_config, 'run_config', RunConfig) check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) # Use the default logger loggers = [define_colored_console_logger('dagster')] if run_config.event_callback: loggers += [construct_event_logger(run_config.event_callback)] elif run_config.loggers: loggers += run_config.loggers return DagsterLog(run_config.run_id, get_logging_tags(None, run_config, pipeline_def), loggers)
Returns iterator that yields :py:class:`SolidExecutionResult` for each solid executed in the pipeline.
def execute_pipeline_iterator(pipeline, environment_dict=None, run_config=None): '''Returns iterator that yields :py:class:`SolidExecutionResult` for each solid executed in the pipeline. This is intended to allow the caller to do things between each executed node. For the 'synchronous' API, see :py:func:`execute_pipeline`. Parameters: pipeline (PipelineDefinition): Pipeline to run environment_dict (dict): The environment configuration that parameterizes this run run_config (RunConfig): Configuration for how this pipeline will be executed Returns: Iterator[DagsterEvent] ''' check.inst_param(pipeline, 'pipeline', PipelineDefinition) environment_dict = check.opt_dict_param(environment_dict, 'environment_dict') run_config = check_run_config_param(run_config) environment_config = create_environment_config(pipeline, environment_dict) intermediates_manager = construct_intermediates_manager( run_config, environment_config, pipeline ) with _pipeline_execution_context_manager( pipeline, environment_config, run_config, intermediates_manager ) as pipeline_context: return _execute_pipeline_iterator(pipeline_context)
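A hedged sketch of driving the iterator; my_pipeline and my_environment_dict are placeholders, and only the signature shown above is assumed.
# Sketch only: react between executed steps as events are yielded.
for event in execute_pipeline_iterator(my_pipeline, environment_dict=my_environment_dict):
    print(event)  # each item is a DagsterEvent, per the docstring above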
Synchronous version of :py:func:`execute_pipeline_iterator`.
def execute_pipeline(pipeline, environment_dict=None, run_config=None): ''' "Synchronous" version of :py:func:`execute_pipeline_iterator`. Note: raise_on_error is very useful in testing contexts when not testing for error conditions Parameters: pipeline (PipelineDefinition): Pipeline to run environment_dict (dict): The environment configuration that parameterizes this run run_config (RunConfig): Configuration for how this pipeline will be executed Returns: :py:class:`PipelineExecutionResult` ''' check.inst_param(pipeline, 'pipeline', PipelineDefinition) environment_dict = check.opt_dict_param(environment_dict, 'environment_dict') run_config = check_run_config_param(run_config) environment_config = create_environment_config(pipeline, environment_dict) intermediates_manager = construct_intermediates_manager( run_config, environment_config, pipeline ) with _pipeline_execution_context_manager( pipeline, environment_config, run_config, intermediates_manager ) as pipeline_context: event_list = list(_execute_pipeline_iterator(pipeline_context)) return PipelineExecutionResult( pipeline, run_config.run_id, event_list, lambda: _pipeline_execution_context_manager( pipeline, environment_config, run_config, intermediates_manager ), )
Get a :py:class:`SolidExecutionResult` for a given solid name.
def result_for_solid(self, name): '''Get a :py:class:`SolidExecutionResult` for a given solid name. ''' check.str_param(name, 'name') if not self.pipeline.has_solid(name): raise DagsterInvariantViolationError( 'Try to get result for solid {name} in {pipeline}. No such solid.'.format( name=name, pipeline=self.pipeline.display_name ) ) if name not in self.solid_result_dict: raise DagsterInvariantViolationError( 'Did not find result for solid {name} in pipeline execution result'.format( name=name ) ) return self.solid_result_dict[name]
Whether the solid execution was successful
def success(self): '''Whether the solid execution was successful''' any_success = False for step_event in itertools.chain( self.input_expectations, self.output_expectations, self.transforms ): if step_event.event_type == DagsterEventType.STEP_FAILURE: return False if step_event.event_type == DagsterEventType.STEP_SUCCESS: any_success = True return any_success
Whether the solid execution was skipped
def skipped(self): '''Whether the solid execution was skipped''' return all( [ step_event.event_type == DagsterEventType.STEP_SKIPPED for step_event in itertools.chain( self.input_expectations, self.output_expectations, self.transforms ) ] )
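Tying the last few pieces together, a hedged end-to-end sketch; my_pipeline, my_environment_dict, and 'my_solid' are placeholders, and success/skipped are assumed to be exposed as properties (their decorators are not visible in this excerpt).
result = execute_pipeline(my_pipeline, environment_dict=my_environment_dict)
solid_result = result.result_for_solid('my_solid')  # raises DagsterInvariantViolationError if missing
if solid_result.success:
    print('my_solid succeeded')
elif solid_result.skipped:
    print('my_solid was skipped')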