Columns: INSTRUCTION (string, 1 to 8.43k characters), RESPONSE (string, 75 to 104k characters)
Delete a BigQuery dataset in your project. :param project_id: The name of the project where we have the dataset. :type project_id: str :param dataset_id: The dataset to be deleted. :type dataset_id: str
def delete_dataset(self, project_id, dataset_id): """ Delete a dataset of Big query in your project. :param project_id: The name of the project where we have the dataset . :type project_id: str :param dataset_id: The dataset to be delete. :type dataset_id: str :return: """ project_id = project_id if project_id is not None else self.project_id self.log.info('Deleting from project: %s Dataset:%s', project_id, dataset_id) try: self.service.datasets().delete( projectId=project_id, datasetId=dataset_id).execute(num_retries=self.num_retries) self.log.info('Dataset deleted successfully: In project %s ' 'Dataset %s', project_id, dataset_id) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
Method returns the dataset_resource if the dataset exists, and raises a 404 error if the dataset does not exist
def get_dataset(self, dataset_id, project_id=None): """ Method returns dataset_resource if dataset exist and raised 404 error if dataset does not exist :param dataset_id: The BigQuery Dataset ID :type dataset_id: str :param project_id: The GCP Project ID :type project_id: str :return: dataset_resource .. seealso:: For more information, see Dataset Resource content: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource """ if not dataset_id or not isinstance(dataset_id, str): raise ValueError("dataset_id argument must be provided and has " "a type 'str'. You provided: {}".format(dataset_id)) dataset_project_id = project_id if project_id else self.project_id try: dataset_resource = self.service.datasets().get( datasetId=dataset_id, projectId=dataset_project_id).execute(num_retries=self.num_retries) self.log.info("Dataset Resource: %s", dataset_resource) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content)) return dataset_resource
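A minimal usage sketch of the two dataset calls above, assuming the surrounding BigQueryHook/cursor classes come from airflow.contrib.hooks.bigquery_hook as in Airflow 1.10; the connection id, project and dataset names are placeholders, not values from the original code.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

hook = BigQueryHook(bigquery_conn_id='bigquery_default')  # hypothetical connection id
cursor = hook.get_conn().cursor()

# Fetch the dataset resource; a missing dataset surfaces as an AirflowException wrapping the 404.
dataset = cursor.get_dataset(dataset_id='my_dataset', project_id='my-project')
print(dataset['datasetReference'])

# Remove the dataset again with delete_dataset() from the previous snippet.
cursor.delete_dataset(project_id='my-project', dataset_id='my_dataset')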
Method returns the full list of BigQuery datasets in the current project
def get_datasets_list(self, project_id=None): """ Method returns full list of BigQuery datasets in the current project .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list :param project_id: Google Cloud Project for which you try to get all datasets :type project_id: str :return: datasets_list Example of returned datasets_list: :: { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_2_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_2_test" } }, { "kind":"bigquery#dataset", "location":"US", "id":"your-project:dataset_1_test", "datasetReference":{ "projectId":"your-project", "datasetId":"dataset_1_test" } } ] """ dataset_project_id = project_id if project_id else self.project_id try: datasets_list = self.service.datasets().list( projectId=dataset_project_id).execute(num_retries=self.num_retries)['datasets'] self.log.info("Datasets List: %s", datasets_list) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content)) return datasets_list
Method to stream data into BigQuery one record at a time without needing to run a load job
def insert_all(self, project_id, dataset_id, table_id, rows, ignore_unknown_values=False, skip_invalid_rows=False, fail_on_error=False): """ Method to stream data into BigQuery one record at a time without needing to run a load job .. seealso:: For more information, see: https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll :param project_id: The name of the project where we have the table :type project_id: str :param dataset_id: The name of the dataset where we have the table :type dataset_id: str :param table_id: The name of the table :type table_id: str :param rows: the rows to insert :type rows: list **Example or rows**: rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}] :param ignore_unknown_values: [Optional] Accept rows that contain values that do not match the schema. The unknown values are ignored. The default value is false, which treats unknown values as errors. :type ignore_unknown_values: bool :param skip_invalid_rows: [Optional] Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist. :type skip_invalid_rows: bool :param fail_on_error: [Optional] Force the task to fail if any errors occur. The default value is false, which indicates the task should not fail even if any insertion errors occur. :type fail_on_error: bool """ dataset_project_id = project_id if project_id else self.project_id body = { "rows": rows, "ignoreUnknownValues": ignore_unknown_values, "kind": "bigquery#tableDataInsertAllRequest", "skipInvalidRows": skip_invalid_rows, } try: self.log.info( 'Inserting %s row(s) into Table %s:%s.%s', len(rows), dataset_project_id, dataset_id, table_id ) resp = self.service.tabledata().insertAll( projectId=dataset_project_id, datasetId=dataset_id, tableId=table_id, body=body ).execute(num_retries=self.num_retries) if 'insertErrors' not in resp: self.log.info( 'All row(s) inserted successfully: %s:%s.%s', dataset_project_id, dataset_id, table_id ) else: error_msg = '{} insert error(s) occurred: {}:{}.{}. Details: {}'.format( len(resp['insertErrors']), dataset_project_id, dataset_id, table_id, resp['insertErrors']) if fail_on_error: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(error_msg) ) self.log.info(error_msg) except HttpError as err: raise AirflowException( 'BigQuery job failed. Error was: {}'.format(err.content) )
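A short, hedged sketch of streaming rows with insert_all(); the project, dataset, table and row contents are made up, and the hook/cursor setup mirrors the previous example rather than anything stated in the original docs.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()
rows = [
    {"json": {"a_key": "a_value_0"}},
    {"json": {"a_key": "a_value_1"}},
]
cursor.insert_all(
    project_id='my-project',
    dataset_id='my_dataset',
    table_id='my_table',
    rows=rows,
    ignore_unknown_values=True,  # silently drop fields not in the schema
    fail_on_error=True,          # raise AirflowException if insertErrors come back
)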
Executes a BigQuery query and returns the job ID.
def execute(self, operation, parameters=None): """ Executes a BigQuery query, and returns the job ID. :param operation: The query to execute. :type operation: str :param parameters: Parameters to substitute into the query. :type parameters: dict """ sql = _bind_parameters(operation, parameters) if parameters else operation self.job_id = self.run_query(sql)
Execute a BigQuery query multiple times with different parameters.
def executemany(self, operation, seq_of_parameters): """ Execute a BigQuery query multiple times with different parameters. :param operation: The query to execute. :type operation: str :param seq_of_parameters: List of dictionary parameters to substitute into the query. :type seq_of_parameters: list """ for parameters in seq_of_parameters: self.execute(operation, parameters)
Helper method for fetchone, which returns the next row from a buffer. If the buffer is empty, attempts to paginate through the result set for the next page and load it into the buffer.
def next(self): """ Helper method for fetchone, which returns the next row from a buffer. If the buffer is empty, attempts to paginate through the result set for the next page, and load it into the buffer. """ if not self.job_id: return None if len(self.buffer) == 0: if self.all_pages_loaded: return None query_results = (self.service.jobs().getQueryResults( projectId=self.project_id, jobId=self.job_id, pageToken=self.page_token).execute(num_retries=self.num_retries)) if 'rows' in query_results and query_results['rows']: self.page_token = query_results.get('pageToken') fields = query_results['schema']['fields'] col_types = [field['type'] for field in fields] rows = query_results['rows'] for dict_row in rows: typed_row = ([ _bq_cast(vs['v'], col_types[idx]) for idx, vs in enumerate(dict_row['f']) ]) self.buffer.append(typed_row) if not self.page_token: self.all_pages_loaded = True else: # Reset all state since we've exhausted the results. self.page_token = None self.job_id = None self.page_token = None return None return self.buffer.pop(0)
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to :py:meth:`execute` did not produce any result set or no call was issued yet.
def fetchmany(self, size=None): """ Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. The number of rows to fetch per call is specified by the parameter. If it is not given, the cursor's arraysize determines the number of rows to be fetched. The method should try to fetch as many rows as indicated by the size parameter. If this is not possible due to the specified number of rows not being available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to :py:meth:`execute` did not produce any result set or no call was issued yet. """ if size is None: size = self.arraysize result = [] for _ in range(size): one = self.fetchone() if one is None: break else: result.append(one) return result
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples).
def fetchall(self): """ Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). """ result = [] while True: one = self.fetchone() if one is None: break else: result.append(one) return result
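The execute/fetchmany/fetchall trio above follows the DB-API cursor protocol; here is a small sketch, assuming the pyformat-style parameter binding used by _bind_parameters, with a made-up connection id, table and threshold.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

cursor = BigQueryHook(bigquery_conn_id='bigquery_default').get_conn().cursor()
cursor.execute('SELECT name, value FROM my_dataset.my_table WHERE value > %(threshold)s',
               {'threshold': 10})
first_batch = cursor.fetchmany(100)  # at most 100 typed rows
remaining = cursor.fetchall()        # whatever is left after the first batch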
Loads the manifest file and registers the url_for_asset_ template tag.
def configure_manifest_files(app): """ Loads the manifest file and register the `url_for_asset_` template tag. :param app: :return: """ def parse_manifest_json(): # noinspection PyBroadException try: global manifest manifest_file = os.path.join(os.path.dirname(__file__), 'static/dist/manifest.json') with open(manifest_file, 'r') as f: manifest.update(json.load(f)) for k in manifest.keys(): manifest[k] = os.path.join("dist", manifest[k]) except Exception: print("Please make sure to build the frontend in " "static/ directory and restart the server") pass def get_asset_url(filename): if app.debug: parse_manifest_json() return url_for('static', filename=manifest.get(filename, '')) parse_manifest_json() @app.context_processor def get_url_for_asset(): """ Template tag to return the asset URL. WebPack renders the assets after minification and modification under the static/dist folder. This template tag reads the asset name in manifest.json and returns the appropriate file. """ return dict(url_for_asset=get_asset_url)
Queries Postgres and returns a cursor to the results.
def _query_postgres(self): """ Queries Postgres and returns a cursor to the results. """ postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id) conn = postgres.get_conn() cursor = conn.cursor() cursor.execute(self.sql, self.parameters) return cursor
Takes a cursor and writes results to a local file.
def _write_local_data_files(self, cursor): """ Takes a cursor, and writes results to a local file. :return: A dictionary where keys are filenames to be used as object names in GCS, and values are file handles to local files that contain the data for the GCS objects. """ schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description)) tmp_file_handles = {} row_no = 0 def _create_new_file(): handle = NamedTemporaryFile(delete=True) filename = self.filename.format(len(tmp_file_handles)) tmp_file_handles[filename] = handle return handle # Don't create a file if there is nothing to write if cursor.rowcount > 0: tmp_file_handle = _create_new_file() for row in cursor: # Convert datetime objects to utc seconds, and decimals to floats row = map(self.convert_types, row) row_dict = dict(zip(schema, row)) s = json.dumps(row_dict, sort_keys=True).encode('utf-8') tmp_file_handle.write(s) # Append newline to make dumps BigQuery compatible. tmp_file_handle.write(b'\n') # Stop if the file exceeds the file size limit. if tmp_file_handle.tell() >= self.approx_max_file_size_bytes: tmp_file_handle = _create_new_file() row_no += 1 self.log.info('Received %s rows over %s files', row_no, len(tmp_file_handles)) return tmp_file_handles
Takes a cursor and writes the BigQuery schema for the results to a local file system.
def _write_local_schema_file(self, cursor): """ Takes a cursor, and writes the BigQuery schema for the results to a local file system. :return: A dictionary where key is a filename to be used as an object name in GCS, and values are file handles to local files that contains the BigQuery schema fields in .json format. """ schema = [] for field in cursor.description: # See PEP 249 for details about the description tuple. field_name = field[0] field_type = self.type_map(field[1]) field_mode = 'REPEATED' if field[1] in (1009, 1005, 1007, 1016) else 'NULLABLE' schema.append({ 'name': field_name, 'type': field_type, 'mode': field_mode, }) self.log.info('Using schema for %s: %s', self.schema_filename, schema) tmp_schema_file_handle = NamedTemporaryFile(delete=True) s = json.dumps(schema, sort_keys=True).encode('utf-8') tmp_schema_file_handle.write(s) return {self.schema_filename: tmp_schema_file_handle}
Takes a value from Postgres and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds. Decimals are converted to floats. Times are converted to seconds.
def convert_types(cls, value): """ Takes a value from Postgres, and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds. Decimals are converted to floats. Times are converted to seconds. """ if type(value) in (datetime.datetime, datetime.date): return time.mktime(value.timetuple()) elif type(value) == datetime.time: formated_time = time.strptime(str(value), "%H:%M:%S") return datetime.timedelta( hours=formated_time.tm_hour, minutes=formated_time.tm_min, seconds=formated_time.tm_sec).seconds elif isinstance(value, Decimal): return float(value) else: return value
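The conversions described above boil down to a few standard-library calls; a quick demonstration with arbitrary values (not taken from the original code).

import datetime
import time
from decimal import Decimal

# date/datetime -> seconds since the epoch (local-time dependent, as in the method above)
time.mktime(datetime.date(2019, 1, 1).timetuple())          # e.g. 1546300800.0
# time -> seconds past midnight
datetime.timedelta(hours=1, minutes=2, seconds=3).seconds   # 3723
# Decimal -> float
float(Decimal('1.25'))                                      # 1.25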
Create all the intermediate directories on a remote host
def _make_intermediate_dirs(sftp_client, remote_directory): """ Create all the intermediate directories in a remote host :param sftp_client: A Paramiko SFTP client. :param remote_directory: Absolute Path of the directory containing the file :return: """ if remote_directory == '/': sftp_client.chdir('/') return if remote_directory == '': return try: sftp_client.chdir(remote_directory) except IOError: dirname, basename = os.path.split(remote_directory.rstrip('/')) _make_intermediate_dirs(sftp_client, dirname) sftp_client.mkdir(basename) sftp_client.chdir(basename) return
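A hedged usage sketch for _make_intermediate_dirs with a plain paramiko SFTP client; the host, credentials and paths are placeholders.

import paramiko

transport = paramiko.Transport(('sftp.example.com', 22))
transport.connect(username='user', password='secret')
sftp_client = paramiko.SFTPClient.from_transport(transport)

# Ensure /upload/2019/01 exists (creating each missing parent), then upload into it.
_make_intermediate_dirs(sftp_client, '/upload/2019/01')
sftp_client.put('/tmp/report.csv', '/upload/2019/01/report.csv')
transport.close()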
Create queue using connection object
def create_queue(self, queue_name, attributes=None): """ Create queue using connection object :param queue_name: name of the queue. :type queue_name: str :param attributes: additional attributes for the queue (default: None) For details of the attributes parameter see :py:meth:`botocore.client.SQS.create_queue` :type attributes: dict :return: dict with the information about the queue For details of the returned value see :py:meth:`botocore.client.SQS.create_queue` :rtype: dict """ return self.get_conn().create_queue(QueueName=queue_name, Attributes=attributes or {})
Send message to the queue
def send_message(self, queue_url, message_body, delay_seconds=0, message_attributes=None): """ Send message to the queue :param queue_url: queue url :type queue_url: str :param message_body: the contents of the message :type message_body: str :param delay_seconds: seconds to delay the message :type delay_seconds: int :param message_attributes: additional attributes for the message (default: None) For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message` :type message_attributes: dict :return: dict with the information about the message sent For details of the returned value see :py:meth:`botocore.client.SQS.send_message` :rtype: dict """ return self.get_conn().send_message(QueueUrl=queue_url, MessageBody=message_body, DelaySeconds=delay_seconds, MessageAttributes=message_attributes or {})
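A small end-to-end sketch for the two SQS hook methods above; it assumes the SQSHook import path from Airflow's contrib hooks, and the queue name, attributes and message body are invented.

from airflow.contrib.hooks.aws_sqs_hook import SQSHook

hook = SQSHook(aws_conn_id='aws_default')
queue = hook.create_queue('my-test-queue', attributes={'DelaySeconds': '5'})
response = hook.send_message(
    queue_url=queue['QueueUrl'],
    message_body='hello from airflow',
    delay_seconds=0,
    message_attributes={'source': {'StringValue': 'example', 'DataType': 'String'}},
)
print(response['MessageId'])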
Integrate plugins into the context
def _integrate_plugins(): """Integrate plugins to the context""" from airflow.plugins_manager import hooks_modules for hooks_module in hooks_modules: sys.modules[hooks_module.__name__] = hooks_module globals()[hooks_module._name] = hooks_module
Run the task command.
def run_command(self, run_with=None, join_args=False): """ Run the task command. :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']`` :type run_with: list :param join_args: whether to concatenate the list of command tokens e.g. ``['airflow', 'run']`` vs ``['airflow run']`` :param join_args: bool :return: the process that was run :rtype: subprocess.Popen """ run_with = run_with or [] cmd = [" ".join(self._command)] if join_args else self._command full_cmd = run_with + cmd self.log.info('Running: %s', full_cmd) proc = subprocess.Popen( full_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, close_fds=True, env=os.environ.copy(), preexec_fn=os.setsid ) # Start daemon thread to read subprocess logging output log_reader = threading.Thread( target=self._read_task_logs, args=(proc.stdout,), ) log_reader.daemon = True log_reader.start() return proc
A callback that should be called when this is done running.
def on_finish(self): """ A callback that should be called when this is done running. """ if self._cfg_path and os.path.isfile(self._cfg_path): if self.run_as_user: subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True) else: os.remove(self._cfg_path)
Parse options and process commands
def _main(): """ Parse options and process commands """ # Parse arguments usage = "usage: nvd3.py [options]" parser = OptionParser(usage=usage, version=("python-nvd3 - Charts generator with " "nvd3.js and d3.js")) parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="don't print messages to stdout") (options, args) = parser.parse_args()
add serie - Series are lists of data that will be plotted: y {1, 2, 3, 4, 5} / x {1, 2, 3, 4, 5}
def add_serie(self, y, x, name=None, extra=None, **kwargs): """ add serie - Series are list of data that will be plotted y {1, 2, 3, 4, 5} / x {1, 2, 3, 4, 5} **Attributes**: * ``name`` - set Serie name * ``x`` - x-axis data * ``y`` - y-axis data kwargs: * ``shape`` - for scatterChart, you can set different shapes (circle, triangle etc...) * ``size`` - for scatterChart, you can set size of different shapes * ``type`` - for multiChart, type should be bar * ``bar`` - to display bars in Chart * ``color_list`` - define list of colors which will be used by pieChart * ``color`` - set axis color * ``disabled`` - extra: * ``tooltip`` - set tooltip flag * ``date_format`` - set date_format for tooltip if x-axis is in date format """ if not name: name = "Serie %d" % (self.serie_no) # For scatterChart shape & size fields are added in serie if 'shape' in kwargs or 'size' in kwargs: csize = kwargs.get('size', 1) cshape = kwargs.get('shape', 'circle') serie = [{ 'x': x[i], 'y': j, 'shape': cshape, 'size': csize[i] if isinstance(csize, list) else csize } for i, j in enumerate(y)] else: if self.model == 'pieChart': serie = [{'label': x[i], 'value': y} for i, y in enumerate(y)] else: serie = [{'x': x[i], 'y': y} for i, y in enumerate(y)] data_keyvalue = {'values': serie, 'key': name} # multiChart # Histogram type='bar' for the series if 'type' in kwargs and kwargs['type']: data_keyvalue['type'] = kwargs['type'] # Define on which Y axis the serie is related # a chart can have 2 Y axis, left and right, by default only one Y Axis is used if 'yaxis' in kwargs and kwargs['yaxis']: data_keyvalue['yAxis'] = kwargs['yaxis'] else: if self.model != 'pieChart': data_keyvalue['yAxis'] = '1' if 'bar' in kwargs and kwargs['bar']: data_keyvalue['bar'] = 'true' if 'disabled' in kwargs and kwargs['disabled']: data_keyvalue['disabled'] = 'true' if 'color' in kwargs and kwargs['color']: data_keyvalue['color'] = kwargs['color'] if extra: if self.model == 'pieChart': if 'color_list' in extra and extra['color_list']: self.color_list = extra['color_list'] if extra.get('date_format'): self.charttooltip_dateformat = extra['date_format'] if extra.get('tooltip'): self.custom_tooltip_flag = True if self.model != 'pieChart': _start = extra['tooltip']['y_start'] _end = extra['tooltip']['y_end'] _start = ("'" + str(_start) + "' + ") if _start else '' _end = (" + '" + str(_end) + "'") if _end else '' if self.model == 'linePlusBarChart': if self.tooltip_condition_string: self.tooltip_condition_string += stab(5) self.tooltip_condition_string += stab(0) + "if(key.indexOf('" + name + "') > -1 ){\n" +\ stab(6) + "var y = " + _start + " String(graph.point.y) " + _end + ";\n" +\ stab(5) + "}\n" elif self.model == 'cumulativeLineChart': self.tooltip_condition_string += stab(0) + "if(key == '" + name + "'){\n" +\ stab(6) + "var y = " + _start + " String(e) " + _end + ";\n" +\ stab(5) + "}\n" else: self.tooltip_condition_string += stab(5) + "if(key == '" + name + "'){\n" +\ stab(6) + "var y = " + _start + " String(graph.point.y) " + _end + ";\n" +\ stab(5) + "}\n" if self.model == 'pieChart': _start = extra['tooltip']['y_start'] _end = extra['tooltip']['y_end'] _start = ("'" + str(_start) + "' + ") if _start else '' _end = (" + '" + str(_end) + "'") if _end else '' self.tooltip_condition_string += "var y = " + _start + " String(y) " + _end + ";\n" # Increment series counter & append self.serie_no += 1 self.series.append(data_keyvalue)
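A compact sketch of add_serie with python-nvd3's lineChart; the data values and tooltip strings are arbitrary, and lineChart is just one of the chart models the method supports.

from nvd3 import lineChart

chart = lineChart(name='lineChart', x_is_date=False, width=500, height=300)
xdata = [1, 2, 3, 4, 5]
ydata = [10, 8, 12, 15, 3]
extra = {'tooltip': {'y_start': 'There are ', 'y_end': ' calls'}}
chart.add_serie(y=ydata, x=xdata, name='serie 1', extra=extra)
chart.buildhtml()
with open('chart.html', 'w') as f:
    f.write(chart.htmlcontent)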
Build HTML content only, no header or body tags. To be useful this will usually require the attribute juqery_on_ready to be set, which will wrap the js in $(function(){<regular_js>};)
def buildcontent(self): """Build HTML content only, no header or body tags. To be useful this will usually require the attribute `juqery_on_ready` to be set which will wrap the js in $(function(){<regular_js>};) """ self.buildcontainer() # if the subclass has a method buildjs this method will be # called instead of the method defined here # when this subclass method is entered it does call # the method buildjschart defined here self.buildjschart() self.htmlcontent = self.template_content_nvd3.render(chart=self)
Build the HTML page: create the htmlheader with css/js, create the html page, add the JS code for nvd3
def buildhtml(self): """Build the HTML page Create the htmlheader with css / js Create html page Add Js code for nvd3 """ self.buildcontent() self.content = self.htmlcontent self.htmlcontent = self.template_page_nvd3.render(chart=self)
generate HTML header content
def buildhtmlheader(self): """generate HTML header content""" self.htmlheader = '' # If the JavaScript assets have already been injected, don't bother re-sourcing them. global _js_initialized if '_js_initialized' not in globals() or not _js_initialized: for css in self.header_css: self.htmlheader += css for js in self.header_js: self.htmlheader += js
generate HTML div
def buildcontainer(self): """generate HTML div""" if self.container: return # Create SVG div with style if self.width: if self.width[-1] != '%': self.style += 'width:%spx;' % self.width else: self.style += 'width:%s;' % self.width if self.height: if self.height[-1] != '%': self.style += 'height:%spx;' % self.height else: self.style += 'height:%s;' % self.height if self.style: self.style = 'style="%s"' % self.style self.container = self.containerheader + \ '<div id="%s"><svg %s></svg></div>\n' % (self.name, self.style)
generate javascript code for the chart
def buildjschart(self): """generate javascript code for the chart""" self.jschart = '' # add custom tooltip string in jschart # default condition (if build_custom_tooltip is not called explicitly with date_flag=True) if self.tooltip_condition_string == '': self.tooltip_condition_string = 'var y = String(graph.point.y);\n' # Include data self.series_js = json.dumps(self.series)
Create X-axis
def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False): """Create X-axis""" axis = {} if custom_format and format: axis['tickFormat'] = format elif format: if format == 'AM_PM': axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }" else: axis['tickFormat'] = "d3.format(',%s')" % format if label: axis['axisLabel'] = "'" + label + "'" # date format : see https://github.com/mbostock/d3/wiki/Time-Formatting if date: self.dateformat = format axis['tickFormat'] = ("function(d) { return d3.time.format('%s')" "(new Date(parseInt(d))) }\n" "" % self.dateformat) # flag is the x Axis is a date if name[0] == 'x': self.x_axis_date = True # Add new axis to list of axis self.axislist[name] = axis # Create x2Axis if focus_enable if name == "xAxis" and self.focus_enable: self.axislist['x2Axis'] = axis
Create Y-axis
def create_y_axis(self, name, label=None, format=None, custom_format=False): """ Create Y-axis """ axis = {} if custom_format and format: axis['tickFormat'] = format elif format: axis['tickFormat'] = "d3.format(',%s')" % format if label: axis['axisLabel'] = "'" + label + "'" # Add new axis to list of axis self.axislist[name] = axis
Build HTML content only, no header or body tags. To be useful this will usually require the attribute juqery_on_ready to be set, which will wrap the js in $(function(){<regular_js>};)
def buildcontent(self): """Build HTML content only, no header or body tags. To be useful this will usually require the attribute `juqery_on_ready` to be set which will wrap the js in $(function(){<regular_js>};) """ self.buildcontainer() # if the subclass has a method buildjs this method will be # called instead of the method defined here # when this subclass method is entered it does call # the method buildjschart defined here self.buildjschart() self.htmlcontent = self.template_chart_nvd3.render(chart=self)
Returns a sqlite connection object
def get_conn(self): """ Returns a sqlite connection object """ conn = self.get_connection(self.sqlite_conn_id) conn = sqlite3.connect(conn.host) return conn
Decorator to log user actions
def action_logging(f): """ Decorator to log user actions """ @functools.wraps(f) def wrapper(*args, **kwargs): with create_session() as session: if g.user.is_anonymous: user = 'anonymous' else: user = g.user.username log = Log( event=f.__name__, task_instance=None, owner=user, extra=str(list(request.args.items())), task_id=request.args.get('task_id'), dag_id=request.args.get('dag_id')) if 'execution_date' in request.args: log.execution_date = pendulum.parse( request.args.get('execution_date')) session.add(log) return f(*args, **kwargs) return wrapper
Decorator to make a view compressed
def gzipped(f): """ Decorator to make a view compressed """ @functools.wraps(f) def view_func(*args, **kwargs): @after_this_request def zipper(response): accept_encoding = request.headers.get('Accept-Encoding', '') if 'gzip' not in accept_encoding.lower(): return response response.direct_passthrough = False if (response.status_code < 200 or response.status_code >= 300 or 'Content-Encoding' in response.headers): return response gzip_buffer = IO() gzip_file = gzip.GzipFile(mode='wb', fileobj=gzip_buffer) gzip_file.write(response.data) gzip_file.close() response.data = gzip_buffer.getvalue() response.headers['Content-Encoding'] = 'gzip' response.headers['Vary'] = 'Accept-Encoding' response.headers['Content-Length'] = len(response.data) return response return f(*args, **kwargs) return view_func
Decorator to check whether the user has read/write permission on the dag.
def has_dag_access(**dag_kwargs): """ Decorator to check whether the user has read / write permission on the dag. """ def decorator(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): has_access = self.appbuilder.sm.has_access dag_id = request.args.get('dag_id') # if it is false, we need to check whether user has write access on the dag can_dag_edit = dag_kwargs.get('can_dag_edit', False) # 1. check whether the user has can_dag_edit permissions on all_dags # 2. if 1 false, check whether the user # has can_dag_edit permissions on the dag # 3. if 2 false, check whether it is can_dag_read view, # and whether user has the permissions if ( has_access('can_dag_edit', 'all_dags') or has_access('can_dag_edit', dag_id) or (not can_dag_edit and (has_access('can_dag_read', 'all_dags') or has_access('can_dag_read', dag_id)))): return f(self, *args, **kwargs) else: flash("Access is Denied", "danger") return redirect(url_for(self.appbuilder.sm.auth_view. __class__.__name__ + ".login")) return wrapper return decorator
Returns the last dag run for a dag, None if there was none. The last dag run can be any type of run, e.g. scheduled or backfilled. Overridden DagRuns are ignored.
def get_last_dagrun(dag_id, session, include_externally_triggered=False): """ Returns the last dag run for a dag, None if there was none. Last dag run can be any type of run eg. scheduled or backfilled. Overridden DagRuns are ignored. """ DR = DagRun query = session.query(DR).filter(DR.dag_id == dag_id) if not include_externally_triggered: query = query.filter(DR.external_trigger == False) # noqa query = query.order_by(DR.execution_date.desc()) return query.first()
Creates a dag run from this dag including the tasks associated with this dag. Returns the dag run.
def create_dagrun(self, run_id, state, execution_date, start_date=None, external_trigger=False, conf=None, session=None): """ Creates a dag run from this dag including the tasks associated with this dag. Returns the dag run. :param run_id: defines the the run id for this dag run :type run_id: str :param execution_date: the execution date of this dag run :type execution_date: datetime.datetime :param state: the state of the dag run :type state: airflow.utils.state.State :param start_date: the date this dag run should be evaluated :type start_date: datetime.datetime :param external_trigger: whether this dag run is externally triggered :type external_trigger: bool :param session: database session :type session: sqlalchemy.orm.session.Session """ return self.get_dag().create_dagrun(run_id=run_id, state=state, execution_date=execution_date, start_date=start_date, external_trigger=external_trigger, conf=conf, session=session)
Publish the message to SQS queue
def execute(self, context): """ Publish the message to SQS queue :param context: the context object :type context: dict :return: dict with information about the message sent For details of the returned dict see :py:meth:`botocore.client.SQS.send_message` :rtype: dict """ hook = SQSHook(aws_conn_id=self.aws_conn_id) result = hook.send_message(queue_url=self.sqs_queue, message_body=self.message_content, delay_seconds=self.delay_seconds, message_attributes=self.message_attributes) self.log.info('result is send_message is %s', result) return result
Generates the HTML for a paging component using a similar logic to the paging auto-generated by Flask managed views. The paging component defines a number of pages visible in the pager (window) and once the user goes to a page beyond the largest visible, it would scroll to the right the page numbers and keeps the current one in the middle of the pager component. When in the last pages, the pages won't scroll and just keep moving until the last page. Pager also contains <first, previous, ..., next, last> pages. This component takes into account custom parameters such as search and showPaused, which could be added to the pages link in order to maintain the state between client and server. It also allows to make a bookmark on a specific paging state. :param current_page: the current page number, 0-indexed :param num_of_pages: the total number of pages :param search: the search query string, if any :param showPaused: false if paused dags will be hidden, otherwise true to show them :param window: the number of pages to be shown in the paging component (7 default) :return: the HTML string of the paging component
def generate_pages(current_page, num_of_pages, search=None, showPaused=None, window=7): """ Generates the HTML for a paging component using a similar logic to the paging auto-generated by Flask managed views. The paging component defines a number of pages visible in the pager (window) and once the user goes to a page beyond the largest visible, it would scroll to the right the page numbers and keeps the current one in the middle of the pager component. When in the last pages, the pages won't scroll and just keep moving until the last page. Pager also contains <first, previous, ..., next, last> pages. This component takes into account custom parameters such as search and showPaused, which could be added to the pages link in order to maintain the state between client and server. It also allows to make a bookmark on a specific paging state. :param current_page: the current page number, 0-indexed :param num_of_pages: the total number of pages :param search: the search query string, if any :param showPaused: false if paused dags will be hidden, otherwise true to show them :param window: the number of pages to be shown in the paging component (7 default) :return: the HTML string of the paging component """ void_link = 'javascript:void(0)' first_node = Markup("""<li class="paginate_button {disabled}" id="dags_first"> <a href="{href_link}" aria-controls="dags" data-dt-idx="0" tabindex="0">&laquo;</a> </li>""") previous_node = Markup("""<li class="paginate_button previous {disabled}" id="dags_previous"> <a href="{href_link}" aria-controls="dags" data-dt-idx="0" tabindex="0">&lt;</a> </li>""") next_node = Markup("""<li class="paginate_button next {disabled}" id="dags_next"> <a href="{href_link}" aria-controls="dags" data-dt-idx="3" tabindex="0">&gt;</a> </li>""") last_node = Markup("""<li class="paginate_button {disabled}" id="dags_last"> <a href="{href_link}" aria-controls="dags" data-dt-idx="3" tabindex="0">&raquo;</a> </li>""") page_node = Markup("""<li class="paginate_button {is_active}"> <a href="{href_link}" aria-controls="dags" data-dt-idx="2" tabindex="0">{page_num}</a> </li>""") output = [Markup('<ul class="pagination" style="margin-top:0px;">')] is_disabled = 'disabled' if current_page <= 0 else '' output.append(first_node.format(href_link="?{}" .format(get_params(page=0, search=search, showPaused=showPaused)), disabled=is_disabled)) page_link = void_link if current_page > 0: page_link = '?{}'.format(get_params(page=(current_page - 1), search=search, showPaused=showPaused)) output.append(previous_node.format(href_link=page_link, disabled=is_disabled)) mid = int(window / 2) last_page = num_of_pages - 1 if current_page <= mid or num_of_pages < window: pages = [i for i in range(0, min(num_of_pages, window))] elif mid < current_page < last_page - mid: pages = [i for i in range(current_page - mid, current_page + mid + 1)] else: pages = [i for i in range(num_of_pages - window, last_page + 1)] def is_current(current, page): return page == current for page in pages: vals = { 'is_active': 'active' if is_current(current_page, page) else '', 'href_link': void_link if is_current(current_page, page) else '?{}'.format(get_params(page=page, search=search, showPaused=showPaused)), 'page_num': page + 1 } output.append(page_node.format(**vals)) is_disabled = 'disabled' if current_page >= num_of_pages - 1 else '' page_link = (void_link if current_page >= num_of_pages - 1 else '?{}'.format(get_params(page=current_page + 1, search=search, showPaused=showPaused))) 
output.append(next_node.format(href_link=page_link, disabled=is_disabled)) output.append(last_node.format(href_link="?{}" .format(get_params(page=last_page, search=search, showPaused=showPaused)), disabled=is_disabled)) output.append(Markup('</ul>')) return Markup('\n'.join(output))
Returns a JSON response from a JSON-serializable Python object
def json_response(obj): """ returns a json response from a json serializable python object """ return Response( response=json.dumps( obj, indent=4, cls=AirflowJsonEncoder), status=200, mimetype="application/json")
Opens the given file. If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive, opening the file inside the archive.
def open_maybe_zipped(f, mode='r'): """ Opens the given file. If the path contains a folder with a .zip suffix, then the folder is treated as a zip archive, opening the file inside the archive. :return: a file object, as in `open`, or as in `ZipFile.open`. """ _, archive, filename = ZIP_REGEX.search(f).groups() if archive and zipfile.is_zipfile(archive): return zipfile.ZipFile(archive, mode=mode).open(filename) else: return io.open(f, mode=mode)
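A behavioural sketch of open_maybe_zipped; both calls return a readable file object, the paths are hypothetical, and the helper is assumed to be in scope (imported from whichever module defines it).

with open_maybe_zipped('/dags/my_dag.py') as f:              # plain file -> io.open
    print(f.read()[:80])
with open_maybe_zipped('/dags/bundle.zip/my_dag.py') as f:   # inside a zip -> ZipFile(...).open
    print(f.read()[:80])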
Used by cache to get a unique key per URL
def make_cache_key(*args, **kwargs): """ Used by cache to get a unique key per URL """ path = request.path args = str(hash(frozenset(request.args.items()))) return (path + args).encode('ascii', 'ignore')
Returns a GCP Video Intelligence Service client
def get_conn(self): """ Returns Gcp Video Intelligence Service client :rtype: google.cloud.videointelligence_v1.VideoIntelligenceServiceClient """ if not self._conn: self._conn = VideoIntelligenceServiceClient(credentials=self._get_credentials()) return self._conn
Performs video annotation.
def annotate_video( self, input_uri=None, input_content=None, features=None, video_context=None, output_uri=None, location=None, retry=None, timeout=None, metadata=None, ): """ Performs video annotation. :param input_uri: Input video location. Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id``. :type input_uri: str :param input_content: The video data bytes. If unset, the input video(s) should be specified via ``input_uri``. If set, ``input_uri`` should be unset. :type input_content: bytes :param features: Requested video annotation features. :type features: list[google.cloud.videointelligence_v1.VideoIntelligenceServiceClient.enums.Feature] :param output_uri: Optional, location where the output (in JSON format) should be stored. Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: ``gs://bucket-id/object-id``. :type output_uri: str :param video_context: Optional, Additional video context and/or feature-specific parameters. :type video_context: dict or google.cloud.videointelligence_v1.types.VideoContext :param location: Optional, cloud region where annotation should take place. Supported cloud regions: us-east1, us-west1, europe-west1, asia-east1. If no region is specified, a region will be determined based on video file location. :type location: str :param retry: Retry object used to determine when/if to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: Optional, The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :param metadata: Optional, Additional metadata that is provided to the method. :type metadata: seq[tuple[str, str]] """ client = self.get_conn() return client.annotate_video( input_uri=input_uri, input_content=input_content, features=features, video_context=video_context, output_uri=output_uri, location_id=location, retry=retry, timeout=timeout, metadata=metadata, )
Get Opsgenie api_key for creating alert
def _get_api_key(self): """ Get Opsgenie api_key for creating alert """ conn = self.get_connection(self.http_conn_id) api_key = conn.password if not api_key: raise AirflowException('Opsgenie API Key is required for this hook, ' 'please check your conn_id configuration.') return api_key
Overwrite HttpHook get_conn because this hook just needs base_url and headers and does not need generic params
def get_conn(self, headers=None): """ Overwrite HttpHook get_conn because this hook just needs base_url and headers, and does not need generic params :param headers: additional headers to be passed through as a dictionary :type headers: dict """ conn = self.get_connection(self.http_conn_id) self.base_url = conn.host if conn.host else 'https://api.opsgenie.com' session = requests.Session() if headers: session.headers.update(headers) return session
Execute the Opsgenie Alert call
def execute(self, payload={}): """ Execute the Opsgenie Alert call :param payload: Opsgenie API Create Alert payload values See https://docs.opsgenie.com/docs/alert-api#section-create-alert :type payload: dict """ api_key = self._get_api_key() return self.run(endpoint='v2/alerts', data=json.dumps(payload), headers={'Content-Type': 'application/json', 'Authorization': 'GenieKey %s' % api_key})
Execute the bash command in a temporary directory which will be cleaned afterwards
def poke(self, context): """ Execute the bash command in a temporary directory which will be cleaned afterwards """ bash_command = self.bash_command self.log.info("Tmp dir root location: \n %s", gettempdir()) with TemporaryDirectory(prefix='airflowtmp') as tmp_dir: with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f: f.write(bytes(bash_command, 'utf_8')) f.flush() fname = f.name script_location = tmp_dir + "/" + fname self.log.info("Temporary script location: %s", script_location) self.log.info("Running command: %s", bash_command) sp = Popen( ['bash', fname], stdout=PIPE, stderr=STDOUT, close_fds=True, cwd=tmp_dir, env=self.env, preexec_fn=os.setsid) self.sp = sp self.log.info("Output:") line = '' for line in iter(sp.stdout.readline, b''): line = line.decode(self.output_encoding).strip() self.log.info(line) sp.wait() self.log.info("Command exited with return code %s", sp.returncode) return not sp.returncode
Construct the Opsgenie JSON payload. All relevant parameters are combined here to a valid Opsgenie JSON payload.
def _build_opsgenie_payload(self): """ Construct the Opsgenie JSON payload. All relevant parameters are combined here to a valid Opsgenie JSON payload. :return: Opsgenie payload (dict) to send """ payload = {} for key in [ "message", "alias", "description", "responders", "visibleTo", "actions", "tags", "details", "entity", "source", "priority", "user", "note" ]: val = getattr(self, key) if val: payload[key] = val return payload
Call the OpsgenieAlertHook to post message
def execute(self, context): """ Call the OpsgenieAlertHook to post message """ self.hook = OpsgenieAlertHook(self.opsgenie_conn_id) self.hook.execute(self._build_opsgenie_payload())
check if aws conn exists already or create one and return it
def get_conn(self): """ check if aws conn exists already or create one and return it :return: boto3 session """ if not self.conn: self.conn = self.get_client_type('athena') return self.conn
Run Presto query on athena with provided config and return submitted query_execution_id
def run_query(self, query, query_context, result_configuration, client_request_token=None): """ Run Presto query on athena with provided config and return submitted query_execution_id :param query: Presto query to run :type query: str :param query_context: Context in which query need to be run :type query_context: dict :param result_configuration: Dict with path to store results in and config related to encryption :type result_configuration: dict :param client_request_token: Unique token created by user to avoid multiple executions of same query :type client_request_token: str :return: str """ response = self.conn.start_query_execution(QueryString=query, ClientRequestToken=client_request_token, QueryExecutionContext=query_context, ResultConfiguration=result_configuration) query_execution_id = response['QueryExecutionId'] return query_execution_id
Fetch the status of submitted athena query. Returns None or one of valid query states.
def check_query_status(self, query_execution_id):
    """
    Fetch the status of submitted athena query. Returns None or one of valid query states.
    :param query_execution_id: Id of submitted athena query
    :type query_execution_id: str
    :return: str
    """
    response = self.conn.get_query_execution(QueryExecutionId=query_execution_id)
    state = None
    try:
        state = response['QueryExecution']['Status']['State']
    except Exception as ex:
        # Use a format placeholder so the exception detail is actually rendered in the log.
        self.log.error('Exception while getting query state: %s', ex)
    return state
Fetch submitted athena query results. Returns None if the query is in an intermediate or failed/cancelled state, else a dict of the query output
def get_query_results(self, query_execution_id): """ Fetch submitted athena query results. returns none if query is in intermediate state or failed/cancelled state else dict of query output :param query_execution_id: Id of submitted athena query :type query_execution_id: str :return: dict """ query_state = self.check_query_status(query_execution_id) if query_state is None: self.log.error('Invalid Query state') return None elif query_state in self.INTERMEDIATE_STATES or query_state in self.FAILURE_STATES: self.log.error('Query is in {state} state. Cannot fetch results'.format(state=query_state)) return None return self.conn.get_query_results(QueryExecutionId=query_execution_id)
Poll the status of submitted athena query until query state reaches final state. Returns one of the final states
def poll_query_status(self, query_execution_id, max_tries=None): """ Poll the status of submitted athena query until query state reaches final state. Returns one of the final states :param query_execution_id: Id of submitted athena query :type query_execution_id: str :param max_tries: Number of times to poll for query state before function exits :type max_tries: int :return: str """ try_number = 1 final_query_state = None # Query state when query reaches final state or max_tries reached while True: query_state = self.check_query_status(query_execution_id) if query_state is None: self.log.info('Trial {try_number}: Invalid query state. Retrying again'.format( try_number=try_number)) elif query_state in self.INTERMEDIATE_STATES: self.log.info('Trial {try_number}: Query is still in an intermediate state - {state}' .format(try_number=try_number, state=query_state)) else: self.log.info('Trial {try_number}: Query execution completed. Final state is {state}' .format(try_number=try_number, state=query_state)) final_query_state = query_state break if max_tries and try_number >= max_tries: # Break loop if max_tries reached final_query_state = query_state break try_number += 1 sleep(self.sleep_time) return final_query_state
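Putting the Athena hook methods above together, a hedged sketch of running a query and waiting for it; the connection id, database, query and S3 output location are placeholders.

from airflow.contrib.hooks.aws_athena_hook import AWSAthenaHook

hook = AWSAthenaHook(aws_conn_id='aws_default', sleep_time=30)
hook.get_conn()  # run_query() and friends read self.conn, so initialise it first
query_execution_id = hook.run_query(
    query='SELECT * FROM my_table LIMIT 10',
    query_context={'Database': 'my_database'},
    result_configuration={'OutputLocation': 's3://my-bucket/athena-results/'},
)
final_state = hook.poll_query_status(query_execution_id, max_tries=20)
if final_state == 'SUCCEEDED':
    results = hook.get_query_results(query_execution_id)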
Returns an SFTP connection object
def get_conn(self): """ Returns an SFTP connection object """ if self.conn is None: cnopts = pysftp.CnOpts() if self.no_host_key_check: cnopts.hostkeys = None cnopts.compression = self.compress conn_params = { 'host': self.remote_host, 'port': self.port, 'username': self.username, 'cnopts': cnopts } if self.password and self.password.strip(): conn_params['password'] = self.password if self.key_file: conn_params['private_key'] = self.key_file if self.private_key_pass: conn_params['private_key_pass'] = self.private_key_pass self.conn = pysftp.Connection(**conn_params) return self.conn
Returns a dictionary of {filename: {attributes}} for all files on the remote system (where the MLSD command is supported). :param path: full path to the remote directory :type path: str
def describe_directory(self, path): """ Returns a dictionary of {filename: {attributes}} for all files on the remote system (where the MLSD command is supported). :param path: full path to the remote directory :type path: str """ conn = self.get_conn() flist = conn.listdir_attr(path) files = {} for f in flist: modify = datetime.datetime.fromtimestamp( f.st_mtime).strftime('%Y%m%d%H%M%S') files[f.filename] = { 'size': f.st_size, 'type': 'dir' if stat.S_ISDIR(f.st_mode) else 'file', 'modify': modify} return files
Returns a list of files on the remote system. :param path: full path to the remote directory to list :type path: str
def list_directory(self, path): """ Returns a list of files on the remote system. :param path: full path to the remote directory to list :type path: str """ conn = self.get_conn() files = conn.listdir(path) return files
Creates a directory on the remote system. :param path: full path to the remote directory to create :type path: str :param mode: int representation of octal mode for directory
def create_directory(self, path, mode=777): """ Creates a directory on the remote system. :param path: full path to the remote directory to create :type path: str :param mode: int representation of octal mode for directory """ conn = self.get_conn() conn.mkdir(path, mode)
Transfers the remote file to a local location. If local_full_path is a string path, the file will be put at that location. :param remote_full_path: full path to the remote file :type remote_full_path: str :param local_full_path: full path to the local file :type local_full_path: str
def retrieve_file(self, remote_full_path, local_full_path): """ Transfers the remote file to a local location. If local_full_path is a string path, the file will be put at that location :param remote_full_path: full path to the remote file :type remote_full_path: str :param local_full_path: full path to the local file :type local_full_path: str """ conn = self.get_conn() self.log.info('Retrieving file from FTP: %s', remote_full_path) conn.get(remote_full_path, local_full_path) self.log.info('Finished retrieving file from FTP: %s', remote_full_path)
Transfers a local file to the remote location. If local_full_path_or_buffer is a string path, the file will be read from that location. :param remote_full_path: full path to the remote file :type remote_full_path: str :param local_full_path: full path to the local file :type local_full_path: str
def store_file(self, remote_full_path, local_full_path): """ Transfers a local file to the remote location. If local_full_path_or_buffer is a string path, the file will be read from that location :param remote_full_path: full path to the remote file :type remote_full_path: str :param local_full_path: full path to the local file :type local_full_path: str """ conn = self.get_conn() conn.put(local_full_path, remote_full_path)
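A round-trip sketch with the SFTP hook methods above; the connection id and paths are placeholders, and the import path assumes Airflow's contrib SFTPHook that these methods belong to.

from airflow.contrib.hooks.sftp_hook import SFTPHook

hook = SFTPHook(ftp_conn_id='sftp_default')
hook.create_directory('/upload/reports')
hook.store_file('/upload/reports/report.csv', '/tmp/report.csv')
print(hook.describe_directory('/upload/reports'))
hook.retrieve_file('/upload/reports/report.csv', '/tmp/report_copy.csv')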
Sleep for the time specified in the exception. If not specified, wait for 60 seconds.
def __handle_rate_limit_exception(self, rate_limit_exception): """ Sleep for the time specified in the exception. If not specified, wait for 60 seconds. """ retry_after = int( rate_limit_exception.response.headers.get('Retry-After', 60)) self.log.info( "Hit Zendesk API rate limit. Pausing for %s seconds", retry_after ) time.sleep(retry_after)
Call Zendesk API and return results
def call(self, path, query=None, get_all_pages=True, side_loading=False): """ Call Zendesk API and return results :param path: The Zendesk API to call :param query: Query parameters :param get_all_pages: Accumulate results over all pages before returning. Due to strict rate limiting, this can often timeout. Waits for recommended period between tries after a timeout. :param side_loading: Retrieve related records as part of a single request. In order to enable side-loading, add an 'include' query parameter containing a comma-separated list of resources to load. For more information on side-loading see https://developer.zendesk.com/rest_api/docs/core/side_loading """ zendesk = self.get_conn() first_request_successful = False while not first_request_successful: try: results = zendesk.call(path, query) first_request_successful = True except RateLimitError as rle: self.__handle_rate_limit_exception(rle) # Find the key with the results keys = [path.split("/")[-1].split(".json")[0]] next_page = results['next_page'] if side_loading: keys += query['include'].split(',') results = {key: results[key] for key in keys} if get_all_pages: while next_page is not None: try: # Need to split because the next page URL has # `github.zendesk...` # in it, but the call function needs it removed. next_url = next_page.split(self.__url)[1] self.log.info("Calling %s", next_url) more_res = zendesk.call(next_url) for key in results: results[key].extend(more_res[key]) if next_page == more_res['next_page']: # Unfortunately zdesk doesn't always throw ZendeskError # when we are done getting all the data. Sometimes the # next just refers to the current set of results. # Hence, need to deal with this special case break else: next_page = more_res['next_page'] except RateLimitError as rle: self.__handle_rate_limit_exception(rle) except ZendeskError as ze: if b"Use a start_time older than 5 minutes" in ze.msg: # We have pretty up to date data break else: raise ze return results
Retrieves the partition values for a table.
def get_partitions(self, database_name, table_name, expression='', page_size=None, max_items=None): """ Retrieves the partition values for a table. :param database_name: The name of the catalog database where the partitions reside. :type database_name: str :param table_name: The name of the partitions' table. :type table_name: str :param expression: An expression filtering the partitions to be returned. Please see official AWS documentation for further information. https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions :type expression: str :param page_size: pagination size :type page_size: int :param max_items: maximum items to return :type max_items: int :return: set of partition values where each value is a tuple since a partition may be composed of multiple columns. For example: ``{('2018-01-01','1'), ('2018-01-01','2')}`` """ config = { 'PageSize': page_size, 'MaxItems': max_items, } paginator = self.get_conn().get_paginator('get_partitions') response = paginator.paginate( DatabaseName=database_name, TableName=table_name, Expression=expression, PaginationConfig=config ) partitions = set() for page in response: for p in page['Partitions']: partitions.add(tuple(p['Values'])) return partitions
Checks whether a partition exists
def check_for_partition(self, database_name, table_name, expression): """ Checks whether a partition exists :param database_name: Name of hive database (schema) @table belongs to :type database_name: str :param table_name: Name of hive table @partition belongs to :type table_name: str :expression: Expression that matches the partitions to check for (eg `a = 'b' AND c = 'd'`) :type expression: str :rtype: bool >>> hook = AwsGlueCatalogHook() >>> t = 'static_babynames_partitioned' >>> hook.check_for_partition('airflow', t, "ds='2015-01-01'") True """ partitions = self.get_partitions(database_name, table_name, expression, max_items=1) if partitions: return True else: return False
Get the information of the table
def get_table(self, database_name, table_name): """ Get the information of the table :param database_name: Name of hive database (schema) @table belongs to :type database_name: str :param table_name: Name of hive table :type table_name: str :rtype: dict >>> hook = AwsGlueCatalogHook() >>> r = hook.get_table('db', 'table_foo') >>> r['Name'] = 'table_foo' """ result = self.get_conn().get_table(DatabaseName=database_name, Name=table_name) return result['Table']
Get the physical location of the table
def get_table_location(self, database_name, table_name): """ Get the physical location of the table :param database_name: Name of hive database (schema) @table belongs to :type database_name: str :param table_name: Name of hive table :type table_name: str :return: str """ table = self.get_table(database_name, table_name) return table['StorageDescriptor']['Location']
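A short sketch tying the Glue catalog methods together; the database and table names follow the doctest above, while the connection id and region are assumptions.

from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook

hook = AwsGlueCatalogHook(aws_conn_id='aws_default', region_name='us-east-1')
if hook.check_for_partition('airflow', 'static_babynames_partitioned', "ds='2015-01-01'"):
    print(hook.get_table_location('airflow', 'static_babynames_partitioned'))
    print(hook.get_partitions('airflow', 'static_babynames_partitioned', "ds='2015-01-01'"))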
Return status of a cluster
def cluster_status(self, cluster_identifier): """ Return status of a cluster :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str """ conn = self.get_conn() try: response = conn.describe_clusters( ClusterIdentifier=cluster_identifier)['Clusters'] return response[0]['ClusterStatus'] if response else None except conn.exceptions.ClusterNotFoundFault: return 'cluster_not_found'
Delete a cluster and optionally create a snapshot
def delete_cluster( self, cluster_identifier, skip_final_cluster_snapshot=True, final_cluster_snapshot_identifier=''): """ Delete a cluster and optionally create a snapshot :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str :param skip_final_cluster_snapshot: determines cluster snapshot creation :type skip_final_cluster_snapshot: bool :param final_cluster_snapshot_identifier: name of final cluster snapshot :type final_cluster_snapshot_identifier: str """ response = self.get_conn().delete_cluster( ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=skip_final_cluster_snapshot, FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier ) return response['Cluster'] if response['Cluster'] else None
Gets a list of snapshots for a cluster
def describe_cluster_snapshots(self, cluster_identifier):
        """
        Gets a list of snapshots for a cluster

        :param cluster_identifier: unique identifier of a cluster
        :type cluster_identifier: str
        """
        response = self.get_conn().describe_cluster_snapshots(
            ClusterIdentifier=cluster_identifier
        )
        if 'Snapshots' not in response:
            return None
        snapshots = response['Snapshots']
        # filter() returns an iterator in Python 3, so materialize a list
        # before sorting newest-first by creation time.
        snapshots = [snapshot for snapshot in snapshots if snapshot['Status']]
        snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True)
        return snapshots
Restores a cluster from its snapshot
def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier): """ Restores a cluster from its snapshot :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str :param snapshot_identifier: unique identifier for a snapshot of a cluster :type snapshot_identifier: str """ response = self.get_conn().restore_from_cluster_snapshot( ClusterIdentifier=cluster_identifier, SnapshotIdentifier=snapshot_identifier ) return response['Cluster'] if response['Cluster'] else None
Creates a snapshot of a cluster
def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier): """ Creates a snapshot of a cluster :param snapshot_identifier: unique identifier for a snapshot of a cluster :type snapshot_identifier: str :param cluster_identifier: unique identifier of a cluster :type cluster_identifier: str """ response = self.get_conn().create_cluster_snapshot( SnapshotIdentifier=snapshot_identifier, ClusterIdentifier=cluster_identifier, ) return response['Snapshot'] if response['Snapshot'] else None
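A combined sketch of the snapshot and restore helpers above; the import path, connection id, cluster and snapshot identifiers are placeholders.

# Illustrative only: snapshot an existing cluster, then restore the most
# recent snapshot into a new cluster.
from airflow.contrib.hooks.redshift_hook import RedshiftHook

hook = RedshiftHook(aws_conn_id='aws_default')

# Take a snapshot of an existing cluster ...
hook.create_cluster_snapshot(
    snapshot_identifier='nightly-backup-2019-01-01',
    cluster_identifier='analytics-cluster',
)

# ... later, pick the most recent snapshot (describe_cluster_snapshots sorts
# newest-first) and restore it into a new cluster.
snapshots = hook.describe_cluster_snapshots('analytics-cluster')
if snapshots:
    latest = snapshots[0]
    hook.restore_from_cluster_snapshot(
        cluster_identifier='analytics-cluster-restored',
        snapshot_identifier=latest['SnapshotIdentifier'],
    )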
SlackAPIOperator calls will not fail even if the call itself is unsuccessful; an unsuccessful Slack call should not prevent a DAG from completing successfully.
def execute(self, **kwargs):
        """
        SlackAPIOperator calls will not fail even if the call itself is
        unsuccessful. An unsuccessful Slack call should not prevent a DAG
        from completing successfully.
        """
        if not self.api_params:
            self.construct_api_call_params()
        slack = SlackHook(token=self.token, slack_conn_id=self.slack_conn_id)
        slack.call(self.method, self.api_params)
Args: volume (Volume):
def add_volume(self, volume): """ Args: volume (Volume): """ self._add_volume(name=volume.name, configs=volume.configs)
Args: volume_mount (VolumeMount):
def add_mount(self, volume_mount): """ Args: volume_mount (VolumeMount): """ self._add_mount( name=volume_mount.name, mount_path=volume_mount.mount_path, sub_path=volume_mount.sub_path, read_only=volume_mount.read_only )
Creates a job flow using the config from the EMR connection. Keys of the connection's JSON extra may be any of the arguments to the boto3 ``run_job_flow`` method. Overrides for this config may be passed as ``job_flow_overrides``.
def create_job_flow(self, job_flow_overrides): """ Creates a job flow using the config from the EMR connection. Keys of the json extra hash may have the arguments of the boto3 run_job_flow method. Overrides for this config may be passed as the job_flow_overrides. """ if not self.emr_conn_id: raise AirflowException('emr_conn_id must be present to use create_job_flow') emr_conn = self.get_connection(self.emr_conn_id) config = emr_conn.extra_dejson.copy() config.update(job_flow_overrides) response = self.get_conn().run_job_flow(**config) return response
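A hypothetical usage sketch: the EMR connection's JSON extra is assumed to hold the base ``run_job_flow`` config, with only a few fields overridden per run; the import path and connection ids are assumptions.

from airflow.contrib.hooks.emr_hook import EmrHook

hook = EmrHook(aws_conn_id='aws_default', emr_conn_id='emr_default')

response = hook.create_job_flow({
    'Name': 'nightly-spark-job',
    'Instances': {
        'InstanceCount': 3,
        'KeepJobFlowAliveWhenNoSteps': False,
    },
})
job_flow_id = response['JobFlowId']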
Filters the file-path result, keeping only files whose size is at least the configured minimum file size.
def filter_for_filesize(result, size=None):
        """
        Filters the result, keeping only files whose size is at least ``size``.

        :param result: a list of dicts returned by Snakebite ls
        :param size: the minimum file size in MB a file must have to be kept
        :return: the filtered list of dicts
        """
        if size:
            log = LoggingMixin().log
            # Use a list comprehension so the paths (not a map object) show up
            # in the debug log under Python 3.
            log.debug(
                'Filtering for file size >= %s in files: %s',
                size, [x['path'] for x in result]
            )
            size *= settings.MEGABYTE
            result = [x for x in result if x['length'] >= size]
            log.debug('HdfsSensor.poke: after size filter result is %s', result)
        return result
Filters the result, if instructed to do so, removing entries that match the ignored-extension criteria.
def filter_for_ignored_ext(result, ignored_ext, ignore_copying): """ Will filter if instructed to do so the result to remove matching criteria :param result: list of dicts returned by Snakebite ls :type result: list[dict] :param ignored_ext: list of ignored extensions :type ignored_ext: list :param ignore_copying: shall we ignore ? :type ignore_copying: bool :return: list of dicts which were not removed :rtype: list[dict] """ if ignore_copying: log = LoggingMixin().log regex_builder = r"^.*\.(%s$)$" % '$|'.join(ignored_ext) ignored_extensions_regex = re.compile(regex_builder) log.debug( 'Filtering result for ignored extensions: %s in files %s', ignored_extensions_regex.pattern, map(lambda x: x['path'], result) ) result = [x for x in result if not ignored_extensions_regex.match(x['path'])] log.debug('HdfsSensor.poke: after ext filter result is %s', result) return result
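A toy example of how the two filters compose on a fake Snakebite ``ls()`` result; it assumes both functions are the static helpers on ``HdfsSensor`` at the import path shown, and that ``settings.MEGABYTE`` is 1024 ** 2.

from airflow.sensors.hdfs_sensor import HdfsSensor

fake_ls_result = [
    {'path': '/data/part-00000', 'length': 5 * 1024 ** 2},
    {'path': '/data/part-00001._COPYING_', 'length': 3 * 1024 ** 2},
    {'path': '/data/_SUCCESS', 'length': 0},
]

result = HdfsSensor.filter_for_ignored_ext(
    fake_ls_result, ignored_ext=['_COPYING_'], ignore_copying=True)
result = HdfsSensor.filter_for_filesize(result, size=1)   # at least 1 MB
# -> only '/data/part-00000' survives both filters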
Executed by task_instance at runtime
def execute(self, context): """ Executed by task_instance at runtime """ s3_conn = S3Hook(self.s3_conn_id) # Grab collection and execute query according to whether or not it is a pipeline if self.is_pipeline: results = MongoHook(self.mongo_conn_id).aggregate( mongo_collection=self.mongo_collection, aggregate_query=self.mongo_query, mongo_db=self.mongo_db ) else: results = MongoHook(self.mongo_conn_id).find( mongo_collection=self.mongo_collection, query=self.mongo_query, mongo_db=self.mongo_db ) # Performs transform then stringifies the docs results into json format docs_str = self._stringify(self.transform(results)) # Load Into S3 s3_conn.load_string( string_data=docs_str, key=self.s3_key, bucket_name=self.s3_bucket, replace=self.replace ) return True
Takes an iterable (pymongo Cursor or array) of dictionaries and returns a stringified version joined with the given separator.
def _stringify(iterable, joinable='\n'): """ Takes an iterable (pymongo Cursor or Array) containing dictionaries and returns a stringified version using python join """ return joinable.join( [json.dumps(doc, default=json_util.default) for doc in iterable] )
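A standalone illustration of the same idea with made-up documents: each document becomes one JSON line, so the uploaded S3 object ends up in newline-delimited JSON (JSONL) form.

import json

from bson import ObjectId, json_util

docs = [
    {'_id': ObjectId('5c8959e4a95e1e4d1f1f7a10'), 'name': 'alice'},
    {'_id': ObjectId('5c8959e4a95e1e4d1f1f7a11'), 'name': 'bob'},
]
payload = '\n'.join(json.dumps(doc, default=json_util.default) for doc in docs)
print(payload)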
Get pool by a given name.
def get_pool(name, session=None): """Get pool by a given name.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") pool = session.query(Pool).filter_by(pool=name).first() if pool is None: raise PoolNotFound("Pool '%s' doesn't exist" % name) return pool
Create a pool with the given parameters.
def create_pool(name, slots, description, session=None): """Create a pool with a given parameters.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") try: slots = int(slots) except ValueError: raise AirflowBadRequest("Bad value for `slots`: %s" % slots) session.expire_on_commit = False pool = session.query(Pool).filter_by(pool=name).first() if pool is None: pool = Pool(pool=name, slots=slots, description=description) session.add(pool) else: pool.slots = slots pool.description = description session.commit() return pool
Delete pool by a given name.
def delete_pool(name, session=None): """Delete pool by a given name.""" if not (name and name.strip()): raise AirflowBadRequest("Pool name shouldn't be empty") pool = session.query(Pool).filter_by(pool=name).first() if pool is None: raise PoolNotFound("Pool '%s' doesn't exist" % name) session.delete(pool) session.commit() return pool
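A sketch of the pool API above used end to end; the import path is an assumption, and the ``session`` argument is assumed to be injected by Airflow's ``@provide_session`` decorator.

from airflow.api.common.experimental.pool import create_pool, delete_pool, get_pool

create_pool(name='etl_pool', slots=8, description='Slots for nightly ETL tasks')
pool = get_pool(name='etl_pool')
print(pool.pool, pool.slots)   # etl_pool 8
delete_pool(name='etl_pool')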
Converts a Python dictionary to the supplied protobuf message.
def _dict_to_proto(py_dict, proto): """ Converts a python dictionary to the proto supplied :param py_dict: The dictionary to convert :type py_dict: dict :param proto: The proto object to merge with dictionary :type proto: protobuf :return: A parsed python dictionary in provided proto format :raises: ParseError: On JSON parsing problems. """ dict_json_str = json.dumps(py_dict) return json_format.Parse(dict_json_str, proto)
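An illustration of the same dict-to-proto round trip outside the hook, assuming the google-cloud-container client library is installed; ``json_format.Parse`` accepts both camelCase and snake_case field names.

import json

from google.cloud.container_v1.types import Cluster
from google.protobuf import json_format

cluster_dict = {'name': 'etl-cluster', 'initialNodeCount': 3}
cluster_proto = json_format.Parse(json.dumps(cluster_dict), Cluster())
print(cluster_proto.name, cluster_proto.initial_node_count)   # etl-cluster 3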
Given an operation, continuously fetches its status from Google Cloud until it either completes or an error occurs.
def wait_for_operation(self, operation, project_id=None):
        """
        Given an operation, continuously fetches its status from Google Cloud
        until it either completes or an error occurs.

        :param operation: The Operation to wait for
        :type operation: google.cloud.container_v1.gapic.enums.Operation
        :param project_id: Google Cloud Platform project ID
        :type project_id: str
        :return: A new, updated operation fetched from Google Cloud
        """
        self.log.info("Waiting for OPERATION_NAME %s", operation.name)
        time.sleep(OPERATIONAL_POLL_INTERVAL)
        while operation.status != Operation.Status.DONE:
            if operation.status in (Operation.Status.RUNNING, Operation.Status.PENDING):
                time.sleep(OPERATIONAL_POLL_INTERVAL)
            else:
                raise exceptions.GoogleCloudError(
                    "Operation has failed with status: %s" % operation.status)
            # To update status of operation
            operation = self.get_operation(operation.name,
                                           project_id=project_id or self.project_id)
        return operation
Fetches the operation from Google Cloud
def get_operation(self, operation_name, project_id=None): """ Fetches the operation from Google Cloud :param operation_name: Name of operation to fetch :type operation_name: str :param project_id: Google Cloud Platform project ID :type project_id: str :return: The new, updated operation from Google Cloud """ return self.get_client().get_operation(project_id=project_id or self.project_id, zone=self.location, operation_id=operation_name)
Append labels to provided Cluster Protobuf
def _append_label(cluster_proto, key, val): """ Append labels to provided Cluster Protobuf Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current airflow version string follows semantic versioning spec: x.y.z). :param cluster_proto: The proto to append resource_label airflow version to :type cluster_proto: google.cloud.container_v1.types.Cluster :param key: The key label :type key: str :param val: :type val: str :return: The cluster proto updated with new label """ val = val.replace('.', '-').replace('+', '-') cluster_proto.resource_labels.update({key: val}) return cluster_proto
Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster (e.g. load balancer resources) will not be deleted if they weren't present at the initial create time.
def delete_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT): """ Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster (e.g. load balancer resources) will not be deleted if they weren’t present at the initial create time. :param name: The name of the cluster to delete :type name: str :param project_id: Google Cloud Platform project ID :type project_id: str :param retry: Retry object used to determine when/if to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :return: The full url to the delete operation if successful, else None """ self.log.info( "Deleting (project_id=%s, zone=%s, cluster_id=%s)", self.project_id, self.location, name ) try: op = self.get_client().delete_cluster(project_id=project_id or self.project_id, zone=self.location, cluster_id=name, retry=retry, timeout=timeout) op = self.wait_for_operation(op) # Returns server-defined url for the resource return op.self_link except NotFound as error: self.log.info('Assuming Success: %s', error.message)
Creates a cluster consisting of the specified number and type of Google Compute Engine instances.
def create_cluster(self, cluster, project_id=None, retry=DEFAULT, timeout=DEFAULT): """ Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. :param cluster: A Cluster protobuf or dict. If dict is provided, it must be of the same form as the protobuf message :class:`google.cloud.container_v1.types.Cluster` :type cluster: dict or google.cloud.container_v1.types.Cluster :param project_id: Google Cloud Platform project ID :type project_id: str :param retry: A retry object (``google.api_core.retry.Retry``) used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float :return: The full url to the new, or existing, cluster :raises: ParseError: On JSON parsing problems when trying to convert dict AirflowException: cluster is not dict type nor Cluster proto type """ if isinstance(cluster, dict): cluster_proto = Cluster() cluster = self._dict_to_proto(py_dict=cluster, proto=cluster_proto) elif not isinstance(cluster, Cluster): raise AirflowException( "cluster is not instance of Cluster proto or python dict") self._append_label(cluster, 'airflow-version', 'v' + version.version) self.log.info( "Creating (project_id=%s, zone=%s, cluster_name=%s)", self.project_id, self.location, cluster.name ) try: op = self.get_client().create_cluster(project_id=project_id or self.project_id, zone=self.location, cluster=cluster, retry=retry, timeout=timeout) op = self.wait_for_operation(op) return op.target_link except AlreadyExists as error: self.log.info('Assuming Success: %s', error.message) return self.get_cluster(name=cluster.name).self_link
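A hypothetical usage of the create/delete pair above. The hook class name, import path and constructor arguments are assumptions made for illustration only; the dict form mirrors the ``Cluster`` proto fields.

from airflow.contrib.hooks.gcp_container_hook import GKEClusterHook

# Constructor arguments are assumptions; adapt to the hook version in use.
hook = GKEClusterHook(project_id='my-gcp-project', location='europe-west1-b')

cluster_url = hook.create_cluster({
    'name': 'airflow-workers',
    'initial_node_count': 3,
})
print(cluster_url)

# Tear the cluster down again once it is no longer needed.
hook.delete_cluster(name='airflow-workers')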
Gets details of specified cluster
def get_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT):
        """
        Gets details of specified cluster

        :param name: The name of the cluster to retrieve
        :type name: str
        :param project_id: Google Cloud Platform project ID
        :type project_id: str
        :param retry: A retry object used to retry requests. If None is specified,
            requests will not be retried.
        :type retry: google.api_core.retry.Retry
        :param timeout: The amount of time, in seconds, to wait for the request to
            complete. Note that if retry is specified, the timeout applies to each
            individual attempt.
        :type timeout: float
        :return: google.cloud.container_v1.types.Cluster
        """
        self.log.info(
            "Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)",
            project_id or self.project_id,
            self.location, name
        )

        # Return the Cluster object itself, as documented; callers such as
        # create_cluster access .self_link on the returned value.
        return self.get_client().get_cluster(project_id=project_id or self.project_id,
                                             zone=self.location,
                                             cluster_id=name,
                                             retry=retry,
                                             timeout=timeout)
Given a Discord http_conn_id, return the default webhook endpoint, or the override if a webhook_endpoint is manually supplied.
def _get_webhook_endpoint(self, http_conn_id, webhook_endpoint): """ Given a Discord http_conn_id, return the default webhook endpoint or override if a webhook_endpoint is manually supplied. :param http_conn_id: The provided connection ID :param webhook_endpoint: The manually provided webhook endpoint :return: Webhook endpoint (str) to use """ if webhook_endpoint: endpoint = webhook_endpoint elif http_conn_id: conn = self.get_connection(http_conn_id) extra = conn.extra_dejson endpoint = extra.get('webhook_endpoint', '') else: raise AirflowException('Cannot get webhook endpoint: No valid Discord ' 'webhook endpoint or http_conn_id supplied.') # make sure endpoint matches the expected Discord webhook format if not re.match('^webhooks/[0-9]+/[a-zA-Z0-9_-]+$', endpoint): raise AirflowException('Expected Discord webhook endpoint in the form ' 'of "webhooks/{webhook.id}/{webhook.token}".') return endpoint
Construct the Discord JSON payload. All relevant parameters are combined here to a valid Discord JSON payload.
def _build_discord_payload(self): """ Construct the Discord JSON payload. All relevant parameters are combined here to a valid Discord JSON payload. :return: Discord payload (str) to send """ payload = {} if self.username: payload['username'] = self.username if self.avatar_url: payload['avatar_url'] = self.avatar_url payload['tts'] = self.tts if len(self.message) <= 2000: payload['content'] = self.message else: raise AirflowException('Discord message length must be 2000 or fewer ' 'characters.') return json.dumps(payload)
Execute the Discord webhook call
def execute(self): """ Execute the Discord webhook call """ proxies = {} if self.proxy: # we only need https proxy for Discord proxies = {'https': self.proxy} discord_payload = self._build_discord_payload() self.run(endpoint=self.webhook_endpoint, data=discord_payload, headers={'Content-type': 'application/json'}, extra_options={'proxies': proxies})
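A hypothetical end-to-end use of the Discord webhook hook described above; the import path, connection id and webhook endpoint are placeholders.

from airflow.contrib.hooks.discord_webhook_hook import DiscordWebhookHook

hook = DiscordWebhookHook(
    http_conn_id='discord_default',                     # host = https://discord.com/api/
    webhook_endpoint='webhooks/11111111/abcDEF_token',  # overrides the connection extra
    message='Nightly DAG finished successfully.',
    username='airflow-bot',
)
hook.execute()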
Encrypts a plaintext message using Google Cloud KMS.
def encrypt(self, key_name, plaintext, authenticated_data=None): """ Encrypts a plaintext message using Google Cloud KMS. :param key_name: The Resource Name for the key (or key version) to be used for encyption. Of the form ``projects/*/locations/*/keyRings/*/cryptoKeys/**`` :type key_name: str :param plaintext: The message to be encrypted. :type plaintext: bytes :param authenticated_data: Optional additional authenticated data that must also be provided to decrypt the message. :type authenticated_data: bytes :return: The base 64 encoded ciphertext of the original message. :rtype: str """ keys = self.get_conn().projects().locations().keyRings().cryptoKeys() body = {'plaintext': _b64encode(plaintext)} if authenticated_data: body['additionalAuthenticatedData'] = _b64encode(authenticated_data) request = keys.encrypt(name=key_name, body=body) response = request.execute(num_retries=self.num_retries) ciphertext = response['ciphertext'] return ciphertext
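A sketch of encrypting a secret with the KMS hook; the import path, connection id and key name are placeholders, and the matching ``decrypt`` call is an assumption about the hook's API rather than something shown above.

from airflow.contrib.hooks.gcp_kms_hook import GoogleCloudKMSHook

hook = GoogleCloudKMSHook(gcp_conn_id='google_cloud_default')

key_name = ('projects/my-project/locations/global/'
            'keyRings/airflow/cryptoKeys/connections')
ciphertext = hook.encrypt(key_name, b'super-secret-password')

# Assumed counterpart: reverse the operation with the hook's decrypt method.
plaintext = hook.decrypt(key_name, ciphertext)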
Remote Popen
def Popen(self, cmd, **kwargs): """ Remote Popen :param cmd: command to remotely execute :param kwargs: extra arguments to Popen (see subprocess.Popen) :return: handle to subprocess """ masked_cmd = ' '.join(self.cmd_mask_password(cmd)) self.log.info("Executing command: {}".format(masked_cmd)) self.sp = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) for line in iter(self.sp.stdout): self.log.info(line.strip()) self.sp.wait() self.log.info("Command exited with return code %s", self.sp.returncode) if self.sp.returncode: raise AirflowException("Sqoop command failed: {}".format(masked_cmd))
Imports table from remote location to target dir. Arguments are copies of direct sqoop command line arguments
def import_table(self, table, target_dir=None, append=False, file_type="text", columns=None, split_by=None, where=None, direct=False, driver=None, extra_import_options=None): """ Imports table from remote location to target dir. Arguments are copies of direct sqoop command line arguments :param table: Table to read :param target_dir: HDFS destination dir :param append: Append data to an existing dataset in HDFS :param file_type: "avro", "sequence", "text" or "parquet". Imports data to into the specified format. Defaults to text. :param columns: <col,col,col…> Columns to import from table :param split_by: Column of the table used to split work units :param where: WHERE clause to use during import :param direct: Use direct connector if exists for the database :param driver: Manually specify JDBC driver class to use :param extra_import_options: Extra import options to pass as dict. If a key doesn't have a value, just pass an empty string to it. Don't include prefix of -- for sqoop options. """ cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--table", table] if columns: cmd += ["--columns", columns] if where: cmd += ["--where", where] self.Popen(cmd)
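An illustrative call of the import above; the connection id, table and paths are placeholders, and the hook shells out to the sqoop CLI, so sqoop must be on the worker's PATH.

from airflow.contrib.hooks.sqoop_hook import SqoopHook

hook = SqoopHook(conn_id='sqoop_default', num_mappers=4)
hook.import_table(
    table='orders',
    target_dir='/warehouse/staging/orders',
    file_type='parquet',
    split_by='order_id',
    where="created_at >= '2019-01-01'",
    # Flag-only options take an empty string, per the docstring above.
    extra_import_options={'fetch-size': '1000', 'delete-target-dir': ''},
)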
Imports a specific query from the rdbms to hdfs
def import_query(self, query, target_dir, append=False, file_type="text", split_by=None, direct=None, driver=None, extra_import_options=None): """ Imports a specific query from the rdbms to hdfs :param query: Free format query to run :param target_dir: HDFS destination dir :param append: Append data to an existing dataset in HDFS :param file_type: "avro", "sequence", "text" or "parquet" Imports data to hdfs into the specified format. Defaults to text. :param split_by: Column of the table used to split work units :param direct: Use direct import fast path :param driver: Manually specify JDBC driver class to use :param extra_import_options: Extra import options to pass as dict. If a key doesn't have a value, just pass an empty string to it. Don't include prefix of -- for sqoop options. """ cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--query", query] self.Popen(cmd)
Exports Hive table to remote location. Arguments are copies of direct sqoop command line arguments
def export_table(self, table, export_dir, input_null_string, input_null_non_string, staging_table, clear_staging_table, enclosed_by, escaped_by, input_fields_terminated_by, input_lines_terminated_by, input_optionally_enclosed_by, batch, relaxed_isolation, extra_export_options=None): """ Exports Hive table to remote location. Arguments are copies of direct sqoop command line Arguments :param table: Table remote destination :param export_dir: Hive table to export :param input_null_string: The string to be interpreted as null for string columns :param input_null_non_string: The string to be interpreted as null for non-string columns :param staging_table: The table in which data will be staged before being inserted into the destination table :param clear_staging_table: Indicate that any data present in the staging table can be deleted :param enclosed_by: Sets a required field enclosing character :param escaped_by: Sets the escape character :param input_fields_terminated_by: Sets the field separator character :param input_lines_terminated_by: Sets the end-of-line character :param input_optionally_enclosed_by: Sets a field enclosing character :param batch: Use batch mode for underlying statement execution :param relaxed_isolation: Transaction isolation to read uncommitted for the mappers :param extra_export_options: Extra export options to pass as dict. If a key doesn't have a value, just pass an empty string to it. Don't include prefix of -- for sqoop options. """ cmd = self._export_cmd(table, export_dir, input_null_string, input_null_non_string, staging_table, clear_staging_table, enclosed_by, escaped_by, input_fields_terminated_by, input_lines_terminated_by, input_optionally_enclosed_by, batch, relaxed_isolation, extra_export_options) self.Popen(cmd)
Retrieves connection to Cloud Text to Speech.
def get_conn(self): """ Retrieves connection to Cloud Text to Speech. :return: Google Cloud Text to Speech client object. :rtype: google.cloud.texttospeech_v1.TextToSpeechClient """ if not self._client: self._client = TextToSpeechClient(credentials=self._get_credentials()) return self._client
Synthesizes text input
def synthesize_speech(self, input_data, voice, audio_config, retry=None, timeout=None): """ Synthesizes text input :param input_data: text input to be synthesized. See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesisInput :type input_data: dict or google.cloud.texttospeech_v1.types.SynthesisInput :param voice: configuration of voice to be used in synthesis. See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.VoiceSelectionParams :type voice: dict or google.cloud.texttospeech_v1.types.VoiceSelectionParams :param audio_config: configuration of the synthesized audio. See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.AudioConfig :type audio_config: dict or google.cloud.texttospeech_v1.types.AudioConfig :return: SynthesizeSpeechResponse See more: https://googleapis.github.io/google-cloud-python/latest/texttospeech/gapic/v1/types.html#google.cloud.texttospeech_v1.types.SynthesizeSpeechResponse :rtype: object :param retry: (Optional) A retry object used to retry requests. If None is specified, requests will not be retried. :type retry: google.api_core.retry.Retry :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. Note that if retry is specified, the timeout applies to each individual attempt. :type timeout: float """ client = self.get_conn() self.log.info("Synthesizing input: %s" % input_data) return client.synthesize_speech( input_=input_data, voice=voice, audio_config=audio_config, retry=retry, timeout=timeout )
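A minimal sketch of the synthesis call above, writing the result to a local MP3 file; the import path, hook class name and connection id are assumptions, and the dict arguments mirror the corresponding proto messages.

from airflow.contrib.hooks.gcp_text_to_speech_hook import GCPTextToSpeechHook

hook = GCPTextToSpeechHook(gcp_conn_id='google_cloud_default')

response = hook.synthesize_speech(
    input_data={'text': 'The nightly pipeline finished successfully.'},
    voice={'language_code': 'en-US', 'ssml_gender': 'NEUTRAL'},
    audio_config={'audio_encoding': 'MP3'},
)
with open('/tmp/pipeline_status.mp3', 'wb') as out:
    out.write(response.audio_content)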
Close and upload local log file to remote storage S3.
def close(self): """ Close and upload local log file to remote storage S3. """ # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): # read log and remove old logs to get just the latest additions with open(local_loc, 'r') as logfile: log = logfile.read() self.s3_write(log, remote_loc) # Mark closed so we don't double write if close is called twice self.closed = True