_id: string, length 98 to 184
text: string, length 91 to 10.9k
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/sina.py#L41-L52
def sina_download_by_vid(vid, title=None, output_dir='.', merge=True, info_only=False):
    """ """
    xml = api_req(vid)
    urls, name, size = video_info(xml)
    if urls is None:
        log.wtf(name)
    title = name
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls(urls, title, 'flv', size, output_dir=output_dir, merge=merge)
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L457-L501
def post_content(url, headers={}, post_data={}, decoded=True, **kwargs):
    """ """
    if kwargs.get('post_data_raw'):
        logging.debug('post_content: %s\npost_data_raw: %s' % (url, kwargs['post_data_raw']))
    else:
        logging.debug('post_content: %s\npost_data: %s' % (url, post_data))

    req = request.Request(url, headers=headers)
    if cookies:
        cookies.add_cookie_header(req)
        req.headers.update(req.unredirected_hdrs)
    if kwargs.get('post_data_raw'):
        post_data_enc = bytes(kwargs['post_data_raw'], 'utf-8')
    else:
        post_data_enc = bytes(parse.urlencode(post_data), 'utf-8')
    response = urlopen_with_retry(req, data=post_data_enc)
    data = response.read()

    # Handle HTTP compression for gzip and deflate (zlib)
    content_encoding = response.getheader('Content-Encoding')
    if content_encoding == 'gzip':
        data = ungzip(data)
    elif content_encoding == 'deflate':
        data = undeflate(data)

    # Decode the response body
    if decoded:
        charset = match1(
            response.getheader('Content-Type'), r'charset=([\w-]+)'
        )
        if charset is not None:
            data = data.decode(charset)
        else:
            data = data.decode('utf-8')

    return data
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/ixigua.py#L34-L78
def get_video_url_from_video_id(video_id):
    """"""
    # from js
    data = [""] * 256
    for index, _ in enumerate(data):
        t = index
        for i in range(8):
            t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1)
        data[index] = t

    def tmp():
        rand_num = random.random()
        path = "/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}".format(
            video_id=video_id, random_num=str(rand_num)[2:])
        e = o = r = -1
        i, a = 0, len(path)
        while i < a:
            e = ord(path[i])
            i += 1
            if e < 128:
                r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)]
            else:
                if e < 2048:
                    r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))]
                    r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
                else:
                    if 55296 <= e < 57344:
                        e = (1023 & e) + 64
                        i += 1
                        # assumed intent: low bits of the next code unit; the original
                        # read `o = 1023 & t.url(i)`, which is undefined in this scope
                        o = 1023 & ord(path[i])
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 15 | (3 & e) << 4))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))]
                    else:
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))]
                        r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]
        return "https://ib.365yg.com{path}&s={param}".format(
            path=path, param=unsigned_right_shitf(r ^ -1, 0))

    while 1:
        url = tmp()
        if url.split("=")[-1][0] != "-":  # the parameter s must not be negative
            return url
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L252-L268
def matchall(text, patterns):
    """ """
    ret = []
    for pattern in patterns:
        match = re.findall(pattern, text)
        ret += match
    return ret
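A minimal usage sketch of matchall; the HTML string and patterns are illustrative. Captures from re.findall are concatenated in pattern order:

import re

html = '<a href="/a.mp4"></a><a href="/b.flv"></a>'
matchall(html, [r'href="([^"]+\.mp4)"', r'href="([^"]+\.flv)"'])
# -> ['/a.mp4', '/b.flv']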
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/mysql_hook.py#L107-L117
def bulk_load(self, table, tmp_file):
    """ """
    conn = self.get_conn()
    cur = conn.cursor()
    cur.execute("""
        LOAD DATA LOCAL INFILE '{tmp_file}'
        INTO TABLE {table}
        """.format(tmp_file=tmp_file, table=table))
    conn.commit()
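A hedged usage sketch; the connection id and file path are illustrative. Since the statement is LOAD DATA LOCAL INFILE, the connection needs local_infile enabled (see the get_conn snippet further down), and the file should be in MySQL's default tab-separated text format. Note that table and tmp_file are interpolated into the SQL unescaped, so they must come from trusted input:

hook = MySqlHook(mysql_conn_id='mysql_default')
hook.bulk_load(table='my_table', tmp_file='/tmp/rows.tsv')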
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L282-L310
def export_instance(self, instance, body, project_id=None):
    """ """
    try:
        response = self.get_conn().instances().export(
            project=project_id,
            instance=instance,
            body=body
        ).execute(num_retries=self.num_retries)
        operation_name = response["name"]
        self._wait_for_operation_to_complete(project_id=project_id,
                                             operation_name=operation_name)
    except HttpError as ex:
        raise AirflowException(
            'Exporting instance {} failed: {}'.format(instance, ex.content)
        )
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/postgres_hook.py#L85-L89
def bulk_load(self, table, tmp_file):
    """ """
    self.copy_expert("COPY {table} FROM STDIN".format(table=table), tmp_file)
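The Postgres counterpart delegates to copy_expert; COPY ... FROM STDIN expects PostgreSQL's default text format (tab-separated columns, one row per line). A hedged sketch with illustrative names:

hook = PostgresHook(postgres_conn_id='postgres_default')
hook.bulk_load('my_table', '/tmp/my_table.tsv')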
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/macros/hive.py#L83-L118
def closest_ds_partition(
        table, ds, before=True, schema="default",
        metastore_conn_id='metastore_default'):
    """ """
    from airflow.hooks.hive_hooks import HiveMetastoreHook
    if '.' in table:
        schema, table = table.split('.')
    hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)
    partitions = hh.get_partitions(schema=schema, table_name=table)
    if not partitions:
        return None
    part_vals = [list(p.values())[0] for p in partitions]
    if ds in part_vals:
        return ds
    else:
        parts = [datetime.datetime.strptime(pv, '%Y-%m-%d')
                 for pv in part_vals]
        target_dt = datetime.datetime.strptime(ds, '%Y-%m-%d')
        closest_ds = _closest_date(target_dt, parts, before_target=before)
        return closest_ds.isoformat()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L181-L206
def upsert_document(self, document, database_name=None, collection_name=None, document_id=None):
    """ """
    # Assign a unique ID if one isn't provided
    if document_id is None:
        document_id = str(uuid.uuid4())

    if document is None:
        raise AirflowBadRequest("You cannot insert a None document")

    # Add the document id if it isn't found
    if 'id' in document:
        if document['id'] is None:
            document['id'] = document_id
    else:
        document['id'] = document_id

    created_document = self.get_conn().CreateItem(
        get_collection_link(
            self.__get_database_name(database_name),
            self.__get_collection_name(collection_name)),
        document)

    return created_document
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/scripts/perf/scheduler_ops_metrics.py#L138-L148
def clear_dag_runs():
    """ """
    session = settings.Session()
    drs = session.query(DagRun).filter(
        DagRun.dag_id.in_(DAG_IDS),
    ).all()
    for dr in drs:
        logging.info('Deleting DagRun :: {}'.format(dr))
        session.delete(dr)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L239-L253
def get_document(self, document_id, database_name=None, collection_name=None):
    """ """
    if document_id is None:
        raise AirflowBadRequest("Cannot get a document without an id")

    try:
        return self.get_conn().ReadItem(
            get_document_link(
                self.__get_database_name(database_name),
                self.__get_collection_name(collection_name),
                document_id))
    except HTTPFailure:
        return None
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/file_to_gcs.py#L68-L82
def execute(self, context):
    """ """
    hook = GoogleCloudStorageHook(
        google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
        delegate_to=self.delegate_to)

    hook.upload(
        bucket_name=self.bucket,
        object_name=self.dst,
        mime_type=self.mime_type,
        filename=self.src,
        gzip=self.gzip,
    )
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/cbs.py#L9-L17
def cbs_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """ """
    html = get_content(url)
    pid = match1(html, r'video\.settings\.pid\s*=\s*\'([^\']+)\'')
    title = match1(html, r'video\.settings\.title\s*=\s*\"([^\"]+)\"')
    theplatform_download_by_pid(pid, title, output_dir=output_dir, merge=merge, info_only=info_only)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L289-L365
def list_py_file_paths(directory, safe_mode=True, include_examples=None):
    """ """
    if include_examples is None:
        include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
    file_paths = []
    if directory is None:
        return []
    elif os.path.isfile(directory):
        return [directory]
    elif os.path.isdir(directory):
        patterns_by_dir = {}
        for root, dirs, files in os.walk(directory, followlinks=True):
            patterns = patterns_by_dir.get(root, [])
            ignore_file = os.path.join(root, '.airflowignore')
            if os.path.isfile(ignore_file):
                with open(ignore_file, 'r') as f:
                    # If we have new patterns create a copy so we don't change
                    # the previous list (which would affect other subdirs)
                    patterns += [re.compile(p) for p in f.read().split('\n') if p]

            # If we can ignore any subdirs entirely we should - fewer paths
            # to walk is better. We have to modify the ``dirs`` array in
            # place for this to affect os.walk
            dirs[:] = [
                d
                for d in dirs
                if not any(p.search(os.path.join(root, d)) for p in patterns)
            ]

            # We want patterns defined in a parent folder's .airflowignore to
            # apply to subdirs too
            for d in dirs:
                patterns_by_dir[os.path.join(root, d)] = patterns

            for f in files:
                try:
                    file_path = os.path.join(root, f)
                    if not os.path.isfile(file_path):
                        continue
                    mod_name, file_ext = os.path.splitext(
                        os.path.split(file_path)[-1])
                    if file_ext != '.py' and not zipfile.is_zipfile(file_path):
                        continue
                    if any([re.findall(p, file_path) for p in patterns]):
                        continue

                    # Heuristic that guesses whether a Python file contains an
                    # Airflow DAG definition.
                    might_contain_dag = True
                    if safe_mode and not zipfile.is_zipfile(file_path):
                        with open(file_path, 'rb') as fp:
                            content = fp.read()
                            might_contain_dag = all(
                                [s in content for s in (b'DAG', b'airflow')])

                    if not might_contain_dag:
                        continue

                    file_paths.append(file_path)
                except Exception:
                    log = LoggingMixin().log
                    log.exception("Error while examining %s", f)
    if include_examples:
        import airflow.example_dags
        example_dag_folder = airflow.example_dags.__path__[0]
        file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
    return file_paths
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/wanmen.py#L18-L25
def _wanmen_get_title_by_json_topic_part(json_content, tIndex, pIndex):
    """"""
    return '_'.join([json_content[0]['name'],
                     json_content[0]['Topics'][tIndex]['name'],
                     json_content[0]['Topics'][tIndex]['Parts'][pIndex]['name']])
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L961-L972
def get_database_hook(self):
    """ """
    if self.database_type == 'postgres':
        self.db_hook = PostgresHook(postgres_conn_id=self.db_conn_id,
                                    schema=self.database)
    else:
        self.db_hook = MySqlHook(mysql_conn_id=self.db_conn_id,
                                 schema=self.database)
    return self.db_hook
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/bin/cli.py#L527-L550
def task_failed_deps(args):
    """ """
    dag = get_dag(args)
    task = dag.get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)

    dep_context = DepContext(deps=SCHEDULER_DEPS)
    failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
    # TODO, Do we want to print or log this
    if failed_deps:
        print("Task instance dependencies not met:")
        for dep in failed_deps:
            print("{}: {}".format(dep.dep_name, dep.reason))
    else:
        print("Task instance dependencies are all met.")
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/scripts/perf/scheduler_ops_metrics.py#L65-L101
def print_stats(self):
    """ """
    session = settings.Session()
    TI = TaskInstance
    tis = (
        session
        .query(TI)
        .filter(TI.dag_id.in_(DAG_IDS))
        .all()
    )
    successful_tis = [x for x in tis if x.state == State.SUCCESS]
    ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
                (ti.queued_dttm - self.start_date).total_seconds(),
                (ti.start_date - self.start_date).total_seconds(),
                (ti.end_date - self.start_date).total_seconds(),
                ti.duration) for ti in successful_tis]
    ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
                                                'execution_date',
                                                'queue_delay',
                                                'start_delay', 'land_time',
                                                'duration'])

    print('Performance Results')
    print('###################')
    for dag_id in DAG_IDS:
        print('DAG {}'.format(dag_id))
        print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
    print('###################')
    if len(tis) > len(successful_tis):
        print("WARNING!! The following task instances haven't completed")
        print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
                            for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
                           columns=['dag_id', 'task_id', 'execution_date', 'state']))

    session.commit()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/timezone.py#L98-L128
def make_aware(value, timezone=None):
    """ """
    if timezone is None:
        timezone = TIMEZONE

    # Check that we won't overwrite the timezone of an aware datetime.
    if is_localized(value):
        raise ValueError(
            "make_aware expects a naive datetime, got %s" % value)
    if hasattr(value, 'fold'):
        # In case of Python 3.6 we want to do the same thing that pendulum does for
        # Python 3.5, i.e. in case we move the clock back we want to schedule the run
        # at the time of the second instance of the same clock time rather than the
        # first one. The fold parameter has no impact in other cases, so we can
        # safely set it to 1 here.
        value = value.replace(fold=1)
    if hasattr(timezone, 'localize'):
        # This method is available for pytz time zones.
        return timezone.localize(value)
    elif hasattr(timezone, 'convert'):
        # For pendulum
        return timezone.convert(value)
    else:
        # This may be wrong around DST changes!
        return value.replace(tzinfo=timezone)
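A sketch of the three branches, assuming pytz and pendulum are installed; the datetime and zone are illustrative:

import datetime
import pendulum
import pytz

naive = datetime.datetime(2018, 10, 28, 2, 30)  # ambiguous during the EU DST fall-back
make_aware(naive, pytz.timezone('Europe/Warsaw'))      # pytz path: timezone.localize(value)
make_aware(naive, pendulum.timezone('Europe/Warsaw'))  # pendulum path: timezone.convert(value)
make_aware(naive)                                      # falls back to the configured TIMEZONE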
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/grpc_hook.py#L112-L123
def _get_field(self, field_name, default=None):
    """ """
    full_field_name = 'extra__grpc__{}'.format(field_name)
    if full_field_name in self.extras:
        return self.extras[full_field_name]
    else:
        return default
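For context, a sketch of the lookup against an illustrative extras dict:

# Suppose the gRPC connection's extras were stored as
#   {'extra__grpc__auth_type': 'NO_AUTH'}
hook._get_field('auth_type')           # -> 'NO_AUTH'
hook._get_field('scopes', default=[])  # -> [] (prefixed key absent, default returned)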
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_hook.py#L28-L77
def _parse_s3_config(config_file_name, config_format='boto', profile=None):
    """ """
    config = configparser.ConfigParser()
    if config.read(config_file_name):  # pragma: no cover
        sections = config.sections()
    else:
        raise AirflowException("Couldn't read {0}".format(config_file_name))
    # Setting option names depending on file format
    if config_format is None:
        config_format = 'boto'
    conf_format = config_format.lower()
    if conf_format == 'boto':  # pragma: no cover
        if profile is not None and 'profile ' + profile in sections:
            cred_section = 'profile ' + profile
        else:
            cred_section = 'Credentials'
    elif conf_format == 'aws' and profile is not None:
        cred_section = profile
    else:
        cred_section = 'default'
    # Option names
    if conf_format in ('boto', 'aws'):  # pragma: no cover
        key_id_option = 'aws_access_key_id'
        secret_key_option = 'aws_secret_access_key'
        # security_token_option = 'aws_security_token'
    else:
        key_id_option = 'access_key'
        secret_key_option = 'secret_key'
    # Actual Parsing
    if cred_section not in sections:
        raise AirflowException("This config file format is not recognized")
    else:
        try:
            access_key = config.get(cred_section, key_id_option)
            secret_key = config.get(cred_section, secret_key_option)
        except Exception:
            logging.warning("Option Error in parsing s3 config file")
            raise
    return access_key, secret_key
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/bin/cli.py#L579-L600
def next_execution(args):
    """ """
    dag = get_dag(args)

    if dag.is_paused:
        print("[INFO] Please be reminded this DAG is PAUSED now.")

    if dag.latest_execution_date:
        next_execution_dttm = dag.following_schedule(dag.latest_execution_date)

        if next_execution_dttm is None:
            print("[WARN] No following schedule can be found. " +
                  "This DAG may have schedule interval '@once' or `None`.")

        print(next_execution_dttm)
    else:
        print("[WARN] Only applicable when there is execution record found for the DAG.")
        print(None)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L612-L623
def _sync_metadata(self):
    """ """
    while not self._stat_queue.empty():
        stat = self._stat_queue.get()
        self._file_paths = stat.file_paths
        self._all_pids = stat.all_pids
        self._done = stat.done
        self._all_files_processed = stat.all_files_processed
        self._result_count += stat.result_count
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/git.py#L7-L15
def get_head(repo_path):
    """"""
    try:
        ref = open(os.path.join(repo_path, '.git', 'HEAD'), 'r').read().strip()[5:].split('/')
        branch = ref[-1]
        commit = open(os.path.join(repo_path, '.git', *ref), 'r').read().strip()[:7]
        return branch, commit
    except:
        return None
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L229-L256
def patch_database(self, instance, database, body, project_id=None):
    """ """
    response = self.get_conn().databases().patch(
        project=project_id,
        instance=instance,
        database=database,
        body=body
    ).execute(num_retries=self.num_retries)
    operation_name = response["name"]
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/db.py#L312-L331
def resetdb():
    """ """
    from airflow import models
    # alembic adds significant import time, so we import it lazily
    from alembic.migration import MigrationContext

    log.info("Dropping tables that exist")

    models.base.Base.metadata.drop_all(settings.engine)
    mc = MigrationContext.configure(settings.engine)
    if mc._version.exists(settings.engine):
        mc._version.drop(settings.engine)

    from flask_appbuilder.models.sqla import Base
    Base.metadata.drop_all(settings.engine)

    initdb()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datastore_hook.py#L62-L81
def allocate_ids(self, partial_keys):
    """ """
    conn = self.get_conn()

    resp = (conn
            .projects()
            .allocateIds(projectId=self.project_id, body={'keys': partial_keys})
            .execute(num_retries=self.num_retries))

    return resp['keys']
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L567-L599
def stop_proxy(self):
    """ """
    if not self.sql_proxy_process:
        raise AirflowException("The sql proxy is not started yet")
    else:
        self.log.info("Stopping the cloud_sql_proxy pid: %s",
                      self.sql_proxy_process.pid)
        self.sql_proxy_process.kill()
        self.sql_proxy_process = None
    # Cleanup!
    self.log.info("Removing the socket directory: %s",
                  self.cloud_sql_proxy_socket_directory)
    shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True)
    if self.sql_proxy_was_downloaded:
        self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path)
        # Silently ignore if the file has already been removed (concurrency)
        try:
            os.remove(self.sql_proxy_path)
        except OSError as e:
            if not e.errno == errno.ENOENT:
                raise
    else:
        self.log.info("Skipped removing proxy - it was not downloaded: %s",
                      self.sql_proxy_path)
    if os.path.isfile(self.credentials_path):
        self.log.info("Removing generated credentials file %s",
                      self.credentials_path)
        # Here the file cannot be deleted by a concurrent task (each task has its own copy)
        os.remove(self.credentials_path)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/gcs_sensor.py#L242-L310
def is_bucket_updated(self, current_num_objects):
    """ """
    if current_num_objects > self.previous_num_objects:
        # When new objects arrived, reset the inactivity_seconds
        # and previous_num_objects for the next poke.
        self.log.info(
            '''
            New objects found at {} resetting last_activity_time.
            '''.format(os.path.join(self.bucket, self.prefix)))
        self.last_activity_time = get_time()
        self.inactivity_seconds = 0
        self.previous_num_objects = current_num_objects
    elif current_num_objects < self.previous_num_objects:
        # During the last poke interval objects were deleted.
        if self.allow_delete:
            self.previous_num_objects = current_num_objects
            self.last_activity_time = get_time()
            self.log.warning(
                '''
                Objects were deleted during the last poke interval. Updating the
                file counter and resetting last_activity_time.
                '''
            )
        else:
            raise RuntimeError(
                '''
                Illegal behavior: objects were deleted in {} between pokes.
                '''.format(os.path.join(self.bucket, self.prefix))
            )
    else:
        if self.last_activity_time:
            self.inactivity_seconds = (
                get_time() - self.last_activity_time).total_seconds()
        else:
            # Handles the first poke where last inactivity time is None.
            self.last_activity_time = get_time()
            self.inactivity_seconds = 0

        if self.inactivity_seconds >= self.inactivity_period:
            if current_num_objects >= self.min_objects:
                self.log.info(
                    '''
                    SUCCESS: Sensor found {} objects at {}. Waited at least {}
                    seconds, with no new objects dropped.
                    '''.format(
                        current_num_objects,
                        os.path.join(self.bucket, self.prefix),
                        self.inactivity_period))
                return True

            warn_msg = \
                '''
                FAILURE: Inactivity Period passed, not enough objects found in {}
                '''.format(os.path.join(self.bucket, self.prefix))
            self.log.warning(warn_msg)
            return False
    return False
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/db.py#L47-L70
def provide_session(func):
    """ """
    @wraps(func)
    def wrapper(*args, **kwargs):
        arg_session = 'session'

        func_params = func.__code__.co_varnames
        session_in_args = arg_session in func_params and \
            func_params.index(arg_session) < len(args)
        session_in_kwargs = arg_session in kwargs

        if session_in_kwargs or session_in_args:
            return func(*args, **kwargs)
        else:
            with create_session() as session:
                kwargs[arg_session] = session
                return func(*args, **kwargs)

    return wrapper
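A hedged sketch of the decorator in use; count_dag_runs and my_session are made up for illustration:

from airflow.models import DagRun

@provide_session
def count_dag_runs(dag_id, session=None):
    # `session` is injected by the decorator when the caller omits it
    return session.query(DagRun).filter(DagRun.dag_id == dag_id).count()

count_dag_runs('example_dag')                      # decorator opens and closes the session
count_dag_runs('example_dag', session=my_session)  # an explicit session is passed through untouched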
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/helpers.py#L153-L166
def chain(*tasks):
    """ """
    for up_task, down_task in zip(tasks[:-1], tasks[1:]):
        up_task.set_downstream(down_task)
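For illustration, with three already-instantiated operators t1, t2, t3:

chain(t1, t2, t3)
# ...is equivalent to:
t1.set_downstream(t2)
t2.set_downstream(t3)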
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/configuration.py#L346-L405
def as_dict(
        self, display_source=False, display_sensitive=False, raw=False):
    """ """
    cfg = {}
    configs = [
        ('default', self.airflow_defaults),
        ('airflow.cfg', self),
    ]

    for (source_name, config) in configs:
        for section in config.sections():
            sect = cfg.setdefault(section, OrderedDict())
            for (k, val) in config.items(section=section, raw=raw):
                if display_source:
                    val = (val, source_name)
                sect[k] = val

    # add env vars and overwrite because they have priority
    for ev in [ev for ev in os.environ if ev.startswith('AIRFLOW__')]:
        try:
            _, section, key = ev.split('__')
            opt = self._get_env_var_option(section, key)
        except ValueError:
            continue
        if not display_sensitive and ev != 'AIRFLOW__CORE__UNIT_TEST_MODE':
            opt = '< hidden >'
        elif raw:
            opt = opt.replace('%', '%%')
        if display_source:
            opt = (opt, 'env var')
        cfg.setdefault(section.lower(), OrderedDict()).update(
            {key.lower(): opt})

    # add bash commands
    for (section, key) in self.as_command_stdout:
        opt = self._get_cmd_option(section, key)
        if opt:
            if not display_sensitive:
                opt = '< hidden >'
            if display_source:
                opt = (opt, 'cmd')
            elif raw:
                opt = opt.replace('%', '%%')
            cfg.setdefault(section, OrderedDict()).update({key: opt})
            del cfg[section][key + '_cmd']

    return cfg
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L789-L808
def start(self):
    """ """
    self.log.info("Processing files using up to %s processes at a time ",
                  self._parallelism)
    self.log.info("Process each file at most once every %s seconds",
                  self._file_process_interval)
    self.log.info(
        "Checking for new files in %s every %s seconds",
        self._dag_directory,
        self.dag_dir_list_interval
    )

    if self._async_mode:
        self.log.debug("Starting DagFileProcessorManager in async mode")
        self.start_in_async()
    else:
        self.log.debug("Starting DagFileProcessorManager in sync mode")
        self.start_in_sync()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L204-L232
def delete_transfer_job(self, job_name, project_id):
    """ """
    return (
        self.get_conn()
        .transferJobs()
        .patch(
            jobName=job_name,
            body={
                PROJECT_ID: project_id,
                TRANSFER_JOB: {STATUS1: GcpTransferJobsStatus.DELETED},
                TRANSFER_JOB_FIELD_MASK: STATUS1,
            },
        )
        .execute(num_retries=self.num_retries)
    )
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/executors/celery_executor.py#L90-L111
def fetch_celery_task_state(celery_task):
    """ """
    try:
        with timeout(seconds=2):
            # Accessing the state property of a celery task will make an actual
            # network request to get the current state of the task.
            res = (celery_task[0], celery_task[1].state)
    except Exception as e:
        exception_traceback = "Celery Task ID: {}\n{}".format(celery_task[0],
                                                              traceback.format_exc())
        res = ExceptionWithTraceback(e, exception_traceback)
    return res
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/mysql_hook.py#L62-L105
def get_conn(self):
    """ """
    conn = self.get_connection(self.mysql_conn_id)
    conn_config = {
        "user": conn.login,
        "passwd": conn.password or '',
        "host": conn.host or 'localhost',
        "db": self.schema or conn.schema or ''
    }

    if not conn.port:
        conn_config["port"] = 3306
    else:
        conn_config["port"] = int(conn.port)

    if conn.extra_dejson.get('charset', False):
        conn_config["charset"] = conn.extra_dejson["charset"]
        if (conn_config["charset"]).lower() == 'utf8' or \
                (conn_config["charset"]).lower() == 'utf-8':
            conn_config["use_unicode"] = True
    if conn.extra_dejson.get('cursor', False):
        if (conn.extra_dejson["cursor"]).lower() == 'sscursor':
            conn_config["cursorclass"] = MySQLdb.cursors.SSCursor
        elif (conn.extra_dejson["cursor"]).lower() == 'dictcursor':
            conn_config["cursorclass"] = MySQLdb.cursors.DictCursor
        elif (conn.extra_dejson["cursor"]).lower() == 'ssdictcursor':
            conn_config["cursorclass"] = MySQLdb.cursors.SSDictCursor
    local_infile = conn.extra_dejson.get('local_infile', False)
    if conn.extra_dejson.get('ssl', False):
        # SSL parameter for MySQL has to be a dictionary and in case
        # of extra/dejson we can get string if extra is passed via
        # URL parameters
        dejson_ssl = conn.extra_dejson['ssl']
        if isinstance(dejson_ssl, six.string_types):
            dejson_ssl = json.loads(dejson_ssl)
        conn_config['ssl'] = dejson_ssl
    if conn.extra_dejson.get('unix_socket'):
        conn_config['unix_socket'] = conn.extra_dejson['unix_socket']
    if local_infile:
        conn_config["local_infile"] = 1
    conn = MySQLdb.connect(**conn_config)
    return conn
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_pubsub_hook.py#L60-L81
def publish(self, project, topic, messages):
    """ """
    body = {'messages': messages}
    full_topic = _format_topic(project, topic)
    request = self.get_conn().projects().topics().publish(
        topic=full_topic, body=body)
    try:
        request.execute(num_retries=self.num_retries)
    except HttpError as e:
        raise PubSubException(
            'Error publishing to topic {}'.format(full_topic), e)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L323-L348
def wait_for_transfer_job(self, job,
                          expected_statuses=(GcpTransferOperationStatus.SUCCESS,),
                          timeout=60):
    """ """
    while timeout > 0:
        operations = self.list_transfer_operations(
            filter={FILTER_PROJECT_ID: job[PROJECT_ID], FILTER_JOB_NAMES: [job[NAME]]}
        )

        if GCPTransferServiceHook.operations_contain_expected_statuses(operations, expected_statuses):
            return
        time.sleep(TIME_TO_SLEEP_IN_SECONDS)
        timeout -= TIME_TO_SLEEP_IN_SECONDS
    raise AirflowException("Timeout. The operation could not be completed within the allotted time.")
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_mlengine_hook.py#L202-L224
def list_versions(self, project_id, model_name):
    """ """
    result = []
    full_parent_name = 'projects/{}/models/{}'.format(
        project_id, model_name)
    request = self._mlengine.projects().models().versions().list(
        parent=full_parent_name, pageSize=100)

    response = request.execute()
    next_page_token = response.get('nextPageToken', None)
    result.extend(response.get('versions', []))
    while next_page_token is not None:
        next_request = self._mlengine.projects().models().versions().list(
            parent=full_parent_name,
            pageToken=next_page_token,
            pageSize=100)
        response = next_request.execute()
        next_page_token = response.get('nextPageToken', None)
        result.extend(response.get('versions', []))
        time.sleep(5)
    return result
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L183-L202
def get_database(self, instance, database, project_id=None):
    """ """
    return self.get_conn().databases().get(
        project=project_id,
        instance=instance,
        database=database
    ).execute(num_retries=self.num_retries)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L512-L524
def start(self):
    """ """
    self._process = self._launch_process(self._dag_directory,
                                         self._file_paths,
                                         self._max_runs,
                                         self._processor_factory,
                                         self._child_signal_conn,
                                         self._stat_queue,
                                         self._result_queue,
                                         self._async_mode)
    self.log.info("Launched DagFileProcessorManager with pid: %s",
                  self._process.pid)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L157-L179
def list_transfer_job(self, filter):
    """ """
    conn = self.get_conn()
    filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID)
    request = conn.transferJobs().list(filter=json.dumps(filter))
    jobs = []

    while request is not None:
        response = request.execute(num_retries=self.num_retries)
        jobs.extend(response[TRANSFER_JOBS])

        request = conn.transferJobs().list_next(previous_request=request,
                                                previous_response=response)

    return jobs
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/ssh_hook.py#L144-L185
def get_conn(self):
    """ """
    self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id)
    client = paramiko.SSHClient()
    if not self.allow_host_key_change:
        self.log.warning('Remote Identification Change is not verified. '
                         'This wont protect against Man-In-The-Middle attacks')
        client.load_system_host_keys()
    if self.no_host_key_check:
        self.log.warning('No Host Key Verification. This wont protect '
                         'against Man-In-The-Middle attacks')
        # Default is RejectPolicy
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    if self.password and self.password.strip():
        client.connect(hostname=self.remote_host,
                       username=self.username,
                       password=self.password,
                       key_filename=self.key_file,
                       timeout=self.timeout,
                       compress=self.compress,
                       port=self.port,
                       sock=self.host_proxy)
    else:
        client.connect(hostname=self.remote_host,
                       username=self.username,
                       key_filename=self.key_file,
                       timeout=self.timeout,
                       compress=self.compress,
                       port=self.port,
                       sock=self.host_proxy)

    if self.keepalive_interval:
        client.get_transport().set_keepalive(self.keepalive_interval)

    self.client = client
    return client
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/vertica_hook.py#L35-L53
def get_conn(self):
    """ """
    conn = self.get_connection(self.vertica_conn_id)
    conn_config = {
        "user": conn.login,
        "password": conn.password or '',
        "database": conn.schema,
        "host": conn.host or 'localhost'
    }

    if not conn.port:
        conn_config["port"] = 5433
    else:
        conn_config["port"] = int(conn.port)

    conn = connect(**conn_config)
    return conn
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L895-L908
def create_connection(self, session=None):
    """ """
    connection = Connection(conn_id=self.db_conn_id)
    uri = self._generate_connection_uri()
    self.log.info("Creating connection %s", self.db_conn_id)
    connection.parse_from_uri(uri)
    session.add(connection)
    session.commit()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datastore_hook.py#L257-L295
def export_to_storage_bucket(self, bucket, namespace=None, entity_filter=None, labels=None):
    """ """
    admin_conn = self.get_conn()

    output_uri_prefix = 'gs://' + '/'.join(filter(None, [bucket, namespace]))
    if not entity_filter:
        entity_filter = {}
    if not labels:
        labels = {}
    body = {
        'outputUrlPrefix': output_uri_prefix,
        'entityFilter': entity_filter,
        'labels': labels,
    }
    resp = (admin_conn
            .projects()
            .export(projectId=self.project_id, body=body)
            .execute(num_retries=self.num_retries))

    return resp
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/snowflake_hook.py#L98-L105
def get_uri(self):
    """ """
    conn_config = self._get_conn_params()
    uri = 'snowflake://{user}:{password}@{account}/{database}/'
    uri += '{schema}?warehouse={warehouse}&role={role}'
    return uri.format(**conn_config)
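With illustrative connection parameters, the formatted URI comes out as:

conn_config = {'user': 'alice', 'password': 'secret', 'account': 'xy12345',
               'database': 'db', 'schema': 'public',
               'warehouse': 'compute_wh', 'role': 'sysadmin'}
# -> 'snowflake://alice:secret@xy12345/db/public?warehouse=compute_wh&role=sysadmin'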
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/dailymotion.py#L13-L35
def dailymotion_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """ """
    html = get_content(rebuilt_url(url))
    info = json.loads(match1(html, r'qualities":({.+?}),"'))
    title = match1(html, r'"video_title"\s*:\s*"([^"]+)"') or \
        match1(html, r'"title"\s*:\s*"([^"]+)"')
    title = unicodize(title)

    for quality in ['1080', '720', '480', '380', '240', '144', 'auto']:
        try:
            real_url = info[quality][1]["url"]
            if real_url:
                break
        except KeyError:
            pass

    mime, ext, size = url_info(real_url)

    print_info(site_info, title, mime, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir=output_dir, merge=merge)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagrun.py#L95-L111
def refresh_from_db(self, session=None):
    """ """
    DR = DagRun

    exec_date = func.cast(self.execution_date, DateTime)

    dr = session.query(DR).filter(
        DR.dag_id == self.dag_id,
        func.cast(DR.execution_date, DateTime) == exec_date,
        DR.run_id == self.run_id
    ).one()

    self.id = dr.id
    self.state = dr.state
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L1264-L1275
def max_runs_reached(self):
    """ """
    if self._max_runs == -1:  # Unlimited runs.
        return False
    for file_path in self._file_paths:
        if self._run_count[file_path] < self._max_runs:
            return False
    if self._run_count[self._heart_beat_key] < self._max_runs:
        return False
    return True
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagbag.py#L112-L143
def get_dag(self, dag_id):
    """ """
    from airflow.models.dag import DagModel  # Avoid circular import

    # If asking for a known subdag, we want to refresh the parent
    root_dag_id = dag_id
    if dag_id in self.dags:
        dag = self.dags[dag_id]
        if dag.is_subdag:
            root_dag_id = dag.parent_dag.dag_id

    # If the dag corresponding to root_dag_id is absent or expired
    orm_dag = DagModel.get_current(root_dag_id)
    if orm_dag and (
            root_dag_id not in self.dags or
            (
                orm_dag.last_expired and
                dag.last_loaded < orm_dag.last_expired
            )
    ):
        # Reprocess source file
        found_dags = self.process_file(
            filepath=orm_dag.fileloc, only_if_updated=False)

        # If the source file no longer exports `dag_id`, delete it from self.dags
        if found_dags and dag_id in [found_dag.dag_id for found_dag in found_dags]:
            return self.dags[dag_id]
        elif dag_id in self.dags:
            del self.dags[dag_id]
    return self.dags.get(dag_id)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/configuration.py#L484-L491
def parameterized_config(template):
    """ """
    all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}
    return template.format(**all_vars)
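A sketch of the templating, assuming the referenced name is visible in the configuration module's globals(); FERNET_KEY is illustrative:

# With FERNET_KEY = 'abc123' defined at module level:
parameterized_config('fernet_key = {FERNET_KEY}')
# -> 'fernet_key = abc123'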
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/operators/bash_operator.py#L86-L144
def execute(self, context):
    """ """
    self.log.info('Tmp dir root location: \n %s', gettempdir())

    # Prepare env for child process.
    if self.env is None:
        self.env = os.environ.copy()
    airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
    self.log.info('Exporting the following env vars:\n%s',
                  '\n'.join(["{}={}".format(k, v)
                             for k, v in airflow_context_vars.items()]))
    self.env.update(airflow_context_vars)

    self.lineage_data = self.bash_command

    with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
        with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as tmp_file:
            tmp_file.write(bytes(self.bash_command, 'utf_8'))
            tmp_file.flush()
            script_location = os.path.abspath(tmp_file.name)
            self.log.info('Temporary script location: %s', script_location)

            def pre_exec():
                # Restore default signal disposition and invoke setsid
                for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ'):
                    if hasattr(signal, sig):
                        signal.signal(getattr(signal, sig), signal.SIG_DFL)
                os.setsid()

            self.log.info('Running command: %s', self.bash_command)
            sub_process = Popen(
                ['bash', tmp_file.name],
                stdout=PIPE,
                stderr=STDOUT,
                cwd=tmp_dir,
                env=self.env,
                preexec_fn=pre_exec)

            self.sub_process = sub_process

            self.log.info('Output:')
            line = ''
            for raw_line in iter(sub_process.stdout.readline, b''):
                line = raw_line.decode(self.output_encoding).rstrip()
                self.log.info(line)

            sub_process.wait()

            self.log.info('Command exited with return code %s', sub_process.returncode)

            if sub_process.returncode:
                raise AirflowException('Bash command failed')

    return line
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L1090-L1109
def set_file_paths(self, new_file_paths):
    """ """
    self._file_paths = new_file_paths
    self._file_path_queue = [x for x in self._file_path_queue
                             if x in new_file_paths]
    # Stop processors that are working on deleted files
    filtered_processors = {}
    for file_path, processor in self._processors.items():
        if file_path in new_file_paths:
            filtered_processors[file_path] = processor
        else:
            self.log.warning("Stopping processor for %s", file_path)
            processor.terminate()
    self._processors = filtered_processors
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_natural_language_hook.py#L163-L195
def annotate_text(self, document, features, encoding_type=None, retry=None,
                  timeout=None, metadata=None):
    """ """
    client = self.get_conn()

    return client.annotate_text(
        document=document,
        features=features,
        encoding_type=encoding_type,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/presto_hook.py#L120-L124
def run(self, hql, parameters=None):
    """ """
    return super().run(self._strip_sql(hql), parameters)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L645-L679
def end(self):
    """ """
    if not self._process:
        self.log.warn('Ending without manager process.')
        return
    this_process = psutil.Process(os.getpid())
    try:
        manager_process = psutil.Process(self._process.pid)
    except psutil.NoSuchProcess:
        self.log.info("Manager process not running.")
        return

    # First try SIGTERM
    if manager_process.is_running() \
            and manager_process.pid in [x.pid for x in this_process.children()]:
        self.log.info("Terminating manager process: %s", manager_process.pid)
        manager_process.terminate()
        # TODO: Remove magic number
        timeout = 5
        self.log.info("Waiting up to %ss for manager process to exit...", timeout)
        try:
            psutil.wait_procs({manager_process}, timeout)
        except psutil.TimeoutExpired:
            self.log.debug("Ran out of time while waiting for "
                           "processes to exit")

    # Then SIGKILL
    if manager_process.is_running() \
            and manager_process.pid in [x.pid for x in this_process.children()]:
        self.log.info("Killing manager process: %s", manager_process.pid)
        manager_process.kill()
        manager_process.wait()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/executors/kubernetes_executor.py#L606-L653
def clear_not_launched_queued_tasks(self, session=None):
    """ """
    queued_tasks = session \
        .query(TaskInstance) \
        .filter(TaskInstance.state == State.QUEUED).all()
    self.log.info(
        'When executor started up, found %s queued task instances',
        len(queued_tasks)
    )

    for task in queued_tasks:
        dict_string = (
            "dag_id={},task_id={},execution_date={},airflow-worker={}".format(
                AirflowKubernetesScheduler._make_safe_label_value(task.dag_id),
                AirflowKubernetesScheduler._make_safe_label_value(task.task_id),
                AirflowKubernetesScheduler._datetime_to_label_safe_datestring(
                    task.execution_date
                ),
                self.worker_uuid
            )
        )
        kwargs = dict(label_selector=dict_string)
        pod_list = self.kube_client.list_namespaced_pod(
            self.kube_config.kube_namespace, **kwargs)
        if len(pod_list.items) == 0:
            self.log.info(
                'TaskInstance: %s found in queued state but was not launched, '
                'rescheduling', task
            )
            session.query(TaskInstance).filter(
                TaskInstance.dag_id == task.dag_id,
                TaskInstance.task_id == task.task_id,
                TaskInstance.execution_date == task.execution_date
            ).update({TaskInstance.state: State.NONE})
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_mlengine_hook.py#L60-L121
def create_job(self, project_id, job, use_existing_job_fn=None):
    """ """
    request = self._mlengine.projects().jobs().create(
        parent='projects/{}'.format(project_id),
        body=job)
    job_id = job['jobId']

    try:
        request.execute()
    except HttpError as e:
        # 409 means there is an existing job with the same job ID.
        if e.resp.status == 409:
            if use_existing_job_fn is not None:
                existing_job = self._get_job(project_id, job_id)
                if not use_existing_job_fn(existing_job):
                    self.log.error(
                        'Job with job_id %s already exists, but it does '
                        'not match our expectation: %s',
                        job_id, existing_job
                    )
                    raise
            self.log.info(
                'Job with job_id %s already exists. Will wait for it to finish',
                job_id
            )
        else:
            self.log.error('Failed to create MLEngine job: {}'.format(e))
            raise

    return self._wait_for_job_done(project_id, job_id)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/timezone.py#L52-L64
def utcnow():
    """ """
    # pendulum utcnow() is not used as that sets a TimezoneInfo object
    # instead of a Timezone. This is not picklable and also creates issues
    # when using replace()
    d = dt.datetime.utcnow()
    d = d.replace(tzinfo=utc)
    return d
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L50-L60
def get_conn(self):
    """ """
    if self.cosmos_client is not None:
        return self.cosmos_client

    # Initialize the Python Azure Cosmos DB client
    self.cosmos_client = cosmos_client.CosmosClient(self.endpoint_uri,
                                                    {'masterKey': self.master_key})

    return self.cosmos_client
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/sina.py#L94-L121
def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """ """
    if 'news.sina.com.cn/zxt' in url:
        sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
        return

    vid = match1(url, r'vid=(\d+)')
    if vid is None:
        video_page = get_content(url)
        vid = hd_vid = match1(video_page, r'hd_vid\s*:\s*\'([^\']+)\'')
        if hd_vid == '0':
            vids = match1(video_page, r'[^\w]vid\s*:\s*\'([^\']+)\'').split('|')
            vid = vids[-1]

    if vid is None:
        vid = match1(video_page, r'vid:"?(\d+)"?')
    if vid:
        # title = match1(video_page, r'title\s*:\s*\'([^\']+)\'')
        sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
    else:
        vkey = match1(video_page, r'vkey\s*:\s*"([^"]+)"')
        if vkey is None:
            vid = match1(url, r'#(\d+)')
            sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
            return
        title = match1(video_page, r'title\s*:\s*"([^"]+)"')
        sina_download_by_vkey(vkey, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L524-L565
def start_proxy(self):
    """ """
    self._download_sql_proxy_if_needed()
    if self.sql_proxy_process:
        raise AirflowException("The sql proxy is already running: {}".format(
            self.sql_proxy_process))
    else:
        command_to_run = [self.sql_proxy_path]
        command_to_run.extend(self.command_line_parameters)
        try:
            self.log.info("Creating directory %s",
                          self.cloud_sql_proxy_socket_directory)
            os.makedirs(self.cloud_sql_proxy_socket_directory)
        except OSError:
            # Needed for python 2 compatibility (exists_ok missing)
            pass
        command_to_run.extend(self._get_credential_parameters())
        self.log.info("Running the command: `%s`", " ".join(command_to_run))
        self.sql_proxy_process = Popen(command_to_run,
                                       stdin=PIPE, stdout=PIPE, stderr=PIPE)
        self.log.info("The pid of cloud_sql_proxy: %s",
                      self.sql_proxy_process.pid)
        while True:
            line = self.sql_proxy_process.stderr.readline().decode('utf-8')
            return_code = self.sql_proxy_process.poll()
            if line == '' and return_code is not None:
                self.sql_proxy_process = None
                raise AirflowException(
                    "The cloud_sql_proxy finished early with return code {}!".format(
                        return_code))
            if line != '':
                self.log.info(line)
            if "googleapi: Error" in line or "invalid instance name:" in line:
                self.stop_proxy()
                raise AirflowException(
                    "Error when starting the cloud_sql_proxy {}!".format(line))
            if "Ready for new connections" in line:
                return
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_pubsub_hook.py#L196-L225
def delete_subscription(self, project, subscription, fail_if_not_exists=False):
    """ """
    service = self.get_conn()
    full_subscription = _format_subscription(project, subscription)
    try:
        service.projects().subscriptions().delete(
            subscription=full_subscription).execute(num_retries=self.num_retries)
    except HttpError as e:
        # Status code 404 indicates that the subscription was not found
        if str(e.resp['status']) == '404':
            message = 'Subscription does not exist: {}'.format(
                full_subscription)
            self.log.warning(message)
            if fail_if_not_exists:
                raise PubSubException(message)
        else:
            raise PubSubException(
                'Error deleting subscription {}'.format(full_subscription), e)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/configuration.py#L75-L93
def run_command(command):
    """ """
    process = subprocess.Popen(
        shlex.split(command),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True)
    output, stderr = [stream.decode(sys.getdefaultencoding(), 'ignore')
                      for stream in process.communicate()]

    if process.returncode != 0:
        raise AirflowConfigException(
            "Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
            .format(command, process.returncode, output, stderr)
        )

    return output
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/wasb_task_handler.py#L97-L121
def _read(self, ti, try_number, metadata=None):
    """ """
    # Explicitly getting the log relative path is necessary because the given
    # task instance might differ from the one passed in to the set_context
    # method.
    log_relative_path = self._render_filename(ti, try_number)
    remote_loc = os.path.join(self.remote_base, log_relative_path)

    if self.wasb_log_exists(remote_loc):
        # If the Wasb remote file exists, we do not fetch logs from the task
        # instance's local machine even if there are errors reading remote
        # logs, as the returned remote_log will contain the error messages.
        remote_log = self.wasb_read(remote_loc, return_error=True)
        log = '*** Reading remote log from {}.\n{}\n'.format(
            remote_loc, remote_log)
        return log, {'end_of_log': True}
    else:
        return super()._read(ti, try_number)
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L1216-L1226
def parse_host(host):
    """ """
    if re.match(r'^(\d+)$', host) is not None:
        return ("0.0.0.0", int(host))
    if re.match(r'^(\w+)://', host) is None:
        host = "//" + host
    o = parse.urlparse(host)
    hostname = o.hostname or "0.0.0.0"
    port = o.port or 0
    return (hostname, port)
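Illustrative inputs and the (hostname, port) tuples they parse to:

parse_host('8080')                 # -> ('0.0.0.0', 8080)   bare port number
parse_host('localhost:3128')       # -> ('localhost', 3128) '//' prepended, then urlparse
parse_host('http://127.0.0.1:80')  # -> ('127.0.0.1', 80)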
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/term.py#L3-L9
def get_terminal_size():
    """"""
    try:
        import fcntl, termios, struct  # fcntl module only available on Unix
        return struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
    except:
        return (40, 80)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/file_to_wasb.py#L56-L64
def execute(self, context):
    """"""
    hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
    self.log.info(
        'Uploading %s to wasb://%s as %s',
        self.file_path, self.container_name, self.blob_name
    )
    hook.load_file(self.file_path, self.container_name, self.blob_name, **self.load_options)
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/iqiyi.py#L158-L218
def download(self, **kwargs):
    """"""
    if 'json_output' in kwargs and kwargs['json_output']:
        json_output.output(self)
    elif 'info_only' in kwargs and kwargs['info_only']:
        if 'stream_id' in kwargs and kwargs['stream_id']:
            # Display the stream
            stream_id = kwargs['stream_id']
            if 'index' not in kwargs:
                self.p(stream_id)
            else:
                self.p_i(stream_id)
        else:
            # Display all available streams
            if 'index' not in kwargs:
                self.p([])
            else:
                stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] \
                    else self.streams_sorted[0]['itag']
                self.p_i(stream_id)
    else:
        if 'stream_id' in kwargs and kwargs['stream_id']:
            # Download the stream
            stream_id = kwargs['stream_id']
        else:
            # Download the stream with the best quality
            stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] \
                else self.streams_sorted[0]['itag']

        if 'index' not in kwargs:
            self.p(stream_id)
        else:
            self.p_i(stream_id)

        if stream_id in self.streams:
            urls = self.streams[stream_id]['src']
            ext = self.streams[stream_id]['container']
            total_size = self.streams[stream_id]['size']
        else:
            urls = self.dash_streams[stream_id]['src']
            ext = self.dash_streams[stream_id]['container']
            total_size = self.dash_streams[stream_id]['size']

        if not urls:
            log.wtf('[Failed] Cannot extract video source.')
        # For legacy main()
        # Here's the change!!
        download_url_ffmpeg(urls[0], self.title, 'mp4',
                            output_dir=kwargs['output_dir'],
                            merge=kwargs['merge'],
                            stream=False)

        if not kwargs['caption']:
            print('Skipping captions.')
            return
        for lang in self.caption_tracks:
            filename = '%s.%s.srt' % (get_filename(self.title), lang)
            print('Saving %s ... ' % filename, end="", flush=True)
            srt = self.caption_tracks[lang]
            with open(os.path.join(kwargs['output_dir'], filename),
                      'w', encoding='utf-8') as x:
                x.write(srt)
            print('Done.')
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_dataproc_hook.py#L243-L247
def wait(self, operation):
    """"""
    submitted = _DataProcOperation(self.get_conn(), operation,
                                   self.num_retries)
    submitted.wait_for_done()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L604-L610
def _heartbeat_manager(self):
    """ """
    if self._process and not self._process.is_alive() and not self.done:
        self.start()
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L224-L249
def match1(text, *patterns):
    """ """
    if len(patterns) == 1:
        pattern = patterns[0]
        match = re.search(pattern, text)
        if match:
            return match.group(1)
        else:
            return None
    else:
        ret = []
        for pattern in patterns:
            match = re.search(pattern, text)
            if match:
                ret.append(match.group(1))
        return ret
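A minimal usage sketch; the text and patterns are illustrative:

text = 'vid: "12345", title: "demo"'
match1(text, r'vid:\s*"(\d+)"')                       # -> '12345' (single pattern: first group or None)
match1(text, r'vid:\s*"(\d+)"', r'title:\s*"(\w+)"')  # -> ['12345', 'demo'] (multiple patterns: list)
match1(text, r'missing:\s*(\d+)')                     # -> None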
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L520-L543
def delete_objects(self, bucket, keys):
    """ """
    if not isinstance(keys, list):
        keys = [keys]

    delete_dict = {"Objects": [{"Key": k} for k in keys]}
    response = self.get_conn().delete_objects(Bucket=bucket, Delete=delete_dict)
    return response
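A hedged usage sketch; the bucket and keys are illustrative:

hook = S3Hook(aws_conn_id='aws_default')
hook.delete_objects(bucket='my-bucket', keys=['a.csv', 'b.csv'])  # batch delete
hook.delete_objects(bucket='my-bucket', keys='single.csv')        # a lone key is wrapped in a list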
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L494-L515
def are_dependents_done(self, session=None):
    """ """
    task = self.task

    if not task.downstream_task_ids:
        return True

    ti = session.query(func.count(TaskInstance.task_id)).filter(
        TaskInstance.dag_id == self.dag_id,
        TaskInstance.task_id.in_(task.downstream_task_ids),
        TaskInstance.execution_date == self.execution_date,
        TaskInstance.state == State.SUCCESS,
    )
    count = ti[0][0]
    return count == len(task.downstream_task_ids)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_speech_to_text_hook.py#L53-L73
def recognize_speech(self, config, audio, retry=None, timeout=None):
    """ """
    client = self.get_conn()
    response = client.recognize(config=config, audio=audio, retry=retry, timeout=timeout)
    self.log.info("Recognised speech: %s", response)
    return response
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L162-L169
def delete_database(self, database_name):
    """ """
    if database_name is None:
        raise AirflowBadRequest("Database name cannot be None.")

    self.get_conn().DeleteDatabase(get_database_link(database_name))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/hdfs_sensor.py#L57-L74
def poke(self, context):
    """ """
    sb = self.hook(self.hdfs_conn_id).get_conn()
    result = [f for f in sb.ls([self.filepath], include_toplevel=True)]
    result = self.filter_for_ignored_ext(result, self.ignored_ext,
                                         self.ignore_copying)
    result = self.filter_for_filesize(result, self.file_size)
    if self.be_empty:
        self.log.info('Poking for filepath %s to an empty directory', self.filepath)
        return len(result) == 1 and result[0]['path'] == self.filepath
    else:
        self.log.info('Poking for filepath %s to a non-empty directory', self.filepath)
        result.pop(0)
        return bool(result) and result[0]['file_type'] == 'f'
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/executors/__init__.py#L64-L95
def _get_executor(executor_name):
    """ """
    if executor_name == Executors.LocalExecutor:
        return LocalExecutor()
    elif executor_name == Executors.SequentialExecutor:
        return SequentialExecutor()
    elif executor_name == Executors.CeleryExecutor:
        from airflow.executors.celery_executor import CeleryExecutor
        return CeleryExecutor()
    elif executor_name == Executors.DaskExecutor:
        from airflow.executors.dask_executor import DaskExecutor
        return DaskExecutor()
    elif executor_name == Executors.KubernetesExecutor:
        from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
        return KubernetesExecutor()
    else:
        # Loading plugins
        _integrate_plugins()
        executor_path = executor_name.split('.')
        if len(executor_path) != 2:
            raise AirflowException(
                "Executor {0} not supported: "
                "please specify in format plugin_module.executor".format(executor_name))
        if executor_path[0] in globals():
            return globals()[executor_path[0]].__dict__[executor_path[1]]()
        else:
            raise AirflowException("Executor {0} not supported.".format(executor_name))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/file_task_handler.py#L170-L207
def _init_file(self, ti):
    """ """
    # To handle log writing when tasks are impersonated, the log files need to
    # be writable by the user that runs the Airflow command and the user
    # that is impersonated. This is mainly to handle corner cases with the
    # SubDagOperator. When the SubDagOperator is run, all of the operators
    # run under the impersonated user and create appropriate log files
    # as the impersonated user. However, if the user manually runs tasks
    # of the SubDagOperator through the UI, then the log files are created
    # by the user that runs the Airflow command. For example, the Airflow
    # run command may be run by the `airflow_sudoable` user, but the Airflow
    # tasks may be run by the `airflow` user. If the log files are not
    # writable by both users, then it's possible that re-running a task
    # via the UI (or vice versa) results in a permission error as the task
    # tries to write to a log file created by the other user.
    relative_path = self._render_filename(ti, ti.try_number)
    full_path = os.path.join(self.local_base, relative_path)
    directory = os.path.dirname(full_path)
    # Create the log file and give it group writable permissions
    # TODO(aoen): Make log dirs and logs globally readable for now since the SubDag
    # operator is not compatible with impersonation (e.g. if a Celery executor is used
    # for a SubDag operator and the SubDag operator has a different owner than the
    # parent DAG)
    if not os.path.exists(directory):
        # Create the directory as globally writable using custom mkdirs
        # as os.makedirs doesn't set mode properly.
        mkdirs(directory, 0o777)

    if not os.path.exists(full_path):
        open(full_path, "a").close()
        # TODO: Investigate using 444 instead of 666.
        os.chmod(full_path, 0o666)

    return full_path
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datastore_hook.py#L191-L211
def get_operation(self, name):
    """
    Get the status of a long-running Datastore operation by name.
    """
    conn = self.get_conn()

    resp = (conn
            .projects()
            .operations()
            .get(name=name)
            .execute(num_retries=self.num_retries))

    return resp
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/bin/cli.py#L567-L575
def dag_state(args):
    """
    Print the state of the DagRun for the given DAG and execution date,
    or None if no matching run exists.
    """
    dag = get_dag(args)
    dr = DagRun.find(dag.dag_id, execution_date=args.execution_date)
    print(dr[0].state if len(dr) > 0 else None)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/spark_submit_operator.py#L147-L176
def execute(self, context):
    """
    Call the SparkSubmitHook to run the provided spark job.
    """
    self._hook = SparkSubmitHook(
        conf=self._conf,
        conn_id=self._conn_id,
        files=self._files,
        py_files=self._py_files,
        archives=self._archives,
        driver_class_path=self._driver_class_path,
        jars=self._jars,
        java_class=self._java_class,
        packages=self._packages,
        exclude_packages=self._exclude_packages,
        repositories=self._repositories,
        total_executor_cores=self._total_executor_cores,
        executor_cores=self._executor_cores,
        executor_memory=self._executor_memory,
        driver_memory=self._driver_memory,
        keytab=self._keytab,
        principal=self._principal,
        name=self._name,
        num_executors=self._num_executors,
        application_args=self._application_args,
        env_vars=self._env_vars,
        verbose=self._verbose,
        spark_binary=self._spark_binary
    )
    self._hook.submit(self._application)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/utils/mlengine_operator_utils.py#L32-L246
def create_evaluate_ops(task_prefix,
                        data_format,
                        input_paths,
                        prediction_path,
                        metric_fn_and_keys,
                        validate_fn,
                        batch_prediction_job_id=None,
                        project_id=None,
                        region=None,
                        dataflow_options=None,
                        model_uri=None,
                        model_name=None,
                        version_name=None,
                        dag=None):
    """
    Create three chained operators that run an MLEngine batch prediction,
    summarize the predictions via a Dataflow job, and validate the summary.
    Returns (evaluate_prediction, evaluate_summary, evaluate_validation).
    """
    # Verify that task_prefix doesn't have any special characters except hyphen
    # '-', which is the only allowed non-alphanumeric character by Dataflow.
    if not re.match(r"^[a-zA-Z][-A-Za-z0-9]*$", task_prefix):
        raise AirflowException(
            "Malformed task_id for DataFlowPythonOperator (only alphanumeric "
            "and hyphens are allowed but got: " + task_prefix)

    metric_fn, metric_keys = metric_fn_and_keys
    if not callable(metric_fn):
        raise AirflowException("`metric_fn` param must be callable.")
    if not callable(validate_fn):
        raise AirflowException("`validate_fn` param must be callable.")

    if dag is not None and dag.default_args is not None:
        default_args = dag.default_args
        project_id = project_id or default_args.get('project_id')
        region = region or default_args.get('region')
        model_name = model_name or default_args.get('model_name')
        version_name = version_name or default_args.get('version_name')
        dataflow_options = dataflow_options or \
            default_args.get('dataflow_default_options')

    evaluate_prediction = MLEngineBatchPredictionOperator(
        task_id=(task_prefix + "-prediction"),
        project_id=project_id,
        job_id=batch_prediction_job_id,
        region=region,
        data_format=data_format,
        input_paths=input_paths,
        output_path=prediction_path,
        uri=model_uri,
        model_name=model_name,
        version_name=version_name,
        dag=dag)

    metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True))
    evaluate_summary = DataFlowPythonOperator(
        task_id=(task_prefix + "-summary"),
        py_options=["-m"],
        py_file="airflow.contrib.utils.mlengine_prediction_summary",
        dataflow_default_options=dataflow_options,
        options={
            "prediction_path": prediction_path,
            "metric_fn_encoded": metric_fn_encoded,
            "metric_keys": ','.join(metric_keys)
        },
        dag=dag)
    evaluate_summary.set_upstream(evaluate_prediction)

    def apply_validate_fn(*args, **kwargs):
        prediction_path = kwargs["templates_dict"]["prediction_path"]
        scheme, bucket, obj, _, _ = urlsplit(prediction_path)
        if scheme != "gs" or not bucket or not obj:
            raise ValueError(
                "Wrong format prediction_path: {}".format(prediction_path))
        summary = os.path.join(obj.strip("/"), "prediction.summary.json")
        gcs_hook = GoogleCloudStorageHook()
        summary = json.loads(gcs_hook.download(bucket, summary))
        return validate_fn(summary)

    evaluate_validation = PythonOperator(
        task_id=(task_prefix + "-validation"),
        python_callable=apply_validate_fn,
        provide_context=True,
        templates_dict={"prediction_path": prediction_path},
        dag=dag)
    evaluate_validation.set_upstream(evaluate_summary)

    return evaluate_prediction, evaluate_summary, evaluate_validation
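A usage sketch for wiring the three operators into a DAG. The metric and validation callables below are hypothetical examples, not part of the API; metric_fn must return a tuple aligned with metric_keys:

def error_and_squared_error(instance):
    # Hypothetical prediction-instance fields.
    label = float(instance['input_label'])
    pred = float(instance['predicted'])
    err = abs(label - pred)
    return (err, err * err)  # aligned with metric_keys below

def validate_err(summary):
    if summary['err'] > 0.2:
        raise ValueError('Too high err: {}'.format(summary['err']))
    return summary

prediction, summary, validation = create_evaluate_ops(
    task_prefix='eval-flights',
    data_format='TEXT',
    input_paths=['gs://my-bucket/eval-data*'],
    prediction_path='gs://my-bucket/eval-output',
    metric_fn_and_keys=(error_and_squared_error, ['err', 'mse']),
    validate_fn=validate_err,
    model_name='my_model',
    version_name='v1',
    dag=dag)  # assumes a DAG whose default_args carry project_id/region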
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L115-L133
def create_instance(self, body, project_id=None):
    """
    Create a new Cloud SQL instance and wait until the insert
    operation completes.
    """
    response = self.get_conn().instances().insert(
        project=project_id,
        body=body
    ).execute(num_retries=self.num_retries)
    operation_name = response["name"]
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/plugins_manager.py#L79-L98
def load_entrypoint_plugins(entry_points, airflow_plugins):
    """
    Load plugins from the given entry points, calling each valid plugin's
    on_load callback (if defined) and appending it to airflow_plugins.
    """
    for entry_point in entry_points:
        log.debug('Importing entry_point plugin %s', entry_point.name)
        plugin_obj = entry_point.load()
        if is_valid_plugin(plugin_obj, airflow_plugins):
            if callable(getattr(plugin_obj, 'on_load', None)):
                plugin_obj.on_load()
            airflow_plugins.append(plugin_obj)
    return airflow_plugins
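A sketch of how the entry points are typically gathered before being passed in; assuming 'airflow.plugins' is the setuptools entry-point group that plugin packages register under:

from pkg_resources import iter_entry_points

# Collect every installed plugin registered under the assumed group name.
plugins = load_entrypoint_plugins(iter_entry_points('airflow.plugins'), [])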
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L226-L237
def delete_document(self, document_id, database_name=None, collection_name=None):
    """
    Delete an existing document from a CosmosDB collection.
    """
    if document_id is None:
        raise AirflowBadRequest("Cannot delete a document without an id")

    self.get_conn().DeleteItem(
        get_document_link(
            self.__get_database_name(database_name),
            self.__get_collection_name(collection_name),
            document_id))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/api/experimental/endpoints.py#L325-L336
def create_pool():
    """Create a pool from the JSON request body."""
    params = request.get_json(force=True)
    try:
        pool = pool_api.create_pool(**params)
    except AirflowException as err:
        _log.error(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response
    else:
        return jsonify(pool.to_json())
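A hedged sketch of exercising this experimental endpoint over HTTP; host, port, and pool values are hypothetical, and the JSON keys are assumed to match the pool API's keyword arguments (name, slots, description):

import requests

resp = requests.post(
    'http://localhost:8080/api/experimental/pools',
    json={'name': 'etl_pool', 'slots': 4, 'description': 'ETL tasks'})
print(resp.status_code, resp.json())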
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/log.py#L94-L98
def wtf(message, exit_code=1):
    """Print a fatal error message in red bold and exit with the given
    code (pass exit_code=None to continue instead of exiting)."""
    print_log(message, RED, BOLD)
    if exit_code is not None:
        sys.exit(exit_code)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_compute_hook.py#L69-L97
def start_instance(self, zone, resource_id, project_id=None):
    """
    Start an existing Compute Engine instance and wait for the
    operation to complete.
    """
    response = self.get_conn().instances().start(
        project=project_id,
        zone=zone,
        instance=resource_id
    ).execute(num_retries=self.num_retries)
    try:
        operation_name = response["name"]
    except KeyError:
        raise AirflowException(
            "Wrong response '{}' returned - it should contain "
            "'name' field".format(response))
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name,
                                         zone=zone)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_compute_hook.py#L55-L66
def get_conn(self):
    """
    Retrieve the connection to Google Compute Engine, building and
    caching the services object on first use.
    """
    if not self._conn:
        http_authorized = self._authorize()
        self._conn = build('compute', self.api_version,
                           http=http_authorized, cache_discovery=False)
    return self._conn
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L329-L335
def undeflate(data):
    """
    Decompress raw DEFLATE data (a stream without the zlib header
    and checksum).
    """
    import zlib
    decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
    return decompressobj.decompress(data) + decompressobj.flush()
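A quick round trip to illustrate: raw DEFLATE output, produced here with a negative window size that mirrors the -zlib.MAX_WBITS on the decompress side, comes back intact:

import zlib

co = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)  # raw deflate, no header
raw = co.compress(b'hello world') + co.flush()
assert undeflate(raw) == b'hello world'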
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/presto_hook.py#L101-L118
def get_pandas_df(self, hql, parameters=None):
    """
    Get a pandas DataFrame from a Presto SQL query.
    """
    import pandas
    cursor = self.get_cursor()
    try:
        cursor.execute(self._strip_sql(hql), parameters)
        data = cursor.fetchall()
    except DatabaseError as e:
        raise PrestoException(self._get_pretty_exception_message(e))
    column_descriptions = cursor.description
    if data:
        df = pandas.DataFrame(data)
        df.columns = [c[0] for c in column_descriptions]
    else:
        df = pandas.DataFrame()
    return df
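A minimal usage sketch, assuming a configured Presto connection; the connection id and query are hypothetical:

from airflow.hooks.presto_hook import PrestoHook

hook = PrestoHook(presto_conn_id='presto_default')
df = hook.get_pandas_df('SELECT event_name, count(*) AS n FROM events GROUP BY 1')
print(df.head())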
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/executors/celery_executor.py#L158-L166
def _num_tasks_per_send_process(self, to_send_count):
    """
    How many Celery tasks each send process should publish: ceiling
    division of the task count by the sync parallelism, floored at one.
    """
    return max(1,
               int(math.ceil(1.0 * to_send_count / self._sync_parallelism)))
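The arithmetic is plain ceiling division with a floor of one task per process; a standalone check, with arbitrary example values:

import math

def num_tasks_per_send_process(to_send_count, sync_parallelism):
    # Mirrors the method above without the executor instance.
    return max(1, int(math.ceil(1.0 * to_send_count / sync_parallelism)))

assert num_tasks_per_send_process(100, 8) == 13  # ceil(100 / 8)
assert num_tasks_per_send_process(3, 16) == 1    # never below one per process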
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_data_lake_hook.py#L55-L68
def check_for_file(self, file_path):
    """
    Check whether a file exists in Azure Data Lake by globbing for
    exactly one match.
    """
    try:
        files = self.connection.glob(file_path, details=False,
                                     invalidate_cache=True)
        return len(files) == 1
    except FileNotFoundError:
        return False
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_mlengine_hook.py#L185-L200
def set_default_version(self, project_id, model_name, version_name):
    """
    Set a version as the default, to be used for requests that do not
    specify a version.
    """
    full_version_name = 'projects/{}/models/{}/versions/{}'.format(
        project_id, model_name, version_name)
    request = self._mlengine.projects().models().versions().setDefault(
        name=full_version_name, body={})

    try:
        response = request.execute()
        self.log.info('Successfully set version: %s to default', response)
        return response
    except HttpError as e:
        self.log.error('Something went wrong: %s', e)
        raise
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/api/experimental/endpoints.py#L198-L232
def task_instance_info(dag_id, execution_date, task_id):
    """
    Return a JSON payload with a task instance's public instance
    variables; the execution date must parse as an ISO-format datetime.
    """
    # Convert string datetime into actual datetime
    try:
        execution_date = timezone.parse(execution_date)
    except ValueError:
        error_message = (
            'Given execution date, {}, could not be identified '
            'as a date. Example date format: 2015-11-16T14:34:15+00:00'
            .format(execution_date))
        _log.info(error_message)
        response = jsonify({'error': error_message})
        response.status_code = 400
        return response

    try:
        info = get_task_instance(dag_id, task_id, execution_date)
    except AirflowException as err:
        _log.info(err)
        response = jsonify(error="{}".format(err))
        response.status_code = err.status_code
        return response

    # JSONify and return.
    fields = {k: str(v) for k, v in vars(info).items()
              if not k.startswith('_')}
    return jsonify(fields)
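A hedged sketch of calling this endpoint over HTTP; host, port, dag id, and task id are hypothetical, and the execution date must be URL-encoded since the handler parses it back with timezone.parse():

import requests
from urllib.parse import quote

date = quote('2015-11-16T14:34:15+00:00', safe='')
url = ('http://localhost:8080/api/experimental'
       '/dags/example_dag/dag_runs/{}/tasks/my_task'.format(date))
print(requests.get(url).json())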
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L171-L179
def delete_collection(self, collection_name, database_name=None):
    """
    Delete an existing collection in the CosmosDB database.
    """
    if collection_name is None:
        raise AirflowBadRequest("Collection name cannot be None.")

    self.get_conn().DeleteContainer(
        get_collection_link(self.__get_database_name(database_name),
                            collection_name))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_translate_hook.py#L45-L97
def translate(self, values, target_language, format_=None,
              source_language=None, model=None):
    """
    Translate a string or list of strings to the target language via
    the Google Cloud Translate client.
    """
    client = self.get_conn()

    return client.translate(
        values=values,
        target_language=target_language,
        format_=format_,
        source_language=source_language,
        model=model,
    )
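A minimal usage sketch, assuming a configured Google Cloud connection; the connection id, input string, and the shape of the returned dict are assumptions based on the underlying client:

from airflow.contrib.hooks.gcp_translate_hook import CloudTranslateHook

hook = CloudTranslateHook(gcp_conn_id='google_cloud_default')
result = hook.translate(values='ciao', target_language='en')
# For a single-string input the client is expected to return one dict.
print(result['translatedText'])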
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L384-L422
def load_bytes(self, bytes_data, key, bucket_name=None,
               replace=False, encrypt=False):
    """
    Load bytes data into S3 under the given key, optionally with
    server-side encryption; refuses to overwrite unless replace is True.
    """
    if not bucket_name:
        (bucket_name, key) = self.parse_s3_url(key)

    if not replace and self.check_for_key(key, bucket_name):
        raise ValueError("The key {key} already exists.".format(key=key))

    extra_args = {}
    if encrypt:
        extra_args['ServerSideEncryption'] = "AES256"

    filelike_buffer = BytesIO(bytes_data)

    client = self.get_conn()
    client.upload_fileobj(filelike_buffer, bucket_name, key,
                          ExtraArgs=extra_args)
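A usage sketch, assuming AWS credentials are configured; the connection id, bucket, and key are hypothetical. The bucket can be passed explicitly or packed into the key as a full s3:// URL:

from airflow.hooks.S3_hook import S3Hook

hook = S3Hook(aws_conn_id='aws_default')
hook.load_bytes(b'{"status": "ok"}', key='reports/latest.json',
                bucket_name='my-bucket', replace=True, encrypt=True)
# Equivalent, letting parse_s3_url split bucket and key:
hook.load_bytes(b'{"status": "ok"}', key='s3://my-bucket/reports/latest.json',
                replace=True)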