| partition (stringclasses, 3 values) | func_name (stringlengths 1–134) | docstring (stringlengths 1–46.9k) | path (stringlengths 4–223) | original_string (stringlengths 75–104k) | code (stringlengths 75–104k) | docstring_tokens (listlengths 1–1.97k) | repo (stringlengths 7–55) | language (stringclasses, 1 value) | url (stringlengths 87–315) | code_tokens (listlengths 19–28.4k) | sha (stringlengths 40–40) |
|---|---|---|---|---|---|---|---|---|---|---|---|
test
|
S3TaskHandler.s3_read
|
Returns the log found at the remote_log_location. Returns '' if no
logs are found or there is an error.
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param return_error: if True, returns a string error message if an
error occurs. Otherwise returns '' when an error occurs.
:type return_error: bool
|
airflow/utils/log/s3_task_handler.py
|
def s3_read(self, remote_log_location, return_error=False):
"""
Returns the log found at the remote_log_location. Returns '' if no
logs are found or there is an error.
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param return_error: if True, returns a string error message if an
error occurs. Otherwise returns '' when an error occurs.
:type return_error: bool
"""
try:
return self.hook.read_key(remote_log_location)
except Exception:
msg = 'Could not read logs from {}'.format(remote_log_location)
self.log.exception(msg)
# return error if needed
if return_error:
return msg
|
def s3_read(self, remote_log_location, return_error=False):
"""
Returns the log found at the remote_log_location. Returns '' if no
logs are found or there is an error.
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param return_error: if True, returns a string error message if an
error occurs. Otherwise returns '' when an error occurs.
:type return_error: bool
"""
try:
return self.hook.read_key(remote_log_location)
except Exception:
msg = 'Could not read logs from {}'.format(remote_log_location)
self.log.exception(msg)
# return error if needed
if return_error:
return msg
|
[
"Returns",
"the",
"log",
"found",
"at",
"the",
"remote_log_location",
".",
"Returns",
"if",
"no",
"logs",
"are",
"found",
"or",
"there",
"is",
"an",
"error",
".",
":",
"param",
"remote_log_location",
":",
"the",
"log",
"s",
"location",
"in",
"remote",
"storage",
":",
"type",
"remote_log_location",
":",
"str",
"(",
"path",
")",
":",
"param",
"return_error",
":",
"if",
"True",
"returns",
"a",
"string",
"error",
"message",
"if",
"an",
"error",
"occurs",
".",
"Otherwise",
"returns",
"when",
"an",
"error",
"occurs",
".",
":",
"type",
"return_error",
":",
"bool"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/s3_task_handler.py#L127-L144
|
[
"def",
"s3_read",
"(",
"self",
",",
"remote_log_location",
",",
"return_error",
"=",
"False",
")",
":",
"try",
":",
"return",
"self",
".",
"hook",
".",
"read_key",
"(",
"remote_log_location",
")",
"except",
"Exception",
":",
"msg",
"=",
"'Could not read logs from {}'",
".",
"format",
"(",
"remote_log_location",
")",
"self",
".",
"log",
".",
"exception",
"(",
"msg",
")",
"# return error if needed",
"if",
"return_error",
":",
"return",
"msg"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
S3TaskHandler.s3_write
|
Writes the log to the remote_log_location. Fails silently if no hook
was created.
:param log: the log to write to the remote_log_location
:type log: str
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:type append: bool
|
airflow/utils/log/s3_task_handler.py
|
def s3_write(self, log, remote_log_location, append=True):
"""
Writes the log to the remote_log_location. Fails silently if no hook
was created.
:param log: the log to write to the remote_log_location
:type log: str
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:type append: bool
"""
if append and self.s3_log_exists(remote_log_location):
old_log = self.s3_read(remote_log_location)
log = '\n'.join([old_log, log]) if old_log else log
try:
self.hook.load_string(
log,
key=remote_log_location,
replace=True,
encrypt=configuration.conf.getboolean('core', 'ENCRYPT_S3_LOGS'),
)
except Exception:
self.log.exception('Could not write logs to %s', remote_log_location)
|
def s3_write(self, log, remote_log_location, append=True):
"""
Writes the log to the remote_log_location. Fails silently if no hook
was created.
:param log: the log to write to the remote_log_location
:type log: str
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:type append: bool
"""
if append and self.s3_log_exists(remote_log_location):
old_log = self.s3_read(remote_log_location)
log = '\n'.join([old_log, log]) if old_log else log
try:
self.hook.load_string(
log,
key=remote_log_location,
replace=True,
encrypt=configuration.conf.getboolean('core', 'ENCRYPT_S3_LOGS'),
)
except Exception:
self.log.exception('Could not write logs to %s', remote_log_location)
|
[
"Writes",
"the",
"log",
"to",
"the",
"remote_log_location",
".",
"Fails",
"silently",
"if",
"no",
"hook",
"was",
"created",
".",
":",
"param",
"log",
":",
"the",
"log",
"to",
"write",
"to",
"the",
"remote_log_location",
":",
"type",
"log",
":",
"str",
":",
"param",
"remote_log_location",
":",
"the",
"log",
"s",
"location",
"in",
"remote",
"storage",
":",
"type",
"remote_log_location",
":",
"str",
"(",
"path",
")",
":",
"param",
"append",
":",
"if",
"False",
"any",
"existing",
"log",
"file",
"is",
"overwritten",
".",
"If",
"True",
"the",
"new",
"log",
"is",
"appended",
"to",
"any",
"existing",
"logs",
".",
":",
"type",
"append",
":",
"bool"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/s3_task_handler.py#L146-L170
|
[
"def",
"s3_write",
"(",
"self",
",",
"log",
",",
"remote_log_location",
",",
"append",
"=",
"True",
")",
":",
"if",
"append",
"and",
"self",
".",
"s3_log_exists",
"(",
"remote_log_location",
")",
":",
"old_log",
"=",
"self",
".",
"s3_read",
"(",
"remote_log_location",
")",
"log",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"old_log",
",",
"log",
"]",
")",
"if",
"old_log",
"else",
"log",
"try",
":",
"self",
".",
"hook",
".",
"load_string",
"(",
"log",
",",
"key",
"=",
"remote_log_location",
",",
"replace",
"=",
"True",
",",
"encrypt",
"=",
"configuration",
".",
"conf",
".",
"getboolean",
"(",
"'core'",
",",
"'ENCRYPT_S3_LOGS'",
")",
",",
")",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"exception",
"(",
"'Could not write logs to %s'",
",",
"remote_log_location",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
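The two rows above cover S3TaskHandler.s3_read and s3_write. A minimal standalone sketch of the append flow they implement (read any existing key, join with a newline, write back with replace semantics), using an in-memory dict as a stand-in for the S3 hook; the names below are illustrative, not part of Airflow's API:

fake_bucket = {}  # stand-in for S3 object storage

def s3_read(key):
    # mirrors S3TaskHandler.s3_read: return '' when nothing is stored
    return fake_bucket.get(key, '')

def s3_write(log, key, append=True):
    # mirrors S3TaskHandler.s3_write: optionally prepend any existing log
    if append:
        old_log = s3_read(key)
        log = '\n'.join([old_log, log]) if old_log else log
    fake_bucket[key] = log  # replace=True semantics

s3_write('first attempt', 'dag/task/1.log')
s3_write('second attempt', 'dag/task/1.log')
print(s3_read('dag/task/1.log'))  # first attempt\nsecond attempt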
test
|
WorkerConfiguration._get_init_containers
|
When using git to retrieve the DAGs, use the GitSync Init Container
|
airflow/contrib/kubernetes/worker_configuration.py
|
def _get_init_containers(self):
"""When using git to retrieve the DAGs, use the GitSync Init Container"""
# If we're using volume claims to mount the dags, no init container is needed
if self.kube_config.dags_volume_claim or \
self.kube_config.dags_volume_host or self.kube_config.dags_in_image:
return []
# Otherwise, define a git-sync init container
init_environment = [{
'name': 'GIT_SYNC_REPO',
'value': self.kube_config.git_repo
}, {
'name': 'GIT_SYNC_BRANCH',
'value': self.kube_config.git_branch
}, {
'name': 'GIT_SYNC_ROOT',
'value': self.kube_config.git_sync_root
}, {
'name': 'GIT_SYNC_DEST',
'value': self.kube_config.git_sync_dest
}, {
'name': 'GIT_SYNC_DEPTH',
'value': '1'
}, {
'name': 'GIT_SYNC_ONE_TIME',
'value': 'true'
}]
if self.kube_config.git_user:
init_environment.append({
'name': 'GIT_SYNC_USERNAME',
'value': self.kube_config.git_user
})
if self.kube_config.git_password:
init_environment.append({
'name': 'GIT_SYNC_PASSWORD',
'value': self.kube_config.git_password
})
volume_mounts = [{
'mountPath': self.kube_config.git_sync_root,
'name': self.dags_volume_name,
'readOnly': False
}]
if self.kube_config.git_ssh_key_secret_name:
volume_mounts.append({
'name': self.git_sync_ssh_secret_volume_name,
'mountPath': '/etc/git-secret/ssh',
'subPath': 'ssh'
})
init_environment.extend([
{
'name': 'GIT_SSH_KEY_FILE',
'value': '/etc/git-secret/ssh'
},
{
'name': 'GIT_SYNC_SSH',
'value': 'true'
}])
if self.kube_config.git_ssh_known_hosts_configmap_name:
volume_mounts.append({
'name': self.git_sync_ssh_known_hosts_volume_name,
'mountPath': '/etc/git-secret/known_hosts',
'subPath': 'known_hosts'
})
init_environment.extend([
{
'name': 'GIT_KNOWN_HOSTS',
'value': 'true'
},
{
'name': 'GIT_SSH_KNOWN_HOSTS_FILE',
'value': '/etc/git-secret/known_hosts'
}
])
else:
init_environment.append({
'name': 'GIT_KNOWN_HOSTS',
'value': 'false'
})
return [{
'name': self.kube_config.git_sync_init_container_name,
'image': self.kube_config.git_sync_container,
'securityContext': {'runAsUser': 65533}, # git-sync user
'env': init_environment,
'volumeMounts': volume_mounts
}]
|
def _get_init_containers(self):
"""When using git to retrieve the DAGs, use the GitSync Init Container"""
# If we're using volume claims to mount the dags, no init container is needed
if self.kube_config.dags_volume_claim or \
self.kube_config.dags_volume_host or self.kube_config.dags_in_image:
return []
# Otherwise, define a git-sync init container
init_environment = [{
'name': 'GIT_SYNC_REPO',
'value': self.kube_config.git_repo
}, {
'name': 'GIT_SYNC_BRANCH',
'value': self.kube_config.git_branch
}, {
'name': 'GIT_SYNC_ROOT',
'value': self.kube_config.git_sync_root
}, {
'name': 'GIT_SYNC_DEST',
'value': self.kube_config.git_sync_dest
}, {
'name': 'GIT_SYNC_DEPTH',
'value': '1'
}, {
'name': 'GIT_SYNC_ONE_TIME',
'value': 'true'
}]
if self.kube_config.git_user:
init_environment.append({
'name': 'GIT_SYNC_USERNAME',
'value': self.kube_config.git_user
})
if self.kube_config.git_password:
init_environment.append({
'name': 'GIT_SYNC_PASSWORD',
'value': self.kube_config.git_password
})
volume_mounts = [{
'mountPath': self.kube_config.git_sync_root,
'name': self.dags_volume_name,
'readOnly': False
}]
if self.kube_config.git_ssh_key_secret_name:
volume_mounts.append({
'name': self.git_sync_ssh_secret_volume_name,
'mountPath': '/etc/git-secret/ssh',
'subPath': 'ssh'
})
init_environment.extend([
{
'name': 'GIT_SSH_KEY_FILE',
'value': '/etc/git-secret/ssh'
},
{
'name': 'GIT_SYNC_SSH',
'value': 'true'
}])
if self.kube_config.git_ssh_known_hosts_configmap_name:
volume_mounts.append({
'name': self.git_sync_ssh_known_hosts_volume_name,
'mountPath': '/etc/git-secret/known_hosts',
'subPath': 'known_hosts'
})
init_environment.extend([
{
'name': 'GIT_KNOWN_HOSTS',
'value': 'true'
},
{
'name': 'GIT_SSH_KNOWN_HOSTS_FILE',
'value': '/etc/git-secret/known_hosts'
}
])
else:
init_environment.append({
'name': 'GIT_KNOWN_HOSTS',
'value': 'false'
})
return [{
'name': self.kube_config.git_sync_init_container_name,
'image': self.kube_config.git_sync_container,
'securityContext': {'runAsUser': 65533}, # git-sync user
'env': init_environment,
'volumeMounts': volume_mounts
}]
|
[
"When",
"using",
"git",
"to",
"retrieve",
"the",
"DAGs",
"use",
"the",
"GitSync",
"Init",
"Container"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/kubernetes/worker_configuration.py#L45-L131
|
[
"def",
"_get_init_containers",
"(",
"self",
")",
":",
"# If we're using volume claims to mount the dags, no init container is needed",
"if",
"self",
".",
"kube_config",
".",
"dags_volume_claim",
"or",
"self",
".",
"kube_config",
".",
"dags_volume_host",
"or",
"self",
".",
"kube_config",
".",
"dags_in_image",
":",
"return",
"[",
"]",
"# Otherwise, define a git-sync init container",
"init_environment",
"=",
"[",
"{",
"'name'",
":",
"'GIT_SYNC_REPO'",
",",
"'value'",
":",
"self",
".",
"kube_config",
".",
"git_repo",
"}",
",",
"{",
"'name'",
":",
"'GIT_SYNC_BRANCH'",
",",
"'value'",
":",
"self",
".",
"kube_config",
".",
"git_branch",
"}",
",",
"{",
"'name'",
":",
"'GIT_SYNC_ROOT'",
",",
"'value'",
":",
"self",
".",
"kube_config",
".",
"git_sync_root",
"}",
",",
"{",
"'name'",
":",
"'GIT_SYNC_DEST'",
",",
"'value'",
":",
"self",
".",
"kube_config",
".",
"git_sync_dest",
"}",
",",
"{",
"'name'",
":",
"'GIT_SYNC_DEPTH'",
",",
"'value'",
":",
"'1'",
"}",
",",
"{",
"'name'",
":",
"'GIT_SYNC_ONE_TIME'",
",",
"'value'",
":",
"'true'",
"}",
"]",
"if",
"self",
".",
"kube_config",
".",
"git_user",
":",
"init_environment",
".",
"append",
"(",
"{",
"'name'",
":",
"'GIT_SYNC_USERNAME'",
",",
"'value'",
":",
"self",
".",
"kube_config",
".",
"git_user",
"}",
")",
"if",
"self",
".",
"kube_config",
".",
"git_password",
":",
"init_environment",
".",
"append",
"(",
"{",
"'name'",
":",
"'GIT_SYNC_PASSWORD'",
",",
"'value'",
":",
"self",
".",
"kube_config",
".",
"git_password",
"}",
")",
"volume_mounts",
"=",
"[",
"{",
"'mountPath'",
":",
"self",
".",
"kube_config",
".",
"git_sync_root",
",",
"'name'",
":",
"self",
".",
"dags_volume_name",
",",
"'readOnly'",
":",
"False",
"}",
"]",
"if",
"self",
".",
"kube_config",
".",
"git_ssh_key_secret_name",
":",
"volume_mounts",
".",
"append",
"(",
"{",
"'name'",
":",
"self",
".",
"git_sync_ssh_secret_volume_name",
",",
"'mountPath'",
":",
"'/etc/git-secret/ssh'",
",",
"'subPath'",
":",
"'ssh'",
"}",
")",
"init_environment",
".",
"extend",
"(",
"[",
"{",
"'name'",
":",
"'GIT_SSH_KEY_FILE'",
",",
"'value'",
":",
"'/etc/git-secret/ssh'",
"}",
",",
"{",
"'name'",
":",
"'GIT_SYNC_SSH'",
",",
"'value'",
":",
"'true'",
"}",
"]",
")",
"if",
"self",
".",
"kube_config",
".",
"git_ssh_known_hosts_configmap_name",
":",
"volume_mounts",
".",
"append",
"(",
"{",
"'name'",
":",
"self",
".",
"git_sync_ssh_known_hosts_volume_name",
",",
"'mountPath'",
":",
"'/etc/git-secret/known_hosts'",
",",
"'subPath'",
":",
"'known_hosts'",
"}",
")",
"init_environment",
".",
"extend",
"(",
"[",
"{",
"'name'",
":",
"'GIT_KNOWN_HOSTS'",
",",
"'value'",
":",
"'true'",
"}",
",",
"{",
"'name'",
":",
"'GIT_SSH_KNOWN_HOSTS_FILE'",
",",
"'value'",
":",
"'/etc/git-secret/known_hosts'",
"}",
"]",
")",
"else",
":",
"init_environment",
".",
"append",
"(",
"{",
"'name'",
":",
"'GIT_KNOWN_HOSTS'",
",",
"'value'",
":",
"'false'",
"}",
")",
"return",
"[",
"{",
"'name'",
":",
"self",
".",
"kube_config",
".",
"git_sync_init_container_name",
",",
"'image'",
":",
"self",
".",
"kube_config",
".",
"git_sync_container",
",",
"'securityContext'",
":",
"{",
"'runAsUser'",
":",
"65533",
"}",
",",
"# git-sync user",
"'env'",
":",
"init_environment",
",",
"'volumeMounts'",
":",
"volume_mounts",
"}",
"]"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
WorkerConfiguration._get_environment
|
Defines any necessary environment variables for the pod executor
|
airflow/contrib/kubernetes/worker_configuration.py
|
def _get_environment(self):
"""Defines any necessary environment variables for the pod executor"""
env = {}
for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars):
env[env_var_name] = env_var_val
env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor"
if self.kube_config.airflow_configmap:
env['AIRFLOW_HOME'] = self.worker_airflow_home
env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags
if (not self.kube_config.airflow_configmap and
'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets):
env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get("core", "SQL_ALCHEMY_CONN")
if self.kube_config.git_dags_folder_mount_point:
# /root/airflow/dags/repo/dags
dag_volume_mount_path = os.path.join(
self.kube_config.git_dags_folder_mount_point,
self.kube_config.git_sync_dest, # repo
self.kube_config.git_subpath # dags
)
env['AIRFLOW__CORE__DAGS_FOLDER'] = dag_volume_mount_path
return env
|
def _get_environment(self):
"""Defines any necessary environment variables for the pod executor"""
env = {}
for env_var_name, env_var_val in six.iteritems(self.kube_config.kube_env_vars):
env[env_var_name] = env_var_val
env["AIRFLOW__CORE__EXECUTOR"] = "LocalExecutor"
if self.kube_config.airflow_configmap:
env['AIRFLOW_HOME'] = self.worker_airflow_home
env['AIRFLOW__CORE__DAGS_FOLDER'] = self.worker_airflow_dags
if (not self.kube_config.airflow_configmap and
'AIRFLOW__CORE__SQL_ALCHEMY_CONN' not in self.kube_config.kube_secrets):
env['AIRFLOW__CORE__SQL_ALCHEMY_CONN'] = conf.get("core", "SQL_ALCHEMY_CONN")
if self.kube_config.git_dags_folder_mount_point:
# /root/airflow/dags/repo/dags
dag_volume_mount_path = os.path.join(
self.kube_config.git_dags_folder_mount_point,
self.kube_config.git_sync_dest, # repo
self.kube_config.git_subpath # dags
)
env['AIRFLOW__CORE__DAGS_FOLDER'] = dag_volume_mount_path
return env
|
[
"Defines",
"any",
"necessary",
"environment",
"variables",
"for",
"the",
"pod",
"executor"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/kubernetes/worker_configuration.py#L133-L156
|
[
"def",
"_get_environment",
"(",
"self",
")",
":",
"env",
"=",
"{",
"}",
"for",
"env_var_name",
",",
"env_var_val",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"kube_config",
".",
"kube_env_vars",
")",
":",
"env",
"[",
"env_var_name",
"]",
"=",
"env_var_val",
"env",
"[",
"\"AIRFLOW__CORE__EXECUTOR\"",
"]",
"=",
"\"LocalExecutor\"",
"if",
"self",
".",
"kube_config",
".",
"airflow_configmap",
":",
"env",
"[",
"'AIRFLOW_HOME'",
"]",
"=",
"self",
".",
"worker_airflow_home",
"env",
"[",
"'AIRFLOW__CORE__DAGS_FOLDER'",
"]",
"=",
"self",
".",
"worker_airflow_dags",
"if",
"(",
"not",
"self",
".",
"kube_config",
".",
"airflow_configmap",
"and",
"'AIRFLOW__CORE__SQL_ALCHEMY_CONN'",
"not",
"in",
"self",
".",
"kube_config",
".",
"kube_secrets",
")",
":",
"env",
"[",
"'AIRFLOW__CORE__SQL_ALCHEMY_CONN'",
"]",
"=",
"conf",
".",
"get",
"(",
"\"core\"",
",",
"\"SQL_ALCHEMY_CONN\"",
")",
"if",
"self",
".",
"kube_config",
".",
"git_dags_folder_mount_point",
":",
"# /root/airflow/dags/repo/dags",
"dag_volume_mount_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"kube_config",
".",
"git_dags_folder_mount_point",
",",
"self",
".",
"kube_config",
".",
"git_sync_dest",
",",
"# repo",
"self",
".",
"kube_config",
".",
"git_subpath",
"# dags",
")",
"env",
"[",
"'AIRFLOW__CORE__DAGS_FOLDER'",
"]",
"=",
"dag_volume_mount_path",
"return",
"env"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
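The _get_environment row above composes AIRFLOW__CORE__DAGS_FOLDER from the git-sync mount point, destination, and subpath. A small sketch of just that composition, with assumed example values (the real values come from kube_config):

import os

# assumed example values for the kube_config fields read in _get_environment
git_dags_folder_mount_point = '/root/airflow/dags'
git_sync_dest = 'repo'
git_subpath = 'dags'

dag_volume_mount_path = os.path.join(
    git_dags_folder_mount_point, git_sync_dest, git_subpath)
print(dag_volume_mount_path)  # /root/airflow/dags/repo/dags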
test
|
WorkerConfiguration._get_secrets
|
Defines any necessary secrets for the pod executor
|
airflow/contrib/kubernetes/worker_configuration.py
|
def _get_secrets(self):
"""Defines any necessary secrets for the pod executor"""
worker_secrets = []
for env_var_name, obj_key_pair in six.iteritems(self.kube_config.kube_secrets):
k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=')
worker_secrets.append(
Secret('env', env_var_name, k8s_secret_obj, k8s_secret_key)
)
if self.kube_config.env_from_secret_ref:
for secret_ref in self.kube_config.env_from_secret_ref.split(','):
worker_secrets.append(
Secret('env', None, secret_ref)
)
return worker_secrets
|
def _get_secrets(self):
"""Defines any necessary secrets for the pod executor"""
worker_secrets = []
for env_var_name, obj_key_pair in six.iteritems(self.kube_config.kube_secrets):
k8s_secret_obj, k8s_secret_key = obj_key_pair.split('=')
worker_secrets.append(
Secret('env', env_var_name, k8s_secret_obj, k8s_secret_key)
)
if self.kube_config.env_from_secret_ref:
for secret_ref in self.kube_config.env_from_secret_ref.split(','):
worker_secrets.append(
Secret('env', None, secret_ref)
)
return worker_secrets
|
[
"Defines",
"any",
"necessary",
"secrets",
"for",
"the",
"pod",
"executor"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/kubernetes/worker_configuration.py#L164-L180
|
[
"def",
"_get_secrets",
"(",
"self",
")",
":",
"worker_secrets",
"=",
"[",
"]",
"for",
"env_var_name",
",",
"obj_key_pair",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"kube_config",
".",
"kube_secrets",
")",
":",
"k8s_secret_obj",
",",
"k8s_secret_key",
"=",
"obj_key_pair",
".",
"split",
"(",
"'='",
")",
"worker_secrets",
".",
"append",
"(",
"Secret",
"(",
"'env'",
",",
"env_var_name",
",",
"k8s_secret_obj",
",",
"k8s_secret_key",
")",
")",
"if",
"self",
".",
"kube_config",
".",
"env_from_secret_ref",
":",
"for",
"secret_ref",
"in",
"self",
".",
"kube_config",
".",
"env_from_secret_ref",
".",
"split",
"(",
"','",
")",
":",
"worker_secrets",
".",
"append",
"(",
"Secret",
"(",
"'env'",
",",
"None",
",",
"secret_ref",
")",
")",
"return",
"worker_secrets"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
WorkerConfiguration._get_security_context
|
Defines the security context
|
airflow/contrib/kubernetes/worker_configuration.py
|
def _get_security_context(self):
"""Defines the security context"""
security_context = {}
if self.kube_config.worker_run_as_user:
security_context['runAsUser'] = self.kube_config.worker_run_as_user
if self.kube_config.worker_fs_group:
security_context['fsGroup'] = self.kube_config.worker_fs_group
# set fs_group to 65533 if not explicitly specified and using git ssh keypair auth
if self.kube_config.git_ssh_key_secret_name and security_context.get('fsGroup') is None:
security_context['fsGroup'] = 65533
return security_context
|
def _get_security_context(self):
"""Defines the security context"""
security_context = {}
if self.kube_config.worker_run_as_user:
security_context['runAsUser'] = self.kube_config.worker_run_as_user
if self.kube_config.worker_fs_group:
security_context['fsGroup'] = self.kube_config.worker_fs_group
# set fs_group to 65533 if not explicitly specified and using git ssh keypair auth
if self.kube_config.git_ssh_key_secret_name and security_context.get('fsGroup') is None:
security_context['fsGroup'] = 65533
return security_context
|
[
"Defines",
"the",
"security",
"context"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/kubernetes/worker_configuration.py#L188-L202
|
[
"def",
"_get_security_context",
"(",
"self",
")",
":",
"security_context",
"=",
"{",
"}",
"if",
"self",
".",
"kube_config",
".",
"worker_run_as_user",
":",
"security_context",
"[",
"'runAsUser'",
"]",
"=",
"self",
".",
"kube_config",
".",
"worker_run_as_user",
"if",
"self",
".",
"kube_config",
".",
"worker_fs_group",
":",
"security_context",
"[",
"'fsGroup'",
"]",
"=",
"self",
".",
"kube_config",
".",
"worker_fs_group",
"# set fs_group to 65533 if not explicitly specified and using git ssh keypair auth",
"if",
"self",
".",
"kube_config",
".",
"git_ssh_key_secret_name",
"and",
"security_context",
".",
"get",
"(",
"'fsGroup'",
")",
"is",
"None",
":",
"security_context",
"[",
"'fsGroup'",
"]",
"=",
"65533",
"return",
"security_context"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
QuboleHook.kill
|
Kill (cancel) a Qubole command
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: response from Qubole
|
airflow/contrib/hooks/qubole_hook.py
|
def kill(self, ti):
"""
Kill (cancel) a Qubole command
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: response from Qubole
"""
if self.cmd is None:
if not ti and not self.task_instance:
raise Exception("Unable to cancel Qubole Command, context is unavailable!")
elif not ti:
ti = self.task_instance
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.log.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
|
def kill(self, ti):
"""
Kill (cancel) a Qubole command
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: response from Qubole
"""
if self.cmd is None:
if not ti and not self.task_instance:
raise Exception("Unable to cancel Qubole Command, context is unavailable!")
elif not ti:
ti = self.task_instance
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.log.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
|
[
"Kill",
"(",
"cancel",
")",
"a",
"Qubole",
"command",
":",
"param",
"ti",
":",
"Task",
"Instance",
"of",
"the",
"dag",
"used",
"to",
"determine",
"the",
"Quboles",
"command",
"id",
":",
"return",
":",
"response",
"from",
"Qubole"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L147-L162
|
[
"def",
"kill",
"(",
"self",
",",
"ti",
")",
":",
"if",
"self",
".",
"cmd",
"is",
"None",
":",
"if",
"not",
"ti",
"and",
"not",
"self",
".",
"task_instance",
":",
"raise",
"Exception",
"(",
"\"Unable to cancel Qubole Command, context is unavailable!\"",
")",
"elif",
"not",
"ti",
":",
"ti",
"=",
"self",
".",
"task_instance",
"cmd_id",
"=",
"ti",
".",
"xcom_pull",
"(",
"key",
"=",
"\"qbol_cmd_id\"",
",",
"task_ids",
"=",
"ti",
".",
"task_id",
")",
"self",
".",
"cmd",
"=",
"self",
".",
"cls",
".",
"find",
"(",
"cmd_id",
")",
"if",
"self",
".",
"cls",
"and",
"self",
".",
"cmd",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Sending KILL signal to Qubole Command Id: %s'",
",",
"self",
".",
"cmd",
".",
"id",
")",
"self",
".",
"cmd",
".",
"cancel",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
QuboleHook.get_results
|
Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the dag, used to determine the Quboles command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
|
airflow/contrib/hooks/qubole_hook.py
|
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the dag, used to determine the Quboles command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(
configuration.conf.get('core', 'BASE_LOG_FOLDER')
)
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
configuration.mkdir_p(resultpath)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name
|
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the dag, used to determine the Quboles command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(
configuration.conf.get('core', 'BASE_LOG_FOLDER')
)
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
configuration.mkdir_p(resultpath)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name
|
[
"Get",
"results",
"(",
"or",
"just",
"s3",
"locations",
")",
"of",
"a",
"command",
"from",
"Qubole",
"and",
"save",
"into",
"a",
"file",
":",
"param",
"ti",
":",
"Task",
"Instance",
"of",
"the",
"dag",
"used",
"to",
"determine",
"the",
"Quboles",
"command",
"id",
":",
"param",
"fp",
":",
"Optional",
"file",
"pointer",
"will",
"create",
"one",
"and",
"return",
"if",
"None",
"passed",
":",
"param",
"inline",
":",
"True",
"to",
"download",
"actual",
"results",
"False",
"to",
"get",
"s3",
"locations",
"only",
":",
"param",
"delim",
":",
"Replaces",
"the",
"CTL",
"-",
"A",
"chars",
"with",
"the",
"given",
"delim",
"defaults",
"to",
":",
"param",
"fetch",
":",
"when",
"inline",
"is",
"True",
"get",
"results",
"directly",
"from",
"s3",
"(",
"if",
"large",
")",
":",
"return",
":",
"file",
"location",
"containing",
"actual",
"results",
"or",
"s3",
"locations",
"of",
"results"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L164-L190
|
[
"def",
"get_results",
"(",
"self",
",",
"ti",
"=",
"None",
",",
"fp",
"=",
"None",
",",
"inline",
"=",
"True",
",",
"delim",
"=",
"None",
",",
"fetch",
"=",
"True",
")",
":",
"if",
"fp",
"is",
"None",
":",
"iso",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
"logpath",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"configuration",
".",
"conf",
".",
"get",
"(",
"'core'",
",",
"'BASE_LOG_FOLDER'",
")",
")",
"resultpath",
"=",
"logpath",
"+",
"'/'",
"+",
"self",
".",
"dag_id",
"+",
"'/'",
"+",
"self",
".",
"task_id",
"+",
"'/results'",
"configuration",
".",
"mkdir_p",
"(",
"resultpath",
")",
"fp",
"=",
"open",
"(",
"resultpath",
"+",
"'/'",
"+",
"iso",
",",
"'wb'",
")",
"if",
"self",
".",
"cmd",
"is",
"None",
":",
"cmd_id",
"=",
"ti",
".",
"xcom_pull",
"(",
"key",
"=",
"\"qbol_cmd_id\"",
",",
"task_ids",
"=",
"self",
".",
"task_id",
")",
"self",
".",
"cmd",
"=",
"self",
".",
"cls",
".",
"find",
"(",
"cmd_id",
")",
"self",
".",
"cmd",
".",
"get_results",
"(",
"fp",
",",
"inline",
",",
"delim",
",",
"fetch",
")",
"fp",
".",
"flush",
"(",
")",
"fp",
".",
"close",
"(",
")",
"return",
"fp",
".",
"name"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
QuboleHook.get_log
|
Get Logs of a command from Qubole
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: command log as text
|
airflow/contrib/hooks/qubole_hook.py
|
def get_log(self, ti):
"""
Get Logs of a command from Qubole
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: command log as text
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_log_id(self.cls, cmd_id)
|
def get_log(self, ti):
"""
Get Logs of a command from Qubole
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: command log as text
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_log_id(self.cls, cmd_id)
|
[
"Get",
"Logs",
"of",
"a",
"command",
"from",
"Qubole",
":",
"param",
"ti",
":",
"Task",
"Instance",
"of",
"the",
"dag",
"used",
"to",
"determine",
"the",
"Quboles",
"command",
"id",
":",
"return",
":",
"command",
"log",
"as",
"text"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L192-L200
|
[
"def",
"get_log",
"(",
"self",
",",
"ti",
")",
":",
"if",
"self",
".",
"cmd",
"is",
"None",
":",
"cmd_id",
"=",
"ti",
".",
"xcom_pull",
"(",
"key",
"=",
"\"qbol_cmd_id\"",
",",
"task_ids",
"=",
"self",
".",
"task_id",
")",
"Command",
".",
"get_log_id",
"(",
"self",
".",
"cls",
",",
"cmd_id",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
QuboleHook.get_jobs_id
|
Get jobs associated with a Qubole commands
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: Job information associated with command
|
airflow/contrib/hooks/qubole_hook.py
|
def get_jobs_id(self, ti):
"""
Get jobs associated with a Qubole commands
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: Job information associated with command
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_jobs_id(self.cls, cmd_id)
|
def get_jobs_id(self, ti):
"""
Get jobs associated with a Qubole commands
:param ti: Task Instance of the dag, used to determine the Quboles command id
:return: Job information associated with command
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_jobs_id(self.cls, cmd_id)
|
[
"Get",
"jobs",
"associated",
"with",
"a",
"Qubole",
"commands",
":",
"param",
"ti",
":",
"Task",
"Instance",
"of",
"the",
"dag",
"used",
"to",
"determine",
"the",
"Quboles",
"command",
"id",
":",
"return",
":",
"Job",
"information",
"associated",
"with",
"command"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L202-L210
|
[
"def",
"get_jobs_id",
"(",
"self",
",",
"ti",
")",
":",
"if",
"self",
".",
"cmd",
"is",
"None",
":",
"cmd_id",
"=",
"ti",
".",
"xcom_pull",
"(",
"key",
"=",
"\"qbol_cmd_id\"",
",",
"task_ids",
"=",
"self",
".",
"task_id",
")",
"Command",
".",
"get_jobs_id",
"(",
"self",
".",
"cls",
",",
"cmd_id",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
QuboleHook.get_extra_links
|
Get link to qubole command result page.
:param operator: operator
:param dttm: datetime
:return: url link
|
airflow/contrib/hooks/qubole_hook.py
|
def get_extra_links(self, operator, dttm):
"""
Get link to qubole command result page.
:param operator: operator
:param dttm: datetime
:return: url link
"""
conn = BaseHook.get_connection(operator.kwargs['qubole_conn_id'])
if conn and conn.host:
host = re.sub(r'api$', 'v2/analyze?command_id=', conn.host)
else:
host = 'https://api.qubole.com/v2/analyze?command_id='
ti = TaskInstance(task=operator, execution_date=dttm)
qds_command_id = ti.xcom_pull(task_ids=operator.task_id, key='qbol_cmd_id')
url = host + str(qds_command_id) if qds_command_id else ''
return url
|
def get_extra_links(self, operator, dttm):
"""
Get link to qubole command result page.
:param operator: operator
:param dttm: datetime
:return: url link
"""
conn = BaseHook.get_connection(operator.kwargs['qubole_conn_id'])
if conn and conn.host:
host = re.sub(r'api$', 'v2/analyze?command_id=', conn.host)
else:
host = 'https://api.qubole.com/v2/analyze?command_id='
ti = TaskInstance(task=operator, execution_date=dttm)
qds_command_id = ti.xcom_pull(task_ids=operator.task_id, key='qbol_cmd_id')
url = host + str(qds_command_id) if qds_command_id else ''
return url
|
[
"Get",
"link",
"to",
"qubole",
"command",
"result",
"page",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/qubole_hook.py#L212-L229
|
[
"def",
"get_extra_links",
"(",
"self",
",",
"operator",
",",
"dttm",
")",
":",
"conn",
"=",
"BaseHook",
".",
"get_connection",
"(",
"operator",
".",
"kwargs",
"[",
"'qubole_conn_id'",
"]",
")",
"if",
"conn",
"and",
"conn",
".",
"host",
":",
"host",
"=",
"re",
".",
"sub",
"(",
"r'api$'",
",",
"'v2/analyze?command_id='",
",",
"conn",
".",
"host",
")",
"else",
":",
"host",
"=",
"'https://api.qubole.com/v2/analyze?command_id='",
"ti",
"=",
"TaskInstance",
"(",
"task",
"=",
"operator",
",",
"execution_date",
"=",
"dttm",
")",
"qds_command_id",
"=",
"ti",
".",
"xcom_pull",
"(",
"task_ids",
"=",
"operator",
".",
"task_id",
",",
"key",
"=",
"'qbol_cmd_id'",
")",
"url",
"=",
"host",
"+",
"str",
"(",
"qds_command_id",
")",
"if",
"qds_command_id",
"else",
"''",
"return",
"url"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
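QuboleHook.get_extra_links above rewrites a connection host ending in 'api' into the analyze-page URL prefix before appending the pulled command id. A standalone sketch of that re.sub step, with a hypothetical host value:

import re

host = 'https://us.qubole.com/api'  # hypothetical conn.host
link_prefix = re.sub(r'api$', 'v2/analyze?command_id=', host)
print(link_prefix)                  # https://us.qubole.com/v2/analyze?command_id=
print(link_prefix + str(123456))    # full link for a pulled qbol_cmd_id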
test
|
BaseJob.heartbeat
|
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
|
airflow/jobs.py
|
def heartbeat(self):
"""
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
"""
try:
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
if not is_unit_test:
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
seconds_remaining = self.heartrate - \
(timezone.utcnow() - job.latest_heartbeat)\
.total_seconds()
sleep_for = max(0, seconds_remaining)
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError as e:
self.log.error("Scheduler heartbeat got an exception: %s", str(e))
|
def heartbeat(self):
"""
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
"""
try:
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
if not is_unit_test:
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
seconds_remaining = self.heartrate - \
(timezone.utcnow() - job.latest_heartbeat)\
.total_seconds()
sleep_for = max(0, seconds_remaining)
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError as e:
self.log.error("Scheduler heartbeat got an exception: %s", str(e))
|
[
"Heartbeats",
"update",
"the",
"job",
"s",
"entry",
"in",
"the",
"database",
"with",
"a",
"timestamp",
"for",
"the",
"latest_heartbeat",
"and",
"allows",
"for",
"the",
"job",
"to",
"be",
"killed",
"externally",
".",
"This",
"allows",
"at",
"the",
"system",
"level",
"to",
"monitor",
"what",
"is",
"actually",
"active",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L139-L189
|
[
"def",
"heartbeat",
"(",
"self",
")",
":",
"try",
":",
"with",
"create_session",
"(",
")",
"as",
"session",
":",
"job",
"=",
"session",
".",
"query",
"(",
"BaseJob",
")",
".",
"filter_by",
"(",
"id",
"=",
"self",
".",
"id",
")",
".",
"one",
"(",
")",
"make_transient",
"(",
"job",
")",
"session",
".",
"commit",
"(",
")",
"if",
"job",
".",
"state",
"==",
"State",
".",
"SHUTDOWN",
":",
"self",
".",
"kill",
"(",
")",
"is_unit_test",
"=",
"conf",
".",
"getboolean",
"(",
"'core'",
",",
"'unit_test_mode'",
")",
"if",
"not",
"is_unit_test",
":",
"# Figure out how long to sleep for",
"sleep_for",
"=",
"0",
"if",
"job",
".",
"latest_heartbeat",
":",
"seconds_remaining",
"=",
"self",
".",
"heartrate",
"-",
"(",
"timezone",
".",
"utcnow",
"(",
")",
"-",
"job",
".",
"latest_heartbeat",
")",
".",
"total_seconds",
"(",
")",
"sleep_for",
"=",
"max",
"(",
"0",
",",
"seconds_remaining",
")",
"sleep",
"(",
"sleep_for",
")",
"# Update last heartbeat time",
"with",
"create_session",
"(",
")",
"as",
"session",
":",
"job",
"=",
"session",
".",
"query",
"(",
"BaseJob",
")",
".",
"filter",
"(",
"BaseJob",
".",
"id",
"==",
"self",
".",
"id",
")",
".",
"first",
"(",
")",
"job",
".",
"latest_heartbeat",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"session",
".",
"merge",
"(",
"job",
")",
"session",
".",
"commit",
"(",
")",
"self",
".",
"heartbeat_callback",
"(",
"session",
"=",
"session",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'[heartbeat]'",
")",
"except",
"OperationalError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Scheduler heartbeat got an exception: %s\"",
",",
"str",
"(",
"e",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
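The BaseJob.heartbeat docstring above spells out the sleep arithmetic (heartrate of 60 seconds, 10 seconds spent since the last heartbeat, sleep 50 seconds; no sleep once the interval has already passed). A minimal sketch of that calculation in isolation:

from datetime import datetime, timedelta

def seconds_to_sleep(heartrate, latest_heartbeat, now=None):
    # mirrors the sleep_for computation inside BaseJob.heartbeat
    now = now or datetime.utcnow()
    elapsed = (now - latest_heartbeat).total_seconds()
    return max(0, heartrate - elapsed)

now = datetime.utcnow()
print(seconds_to_sleep(60, now - timedelta(seconds=10), now))  # 50.0
print(seconds_to_sleep(60, now - timedelta(seconds=75), now))  # 0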
test
|
BaseJob.reset_state_for_orphaned_tasks
|
This function checks if there are any tasks in the dagrun (or all)
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
The batch option is for performance reasons as the queries are made in
sequence.
:param filter_by_dag_run: the dag_run we want to process, None if all
:type filter_by_dag_run: airflow.models.DagRun
:return: the TIs reset (in expired SQLAlchemy state)
:rtype: list[airflow.models.TaskInstance]
|
airflow/jobs.py
|
def reset_state_for_orphaned_tasks(self, filter_by_dag_run=None, session=None):
"""
This function checks if there are any tasks in the dagrun (or all)
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
The batch option is for performance reasons as the queries are made in
sequence.
:param filter_by_dag_run: the dag_run we want to process, None if all
:type filter_by_dag_run: airflow.models.DagRun
:return: the TIs reset (in expired SQLAlchemy state)
:rtype: list[airflow.models.TaskInstance]
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running_tis = self.executor.running
resettable_states = [State.SCHEDULED, State.QUEUED]
TI = models.TaskInstance
DR = models.DagRun
if filter_by_dag_run is None:
resettable_tis = (
session
.query(TI)
.join(
DR,
and_(
TI.dag_id == DR.dag_id,
TI.execution_date == DR.execution_date))
.filter(
DR.state == State.RUNNING,
DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'),
TI.state.in_(resettable_states))).all()
else:
resettable_tis = filter_by_dag_run.get_task_instances(state=resettable_states,
session=session)
tis_to_reset = []
# Can't use an update here since it doesn't support joins
for ti in resettable_tis:
if ti.key not in queued_tis and ti.key not in running_tis:
tis_to_reset.append(ti)
if len(tis_to_reset) == 0:
return []
def query(result, items):
filter_for_tis = ([and_(TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in items])
reset_tis = (
session
.query(TI)
.filter(or_(*filter_for_tis), TI.state.in_(resettable_states))
.with_for_update()
.all())
for ti in reset_tis:
ti.state = State.NONE
session.merge(ti)
return result + reset_tis
reset_tis = helpers.reduce_in_chunks(query,
tis_to_reset,
[],
self.max_tis_per_query)
task_instance_str = '\n\t'.join(
[repr(x) for x in reset_tis])
session.commit()
self.log.info(
"Reset the following %s TaskInstances:\n\t%s",
len(reset_tis), task_instance_str
)
return reset_tis
|
def reset_state_for_orphaned_tasks(self, filter_by_dag_run=None, session=None):
"""
This function checks if there are any tasks in the dagrun (or all)
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
The batch option is for performance reasons as the queries are made in
sequence.
:param filter_by_dag_run: the dag_run we want to process, None if all
:type filter_by_dag_run: airflow.models.DagRun
:return: the TIs reset (in expired SQLAlchemy state)
:rtype: list[airflow.models.TaskInstance]
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running_tis = self.executor.running
resettable_states = [State.SCHEDULED, State.QUEUED]
TI = models.TaskInstance
DR = models.DagRun
if filter_by_dag_run is None:
resettable_tis = (
session
.query(TI)
.join(
DR,
and_(
TI.dag_id == DR.dag_id,
TI.execution_date == DR.execution_date))
.filter(
DR.state == State.RUNNING,
DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'),
TI.state.in_(resettable_states))).all()
else:
resettable_tis = filter_by_dag_run.get_task_instances(state=resettable_states,
session=session)
tis_to_reset = []
# Can't use an update here since it doesn't support joins
for ti in resettable_tis:
if ti.key not in queued_tis and ti.key not in running_tis:
tis_to_reset.append(ti)
if len(tis_to_reset) == 0:
return []
def query(result, items):
filter_for_tis = ([and_(TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in items])
reset_tis = (
session
.query(TI)
.filter(or_(*filter_for_tis), TI.state.in_(resettable_states))
.with_for_update()
.all())
for ti in reset_tis:
ti.state = State.NONE
session.merge(ti)
return result + reset_tis
reset_tis = helpers.reduce_in_chunks(query,
tis_to_reset,
[],
self.max_tis_per_query)
task_instance_str = '\n\t'.join(
[repr(x) for x in reset_tis])
session.commit()
self.log.info(
"Reset the following %s TaskInstances:\n\t%s",
len(reset_tis), task_instance_str
)
return reset_tis
|
[
"This",
"function",
"checks",
"if",
"there",
"are",
"any",
"tasks",
"in",
"the",
"dagrun",
"(",
"or",
"all",
")",
"that",
"have",
"a",
"scheduled",
"state",
"but",
"are",
"not",
"known",
"by",
"the",
"executor",
".",
"If",
"it",
"finds",
"those",
"it",
"will",
"reset",
"the",
"state",
"to",
"None",
"so",
"they",
"will",
"get",
"picked",
"up",
"again",
".",
"The",
"batch",
"option",
"is",
"for",
"performance",
"reasons",
"as",
"the",
"queries",
"are",
"made",
"in",
"sequence",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L223-L298
|
[
"def",
"reset_state_for_orphaned_tasks",
"(",
"self",
",",
"filter_by_dag_run",
"=",
"None",
",",
"session",
"=",
"None",
")",
":",
"queued_tis",
"=",
"self",
".",
"executor",
".",
"queued_tasks",
"# also consider running as the state might not have changed in the db yet",
"running_tis",
"=",
"self",
".",
"executor",
".",
"running",
"resettable_states",
"=",
"[",
"State",
".",
"SCHEDULED",
",",
"State",
".",
"QUEUED",
"]",
"TI",
"=",
"models",
".",
"TaskInstance",
"DR",
"=",
"models",
".",
"DagRun",
"if",
"filter_by_dag_run",
"is",
"None",
":",
"resettable_tis",
"=",
"(",
"session",
".",
"query",
"(",
"TI",
")",
".",
"join",
"(",
"DR",
",",
"and_",
"(",
"TI",
".",
"dag_id",
"==",
"DR",
".",
"dag_id",
",",
"TI",
".",
"execution_date",
"==",
"DR",
".",
"execution_date",
")",
")",
".",
"filter",
"(",
"DR",
".",
"state",
"==",
"State",
".",
"RUNNING",
",",
"DR",
".",
"run_id",
".",
"notlike",
"(",
"BackfillJob",
".",
"ID_PREFIX",
"+",
"'%'",
")",
",",
"TI",
".",
"state",
".",
"in_",
"(",
"resettable_states",
")",
")",
")",
".",
"all",
"(",
")",
"else",
":",
"resettable_tis",
"=",
"filter_by_dag_run",
".",
"get_task_instances",
"(",
"state",
"=",
"resettable_states",
",",
"session",
"=",
"session",
")",
"tis_to_reset",
"=",
"[",
"]",
"# Can't use an update here since it doesn't support joins",
"for",
"ti",
"in",
"resettable_tis",
":",
"if",
"ti",
".",
"key",
"not",
"in",
"queued_tis",
"and",
"ti",
".",
"key",
"not",
"in",
"running_tis",
":",
"tis_to_reset",
".",
"append",
"(",
"ti",
")",
"if",
"len",
"(",
"tis_to_reset",
")",
"==",
"0",
":",
"return",
"[",
"]",
"def",
"query",
"(",
"result",
",",
"items",
")",
":",
"filter_for_tis",
"=",
"(",
"[",
"and_",
"(",
"TI",
".",
"dag_id",
"==",
"ti",
".",
"dag_id",
",",
"TI",
".",
"task_id",
"==",
"ti",
".",
"task_id",
",",
"TI",
".",
"execution_date",
"==",
"ti",
".",
"execution_date",
")",
"for",
"ti",
"in",
"items",
"]",
")",
"reset_tis",
"=",
"(",
"session",
".",
"query",
"(",
"TI",
")",
".",
"filter",
"(",
"or_",
"(",
"*",
"filter_for_tis",
")",
",",
"TI",
".",
"state",
".",
"in_",
"(",
"resettable_states",
")",
")",
".",
"with_for_update",
"(",
")",
".",
"all",
"(",
")",
")",
"for",
"ti",
"in",
"reset_tis",
":",
"ti",
".",
"state",
"=",
"State",
".",
"NONE",
"session",
".",
"merge",
"(",
"ti",
")",
"return",
"result",
"+",
"reset_tis",
"reset_tis",
"=",
"helpers",
".",
"reduce_in_chunks",
"(",
"query",
",",
"tis_to_reset",
",",
"[",
"]",
",",
"self",
".",
"max_tis_per_query",
")",
"task_instance_str",
"=",
"'\\n\\t'",
".",
"join",
"(",
"[",
"repr",
"(",
"x",
")",
"for",
"x",
"in",
"reset_tis",
"]",
")",
"session",
".",
"commit",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Reset the following %s TaskInstances:\\n\\t%s\"",
",",
"len",
"(",
"reset_tis",
")",
",",
"task_instance_str",
")",
"return",
"reset_tis"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
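reset_state_for_orphaned_tasks above batches its locking queries through helpers.reduce_in_chunks with max_tis_per_query. A self-contained sketch of the chunked-reduce pattern it relies on (illustrative only, not Airflow's helper):

def reduce_in_chunks(fn, items, initial, chunk_size):
    # fold fn over fixed-size slices of items, like the query(result, items) callback above
    result = initial
    step = chunk_size if chunk_size > 0 else (len(items) or 1)
    for i in range(0, len(items), step):
        result = fn(result, items[i:i + step])
    return result

print(reduce_in_chunks(lambda acc, chunk: acc + [sum(chunk)], list(range(10)), [], 4))
# [6, 22, 17]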
test
|
DagFileProcessor._launch_process
|
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
|
airflow/jobs.py
|
def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
def helper():
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_queue.put(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p
|
def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
:param zombies: zombie task instances to kill
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
def helper():
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_queue.put(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p
|
[
"Launch",
"a",
"process",
"to",
"process",
"the",
"given",
"file",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L342-L417
|
[
"def",
"_launch_process",
"(",
"result_queue",
",",
"file_path",
",",
"pickle_dags",
",",
"dag_id_white_list",
",",
"thread_name",
",",
"zombies",
")",
":",
"def",
"helper",
"(",
")",
":",
"# This helper runs in the newly created process",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"\"airflow.processor\"",
")",
"stdout",
"=",
"StreamLogWriter",
"(",
"log",
",",
"logging",
".",
"INFO",
")",
"stderr",
"=",
"StreamLogWriter",
"(",
"log",
",",
"logging",
".",
"WARN",
")",
"set_context",
"(",
"log",
",",
"file_path",
")",
"try",
":",
"# redirect stdout/stderr to log",
"sys",
".",
"stdout",
"=",
"stdout",
"sys",
".",
"stderr",
"=",
"stderr",
"# Re-configure the ORM engine as there are issues with multiple processes",
"settings",
".",
"configure_orm",
"(",
")",
"# Change the thread name to differentiate log lines. This is",
"# really a separate process, but changing the name of the",
"# process doesn't work, so changing the thread name instead.",
"threading",
".",
"current_thread",
"(",
")",
".",
"name",
"=",
"thread_name",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"log",
".",
"info",
"(",
"\"Started process (PID=%s) to work on %s\"",
",",
"os",
".",
"getpid",
"(",
")",
",",
"file_path",
")",
"scheduler_job",
"=",
"SchedulerJob",
"(",
"dag_ids",
"=",
"dag_id_white_list",
",",
"log",
"=",
"log",
")",
"result",
"=",
"scheduler_job",
".",
"process_file",
"(",
"file_path",
",",
"zombies",
",",
"pickle_dags",
")",
"result_queue",
".",
"put",
"(",
"result",
")",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"log",
".",
"info",
"(",
"\"Processing %s took %.3f seconds\"",
",",
"file_path",
",",
"end_time",
"-",
"start_time",
")",
"except",
"Exception",
":",
"# Log exceptions through the logging framework.",
"log",
".",
"exception",
"(",
"\"Got an exception! Propagating...\"",
")",
"raise",
"finally",
":",
"sys",
".",
"stdout",
"=",
"sys",
".",
"__stdout__",
"sys",
".",
"stderr",
"=",
"sys",
".",
"__stderr__",
"# We re-initialized the ORM within this Process above so we need to",
"# tear it down manually here",
"settings",
".",
"dispose_orm",
"(",
")",
"p",
"=",
"multiprocessing",
".",
"Process",
"(",
"target",
"=",
"helper",
",",
"args",
"=",
"(",
")",
",",
"name",
"=",
"\"{}-Process\"",
".",
"format",
"(",
"thread_name",
")",
")",
"p",
".",
"start",
"(",
")",
"return",
"p"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
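A minimal, self-contained sketch of the pattern _launch_process relies on: the child process does its work, pushes the result onto a multiprocessing.Queue, and the parent collects it. The parse_file function and the file path below are illustrative stand-ins, not Airflow code; note that the result is read from the queue before join(), which avoids the hang mentioned in the done() method further down.

import multiprocessing


def parse_file(result_queue, file_path):
    # Child-process body: do some work and hand the result back to the parent.
    result_queue.put("parsed: {}".format(file_path))


if __name__ == "__main__":
    queue = multiprocessing.Queue()
    p = multiprocessing.Process(target=parse_file,
                                args=(queue, "/tmp/example_dag.py"),
                                name="Demo-Process")
    p.start()
    print(queue.get())  # blocks until the child has put its result
    p.join()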
test
|
DagFileProcessor.start
|
Launch the process and start processing the DAG.
|
airflow/jobs.py
|
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies)
self._start_time = timezone.utcnow()
|
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies)
self._start_time = timezone.utcnow()
|
[
"Launch",
"the",
"process",
"and",
"start",
"processing",
"the",
"DAG",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L419-L430
|
[
"def",
"start",
"(",
"self",
")",
":",
"self",
".",
"_process",
"=",
"DagFileProcessor",
".",
"_launch_process",
"(",
"self",
".",
"_result_queue",
",",
"self",
".",
"file_path",
",",
"self",
".",
"_pickle_dags",
",",
"self",
".",
"_dag_id_white_list",
",",
"\"DagFileProcessor{}\"",
".",
"format",
"(",
"self",
".",
"_instance_id",
")",
",",
"self",
".",
"_zombies",
")",
"self",
".",
"_start_time",
"=",
"timezone",
".",
"utcnow",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
DagFileProcessor.terminate
|
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
|
airflow/jobs.py
|
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call stop before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill and self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
|
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call stop before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill and self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
|
[
"Terminate",
"(",
"and",
"then",
"kill",
")",
"the",
"process",
"launched",
"to",
"process",
"the",
"file",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L432-L448
|
[
"def",
"terminate",
"(",
"self",
",",
"sigkill",
"=",
"False",
")",
":",
"if",
"self",
".",
"_process",
"is",
"None",
":",
"raise",
"AirflowException",
"(",
"\"Tried to call stop before starting!\"",
")",
"# The queue will likely get corrupted, so remove the reference",
"self",
".",
"_result_queue",
"=",
"None",
"self",
".",
"_process",
".",
"terminate",
"(",
")",
"# Arbitrarily wait 5s for the process to die",
"self",
".",
"_process",
".",
"join",
"(",
"5",
")",
"if",
"sigkill",
"and",
"self",
".",
"_process",
".",
"is_alive",
"(",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Killing PID %s\"",
",",
"self",
".",
"_process",
".",
"pid",
")",
"os",
".",
"kill",
"(",
"self",
".",
"_process",
".",
"pid",
",",
"signal",
".",
"SIGKILL",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
DagFileProcessor.done
|
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
|
airflow/jobs.py
|
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
# In case result queue is corrupted.
if self._result_queue and not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
# Potential error case when process dies
if self._result_queue and not self._process.is_alive():
self._done = True
# Get the object from the queue or else join() can hang.
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
return False
|
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
# In case result queue is corrupted.
if self._result_queue and not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
# Potential error case when process dies
if self._result_queue and not self._process.is_alive():
self._done = True
# Get the object from the queue or else join() can hang.
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
return False
|
[
"Check",
"if",
"the",
"process",
"launched",
"to",
"process",
"this",
"file",
"is",
"done",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L473-L504
|
[
"def",
"done",
"(",
"self",
")",
":",
"if",
"self",
".",
"_process",
"is",
"None",
":",
"raise",
"AirflowException",
"(",
"\"Tried to see if it's done before starting!\"",
")",
"if",
"self",
".",
"_done",
":",
"return",
"True",
"# In case result queue is corrupted.",
"if",
"self",
".",
"_result_queue",
"and",
"not",
"self",
".",
"_result_queue",
".",
"empty",
"(",
")",
":",
"self",
".",
"_result",
"=",
"self",
".",
"_result_queue",
".",
"get_nowait",
"(",
")",
"self",
".",
"_done",
"=",
"True",
"self",
".",
"log",
".",
"debug",
"(",
"\"Waiting for %s\"",
",",
"self",
".",
"_process",
")",
"self",
".",
"_process",
".",
"join",
"(",
")",
"return",
"True",
"# Potential error case when process dies",
"if",
"self",
".",
"_result_queue",
"and",
"not",
"self",
".",
"_process",
".",
"is_alive",
"(",
")",
":",
"self",
".",
"_done",
"=",
"True",
"# Get the object from the queue or else join() can hang.",
"if",
"not",
"self",
".",
"_result_queue",
".",
"empty",
"(",
")",
":",
"self",
".",
"_result",
"=",
"self",
".",
"_result_queue",
".",
"get_nowait",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Waiting for %s\"",
",",
"self",
".",
"_process",
")",
"self",
".",
"_process",
".",
"join",
"(",
")",
"return",
"True",
"return",
"False"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
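Taken together, start(), done() and terminate() give the scheduler a simple lifecycle to drive. The sketch below shows one hedged way a caller might poll that lifecycle with a time budget; the run_with_timeout helper, the 30-second timeout and the 1-second poll interval are assumptions for illustration, not part of Airflow.

import time


def run_with_timeout(processor, timeout=30.0, poll_interval=1.0):
    # Start the worker, poll done(), and fall back to a hard kill if the
    # file takes longer than the budget allows.
    processor.start()
    deadline = time.time() + timeout
    while not processor.done():
        if time.time() > deadline:
            processor.terminate(sigkill=True)
            return False
        time.sleep(poll_interval)
    return True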
test
|
SchedulerJob._exit_gracefully
|
Helper method to clean up processor_agent to avoid leaving orphan processes.
|
airflow/jobs.py
|
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
|
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
|
[
"Helper",
"method",
"to",
"clean",
"up",
"processor_agent",
"to",
"avoid",
"leaving",
"orphan",
"processes",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L599-L606
|
[
"def",
"_exit_gracefully",
"(",
"self",
",",
"signum",
",",
"frame",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Exiting gracefully upon receiving signal %s\"",
",",
"signum",
")",
"if",
"self",
".",
"processor_agent",
":",
"self",
".",
"processor_agent",
".",
"end",
"(",
")",
"sys",
".",
"exit",
"(",
"os",
".",
"EX_OK",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
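_exit_gracefully only runs if it has been registered as a signal handler. A hedged sketch of that wiring with a stand-in job class; the DemoJob class and the sleep loop are illustrative only, and os.EX_OK is POSIX-specific.

import os
import signal
import sys
import time


class DemoJob:
    def _exit_gracefully(self, signum, frame):
        # Mirror the handler above: announce the signal, then exit cleanly.
        print("Exiting gracefully upon receiving signal {}".format(signum))
        sys.exit(os.EX_OK)


if __name__ == "__main__":
    job = DemoJob()
    signal.signal(signal.SIGINT, job._exit_gracefully)   # Ctrl+C
    signal.signal(signal.SIGTERM, job._exit_gracefully)  # kill <pid>
    time.sleep(60)  # wait around so a signal can arrive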
test
|
SchedulerJob.manage_slas
|
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
|
airflow/jobs.py
|
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
        We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa: E712
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}<code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
emails = set()
for task in dag.tasks:
if task.email:
if isinstance(task.email, basestring):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
|
def manage_slas(self, dag, session=None):
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
        We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa: E712
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}<code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
emails = set()
for task in dag.tasks:
if task.email:
if isinstance(task.email, basestring):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
|
[
"Finding",
"all",
"tasks",
"that",
"have",
"SLAs",
"defined",
"and",
"sending",
"alert",
"emails",
"where",
"needed",
".",
"New",
"SLA",
"misses",
"are",
"also",
"recorded",
"in",
"the",
"database",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L609-L738
|
[
"def",
"manage_slas",
"(",
"self",
",",
"dag",
",",
"session",
"=",
"None",
")",
":",
"if",
"not",
"any",
"(",
"[",
"isinstance",
"(",
"ti",
".",
"sla",
",",
"timedelta",
")",
"for",
"ti",
"in",
"dag",
".",
"tasks",
"]",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Skipping SLA check for %s because no tasks in DAG have SLAs\"",
",",
"dag",
")",
"return",
"TI",
"=",
"models",
".",
"TaskInstance",
"sq",
"=",
"(",
"session",
".",
"query",
"(",
"TI",
".",
"task_id",
",",
"func",
".",
"max",
"(",
"TI",
".",
"execution_date",
")",
".",
"label",
"(",
"'max_ti'",
")",
")",
".",
"with_hint",
"(",
"TI",
",",
"'USE INDEX (PRIMARY)'",
",",
"dialect_name",
"=",
"'mysql'",
")",
".",
"filter",
"(",
"TI",
".",
"dag_id",
"==",
"dag",
".",
"dag_id",
")",
".",
"filter",
"(",
"or_",
"(",
"TI",
".",
"state",
"==",
"State",
".",
"SUCCESS",
",",
"TI",
".",
"state",
"==",
"State",
".",
"SKIPPED",
")",
")",
".",
"filter",
"(",
"TI",
".",
"task_id",
".",
"in_",
"(",
"dag",
".",
"task_ids",
")",
")",
".",
"group_by",
"(",
"TI",
".",
"task_id",
")",
".",
"subquery",
"(",
"'sq'",
")",
")",
"max_tis",
"=",
"session",
".",
"query",
"(",
"TI",
")",
".",
"filter",
"(",
"TI",
".",
"dag_id",
"==",
"dag",
".",
"dag_id",
",",
"TI",
".",
"task_id",
"==",
"sq",
".",
"c",
".",
"task_id",
",",
"TI",
".",
"execution_date",
"==",
"sq",
".",
"c",
".",
"max_ti",
",",
")",
".",
"all",
"(",
")",
"ts",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"for",
"ti",
"in",
"max_tis",
":",
"task",
"=",
"dag",
".",
"get_task",
"(",
"ti",
".",
"task_id",
")",
"dttm",
"=",
"ti",
".",
"execution_date",
"if",
"isinstance",
"(",
"task",
".",
"sla",
",",
"timedelta",
")",
":",
"dttm",
"=",
"dag",
".",
"following_schedule",
"(",
"dttm",
")",
"while",
"dttm",
"<",
"timezone",
".",
"utcnow",
"(",
")",
":",
"following_schedule",
"=",
"dag",
".",
"following_schedule",
"(",
"dttm",
")",
"if",
"following_schedule",
"+",
"task",
".",
"sla",
"<",
"timezone",
".",
"utcnow",
"(",
")",
":",
"session",
".",
"merge",
"(",
"SlaMiss",
"(",
"task_id",
"=",
"ti",
".",
"task_id",
",",
"dag_id",
"=",
"ti",
".",
"dag_id",
",",
"execution_date",
"=",
"dttm",
",",
"timestamp",
"=",
"ts",
")",
")",
"dttm",
"=",
"dag",
".",
"following_schedule",
"(",
"dttm",
")",
"session",
".",
"commit",
"(",
")",
"slas",
"=",
"(",
"session",
".",
"query",
"(",
"SlaMiss",
")",
".",
"filter",
"(",
"SlaMiss",
".",
"notification_sent",
"==",
"False",
",",
"SlaMiss",
".",
"dag_id",
"==",
"dag",
".",
"dag_id",
")",
"# noqa: E712",
".",
"all",
"(",
")",
")",
"if",
"slas",
":",
"sla_dates",
"=",
"[",
"sla",
".",
"execution_date",
"for",
"sla",
"in",
"slas",
"]",
"qry",
"=",
"(",
"session",
".",
"query",
"(",
"TI",
")",
".",
"filter",
"(",
"TI",
".",
"state",
"!=",
"State",
".",
"SUCCESS",
",",
"TI",
".",
"execution_date",
".",
"in_",
"(",
"sla_dates",
")",
",",
"TI",
".",
"dag_id",
"==",
"dag",
".",
"dag_id",
")",
".",
"all",
"(",
")",
")",
"blocking_tis",
"=",
"[",
"]",
"for",
"ti",
"in",
"qry",
":",
"if",
"ti",
".",
"task_id",
"in",
"dag",
".",
"task_ids",
":",
"ti",
".",
"task",
"=",
"dag",
".",
"get_task",
"(",
"ti",
".",
"task_id",
")",
"blocking_tis",
".",
"append",
"(",
"ti",
")",
"else",
":",
"session",
".",
"delete",
"(",
"ti",
")",
"session",
".",
"commit",
"(",
")",
"task_list",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"sla",
".",
"task_id",
"+",
"' on '",
"+",
"sla",
".",
"execution_date",
".",
"isoformat",
"(",
")",
"for",
"sla",
"in",
"slas",
"]",
")",
"blocking_task_list",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"ti",
".",
"task_id",
"+",
"' on '",
"+",
"ti",
".",
"execution_date",
".",
"isoformat",
"(",
")",
"for",
"ti",
"in",
"blocking_tis",
"]",
")",
"# Track whether email or any alert notification sent",
"# We consider email or the alert callback as notifications",
"email_sent",
"=",
"False",
"notification_sent",
"=",
"False",
"if",
"dag",
".",
"sla_miss_callback",
":",
"# Execute the alert callback",
"self",
".",
"log",
".",
"info",
"(",
"' --------------> ABOUT TO CALL SLA MISS CALL BACK '",
")",
"try",
":",
"dag",
".",
"sla_miss_callback",
"(",
"dag",
",",
"task_list",
",",
"blocking_task_list",
",",
"slas",
",",
"blocking_tis",
")",
"notification_sent",
"=",
"True",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"exception",
"(",
"\"Could not call sla_miss_callback for DAG %s\"",
",",
"dag",
".",
"dag_id",
")",
"email_content",
"=",
"\"\"\"\\\n Here's a list of tasks that missed their SLAs:\n <pre><code>{task_list}\\n<code></pre>\n Blocking tasks:\n <pre><code>{blocking_task_list}\\n{bug}<code></pre>\n \"\"\"",
".",
"format",
"(",
"task_list",
"=",
"task_list",
",",
"blocking_task_list",
"=",
"blocking_task_list",
",",
"bug",
"=",
"asciiart",
".",
"bug",
")",
"emails",
"=",
"set",
"(",
")",
"for",
"task",
"in",
"dag",
".",
"tasks",
":",
"if",
"task",
".",
"email",
":",
"if",
"isinstance",
"(",
"task",
".",
"email",
",",
"basestring",
")",
":",
"emails",
"|=",
"set",
"(",
"get_email_address_list",
"(",
"task",
".",
"email",
")",
")",
"elif",
"isinstance",
"(",
"task",
".",
"email",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"emails",
"|=",
"set",
"(",
"task",
".",
"email",
")",
"if",
"emails",
":",
"try",
":",
"send_email",
"(",
"emails",
",",
"\"[airflow] SLA miss on DAG=\"",
"+",
"dag",
".",
"dag_id",
",",
"email_content",
")",
"email_sent",
"=",
"True",
"notification_sent",
"=",
"True",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"exception",
"(",
"\"Could not send SLA Miss email notification for\"",
"\" DAG %s\"",
",",
"dag",
".",
"dag_id",
")",
"# If we sent any notification, update the sla_miss table",
"if",
"notification_sent",
":",
"for",
"sla",
"in",
"slas",
":",
"if",
"email_sent",
":",
"sla",
".",
"email_sent",
"=",
"True",
"sla",
".",
"notification_sent",
"=",
"True",
"session",
".",
"merge",
"(",
"sla",
")",
"session",
".",
"commit",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
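manage_slas only considers a DAG when at least one of its tasks sets an sla timedelta, and it hands misses to dag.sla_miss_callback with the five arguments shown above. A hedged sketch of the DAG-author side of that contract; the import paths match an Airflow 1.10-era layout and should be treated as assumptions.

from datetime import datetime, timedelta

from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator


def notify_sla_miss(dag, task_list, blocking_task_list, slas, blocking_tis):
    # Same signature that manage_slas uses when calling dag.sla_miss_callback.
    print("SLA missed in {}:\n{}".format(dag.dag_id, task_list))


dag = DAG(dag_id="sla_demo",
          start_date=datetime(2019, 1, 1),
          schedule_interval="@hourly",
          sla_miss_callback=notify_sla_miss)

# The task-level sla (a timedelta) is what makes manage_slas look at this DAG.
DummyOperator(task_id="noop", sla=timedelta(minutes=10), dag=dag)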
test
|
SchedulerJob.update_import_errors
|
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
|
airflow/jobs.py
|
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
|
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
|
[
"For",
"the",
"DAGs",
"in",
"the",
"given",
"DagBag",
"record",
"any",
"associated",
"import",
"errors",
"and",
"clears",
"errors",
"for",
"files",
"that",
"no",
"longer",
"have",
"them",
".",
"These",
"are",
"usually",
"displayed",
"through",
"the",
"Airflow",
"UI",
"so",
"that",
"users",
"know",
"that",
"there",
"are",
"issues",
"parsing",
"DAGs",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L741-L763
|
[
"def",
"update_import_errors",
"(",
"session",
",",
"dagbag",
")",
":",
"# Clear the errors of the processed files",
"for",
"dagbag_file",
"in",
"dagbag",
".",
"file_last_changed",
":",
"session",
".",
"query",
"(",
"errors",
".",
"ImportError",
")",
".",
"filter",
"(",
"errors",
".",
"ImportError",
".",
"filename",
"==",
"dagbag_file",
")",
".",
"delete",
"(",
")",
"# Add the errors of the processed files",
"for",
"filename",
",",
"stacktrace",
"in",
"six",
".",
"iteritems",
"(",
"dagbag",
".",
"import_errors",
")",
":",
"session",
".",
"add",
"(",
"errors",
".",
"ImportError",
"(",
"filename",
"=",
"filename",
",",
"stacktrace",
"=",
"stacktrace",
")",
")",
"session",
".",
"commit",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
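The method is a clear-then-add reconciliation: every file the DagBag touched has its old error rows deleted, and only the errors still present after this parse are re-inserted. A minimal sketch of that same two-pass pattern against a plain dict, just to make the passes explicit; nothing below is Airflow API.

def reconcile_import_errors(error_store, parsed_files, import_errors):
    # Pass 1: clear the recorded errors of every file that was just parsed.
    for filename in parsed_files:
        error_store.pop(filename, None)
    # Pass 2: add back the errors that still exist after this parse.
    error_store.update(import_errors)
    return error_store


store = {"dags/old.py": "ImportError: fixed since last parse"}
print(reconcile_import_errors(store,
                              parsed_files=["dags/old.py", "dags/new.py"],
                              import_errors={"dags/new.py": "SyntaxError: ..."}))
# -> {'dags/new.py': 'SyntaxError: ...'}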
test
|
SchedulerJob.create_dag_run
|
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
|
airflow/jobs.py
|
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for dag's that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
|
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for dag's that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
|
[
"This",
"method",
"checks",
"whether",
"a",
"new",
"DagRun",
"needs",
"to",
"be",
"created",
"for",
"a",
"DAG",
"based",
"on",
"scheduling",
"interval",
".",
"Returns",
"DagRun",
"if",
"one",
"is",
"scheduled",
".",
"Otherwise",
"returns",
"None",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L766-L894
|
[
"def",
"create_dag_run",
"(",
"self",
",",
"dag",
",",
"session",
"=",
"None",
")",
":",
"if",
"dag",
".",
"schedule_interval",
"and",
"conf",
".",
"getboolean",
"(",
"'scheduler'",
",",
"'USE_JOB_SCHEDULE'",
")",
":",
"active_runs",
"=",
"DagRun",
".",
"find",
"(",
"dag_id",
"=",
"dag",
".",
"dag_id",
",",
"state",
"=",
"State",
".",
"RUNNING",
",",
"external_trigger",
"=",
"False",
",",
"session",
"=",
"session",
")",
"# return if already reached maximum active runs and no timeout setting",
"if",
"len",
"(",
"active_runs",
")",
">=",
"dag",
".",
"max_active_runs",
"and",
"not",
"dag",
".",
"dagrun_timeout",
":",
"return",
"timedout_runs",
"=",
"0",
"for",
"dr",
"in",
"active_runs",
":",
"if",
"(",
"dr",
".",
"start_date",
"and",
"dag",
".",
"dagrun_timeout",
"and",
"dr",
".",
"start_date",
"<",
"timezone",
".",
"utcnow",
"(",
")",
"-",
"dag",
".",
"dagrun_timeout",
")",
":",
"dr",
".",
"state",
"=",
"State",
".",
"FAILED",
"dr",
".",
"end_date",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"dag",
".",
"handle_callback",
"(",
"dr",
",",
"success",
"=",
"False",
",",
"reason",
"=",
"'dagrun_timeout'",
",",
"session",
"=",
"session",
")",
"timedout_runs",
"+=",
"1",
"session",
".",
"commit",
"(",
")",
"if",
"len",
"(",
"active_runs",
")",
"-",
"timedout_runs",
">=",
"dag",
".",
"max_active_runs",
":",
"return",
"# this query should be replaced by find dagrun",
"qry",
"=",
"(",
"session",
".",
"query",
"(",
"func",
".",
"max",
"(",
"DagRun",
".",
"execution_date",
")",
")",
".",
"filter_by",
"(",
"dag_id",
"=",
"dag",
".",
"dag_id",
")",
".",
"filter",
"(",
"or_",
"(",
"DagRun",
".",
"external_trigger",
"==",
"False",
",",
"# noqa: E712",
"# add % as a wildcard for the like query",
"DagRun",
".",
"run_id",
".",
"like",
"(",
"DagRun",
".",
"ID_PREFIX",
"+",
"'%'",
")",
")",
")",
")",
"last_scheduled_run",
"=",
"qry",
".",
"scalar",
"(",
")",
"# don't schedule @once again",
"if",
"dag",
".",
"schedule_interval",
"==",
"'@once'",
"and",
"last_scheduled_run",
":",
"return",
"None",
"# don't do scheduler catchup for dag's that don't have dag.catchup = True",
"if",
"not",
"(",
"dag",
".",
"catchup",
"or",
"dag",
".",
"schedule_interval",
"==",
"'@once'",
")",
":",
"# The logic is that we move start_date up until",
"# one period before, so that timezone.utcnow() is AFTER",
"# the period end, and the job can be created...",
"now",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"next_start",
"=",
"dag",
".",
"following_schedule",
"(",
"now",
")",
"last_start",
"=",
"dag",
".",
"previous_schedule",
"(",
"now",
")",
"if",
"next_start",
"<=",
"now",
":",
"new_start",
"=",
"last_start",
"else",
":",
"new_start",
"=",
"dag",
".",
"previous_schedule",
"(",
"last_start",
")",
"if",
"dag",
".",
"start_date",
":",
"if",
"new_start",
">=",
"dag",
".",
"start_date",
":",
"dag",
".",
"start_date",
"=",
"new_start",
"else",
":",
"dag",
".",
"start_date",
"=",
"new_start",
"next_run_date",
"=",
"None",
"if",
"not",
"last_scheduled_run",
":",
"# First run",
"task_start_dates",
"=",
"[",
"t",
".",
"start_date",
"for",
"t",
"in",
"dag",
".",
"tasks",
"]",
"if",
"task_start_dates",
":",
"next_run_date",
"=",
"dag",
".",
"normalize_schedule",
"(",
"min",
"(",
"task_start_dates",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Next run date based on tasks %s\"",
",",
"next_run_date",
")",
"else",
":",
"next_run_date",
"=",
"dag",
".",
"following_schedule",
"(",
"last_scheduled_run",
")",
"# make sure backfills are also considered",
"last_run",
"=",
"dag",
".",
"get_last_dagrun",
"(",
"session",
"=",
"session",
")",
"if",
"last_run",
"and",
"next_run_date",
":",
"while",
"next_run_date",
"<=",
"last_run",
".",
"execution_date",
":",
"next_run_date",
"=",
"dag",
".",
"following_schedule",
"(",
"next_run_date",
")",
"# don't ever schedule prior to the dag's start_date",
"if",
"dag",
".",
"start_date",
":",
"next_run_date",
"=",
"(",
"dag",
".",
"start_date",
"if",
"not",
"next_run_date",
"else",
"max",
"(",
"next_run_date",
",",
"dag",
".",
"start_date",
")",
")",
"if",
"next_run_date",
"==",
"dag",
".",
"start_date",
":",
"next_run_date",
"=",
"dag",
".",
"normalize_schedule",
"(",
"dag",
".",
"start_date",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Dag start date: %s. Next run date: %s\"",
",",
"dag",
".",
"start_date",
",",
"next_run_date",
")",
"# don't ever schedule in the future or if next_run_date is None",
"if",
"not",
"next_run_date",
"or",
"next_run_date",
">",
"timezone",
".",
"utcnow",
"(",
")",
":",
"return",
"# this structure is necessary to avoid a TypeError from concatenating",
"# NoneType",
"if",
"dag",
".",
"schedule_interval",
"==",
"'@once'",
":",
"period_end",
"=",
"next_run_date",
"elif",
"next_run_date",
":",
"period_end",
"=",
"dag",
".",
"following_schedule",
"(",
"next_run_date",
")",
"# Don't schedule a dag beyond its end_date (as specified by the dag param)",
"if",
"next_run_date",
"and",
"dag",
".",
"end_date",
"and",
"next_run_date",
">",
"dag",
".",
"end_date",
":",
"return",
"# Don't schedule a dag beyond its end_date (as specified by the task params)",
"# Get the min task end date, which may come from the dag.default_args",
"min_task_end_date",
"=",
"[",
"]",
"task_end_dates",
"=",
"[",
"t",
".",
"end_date",
"for",
"t",
"in",
"dag",
".",
"tasks",
"if",
"t",
".",
"end_date",
"]",
"if",
"task_end_dates",
":",
"min_task_end_date",
"=",
"min",
"(",
"task_end_dates",
")",
"if",
"next_run_date",
"and",
"min_task_end_date",
"and",
"next_run_date",
">",
"min_task_end_date",
":",
"return",
"if",
"next_run_date",
"and",
"period_end",
"and",
"period_end",
"<=",
"timezone",
".",
"utcnow",
"(",
")",
":",
"next_run",
"=",
"dag",
".",
"create_dagrun",
"(",
"run_id",
"=",
"DagRun",
".",
"ID_PREFIX",
"+",
"next_run_date",
".",
"isoformat",
"(",
")",
",",
"execution_date",
"=",
"next_run_date",
",",
"start_date",
"=",
"timezone",
".",
"utcnow",
"(",
")",
",",
"state",
"=",
"State",
".",
"RUNNING",
",",
"external_trigger",
"=",
"False",
")",
"return",
"next_run"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
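The branches above are driven almost entirely by DAG-level parameters: schedule_interval gates the whole method, catchup=False triggers the start_date adjustment, max_active_runs caps concurrent runs, and dagrun_timeout lets stale runs be failed. A hedged sketch of a DAG definition that exercises those knobs; the values are arbitrary examples.

from datetime import datetime, timedelta

from airflow import DAG

dag = DAG(dag_id="scheduling_demo",
          start_date=datetime(2019, 1, 1),
          schedule_interval=timedelta(hours=1),  # a real schedule, so runs keep being created
          catchup=False,                         # skip the backlog; see the start_date adjustment above
          max_active_runs=2,                     # create_dag_run refuses to exceed this
          dagrun_timeout=timedelta(hours=3))     # runs older than this are failed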
test
|
SchedulerJob._process_task_instances
|
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
|
airflow/jobs.py
|
def _process_task_instances(self, dag, queue, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
# this needs a fresh session sometimes tis get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY,
State.UP_FOR_RESCHEDULE))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
queue.append(ti.key)
|
def _process_task_instances(self, dag, queue, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
# this needs a fresh session sometimes tis get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY,
State.UP_FOR_RESCHEDULE))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
queue.append(ti.key)
|
[
"This",
"method",
"schedules",
"the",
"tasks",
"for",
"a",
"single",
"DAG",
"by",
"looking",
"at",
"the",
"active",
"DAG",
"runs",
"and",
"adding",
"task",
"instances",
"that",
"should",
"run",
"to",
"the",
"queue",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L897-L954
|
[
"def",
"_process_task_instances",
"(",
"self",
",",
"dag",
",",
"queue",
",",
"session",
"=",
"None",
")",
":",
"# update the state of the previously active dag runs",
"dag_runs",
"=",
"DagRun",
".",
"find",
"(",
"dag_id",
"=",
"dag",
".",
"dag_id",
",",
"state",
"=",
"State",
".",
"RUNNING",
",",
"session",
"=",
"session",
")",
"active_dag_runs",
"=",
"[",
"]",
"for",
"run",
"in",
"dag_runs",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Examining DAG run %s\"",
",",
"run",
")",
"# don't consider runs that are executed in the future",
"if",
"run",
".",
"execution_date",
">",
"timezone",
".",
"utcnow",
"(",
")",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Execution date is in future: %s\"",
",",
"run",
".",
"execution_date",
")",
"continue",
"if",
"len",
"(",
"active_dag_runs",
")",
">=",
"dag",
".",
"max_active_runs",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Number of active dag runs reached max_active_run.\"",
")",
"break",
"# skip backfill dagruns for now as long as they are not really scheduled",
"if",
"run",
".",
"is_backfill",
":",
"continue",
"# todo: run.dag is transient but needs to be set",
"run",
".",
"dag",
"=",
"dag",
"# todo: preferably the integrity check happens at dag collection time",
"run",
".",
"verify_integrity",
"(",
"session",
"=",
"session",
")",
"run",
".",
"update_state",
"(",
"session",
"=",
"session",
")",
"if",
"run",
".",
"state",
"==",
"State",
".",
"RUNNING",
":",
"make_transient",
"(",
"run",
")",
"active_dag_runs",
".",
"append",
"(",
"run",
")",
"for",
"run",
"in",
"active_dag_runs",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Examining active DAG run: %s\"",
",",
"run",
")",
"# this needs a fresh session sometimes tis get detached",
"tis",
"=",
"run",
".",
"get_task_instances",
"(",
"state",
"=",
"(",
"State",
".",
"NONE",
",",
"State",
".",
"UP_FOR_RETRY",
",",
"State",
".",
"UP_FOR_RESCHEDULE",
")",
")",
"# this loop is quite slow as it uses are_dependencies_met for",
"# every task (in ti.is_runnable). This is also called in",
"# update_state above which has already checked these tasks",
"for",
"ti",
"in",
"tis",
":",
"task",
"=",
"dag",
".",
"get_task",
"(",
"ti",
".",
"task_id",
")",
"# fixme: ti.task is transient but needs to be set",
"ti",
".",
"task",
"=",
"task",
"if",
"ti",
".",
"are_dependencies_met",
"(",
"dep_context",
"=",
"DepContext",
"(",
"flag_upstream_failed",
"=",
"True",
")",
",",
"session",
"=",
"session",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Queuing task: %s'",
",",
"ti",
")",
"queue",
".",
"append",
"(",
"ti",
".",
"key",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
SchedulerJob._change_state_for_tis_without_dagrun
|
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of a DagRun is
changed manually.
:param old_states: examine TaskInstances in these states
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
|
airflow/jobs.py
|
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of a DagRun is
        changed manually.
        :param old_states: examine TaskInstances in these states
        :type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
|
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of a DagRun is
        changed manually.
        :param old_states: examine TaskInstances in these states
        :type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
|
[
"For",
"all",
"DAG",
"IDs",
"in",
"the",
"SimpleDagBag",
"look",
"for",
"task",
"instances",
"in",
"the",
"old_states",
"and",
"set",
"them",
"to",
"new_state",
"if",
"the",
"corresponding",
"DagRun",
"does",
"not",
"exist",
"or",
"exists",
"but",
"is",
"not",
"in",
"the",
"running",
"state",
".",
"This",
"normally",
"should",
"not",
"happen",
"but",
"it",
"can",
"if",
"the",
"state",
"of",
"DagRuns",
"are",
"changed",
"manually",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L957-L1012
|
[
"def",
"_change_state_for_tis_without_dagrun",
"(",
"self",
",",
"simple_dag_bag",
",",
"old_states",
",",
"new_state",
",",
"session",
"=",
"None",
")",
":",
"tis_changed",
"=",
"0",
"query",
"=",
"session",
".",
"query",
"(",
"models",
".",
"TaskInstance",
")",
".",
"outerjoin",
"(",
"models",
".",
"DagRun",
",",
"and_",
"(",
"models",
".",
"TaskInstance",
".",
"dag_id",
"==",
"models",
".",
"DagRun",
".",
"dag_id",
",",
"models",
".",
"TaskInstance",
".",
"execution_date",
"==",
"models",
".",
"DagRun",
".",
"execution_date",
")",
")",
".",
"filter",
"(",
"models",
".",
"TaskInstance",
".",
"dag_id",
".",
"in_",
"(",
"simple_dag_bag",
".",
"dag_ids",
")",
")",
".",
"filter",
"(",
"models",
".",
"TaskInstance",
".",
"state",
".",
"in_",
"(",
"old_states",
")",
")",
".",
"filter",
"(",
"or_",
"(",
"models",
".",
"DagRun",
".",
"state",
"!=",
"State",
".",
"RUNNING",
",",
"models",
".",
"DagRun",
".",
"state",
".",
"is_",
"(",
"None",
")",
")",
")",
"if",
"self",
".",
"using_sqlite",
":",
"tis_to_change",
"=",
"query",
".",
"with_for_update",
"(",
")",
".",
"all",
"(",
")",
"for",
"ti",
"in",
"tis_to_change",
":",
"ti",
".",
"set_state",
"(",
"new_state",
",",
"session",
"=",
"session",
")",
"tis_changed",
"+=",
"1",
"else",
":",
"subq",
"=",
"query",
".",
"subquery",
"(",
")",
"tis_changed",
"=",
"session",
".",
"query",
"(",
"models",
".",
"TaskInstance",
")",
".",
"filter",
"(",
"and_",
"(",
"models",
".",
"TaskInstance",
".",
"dag_id",
"==",
"subq",
".",
"c",
".",
"dag_id",
",",
"models",
".",
"TaskInstance",
".",
"task_id",
"==",
"subq",
".",
"c",
".",
"task_id",
",",
"models",
".",
"TaskInstance",
".",
"execution_date",
"==",
"subq",
".",
"c",
".",
"execution_date",
")",
")",
".",
"update",
"(",
"{",
"models",
".",
"TaskInstance",
".",
"state",
":",
"new_state",
"}",
",",
"synchronize_session",
"=",
"False",
")",
"session",
".",
"commit",
"(",
")",
"if",
"tis_changed",
">",
"0",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state\"",
",",
"tis_changed",
",",
"new_state",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
SchedulerJob.__get_concurrency_maps
|
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: dict[tuple[str, str], int]
|
airflow/jobs.py
|
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: dict[tuple[str, str], int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
|
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: dict[tuple[str, str], int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
|
[
"Get",
"the",
"concurrency",
"maps",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1015-L1039
|
[
"def",
"__get_concurrency_maps",
"(",
"self",
",",
"states",
",",
"session",
"=",
"None",
")",
":",
"TI",
"=",
"models",
".",
"TaskInstance",
"ti_concurrency_query",
"=",
"(",
"session",
".",
"query",
"(",
"TI",
".",
"task_id",
",",
"TI",
".",
"dag_id",
",",
"func",
".",
"count",
"(",
"'*'",
")",
")",
".",
"filter",
"(",
"TI",
".",
"state",
".",
"in_",
"(",
"states",
")",
")",
".",
"group_by",
"(",
"TI",
".",
"task_id",
",",
"TI",
".",
"dag_id",
")",
")",
".",
"all",
"(",
")",
"dag_map",
"=",
"defaultdict",
"(",
"int",
")",
"task_map",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"result",
"in",
"ti_concurrency_query",
":",
"task_id",
",",
"dag_id",
",",
"count",
"=",
"result",
"dag_map",
"[",
"dag_id",
"]",
"+=",
"count",
"task_map",
"[",
"(",
"dag_id",
",",
"task_id",
")",
"]",
"=",
"count",
"return",
"dag_map",
",",
"task_map"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
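
A DB-free sketch of the aggregation performed by __get_concurrency_maps: given (task_id, dag_id, count) rows such as the GROUP BY query above would return, build a per-DAG map and a per-(dag_id, task_id) map. The sample rows are made up for illustration.

from collections import defaultdict

rows = [("t1", "d1", 3), ("t2", "d1", 1), ("t1", "d2", 2)]  # hypothetical query output

dag_map = defaultdict(int)
task_map = defaultdict(int)
for task_id, dag_id, count in rows:
    dag_map[dag_id] += count             # total running/queued TIs per DAG
    task_map[(dag_id, task_id)] = count  # running/queued TIs per individual task

print(dict(dag_map))   # -> {'d1': 4, 'd2': 2}
print(dict(task_map))  # -> {('d1', 't1'): 3, ('d1', 't2'): 1, ('d2', 't1'): 2}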
|
test
|
SchedulerJob._find_executable_task_instances
|
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
|
airflow/jobs.py
|
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
states_to_count_as_running = [State.RUNNING, State.QUEUED]
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=states_to_count_as_running, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count
open_slots = models.Pool.default_pool_open_slots()
pool_name = models.Pool.default_pool_name
else:
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
open_slots = 0
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these dont expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
|
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
states_to_count_as_running = [State.RUNNING, State.QUEUED]
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=states_to_count_as_running, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count
open_slots = models.Pool.default_pool_open_slots()
pool_name = models.Pool.default_pool_name
else:
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
open_slots = 0
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these dont expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
|
[
"Finds",
"TIs",
"that",
"are",
"ready",
"for",
"execution",
"with",
"respect",
"to",
"pool",
"limits",
"dag",
"concurrency",
"executor",
"state",
"and",
"priority",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1042-L1214
|
[
"def",
"_find_executable_task_instances",
"(",
"self",
",",
"simple_dag_bag",
",",
"states",
",",
"session",
"=",
"None",
")",
":",
"executable_tis",
"=",
"[",
"]",
"# Get all task instances associated with scheduled",
"# DagRuns which are not backfilled, in the given states,",
"# and the dag is not paused",
"TI",
"=",
"models",
".",
"TaskInstance",
"DR",
"=",
"models",
".",
"DagRun",
"DM",
"=",
"models",
".",
"DagModel",
"ti_query",
"=",
"(",
"session",
".",
"query",
"(",
"TI",
")",
".",
"filter",
"(",
"TI",
".",
"dag_id",
".",
"in_",
"(",
"simple_dag_bag",
".",
"dag_ids",
")",
")",
".",
"outerjoin",
"(",
"DR",
",",
"and_",
"(",
"DR",
".",
"dag_id",
"==",
"TI",
".",
"dag_id",
",",
"DR",
".",
"execution_date",
"==",
"TI",
".",
"execution_date",
")",
")",
".",
"filter",
"(",
"or_",
"(",
"DR",
".",
"run_id",
"==",
"None",
",",
"# noqa: E711",
"not_",
"(",
"DR",
".",
"run_id",
".",
"like",
"(",
"BackfillJob",
".",
"ID_PREFIX",
"+",
"'%'",
")",
")",
")",
")",
".",
"outerjoin",
"(",
"DM",
",",
"DM",
".",
"dag_id",
"==",
"TI",
".",
"dag_id",
")",
".",
"filter",
"(",
"or_",
"(",
"DM",
".",
"dag_id",
"==",
"None",
",",
"# noqa: E711",
"not_",
"(",
"DM",
".",
"is_paused",
")",
")",
")",
")",
"# Additional filters on task instance state",
"if",
"None",
"in",
"states",
":",
"ti_query",
"=",
"ti_query",
".",
"filter",
"(",
"or_",
"(",
"TI",
".",
"state",
"==",
"None",
",",
"TI",
".",
"state",
".",
"in_",
"(",
"states",
")",
")",
"# noqa: E711",
")",
"else",
":",
"ti_query",
"=",
"ti_query",
".",
"filter",
"(",
"TI",
".",
"state",
".",
"in_",
"(",
"states",
")",
")",
"task_instances_to_examine",
"=",
"ti_query",
".",
"all",
"(",
")",
"if",
"len",
"(",
"task_instances_to_examine",
")",
"==",
"0",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"No tasks to consider for execution.\"",
")",
"return",
"executable_tis",
"# Put one task instance on each line",
"task_instance_str",
"=",
"\"\\n\\t\"",
".",
"join",
"(",
"[",
"repr",
"(",
"x",
")",
"for",
"x",
"in",
"task_instances_to_examine",
"]",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"%s tasks up for execution:\\n\\t%s\"",
",",
"len",
"(",
"task_instances_to_examine",
")",
",",
"task_instance_str",
")",
"# Get the pool settings",
"pools",
"=",
"{",
"p",
".",
"pool",
":",
"p",
"for",
"p",
"in",
"session",
".",
"query",
"(",
"models",
".",
"Pool",
")",
".",
"all",
"(",
")",
"}",
"pool_to_task_instances",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"task_instance",
"in",
"task_instances_to_examine",
":",
"pool_to_task_instances",
"[",
"task_instance",
".",
"pool",
"]",
".",
"append",
"(",
"task_instance",
")",
"states_to_count_as_running",
"=",
"[",
"State",
".",
"RUNNING",
",",
"State",
".",
"QUEUED",
"]",
"# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.",
"dag_concurrency_map",
",",
"task_concurrency_map",
"=",
"self",
".",
"__get_concurrency_maps",
"(",
"states",
"=",
"states_to_count_as_running",
",",
"session",
"=",
"session",
")",
"# Go through each pool, and queue up a task for execution if there are",
"# any open slots in the pool.",
"for",
"pool",
",",
"task_instances",
"in",
"pool_to_task_instances",
".",
"items",
"(",
")",
":",
"pool_name",
"=",
"pool",
"if",
"not",
"pool",
":",
"# Arbitrary:",
"# If queued outside of a pool, trigger no more than",
"# non_pooled_task_slot_count",
"open_slots",
"=",
"models",
".",
"Pool",
".",
"default_pool_open_slots",
"(",
")",
"pool_name",
"=",
"models",
".",
"Pool",
".",
"default_pool_name",
"else",
":",
"if",
"pool",
"not",
"in",
"pools",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Tasks using non-existent pool '%s' will not be scheduled\"",
",",
"pool",
")",
"open_slots",
"=",
"0",
"else",
":",
"open_slots",
"=",
"pools",
"[",
"pool",
"]",
".",
"open_slots",
"(",
"session",
"=",
"session",
")",
"num_ready",
"=",
"len",
"(",
"task_instances",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Figuring out tasks to run in Pool(name=%s) with %s open slots \"",
"\"and %s task instances ready to be queued\"",
",",
"pool",
",",
"open_slots",
",",
"num_ready",
")",
"priority_sorted_task_instances",
"=",
"sorted",
"(",
"task_instances",
",",
"key",
"=",
"lambda",
"ti",
":",
"(",
"-",
"ti",
".",
"priority_weight",
",",
"ti",
".",
"execution_date",
")",
")",
"# Number of tasks that cannot be scheduled because of no open slot in pool",
"num_starving_tasks",
"=",
"0",
"for",
"current_index",
",",
"task_instance",
"in",
"enumerate",
"(",
"priority_sorted_task_instances",
")",
":",
"if",
"open_slots",
"<=",
"0",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Not scheduling since there are %s open slots in pool %s\"",
",",
"open_slots",
",",
"pool",
")",
"# Can't schedule any more since there are no more open slots.",
"num_starving_tasks",
"=",
"len",
"(",
"priority_sorted_task_instances",
")",
"-",
"current_index",
"break",
"# Check to make sure that the task concurrency of the DAG hasn't been",
"# reached.",
"dag_id",
"=",
"task_instance",
".",
"dag_id",
"simple_dag",
"=",
"simple_dag_bag",
".",
"get_dag",
"(",
"dag_id",
")",
"current_dag_concurrency",
"=",
"dag_concurrency_map",
"[",
"dag_id",
"]",
"dag_concurrency_limit",
"=",
"simple_dag_bag",
".",
"get_dag",
"(",
"dag_id",
")",
".",
"concurrency",
"self",
".",
"log",
".",
"info",
"(",
"\"DAG %s has %s/%s running and queued tasks\"",
",",
"dag_id",
",",
"current_dag_concurrency",
",",
"dag_concurrency_limit",
")",
"if",
"current_dag_concurrency",
">=",
"dag_concurrency_limit",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Not executing %s since the number of tasks running or queued \"",
"\"from DAG %s is >= to the DAG's task concurrency limit of %s\"",
",",
"task_instance",
",",
"dag_id",
",",
"dag_concurrency_limit",
")",
"continue",
"task_concurrency_limit",
"=",
"simple_dag",
".",
"get_task_special_arg",
"(",
"task_instance",
".",
"task_id",
",",
"'task_concurrency'",
")",
"if",
"task_concurrency_limit",
"is",
"not",
"None",
":",
"current_task_concurrency",
"=",
"task_concurrency_map",
"[",
"(",
"task_instance",
".",
"dag_id",
",",
"task_instance",
".",
"task_id",
")",
"]",
"if",
"current_task_concurrency",
">=",
"task_concurrency_limit",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Not executing %s since the task concurrency for\"",
"\" this task has been reached.\"",
",",
"task_instance",
")",
"continue",
"if",
"self",
".",
"executor",
".",
"has_task",
"(",
"task_instance",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Not handling task %s as the executor reports it is running\"",
",",
"task_instance",
".",
"key",
")",
"continue",
"executable_tis",
".",
"append",
"(",
"task_instance",
")",
"open_slots",
"-=",
"1",
"dag_concurrency_map",
"[",
"dag_id",
"]",
"+=",
"1",
"task_concurrency_map",
"[",
"(",
"task_instance",
".",
"dag_id",
",",
"task_instance",
".",
"task_id",
")",
"]",
"+=",
"1",
"Stats",
".",
"gauge",
"(",
"'pool.starving_tasks.{pool_name}'",
".",
"format",
"(",
"pool_name",
"=",
"pool_name",
")",
",",
"num_starving_tasks",
")",
"task_instance_str",
"=",
"\"\\n\\t\"",
".",
"join",
"(",
"[",
"repr",
"(",
"x",
")",
"for",
"x",
"in",
"executable_tis",
"]",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Setting the following tasks to queued state:\\n\\t%s\"",
",",
"task_instance_str",
")",
"# so these dont expire on commit",
"for",
"ti",
"in",
"executable_tis",
":",
"copy_dag_id",
"=",
"ti",
".",
"dag_id",
"copy_execution_date",
"=",
"ti",
".",
"execution_date",
"copy_task_id",
"=",
"ti",
".",
"task_id",
"make_transient",
"(",
"ti",
")",
"ti",
".",
"dag_id",
"=",
"copy_dag_id",
"ti",
".",
"execution_date",
"=",
"copy_execution_date",
"ti",
".",
"task_id",
"=",
"copy_task_id",
"return",
"executable_tis"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
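
A simplified, self-contained sketch of the slot and concurrency bookkeeping in _find_executable_task_instances: sort by (-priority_weight, execution_date), stop when the pool has no open slots, and skip TIs whose DAG is already at its concurrency limit. The TI dataclass, limits, and sample data below are illustrative stand-ins, not Airflow's models.

from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime

@dataclass
class TI:
    dag_id: str
    task_id: str
    priority_weight: int
    execution_date: datetime

def pick_executable(tis, open_slots, dag_concurrency_limit, running_per_dag=None):
    running = defaultdict(int, running_per_dag or {})
    picked = []
    # Highest priority first; ties broken by the older execution date.
    for ti in sorted(tis, key=lambda t: (-t.priority_weight, t.execution_date)):
        if open_slots <= 0:
            break  # pool exhausted; the remaining TIs are the "starving" ones
        if running[ti.dag_id] >= dag_concurrency_limit:
            continue  # this DAG is already at its concurrency limit
        picked.append(ti)
        open_slots -= 1
        running[ti.dag_id] += 1
    return picked

tis = [
    TI("d1", "a", 10, datetime(2019, 1, 1)),
    TI("d1", "b", 5, datetime(2019, 1, 1)),
    TI("d2", "c", 7, datetime(2019, 1, 1)),
]
print([t.task_id for t in pick_executable(tis, open_slots=2, dag_concurrency_limit=1)])
# -> ['a', 'c']  ('b' is skipped because d1 is already at its limit)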
|
test
|
SchedulerJob._change_state_for_executable_task_instances
|
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
|
airflow/jobs.py
|
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa: E711
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = (timezone.utcnow()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
|
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.utils.dag_processing.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa: E711
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = (timezone.utcnow()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
|
[
"Changes",
"the",
"state",
"of",
"task",
"instances",
"in",
"the",
"list",
"with",
"one",
"of",
"the",
"given",
"states",
"to",
"QUEUED",
"atomically",
"and",
"returns",
"the",
"TIs",
"changed",
"in",
"SimpleTaskInstance",
"format",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1217-L1280
|
[
"def",
"_change_state_for_executable_task_instances",
"(",
"self",
",",
"task_instances",
",",
"acceptable_states",
",",
"session",
"=",
"None",
")",
":",
"if",
"len",
"(",
"task_instances",
")",
"==",
"0",
":",
"session",
".",
"commit",
"(",
")",
"return",
"[",
"]",
"TI",
"=",
"models",
".",
"TaskInstance",
"filter_for_ti_state_change",
"=",
"(",
"[",
"and_",
"(",
"TI",
".",
"dag_id",
"==",
"ti",
".",
"dag_id",
",",
"TI",
".",
"task_id",
"==",
"ti",
".",
"task_id",
",",
"TI",
".",
"execution_date",
"==",
"ti",
".",
"execution_date",
")",
"for",
"ti",
"in",
"task_instances",
"]",
")",
"ti_query",
"=",
"(",
"session",
".",
"query",
"(",
"TI",
")",
".",
"filter",
"(",
"or_",
"(",
"*",
"filter_for_ti_state_change",
")",
")",
")",
"if",
"None",
"in",
"acceptable_states",
":",
"ti_query",
"=",
"ti_query",
".",
"filter",
"(",
"or_",
"(",
"TI",
".",
"state",
"==",
"None",
",",
"TI",
".",
"state",
".",
"in_",
"(",
"acceptable_states",
")",
")",
"# noqa: E711",
")",
"else",
":",
"ti_query",
"=",
"ti_query",
".",
"filter",
"(",
"TI",
".",
"state",
".",
"in_",
"(",
"acceptable_states",
")",
")",
"tis_to_set_to_queued",
"=",
"(",
"ti_query",
".",
"with_for_update",
"(",
")",
".",
"all",
"(",
")",
")",
"if",
"len",
"(",
"tis_to_set_to_queued",
")",
"==",
"0",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"No tasks were able to have their state changed to queued.\"",
")",
"session",
".",
"commit",
"(",
")",
"return",
"[",
"]",
"# set TIs to queued state",
"for",
"task_instance",
"in",
"tis_to_set_to_queued",
":",
"task_instance",
".",
"state",
"=",
"State",
".",
"QUEUED",
"task_instance",
".",
"queued_dttm",
"=",
"(",
"timezone",
".",
"utcnow",
"(",
")",
"if",
"not",
"task_instance",
".",
"queued_dttm",
"else",
"task_instance",
".",
"queued_dttm",
")",
"session",
".",
"merge",
"(",
"task_instance",
")",
"# Generate a list of SimpleTaskInstance for the use of queuing",
"# them in the executor.",
"simple_task_instances",
"=",
"[",
"SimpleTaskInstance",
"(",
"ti",
")",
"for",
"ti",
"in",
"tis_to_set_to_queued",
"]",
"task_instance_str",
"=",
"\"\\n\\t\"",
".",
"join",
"(",
"[",
"repr",
"(",
"x",
")",
"for",
"x",
"in",
"tis_to_set_to_queued",
"]",
")",
"session",
".",
"commit",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Setting the following %s tasks to queued state:\\n\\t%s\"",
",",
"len",
"(",
"tis_to_set_to_queued",
")",
",",
"task_instance_str",
")",
"return",
"simple_task_instances"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
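
A tiny sketch of the queuing step above, outside the ORM: flip the state to QUEUED and stamp queued_dttm only when it is not already set, so a re-queued TI keeps its original timestamp. The FakeTI class and state strings are stand-ins, not airflow.models.TaskInstance.

from datetime import datetime, timezone

class FakeTI:
    def __init__(self, task_id, state=None, queued_dttm=None):
        self.task_id = task_id
        self.state = state
        self.queued_dttm = queued_dttm

def queue_tis(tis):
    now = datetime.now(timezone.utc)
    for ti in tis:
        ti.state = "queued"
        # keep an existing timestamp so a re-queued TI does not lose its original one
        ti.queued_dttm = ti.queued_dttm or now
    return tis

tis = queue_tis([FakeTI("t1"), FakeTI("t2", queued_dttm=datetime(2019, 1, 1))])
print([(t.task_id, t.state, t.queued_dttm.year) for t in tis])
# -> [('t1', 'queued', <current year>), ('t2', 'queued', 2019)]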
|
test
|
SchedulerJob._enqueue_task_instances_with_queued_state
|
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
|
airflow/jobs.py
|
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
|
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
|
[
"Takes",
"task_instances",
"which",
"should",
"have",
"been",
"set",
"to",
"queued",
"and",
"enqueues",
"them",
"with",
"the",
"executor",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1282-L1322
|
[
"def",
"_enqueue_task_instances_with_queued_state",
"(",
"self",
",",
"simple_dag_bag",
",",
"simple_task_instances",
")",
":",
"TI",
"=",
"models",
".",
"TaskInstance",
"# actually enqueue them",
"for",
"simple_task_instance",
"in",
"simple_task_instances",
":",
"simple_dag",
"=",
"simple_dag_bag",
".",
"get_dag",
"(",
"simple_task_instance",
".",
"dag_id",
")",
"command",
"=",
"TI",
".",
"generate_command",
"(",
"simple_task_instance",
".",
"dag_id",
",",
"simple_task_instance",
".",
"task_id",
",",
"simple_task_instance",
".",
"execution_date",
",",
"local",
"=",
"True",
",",
"mark_success",
"=",
"False",
",",
"ignore_all_deps",
"=",
"False",
",",
"ignore_depends_on_past",
"=",
"False",
",",
"ignore_task_deps",
"=",
"False",
",",
"ignore_ti_state",
"=",
"False",
",",
"pool",
"=",
"simple_task_instance",
".",
"pool",
",",
"file_path",
"=",
"simple_dag",
".",
"full_filepath",
",",
"pickle_id",
"=",
"simple_dag",
".",
"pickle_id",
")",
"priority",
"=",
"simple_task_instance",
".",
"priority_weight",
"queue",
"=",
"simple_task_instance",
".",
"queue",
"self",
".",
"log",
".",
"info",
"(",
"\"Sending %s to executor with priority %s and queue %s\"",
",",
"simple_task_instance",
".",
"key",
",",
"priority",
",",
"queue",
")",
"self",
".",
"executor",
".",
"queue_command",
"(",
"simple_task_instance",
",",
"command",
",",
"priority",
"=",
"priority",
",",
"queue",
"=",
"queue",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
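
The executor side of the hand-off above is out of scope here, but the priority/queue pairing can be pictured with a plain heap that pops the highest-priority command first. This ToyExecutor is only an illustration; Airflow's BaseExecutor.queue_command has its own buffering and parallelism logic.

import heapq

class ToyExecutor:
    def __init__(self):
        self._heap = []
        self._counter = 0  # tie-breaker so equal priorities stay FIFO

    def queue_command(self, key, command, priority=1, queue="default"):
        # heapq is a min-heap, so negate the priority to pop the largest first
        heapq.heappush(self._heap, (-priority, self._counter, queue, key, command))
        self._counter += 1

    def drain(self):
        while self._heap:
            _, _, queue, key, command = heapq.heappop(self._heap)
            yield key, command, queue

ex = ToyExecutor()
ex.queue_command(("d1", "t1"), ["airflow", "run", "..."], priority=5)
ex.queue_command(("d1", "t2"), ["airflow", "run", "..."], priority=10)
print([key for key, _, _ in ex.drain()])  # -> [('d1', 't2'), ('d1', 't1')]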
|
test
|
SchedulerJob._execute_task_instances
|
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances with state changed.
|
airflow/jobs.py
|
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
|
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
|
[
"Attempts",
"to",
"execute",
"TaskInstances",
"that",
"should",
"be",
"executed",
"by",
"the",
"scheduler",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1325-L1359
|
[
"def",
"_execute_task_instances",
"(",
"self",
",",
"simple_dag_bag",
",",
"states",
",",
"session",
"=",
"None",
")",
":",
"executable_tis",
"=",
"self",
".",
"_find_executable_task_instances",
"(",
"simple_dag_bag",
",",
"states",
",",
"session",
"=",
"session",
")",
"def",
"query",
"(",
"result",
",",
"items",
")",
":",
"simple_tis_with_state_changed",
"=",
"self",
".",
"_change_state_for_executable_task_instances",
"(",
"items",
",",
"states",
",",
"session",
"=",
"session",
")",
"self",
".",
"_enqueue_task_instances_with_queued_state",
"(",
"simple_dag_bag",
",",
"simple_tis_with_state_changed",
")",
"session",
".",
"commit",
"(",
")",
"return",
"result",
"+",
"len",
"(",
"simple_tis_with_state_changed",
")",
"return",
"helpers",
".",
"reduce_in_chunks",
"(",
"query",
",",
"executable_tis",
",",
"0",
",",
"self",
".",
"max_tis_per_query",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
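
The chunking above delegates to airflow.utils.helpers.reduce_in_chunks; the stand-in below sketches the assumed behaviour: fold a function over fixed-size slices of a list while threading an accumulator through, with a non-positive chunk_size meaning "process everything in one chunk". The exact semantics of the real helper may differ in detail.

def reduce_in_chunks(fn, iterable, initializer, chunk_size=0):
    items = list(iterable)
    if chunk_size <= 0:
        chunk_size = len(items) or 1
    acc = initializer
    for start in range(0, len(items), chunk_size):
        acc = fn(acc, items[start:start + chunk_size])
    return acc

# Example: tally items while "processing" them two at a time, mirroring how the
# scheduler sums the number of TIs whose state was changed per chunk.
total = reduce_in_chunks(lambda acc, chunk: acc + len(chunk), range(5), 0, chunk_size=2)
print(total)  # 3 chunks processed, 5 items counted -> prints 5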
|
test
|
SchedulerJob._change_state_for_tasks_failed_to_execute
|
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
|
airflow/jobs.py
|
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running. And we need to -1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs to queued state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
|
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running. And we need to -1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs to queued state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
|
[
"If",
"there",
"are",
"tasks",
"left",
"over",
"in",
"the",
"executor",
"we",
"set",
"them",
"back",
"to",
"SCHEDULED",
"to",
"avoid",
"creating",
"hanging",
"tasks",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1362-L1399
|
[
"def",
"_change_state_for_tasks_failed_to_execute",
"(",
"self",
",",
"session",
")",
":",
"if",
"self",
".",
"executor",
".",
"queued_tasks",
":",
"TI",
"=",
"models",
".",
"TaskInstance",
"filter_for_ti_state_change",
"=",
"(",
"[",
"and_",
"(",
"TI",
".",
"dag_id",
"==",
"dag_id",
",",
"TI",
".",
"task_id",
"==",
"task_id",
",",
"TI",
".",
"execution_date",
"==",
"execution_date",
",",
"# The TI.try_number will return raw try_number+1 since the",
"# ti is not running. And we need to -1 to match the DB record.",
"TI",
".",
"_try_number",
"==",
"try_number",
"-",
"1",
",",
"TI",
".",
"state",
"==",
"State",
".",
"QUEUED",
")",
"for",
"dag_id",
",",
"task_id",
",",
"execution_date",
",",
"try_number",
"in",
"self",
".",
"executor",
".",
"queued_tasks",
".",
"keys",
"(",
")",
"]",
")",
"ti_query",
"=",
"(",
"session",
".",
"query",
"(",
"TI",
")",
".",
"filter",
"(",
"or_",
"(",
"*",
"filter_for_ti_state_change",
")",
")",
")",
"tis_to_set_to_scheduled",
"=",
"(",
"ti_query",
".",
"with_for_update",
"(",
")",
".",
"all",
"(",
")",
")",
"if",
"len",
"(",
"tis_to_set_to_scheduled",
")",
"==",
"0",
":",
"session",
".",
"commit",
"(",
")",
"return",
"# set TIs to queued state",
"for",
"task_instance",
"in",
"tis_to_set_to_scheduled",
":",
"task_instance",
".",
"state",
"=",
"State",
".",
"SCHEDULED",
"task_instance_str",
"=",
"\"\\n\\t\"",
".",
"join",
"(",
"[",
"repr",
"(",
"x",
")",
"for",
"x",
"in",
"tis_to_set_to_scheduled",
"]",
")",
"session",
".",
"commit",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Set the following tasks to scheduled state:\\n\\t%s\"",
",",
"task_instance_str",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
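
A toy illustration of the try_number off-by-one handled above: when a TI is not running, its public try_number property reports the next try, so a key built from that value has to be compared against the stored _try_number as try_number - 1. ToyTI is a stand-in that only mimics that one property, not the real TaskInstance model.

class ToyTI:
    def __init__(self):
        self._try_number = 0   # what is stored in the DB column
        self.state = "queued"

    @property
    def try_number(self):
        # mirrors the behaviour noted in the code comment above: +1 unless running
        return self._try_number if self.state == "running" else self._try_number + 1

ti = ToyTI()
key_try_number = ti.try_number               # 1, as it appears in executor.queued_tasks keys
assert ti._try_number == key_try_number - 1  # the comparison used in the query above
print(key_try_number, ti._try_number)        # -> 1 0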
|
test
|
SchedulerJob._process_dags
|
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: airflow.models.DAG
:param tis_out: A queue to add generated TaskInstance objects
:type tis_out: multiprocessing.Queue[TaskInstance]
:rtype: None
|
airflow/jobs.py
|
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: airflow.models.DAG
:param tis_out: A queue to add generated TaskInstance objects
:type tis_out: multiprocessing.Queue[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
|
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: airflow.models.DAG
:param tis_out: A queue to add generated TaskInstance objects
:type tis_out: multiprocessing.Queue[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
|
[
"Iterates",
"over",
"the",
"dags",
"and",
"processes",
"them",
".",
"Processing",
"includes",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1401-L1439
|
[
"def",
"_process_dags",
"(",
"self",
",",
"dagbag",
",",
"dags",
",",
"tis_out",
")",
":",
"for",
"dag",
"in",
"dags",
":",
"dag",
"=",
"dagbag",
".",
"get_dag",
"(",
"dag",
".",
"dag_id",
")",
"if",
"not",
"dag",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"DAG ID %s was not found in the DagBag\"",
",",
"dag",
".",
"dag_id",
")",
"continue",
"if",
"dag",
".",
"is_paused",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Not processing DAG %s since it's paused\"",
",",
"dag",
".",
"dag_id",
")",
"continue",
"self",
".",
"log",
".",
"info",
"(",
"\"Processing %s\"",
",",
"dag",
".",
"dag_id",
")",
"dag_run",
"=",
"self",
".",
"create_dag_run",
"(",
"dag",
")",
"if",
"dag_run",
":",
"expected_start_date",
"=",
"dag",
".",
"following_schedule",
"(",
"dag_run",
".",
"execution_date",
")",
"if",
"expected_start_date",
":",
"schedule_delay",
"=",
"dag_run",
".",
"start_date",
"-",
"expected_start_date",
"Stats",
".",
"timing",
"(",
"'dagrun.schedule_delay.{dag_id}'",
".",
"format",
"(",
"dag_id",
"=",
"dag",
".",
"dag_id",
")",
",",
"schedule_delay",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Created %s\"",
",",
"dag_run",
")",
"self",
".",
"_process_task_instances",
"(",
"dag",
",",
"tis_out",
")",
"self",
".",
"manage_slas",
"(",
"dag",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
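
A small sketch of the schedule-delay metric computed in _process_dags: the delay is how far the DagRun's actual start drifted past the following schedule tick of its execution_date. Plain timedeltas stand in for dag.following_schedule here; the dates are made up.

from datetime import datetime, timedelta

execution_date = datetime(2019, 1, 1, 0, 0)
schedule_interval = timedelta(hours=1)

expected_start_date = execution_date + schedule_interval  # what dag.following_schedule(...) would give
actual_start_date = datetime(2019, 1, 1, 1, 5)            # dag_run.start_date

schedule_delay = actual_start_date - expected_start_date
print(schedule_delay)  # -> 0:05:00, the value fed to Stats.timing('dagrun.schedule_delay.<dag_id>')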
|
test
|
SchedulerJob._process_executor_events
|
Respond to executor events.
|
airflow/jobs.py
|
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
|
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
|
[
"Respond",
"to",
"executor",
"events",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1442-L1484
|
[
"def",
"_process_executor_events",
"(",
"self",
",",
"simple_dag_bag",
",",
"session",
"=",
"None",
")",
":",
"# TODO: this shares quite a lot of code with _manage_executor_state",
"TI",
"=",
"models",
".",
"TaskInstance",
"for",
"key",
",",
"state",
"in",
"list",
"(",
"self",
".",
"executor",
".",
"get_event_buffer",
"(",
"simple_dag_bag",
".",
"dag_ids",
")",
".",
"items",
"(",
")",
")",
":",
"dag_id",
",",
"task_id",
",",
"execution_date",
",",
"try_number",
"=",
"key",
"self",
".",
"log",
".",
"info",
"(",
"\"Executor reports execution of %s.%s execution_date=%s \"",
"\"exited with status %s for try_number %s\"",
",",
"dag_id",
",",
"task_id",
",",
"execution_date",
",",
"state",
",",
"try_number",
")",
"if",
"state",
"==",
"State",
".",
"FAILED",
"or",
"state",
"==",
"State",
".",
"SUCCESS",
":",
"qry",
"=",
"session",
".",
"query",
"(",
"TI",
")",
".",
"filter",
"(",
"TI",
".",
"dag_id",
"==",
"dag_id",
",",
"TI",
".",
"task_id",
"==",
"task_id",
",",
"TI",
".",
"execution_date",
"==",
"execution_date",
")",
"ti",
"=",
"qry",
".",
"first",
"(",
")",
"if",
"not",
"ti",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"TaskInstance %s went missing from the database\"",
",",
"ti",
")",
"continue",
"# TODO: should we fail RUNNING as well, as we do in Backfills?",
"if",
"ti",
".",
"try_number",
"==",
"try_number",
"and",
"ti",
".",
"state",
"==",
"State",
".",
"QUEUED",
":",
"msg",
"=",
"(",
"\"Executor reports task instance {} finished ({}) \"",
"\"although the task says its {}. Was the task \"",
"\"killed externally?\"",
".",
"format",
"(",
"ti",
",",
"state",
",",
"ti",
".",
"state",
")",
")",
"self",
".",
"log",
".",
"error",
"(",
"msg",
")",
"try",
":",
"simple_dag",
"=",
"simple_dag_bag",
".",
"get_dag",
"(",
"dag_id",
")",
"dagbag",
"=",
"models",
".",
"DagBag",
"(",
"simple_dag",
".",
"full_filepath",
")",
"dag",
"=",
"dagbag",
".",
"get_dag",
"(",
"dag_id",
")",
"ti",
".",
"task",
"=",
"dag",
".",
"get_task",
"(",
"task_id",
")",
"ti",
".",
"handle_failure",
"(",
"msg",
")",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Cannot load the dag bag to handle failure for %s\"",
"\". Setting task to FAILED without callbacks or \"",
"\"retries. Do you have enough resources?\"",
",",
"ti",
")",
"ti",
".",
"state",
"=",
"State",
".",
"FAILED",
"session",
".",
"merge",
"(",
"ti",
")",
"session",
".",
"commit",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
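
A DB-free sketch of the mismatch check in _process_executor_events: the executor reports a key as finished, but the recorded TI is still QUEUED for the same try, so it is treated as externally killed and failed. The event buffer and TI store below are plain dicts with made-up data, not Airflow's executor or ORM.

event_buffer = {("d1", "t1", "2019-01-01", 1): "success"}
ti_store = {("d1", "t1", "2019-01-01"): {"state": "queued", "try_number": 1}}

for (dag_id, task_id, execution_date, try_number), state in event_buffer.items():
    if state not in ("success", "failed"):
        continue
    ti = ti_store.get((dag_id, task_id, execution_date))
    if ti is None:
        continue
    if ti["try_number"] == try_number and ti["state"] == "queued":
        # the scheduler would call ti.handle_failure(...) here; we just mark the state
        ti["state"] = "failed"
        print(f"{dag_id}.{task_id} reported {state} by the executor but was still "
              f"queued for try {try_number}; marking it failed")

print(ti_store[("d1", "t1", "2019-01-01")]["state"])  # -> failed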
|
test
|
SchedulerJob._execute_helper
|
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
|
airflow/jobs.py
|
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while True:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self.processor_agent.harvest_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if len(simple_dags) > 0:
try:
simple_dag_bag = SimpleDagBag(simple_dags)
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
continue
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
# Exit early for a test mode, run one additional scheduler loop
# to reduce the possibility that parsed DAG was put into the queue
# by the DAG manager but not yet received by DAG agent.
if self.processor_agent.done:
self._last_loop = True
if self._last_loop:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
|
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while True:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self.processor_agent.harvest_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if len(simple_dags) > 0:
try:
simple_dag_bag = SimpleDagBag(simple_dags)
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
continue
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
# Exit early for a test mode, run one additional scheduler loop
# to reduce the possibility that parsed DAG was put into the queue
# by the DAG manager but not yet received by DAG agent.
if self.processor_agent.done:
self._last_loop = True
if self._last_loop:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
|
[
"The",
"actual",
"scheduler",
"loop",
".",
"The",
"main",
"steps",
"in",
"the",
"loop",
"are",
":",
"#",
".",
"Harvest",
"DAG",
"parsing",
"results",
"through",
"DagFileProcessorAgent",
"#",
".",
"Find",
"and",
"queue",
"executable",
"tasks",
"#",
".",
"Change",
"task",
"instance",
"state",
"in",
"DB",
"#",
".",
"Queue",
"tasks",
"in",
"executor",
"#",
".",
"Heartbeat",
"executor",
"#",
".",
"Execute",
"queued",
"tasks",
"in",
"executor",
"asynchronously",
"#",
".",
"Sync",
"on",
"the",
"states",
"of",
"running",
"tasks"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1526-L1666
|
[
"def",
"_execute_helper",
"(",
"self",
")",
":",
"self",
".",
"executor",
".",
"start",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Resetting orphaned tasks for active dag runs\"",
")",
"self",
".",
"reset_state_for_orphaned_tasks",
"(",
")",
"# Start after resetting orphaned tasks to avoid stressing out DB.",
"self",
".",
"processor_agent",
".",
"start",
"(",
")",
"execute_start_time",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"# Last time that self.heartbeat() was called.",
"last_self_heartbeat_time",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"# For the execute duration, parse and schedule DAGs",
"while",
"True",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Starting Loop...\"",
")",
"loop_start_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"using_sqlite",
":",
"self",
".",
"processor_agent",
".",
"heartbeat",
"(",
")",
"# For the sqlite case w/ 1 thread, wait until the processor",
"# is finished to avoid concurrent access to the DB.",
"self",
".",
"log",
".",
"debug",
"(",
"\"Waiting for processors to finish since we're using sqlite\"",
")",
"self",
".",
"processor_agent",
".",
"wait_until_finished",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Harvesting DAG parsing results\"",
")",
"simple_dags",
"=",
"self",
".",
"processor_agent",
".",
"harvest_simple_dags",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Harvested {} SimpleDAGs\"",
".",
"format",
"(",
"len",
"(",
"simple_dags",
")",
")",
")",
"# Send tasks for execution if available",
"simple_dag_bag",
"=",
"SimpleDagBag",
"(",
"simple_dags",
")",
"if",
"len",
"(",
"simple_dags",
")",
">",
"0",
":",
"try",
":",
"simple_dag_bag",
"=",
"SimpleDagBag",
"(",
"simple_dags",
")",
"# Handle cases where a DAG run state is set (perhaps manually) to",
"# a non-running state. Handle task instances that belong to",
"# DAG runs in those states",
"# If a task instance is up for retry but the corresponding DAG run",
"# isn't running, mark the task instance as FAILED so we don't try",
"# to re-run it.",
"self",
".",
"_change_state_for_tis_without_dagrun",
"(",
"simple_dag_bag",
",",
"[",
"State",
".",
"UP_FOR_RETRY",
"]",
",",
"State",
".",
"FAILED",
")",
"# If a task instance is scheduled or queued or up for reschedule,",
"# but the corresponding DAG run isn't running, set the state to",
"# NONE so we don't try to re-run it.",
"self",
".",
"_change_state_for_tis_without_dagrun",
"(",
"simple_dag_bag",
",",
"[",
"State",
".",
"QUEUED",
",",
"State",
".",
"SCHEDULED",
",",
"State",
".",
"UP_FOR_RESCHEDULE",
"]",
",",
"State",
".",
"NONE",
")",
"self",
".",
"_execute_task_instances",
"(",
"simple_dag_bag",
",",
"(",
"State",
".",
"SCHEDULED",
",",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Error queuing tasks\"",
")",
"self",
".",
"log",
".",
"exception",
"(",
"e",
")",
"continue",
"# Call heartbeats",
"self",
".",
"log",
".",
"debug",
"(",
"\"Heartbeating the executor\"",
")",
"self",
".",
"executor",
".",
"heartbeat",
"(",
")",
"self",
".",
"_change_state_for_tasks_failed_to_execute",
"(",
")",
"# Process events from the executor",
"self",
".",
"_process_executor_events",
"(",
"simple_dag_bag",
")",
"# Heartbeat the scheduler periodically",
"time_since_last_heartbeat",
"=",
"(",
"timezone",
".",
"utcnow",
"(",
")",
"-",
"last_self_heartbeat_time",
")",
".",
"total_seconds",
"(",
")",
"if",
"time_since_last_heartbeat",
">",
"self",
".",
"heartrate",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Heartbeating the scheduler\"",
")",
"self",
".",
"heartbeat",
"(",
")",
"last_self_heartbeat_time",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"is_unit_test",
"=",
"conf",
".",
"getboolean",
"(",
"'core'",
",",
"'unit_test_mode'",
")",
"loop_end_time",
"=",
"time",
".",
"time",
"(",
")",
"loop_duration",
"=",
"loop_end_time",
"-",
"loop_start_time",
"self",
".",
"log",
".",
"debug",
"(",
"\"Ran scheduling loop in %.2f seconds\"",
",",
"loop_duration",
")",
"if",
"not",
"is_unit_test",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Sleeping for %.2f seconds\"",
",",
"self",
".",
"_processor_poll_interval",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"_processor_poll_interval",
")",
"# Exit early for a test mode, run one additional scheduler loop",
"# to reduce the possibility that parsed DAG was put into the queue",
"# by the DAG manager but not yet received by DAG agent.",
"if",
"self",
".",
"processor_agent",
".",
"done",
":",
"self",
".",
"_last_loop",
"=",
"True",
"if",
"self",
".",
"_last_loop",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Exiting scheduler loop as all files\"",
"\" have been processed {} times\"",
".",
"format",
"(",
"self",
".",
"num_runs",
")",
")",
"break",
"if",
"loop_duration",
"<",
"1",
"and",
"not",
"is_unit_test",
":",
"sleep_length",
"=",
"1",
"-",
"loop_duration",
"self",
".",
"log",
".",
"debug",
"(",
"\"Sleeping for {0:.2f} seconds to prevent excessive logging\"",
".",
"format",
"(",
"sleep_length",
")",
")",
"sleep",
"(",
"sleep_length",
")",
"# Stop any processors",
"self",
".",
"processor_agent",
".",
"terminate",
"(",
")",
"# Verify that all files were processed, and if so, deactivate DAGs that",
"# haven't been touched by the scheduler as they likely have been",
"# deleted.",
"if",
"self",
".",
"processor_agent",
".",
"all_files_processed",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Deactivating DAGs that haven't been touched since %s\"",
",",
"execute_start_time",
".",
"isoformat",
"(",
")",
")",
"models",
".",
"DAG",
".",
"deactivate_stale_dags",
"(",
"execute_start_time",
")",
"self",
".",
"executor",
".",
"end",
"(",
")",
"settings",
".",
"Session",
".",
"remove",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
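The row above records SchedulerJob._execute_helper. Two details of its loop are easy to miss in the raw source: the scheduler heartbeats itself only when more than `heartrate` seconds have elapsed, and iterations that finish quickly are padded with a short sleep so the loop does not spin. The sketch below isolates just that pacing pattern; `LoopThrottle`, its parameters, and the `print` stand-in for a real heartbeat are illustrative assumptions, not the Airflow API.

import time


class LoopThrottle:
    """Illustrative stand-in for the heartbeat/sleep pacing of a scheduler loop."""

    def __init__(self, heartrate=5.0, min_loop_seconds=1.0):
        self.heartrate = heartrate                # seconds between self-heartbeats
        self.min_loop_seconds = min_loop_seconds  # pad shorter iterations up to this
        self._last_heartbeat = time.monotonic()

    def maybe_heartbeat(self):
        # Heartbeat only when more than `heartrate` seconds have passed.
        now = time.monotonic()
        if now - self._last_heartbeat > self.heartrate:
            print("heartbeat")                    # real code updates a Job row instead
            self._last_heartbeat = now

    def pad_loop(self, loop_duration):
        # Sleep the remainder of min_loop_seconds so a fast loop does not spin.
        if loop_duration < self.min_loop_seconds:
            time.sleep(self.min_loop_seconds - loop_duration)


if __name__ == "__main__":
    throttle = LoopThrottle(heartrate=2.0)
    for _ in range(3):
        start = time.monotonic()
        # ... harvest parsing results, queue tasks, process executor events ...
        throttle.maybe_heartbeat()
        throttle.pad_loop(time.monotonic() - start)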
test
|
SchedulerJob.process_file
|
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[airflow.utils.dag_processing.SimpleDagBag]
|
airflow/jobs.py
|
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[airflow.utils.dag_processing.SimpleDagBag]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true?)
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We can defer checking the task dependency checks to the worker themselves
# since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags
|
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[airflow.utils.dag_processing.SimpleDagBag]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true?)
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We can defer checking the task dependency checks to the worker themselves
# since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags
|
[
"Process",
"a",
"Python",
"file",
"containing",
"Airflow",
"DAGs",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1669-L1787
|
[
"def",
"process_file",
"(",
"self",
",",
"file_path",
",",
"zombies",
",",
"pickle_dags",
"=",
"False",
",",
"session",
"=",
"None",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Processing file %s for tasks to queue\"",
",",
"file_path",
")",
"# As DAGs are parsed from this file, they will be converted into SimpleDags",
"simple_dags",
"=",
"[",
"]",
"try",
":",
"dagbag",
"=",
"models",
".",
"DagBag",
"(",
"file_path",
",",
"include_examples",
"=",
"False",
")",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"exception",
"(",
"\"Failed at reloading the DAG file %s\"",
",",
"file_path",
")",
"Stats",
".",
"incr",
"(",
"'dag_file_refresh_error'",
",",
"1",
",",
"1",
")",
"return",
"[",
"]",
"if",
"len",
"(",
"dagbag",
".",
"dags",
")",
">",
"0",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"DAG(s) %s retrieved from %s\"",
",",
"dagbag",
".",
"dags",
".",
"keys",
"(",
")",
",",
"file_path",
")",
"else",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"No viable dags retrieved from %s\"",
",",
"file_path",
")",
"self",
".",
"update_import_errors",
"(",
"session",
",",
"dagbag",
")",
"return",
"[",
"]",
"# Save individual DAGs in the ORM and update DagModel.last_scheduled_time",
"for",
"dag",
"in",
"dagbag",
".",
"dags",
".",
"values",
"(",
")",
":",
"dag",
".",
"sync_to_db",
"(",
")",
"paused_dag_ids",
"=",
"[",
"dag",
".",
"dag_id",
"for",
"dag",
"in",
"dagbag",
".",
"dags",
".",
"values",
"(",
")",
"if",
"dag",
".",
"is_paused",
"]",
"# Pickle the DAGs (if necessary) and put them into a SimpleDag",
"for",
"dag_id",
"in",
"dagbag",
".",
"dags",
":",
"# Only return DAGs that are not paused",
"if",
"dag_id",
"not",
"in",
"paused_dag_ids",
":",
"dag",
"=",
"dagbag",
".",
"get_dag",
"(",
"dag_id",
")",
"pickle_id",
"=",
"None",
"if",
"pickle_dags",
":",
"pickle_id",
"=",
"dag",
".",
"pickle",
"(",
"session",
")",
".",
"id",
"simple_dags",
".",
"append",
"(",
"SimpleDag",
"(",
"dag",
",",
"pickle_id",
"=",
"pickle_id",
")",
")",
"if",
"len",
"(",
"self",
".",
"dag_ids",
")",
">",
"0",
":",
"dags",
"=",
"[",
"dag",
"for",
"dag",
"in",
"dagbag",
".",
"dags",
".",
"values",
"(",
")",
"if",
"dag",
".",
"dag_id",
"in",
"self",
".",
"dag_ids",
"and",
"dag",
".",
"dag_id",
"not",
"in",
"paused_dag_ids",
"]",
"else",
":",
"dags",
"=",
"[",
"dag",
"for",
"dag",
"in",
"dagbag",
".",
"dags",
".",
"values",
"(",
")",
"if",
"not",
"dag",
".",
"parent_dag",
"and",
"dag",
".",
"dag_id",
"not",
"in",
"paused_dag_ids",
"]",
"# Not using multiprocessing.Queue() since it's no longer a separate",
"# process and due to some unusual behavior. (empty() incorrectly",
"# returns true?)",
"ti_keys_to_schedule",
"=",
"[",
"]",
"self",
".",
"_process_dags",
"(",
"dagbag",
",",
"dags",
",",
"ti_keys_to_schedule",
")",
"for",
"ti_key",
"in",
"ti_keys_to_schedule",
":",
"dag",
"=",
"dagbag",
".",
"dags",
"[",
"ti_key",
"[",
"0",
"]",
"]",
"task",
"=",
"dag",
".",
"get_task",
"(",
"ti_key",
"[",
"1",
"]",
")",
"ti",
"=",
"models",
".",
"TaskInstance",
"(",
"task",
",",
"ti_key",
"[",
"2",
"]",
")",
"ti",
".",
"refresh_from_db",
"(",
"session",
"=",
"session",
",",
"lock_for_update",
"=",
"True",
")",
"# We can defer checking the task dependency checks to the worker themselves",
"# since they can be expensive to run in the scheduler.",
"dep_context",
"=",
"DepContext",
"(",
"deps",
"=",
"QUEUE_DEPS",
",",
"ignore_task_deps",
"=",
"True",
")",
"# Only schedule tasks that have their dependencies met, e.g. to avoid",
"# a task that recently got its state changed to RUNNING from somewhere",
"# other than the scheduler from getting its state overwritten.",
"# TODO(aoen): It's not great that we have to check all the task instance",
"# dependencies twice; once to get the task scheduled, and again to actually",
"# run the task. We should try to come up with a way to only check them once.",
"if",
"ti",
".",
"are_dependencies_met",
"(",
"dep_context",
"=",
"dep_context",
",",
"session",
"=",
"session",
",",
"verbose",
"=",
"True",
")",
":",
"# Task starts out in the scheduled state. All tasks in the",
"# scheduled state will be sent to the executor",
"ti",
".",
"state",
"=",
"State",
".",
"SCHEDULED",
"# Also save this task instance to the DB.",
"self",
".",
"log",
".",
"info",
"(",
"\"Creating / updating %s in ORM\"",
",",
"ti",
")",
"session",
".",
"merge",
"(",
"ti",
")",
"# commit batch",
"session",
".",
"commit",
"(",
")",
"# Record import errors into the ORM",
"try",
":",
"self",
".",
"update_import_errors",
"(",
"session",
",",
"dagbag",
")",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"exception",
"(",
"\"Error logging import errors!\"",
")",
"try",
":",
"dagbag",
".",
"kill_zombies",
"(",
"zombies",
")",
"except",
"Exception",
":",
"self",
".",
"log",
".",
"exception",
"(",
"\"Error killing zombies!\"",
")",
"return",
"simple_dags"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
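The row above documents SchedulerJob.process_file, which parses a file, skips paused DAGs, optionally pickles each DAG, and returns lightweight SimpleDag summaries. Below is a minimal, self-contained sketch of that filter-and-summarize step using plain dataclasses; `FakeDag`, `DagSummary`, and the placeholder pickle id are hypothetical stand-ins chosen for illustration, not the real Airflow classes.

from dataclasses import dataclass
from typing import Dict, List, Optional


@dataclass
class FakeDag:
    dag_id: str
    is_paused: bool = False


@dataclass
class DagSummary:
    """Lightweight stand-in for the SimpleDag objects the scheduler passes around."""
    dag_id: str
    pickle_id: Optional[int] = None


def summarize_parsed_dags(dags: Dict[str, FakeDag], pickle_dags: bool = False) -> List[DagSummary]:
    # Mirror the filtering in process_file: paused DAGs are skipped and each
    # remaining DAG is wrapped in a lightweight summary object.
    paused = {d.dag_id for d in dags.values() if d.is_paused}
    summaries = []
    for dag_id in dags:
        if dag_id in paused:
            continue
        # A real pickle id would come from saving the DAG to the database.
        pickle_id = abs(hash(dag_id)) % 1000 if pickle_dags else None
        summaries.append(DagSummary(dag_id=dag_id, pickle_id=pickle_id))
    return summaries


if __name__ == "__main__":
    parsed = {"etl": FakeDag("etl"), "reports": FakeDag("reports", is_paused=True)}
    print(summarize_parsed_dags(parsed, pickle_dags=True))   # only "etl" survives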
test
|
BackfillJob._update_counters
|
Updates the counters per state of the tasks that were running. Can re-add
tasks to the set of tasks to run if required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
|
airflow/jobs.py
|
def _update_counters(self, ti_status):
"""
Updates the counters per state of the tasks that were running. Can re-add
        tasks to the set of tasks to run if required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
"""
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(key)
ti_status.running.pop(key)
continue
# special case: if the task needs to run again put it back
elif ti.state == State.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: if the task needs to be rescheduled put it back
elif ti.state == State.UP_FOR_RESCHEDULE:
self.log.warning("Task instance %s is up for reschedule", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: The state of the task can be set to NONE by the task itself
# when it reaches concurrency limits. It could also happen when the state
# is changed externally, e.g. by clearing tasks from the ui. We need to cover
# for that as otherwise those tasks would fall outside of the scope of
# the backfill suddenly.
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti
)
ti.set_state(State.SCHEDULED)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
|
def _update_counters(self, ti_status):
"""
Updates the counters per state of the tasks that were running. Can re-add
        tasks to the set of tasks to run if required.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
"""
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(key)
ti_status.running.pop(key)
continue
# special case: if the task needs to run again put it back
elif ti.state == State.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: if the task needs to be rescheduled put it back
elif ti.state == State.UP_FOR_RESCHEDULE:
self.log.warning("Task instance %s is up for reschedule", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: The state of the task can be set to NONE by the task itself
# when it reaches concurrency limits. It could also happen when the state
# is changed externally, e.g. by clearing tasks from the ui. We need to cover
# for that as otherwise those tasks would fall outside of the scope of
# the backfill suddenly.
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti
)
ti.set_state(State.SCHEDULED)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
|
[
"Updates",
"the",
"counters",
"per",
"state",
"of",
"the",
"tasks",
"that",
"were",
"running",
".",
"Can",
"re",
"-",
"add",
"to",
"tasks",
"to",
"run",
"in",
"case",
"required",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1929-L1977
|
[
"def",
"_update_counters",
"(",
"self",
",",
"ti_status",
")",
":",
"for",
"key",
",",
"ti",
"in",
"list",
"(",
"ti_status",
".",
"running",
".",
"items",
"(",
")",
")",
":",
"ti",
".",
"refresh_from_db",
"(",
")",
"if",
"ti",
".",
"state",
"==",
"State",
".",
"SUCCESS",
":",
"ti_status",
".",
"succeeded",
".",
"add",
"(",
"key",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task instance %s succeeded. Don't rerun.\"",
",",
"ti",
")",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"continue",
"elif",
"ti",
".",
"state",
"==",
"State",
".",
"SKIPPED",
":",
"ti_status",
".",
"skipped",
".",
"add",
"(",
"key",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task instance %s skipped. Don't rerun.\"",
",",
"ti",
")",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"continue",
"elif",
"ti",
".",
"state",
"==",
"State",
".",
"FAILED",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Task instance %s failed\"",
",",
"ti",
")",
"ti_status",
".",
"failed",
".",
"add",
"(",
"key",
")",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"continue",
"# special case: if the task needs to run again put it back",
"elif",
"ti",
".",
"state",
"==",
"State",
".",
"UP_FOR_RETRY",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Task instance %s is up for retry\"",
",",
"ti",
")",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"ti_status",
".",
"to_run",
"[",
"key",
"]",
"=",
"ti",
"# special case: if the task needs to be rescheduled put it back",
"elif",
"ti",
".",
"state",
"==",
"State",
".",
"UP_FOR_RESCHEDULE",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Task instance %s is up for reschedule\"",
",",
"ti",
")",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"ti_status",
".",
"to_run",
"[",
"key",
"]",
"=",
"ti",
"# special case: The state of the task can be set to NONE by the task itself",
"# when it reaches concurrency limits. It could also happen when the state",
"# is changed externally, e.g. by clearing tasks from the ui. We need to cover",
"# for that as otherwise those tasks would fall outside of the scope of",
"# the backfill suddenly.",
"elif",
"ti",
".",
"state",
"==",
"State",
".",
"NONE",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"FIXME: task instance %s state was set to none externally or \"",
"\"reaching concurrency limits. Re-adding task to queue.\"",
",",
"ti",
")",
"ti",
".",
"set_state",
"(",
"State",
".",
"SCHEDULED",
")",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"ti_status",
".",
"to_run",
"[",
"key",
"]",
"=",
"ti"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
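The row above covers BackfillJob._update_counters. Its core pattern is to snapshot the running map and move each key into the bucket matching its current state, sending retries and reschedules back to the to-run collection. A stripped-down sketch of that bucketing follows; here the map holds plain state values rather than TaskInstance objects, and the State enum is a local stand-in, not Airflow's.

from enum import Enum


class State(str, Enum):
    SUCCESS = "success"
    SKIPPED = "skipped"
    FAILED = "failed"
    UP_FOR_RETRY = "up_for_retry"


def update_counters(running, succeeded, skipped, failed, to_run):
    # Walk a snapshot of the running map and move each key into the bucket
    # matching its current state; retries are handed back to the to-run map.
    for key, state in list(running.items()):
        if state == State.SUCCESS:
            succeeded.add(key)
            running.pop(key)
        elif state == State.SKIPPED:
            skipped.add(key)
            running.pop(key)
        elif state == State.FAILED:
            failed.add(key)
            running.pop(key)
        elif state == State.UP_FOR_RETRY:
            to_run[key] = running.pop(key)


if __name__ == "__main__":
    running = {"t1": State.SUCCESS, "t2": State.UP_FOR_RETRY}
    succeeded, skipped, failed, to_run = set(), set(), set(), {}
    update_counters(running, succeeded, skipped, failed, to_run)
    print(succeeded, sorted(to_run))   # {'t1'} ['t2']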
test
|
BackfillJob._manage_executor_state
|
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
|
airflow/jobs.py
|
def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg)
|
def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg)
|
[
"Checks",
"if",
"the",
"executor",
"agrees",
"with",
"the",
"state",
"of",
"task",
"instances",
"that",
"are",
"running"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1979-L2007
|
[
"def",
"_manage_executor_state",
"(",
"self",
",",
"running",
")",
":",
"executor",
"=",
"self",
".",
"executor",
"for",
"key",
",",
"state",
"in",
"list",
"(",
"executor",
".",
"get_event_buffer",
"(",
")",
".",
"items",
"(",
")",
")",
":",
"if",
"key",
"not",
"in",
"running",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"%s state %s not in running=%s\"",
",",
"key",
",",
"state",
",",
"running",
".",
"values",
"(",
")",
")",
"continue",
"ti",
"=",
"running",
"[",
"key",
"]",
"ti",
".",
"refresh_from_db",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Executor state: %s task %s\"",
",",
"state",
",",
"ti",
")",
"if",
"state",
"==",
"State",
".",
"FAILED",
"or",
"state",
"==",
"State",
".",
"SUCCESS",
":",
"if",
"ti",
".",
"state",
"==",
"State",
".",
"RUNNING",
"or",
"ti",
".",
"state",
"==",
"State",
".",
"QUEUED",
":",
"msg",
"=",
"(",
"\"Executor reports task instance {} finished ({}) \"",
"\"although the task says its {}. Was the task \"",
"\"killed externally?\"",
".",
"format",
"(",
"ti",
",",
"state",
",",
"ti",
".",
"state",
")",
")",
"self",
".",
"log",
".",
"error",
"(",
"msg",
")",
"ti",
".",
"handle_failure",
"(",
"msg",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
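The row above covers BackfillJob._manage_executor_state, which cross-checks the executor's reported results against what each task instance believes about itself. The sketch below reduces that reconciliation to dictionaries of plain state strings; the function name and the lowercase state literals are assumptions made for illustration, not the Airflow interface.

def reconcile_executor_events(event_buffer, running_states):
    """Return keys whose executor result disagrees with the task's own state.

    event_buffer:   {key: terminal state reported by the executor}
    running_states: {key: state the task instance itself reports}
    """
    suspicious = []
    for key, executor_state in event_buffer.items():
        if key not in running_states:
            continue   # not one of ours; the real method only logs a warning
        task_state = running_states[key]
        if executor_state in ("success", "failed") and task_state in ("running", "queued"):
            # Executor says the task finished but the task still looks live,
            # which usually means it was killed externally.
            suspicious.append(key)
    return suspicious


if __name__ == "__main__":
    events = {"a": "success", "b": "failed"}
    states = {"a": "success", "b": "running"}
    print(reconcile_executor_events(events, states))   # ['b']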
test
|
BackfillJob._get_dag_run
|
Returns a dag run for the given run date, which will be matched to an existing
dag run if available, or a new dag run is created otherwise. If the max_active_runs
limit is reached, this function will return None.
:param run_date: the execution date for the dag run
:type run_date: datetime.datetime
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
:return: a DagRun in state RUNNING or None
|
airflow/jobs.py
|
def _get_dag_run(self, run_date, session=None):
"""
Returns a dag run for the given run date, which will be matched to an existing
        dag run if available, or a new dag run is created otherwise. If the max_active_runs
limit is reached, this function will return None.
:param run_date: the execution date for the dag run
:type run_date: datetime.datetime
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
:return: a DagRun in state RUNNING or None
"""
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
# consider max_active_runs but ignore when running subdags
respect_dag_max_active_limit = (True
if (self.dag.schedule_interval and
not self.dag.is_subdag)
else False)
current_active_dag_count = self.dag.get_num_active_runs(external_trigger=False)
# check if we are scheduling on top of a already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if (respect_dag_max_active_limit and
current_active_dag_count >= self.dag.max_active_runs):
return None
run = run or self.dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
# set required transient field
run.dag = self.dag
# explicitly mark as backfill and running
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run
|
def _get_dag_run(self, run_date, session=None):
"""
Returns a dag run for the given run date, which will be matched to an existing
        dag run if available, or a new dag run is created otherwise. If the max_active_runs
limit is reached, this function will return None.
:param run_date: the execution date for the dag run
:type run_date: datetime.datetime
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
:return: a DagRun in state RUNNING or None
"""
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
# consider max_active_runs but ignore when running subdags
respect_dag_max_active_limit = (True
if (self.dag.schedule_interval and
not self.dag.is_subdag)
else False)
current_active_dag_count = self.dag.get_num_active_runs(external_trigger=False)
# check if we are scheduling on top of a already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if (respect_dag_max_active_limit and
current_active_dag_count >= self.dag.max_active_runs):
return None
run = run or self.dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
# set required transient field
run.dag = self.dag
# explicitly mark as backfill and running
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run
|
[
"Returns",
"a",
"dag",
"run",
"for",
"the",
"given",
"run",
"date",
"which",
"will",
"be",
"matched",
"to",
"an",
"existing",
"dag",
"run",
"if",
"available",
"or",
"create",
"a",
"new",
"dag",
"run",
"otherwise",
".",
"If",
"the",
"max_active_runs",
"limit",
"is",
"reached",
"this",
"function",
"will",
"return",
"None",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2010-L2068
|
[
"def",
"_get_dag_run",
"(",
"self",
",",
"run_date",
",",
"session",
"=",
"None",
")",
":",
"run_id",
"=",
"BackfillJob",
".",
"ID_FORMAT_PREFIX",
".",
"format",
"(",
"run_date",
".",
"isoformat",
"(",
")",
")",
"# consider max_active_runs but ignore when running subdags",
"respect_dag_max_active_limit",
"=",
"(",
"True",
"if",
"(",
"self",
".",
"dag",
".",
"schedule_interval",
"and",
"not",
"self",
".",
"dag",
".",
"is_subdag",
")",
"else",
"False",
")",
"current_active_dag_count",
"=",
"self",
".",
"dag",
".",
"get_num_active_runs",
"(",
"external_trigger",
"=",
"False",
")",
"# check if we are scheduling on top of a already existing dag_run",
"# we could find a \"scheduled\" run instead of a \"backfill\"",
"run",
"=",
"DagRun",
".",
"find",
"(",
"dag_id",
"=",
"self",
".",
"dag",
".",
"dag_id",
",",
"execution_date",
"=",
"run_date",
",",
"session",
"=",
"session",
")",
"if",
"run",
"is",
"not",
"None",
"and",
"len",
"(",
"run",
")",
">",
"0",
":",
"run",
"=",
"run",
"[",
"0",
"]",
"if",
"run",
".",
"state",
"==",
"State",
".",
"RUNNING",
":",
"respect_dag_max_active_limit",
"=",
"False",
"else",
":",
"run",
"=",
"None",
"# enforce max_active_runs limit for dag, special cases already",
"# handled by respect_dag_max_active_limit",
"if",
"(",
"respect_dag_max_active_limit",
"and",
"current_active_dag_count",
">=",
"self",
".",
"dag",
".",
"max_active_runs",
")",
":",
"return",
"None",
"run",
"=",
"run",
"or",
"self",
".",
"dag",
".",
"create_dagrun",
"(",
"run_id",
"=",
"run_id",
",",
"execution_date",
"=",
"run_date",
",",
"start_date",
"=",
"timezone",
".",
"utcnow",
"(",
")",
",",
"state",
"=",
"State",
".",
"RUNNING",
",",
"external_trigger",
"=",
"False",
",",
"session",
"=",
"session",
",",
"conf",
"=",
"self",
".",
"conf",
",",
")",
"# set required transient field",
"run",
".",
"dag",
"=",
"self",
".",
"dag",
"# explicitly mark as backfill and running",
"run",
".",
"state",
"=",
"State",
".",
"RUNNING",
"run",
".",
"run_id",
"=",
"run_id",
"run",
".",
"verify_integrity",
"(",
"session",
"=",
"session",
")",
"return",
"run"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
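The row above covers BackfillJob._get_dag_run. The subtle part is when the max_active_runs cap actually applies: only for scheduled, non-subdag DAGs, and not at all when an existing RUNNING dag run is being reused. The boolean helper below condenses that gate; its name and parameters are assumptions made for illustration, and it is a sketch of the gating logic rather than the method itself.

def should_create_backfill_run(has_running_existing_run, schedule_interval,
                               is_subdag, active_run_count, max_active_runs):
    # The max_active_runs cap is only respected for scheduled, non-subdag DAGs,
    # and is bypassed entirely when an already-RUNNING dag run is being reused.
    respect_limit = bool(schedule_interval) and not is_subdag
    if has_running_existing_run:
        respect_limit = False
    if respect_limit and active_run_count >= max_active_runs:
        return False   # the real method returns None at this point
    return True


if __name__ == "__main__":
    print(should_create_backfill_run(False, "@daily", False, 16, 16))   # False
    print(should_create_backfill_run(True, "@daily", False, 16, 16))    # True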
test
|
BackfillJob._task_instances_for_dag_run
|
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
|
airflow/jobs.py
|
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason if we don't refresh the reference to run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
return tasks_to_run
|
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: airflow.models.DagRun
:param session: the database session object
:type session: sqlalchemy.orm.session.Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason if we don't refresh the reference to run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
return tasks_to_run
|
[
"Returns",
"a",
"map",
"of",
"task",
"instance",
"key",
"to",
"task",
"instance",
"object",
"for",
"the",
"tasks",
"to",
"run",
"in",
"the",
"given",
"dag",
"run",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2071-L2101
|
[
"def",
"_task_instances_for_dag_run",
"(",
"self",
",",
"dag_run",
",",
"session",
"=",
"None",
")",
":",
"tasks_to_run",
"=",
"{",
"}",
"if",
"dag_run",
"is",
"None",
":",
"return",
"tasks_to_run",
"# check if we have orphaned tasks",
"self",
".",
"reset_state_for_orphaned_tasks",
"(",
"filter_by_dag_run",
"=",
"dag_run",
",",
"session",
"=",
"session",
")",
"# for some reason if we don't refresh the reference to run is lost",
"dag_run",
".",
"refresh_from_db",
"(",
")",
"make_transient",
"(",
"dag_run",
")",
"# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf",
"for",
"ti",
"in",
"dag_run",
".",
"get_task_instances",
"(",
")",
":",
"# all tasks part of the backfill are scheduled to run",
"if",
"ti",
".",
"state",
"==",
"State",
".",
"NONE",
":",
"ti",
".",
"set_state",
"(",
"State",
".",
"SCHEDULED",
",",
"session",
"=",
"session",
")",
"if",
"ti",
".",
"state",
"!=",
"State",
".",
"REMOVED",
":",
"tasks_to_run",
"[",
"ti",
".",
"key",
"]",
"=",
"ti",
"return",
"tasks_to_run"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
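The row above covers BackfillJob._task_instances_for_dag_run, which builds the key-to-task map a backfill will work through, promoting NONE states to SCHEDULED and dropping REMOVED tasks. A plain-Python sketch of that selection follows; states are lowercase strings here instead of Airflow State constants, and the input shape is an assumption made to keep the example self-contained.

def collect_backfill_tasks(task_instances):
    """Build the {key: state} map of tasks a backfill run should work through.

    task_instances is a list of (key, state) pairs; states are plain strings
    here instead of Airflow State constants.
    """
    tasks_to_run = {}
    for key, state in task_instances:
        if state is None:
            state = "scheduled"   # the backfill promotes NONE tasks to SCHEDULED
        if state == "removed":
            continue              # removed tasks are never re-run
        tasks_to_run[key] = state
    return tasks_to_run


if __name__ == "__main__":
    tis = [("extract", None), ("load", "removed"), ("report", "success")]
    print(collect_backfill_tasks(tis))
    # {'extract': 'scheduled', 'report': 'success'}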
test
|
BackfillJob._process_backfill_task_instances
|
Process a set of task instances from a set of dag runs. Special handling is done
to account for different task instance states that could be present when running
them in a backfill process.
:param ti_status: the internal status of the job
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to run the task instances
:type executor: BaseExecutor
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:type pickle_id: int
:param start_date: the start date of the backfill job
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
:return: the list of execution_dates for the finished dag runs
:rtype: list
|
airflow/jobs.py
|
def _process_backfill_task_instances(self,
ti_status,
executor,
pickle_id,
start_date=None, session=None):
"""
Process a set of task instances from a set of dag runs. Special handling is done
to account for different task instance states that could be present when running
them in a backfill process.
:param ti_status: the internal status of the job
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to run the task instances
:type executor: BaseExecutor
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:type pickle_id: int
:param start_date: the start date of the backfill job
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
:return: the list of execution_dates for the finished dag runs
:rtype: list
"""
executed_run_dates = []
while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and
len(ti_status.deadlocked) == 0):
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
@provide_session
def _per_task_process(task, key, ti, session=None):
ti.refresh_from_db()
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.log.debug(
"Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# guard against externally modified tasks instances or
# in case max concurrency has been reached at task runtime
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance {} state was set to None "
"externally. This should not happen"
)
ti.set_state(State.SCHEDULED, session=session)
if self.rerun_failed_tasks:
# Rerun failed tasks or upstreamed failed tasks
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with state {state}".format(ti=ti,
state=ti.state))
if key in ti_status.running:
ti_status.running.pop(key)
# Reset the failed task in backfill to scheduled state
ti.set_state(State.SCHEDULED, session=session)
else:
# Default behaviour which works for subdag.
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with {state} state".format(ti=ti,
state=ti.state))
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
# Is the task runnable? -- then run it
# the dependency checker can change states of tis
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=self.verbose):
ti.refresh_from_db(lock_for_update=True, session=session)
if ti.state in (State.SCHEDULED, State.UP_FOR_RETRY, State.UP_FOR_RESCHEDULE):
if executor.has_task(ti):
self.log.debug(
"Task Instance %s already in executor "
"waiting for queue to clear",
ti
)
else:
self.log.debug('Sending %s to executor', ti)
# Skip scheduled state, we are executing immediately
ti.state = State.QUEUED
ti.queued_dttm = timezone.utcnow() if not ti.queued_dttm else ti.queued_dttm
session.merge(ti)
cfg_path = None
if executor.__class__ in (executors.LocalExecutor,
executors.SequentialExecutor):
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool,
cfg_path=cfg_path)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
session.commit()
return
if ti.state == State.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# special case
if ti.state == State.UP_FOR_RETRY:
self.log.debug(
"Task instance %s retry period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# special case
if ti.state == State.UP_FOR_RESCHEDULE:
self.log.debug(
"Task instance %s reschedule period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# all remaining tasks
self.log.debug('Adding %s to not_ready', ti)
ti_status.not_ready.add(key)
non_pool_slots = conf.getint('core', 'non_pooled_backfill_task_slot_count')
try:
for task in self.dag.topological_sort():
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
if task.pool:
pool = session.query(models.Pool) \
.filter(models.Pool.pool == task.pool) \
.first()
if not pool:
raise PoolNotFound('Unknown pool: {}'.format(task.pool))
open_slots = pool.open_slots(session=session)
if open_slots <= 0:
raise NoAvailablePoolSlot(
"Not scheduling since there are "
"%s open slots in pool %s".format(
open_slots, task.pool))
else:
if non_pool_slots <= 0:
raise NoAvailablePoolSlot(
"Not scheduling since there are no "
"non_pooled_backfill_task_slot_count.")
non_pool_slots -= 1
num_running_tasks = DAG.get_num_task_instances(
self.dag_id,
states=(State.QUEUED, State.RUNNING))
if num_running_tasks >= self.dag.concurrency:
raise DagConcurrencyLimitReached(
"Not scheduling since concurrency limit "
"is reached."
)
_per_task_process(task, key, ti)
except (NoAvailablePoolSlot, DagConcurrencyLimitReached) as e:
self.log.debug(e)
# execute the tasks in the queue
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (ti_status.not_ready and
ti_status.not_ready == set(ti_status.to_run) and
len(ti_status.running) == 0):
self.log.warning(
"Deadlock discovered for ti_status.to_run=%s",
ti_status.to_run.values()
)
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state
self._manage_executor_state(ti_status.running)
# update the task counters
self._update_counters(ti_status=ti_status)
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished():
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
self._log_progress(ti_status)
# return updated status
return executed_run_dates
|
def _process_backfill_task_instances(self,
ti_status,
executor,
pickle_id,
start_date=None, session=None):
"""
Process a set of task instances from a set of dag runs. Special handling is done
to account for different task instance states that could be present when running
them in a backfill process.
:param ti_status: the internal status of the job
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to run the task instances
:type executor: BaseExecutor
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:type pickle_id: int
:param start_date: the start date of the backfill job
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
:return: the list of execution_dates for the finished dag runs
:rtype: list
"""
executed_run_dates = []
while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and
len(ti_status.deadlocked) == 0):
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
@provide_session
def _per_task_process(task, key, ti, session=None):
ti.refresh_from_db()
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.log.debug(
"Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# guard against externally modified tasks instances or
# in case max concurrency has been reached at task runtime
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance {} state was set to None "
"externally. This should not happen"
)
ti.set_state(State.SCHEDULED, session=session)
if self.rerun_failed_tasks:
# Rerun failed tasks or upstreamed failed tasks
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with state {state}".format(ti=ti,
state=ti.state))
if key in ti_status.running:
ti_status.running.pop(key)
# Reset the failed task in backfill to scheduled state
ti.set_state(State.SCHEDULED, session=session)
else:
# Default behaviour which works for subdag.
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with {state} state".format(ti=ti,
state=ti.state))
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
# Is the task runnable? -- then run it
# the dependency checker can change states of tis
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=self.verbose):
ti.refresh_from_db(lock_for_update=True, session=session)
if ti.state in (State.SCHEDULED, State.UP_FOR_RETRY, State.UP_FOR_RESCHEDULE):
if executor.has_task(ti):
self.log.debug(
"Task Instance %s already in executor "
"waiting for queue to clear",
ti
)
else:
self.log.debug('Sending %s to executor', ti)
# Skip scheduled state, we are executing immediately
ti.state = State.QUEUED
ti.queued_dttm = timezone.utcnow() if not ti.queued_dttm else ti.queued_dttm
session.merge(ti)
cfg_path = None
if executor.__class__ in (executors.LocalExecutor,
executors.SequentialExecutor):
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool,
cfg_path=cfg_path)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
session.commit()
return
if ti.state == State.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
return
# special case
if ti.state == State.UP_FOR_RETRY:
self.log.debug(
"Task instance %s retry period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# special case
if ti.state == State.UP_FOR_RESCHEDULE:
self.log.debug(
"Task instance %s reschedule period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
return
# all remaining tasks
self.log.debug('Adding %s to not_ready', ti)
ti_status.not_ready.add(key)
non_pool_slots = conf.getint('core', 'non_pooled_backfill_task_slot_count')
try:
for task in self.dag.topological_sort():
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
if task.pool:
pool = session.query(models.Pool) \
.filter(models.Pool.pool == task.pool) \
.first()
if not pool:
raise PoolNotFound('Unknown pool: {}'.format(task.pool))
open_slots = pool.open_slots(session=session)
if open_slots <= 0:
raise NoAvailablePoolSlot(
    "Not scheduling since there are "
    "{0} open slots in pool {1}".format(
        open_slots, task.pool))
else:
if non_pool_slots <= 0:
raise NoAvailablePoolSlot(
"Not scheduling since there are no "
"non_pooled_backfill_task_slot_count.")
non_pool_slots -= 1
num_running_tasks = DAG.get_num_task_instances(
self.dag_id,
states=(State.QUEUED, State.RUNNING))
if num_running_tasks >= self.dag.concurrency:
raise DagConcurrencyLimitReached(
"Not scheduling since concurrency limit "
"is reached."
)
_per_task_process(task, key, ti)
except (NoAvailablePoolSlot, DagConcurrencyLimitReached) as e:
self.log.debug(e)
# execute the tasks in the queue
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (ti_status.not_ready and
ti_status.not_ready == set(ti_status.to_run) and
len(ti_status.running) == 0):
self.log.warning(
"Deadlock discovered for ti_status.to_run=%s",
ti_status.to_run.values()
)
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state
self._manage_executor_state(ti_status.running)
# update the task counters
self._update_counters(ti_status=ti_status)
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished():
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
self._log_progress(ti_status)
# return updated status
return executed_run_dates
|
[
"Process",
"a",
"set",
"of",
"task",
"instances",
"from",
"a",
"set",
"of",
"dag",
"runs",
".",
"Special",
"handling",
"is",
"done",
"to",
"account",
"for",
"different",
"task",
"instance",
"states",
"that",
"could",
"be",
"present",
"when",
"running",
"them",
"in",
"a",
"backfill",
"process",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2118-L2366
|
[
"def",
"_process_backfill_task_instances",
"(",
"self",
",",
"ti_status",
",",
"executor",
",",
"pickle_id",
",",
"start_date",
"=",
"None",
",",
"session",
"=",
"None",
")",
":",
"executed_run_dates",
"=",
"[",
"]",
"while",
"(",
"(",
"len",
"(",
"ti_status",
".",
"to_run",
")",
">",
"0",
"or",
"len",
"(",
"ti_status",
".",
"running",
")",
">",
"0",
")",
"and",
"len",
"(",
"ti_status",
".",
"deadlocked",
")",
"==",
"0",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"*** Clearing out not_ready list ***\"",
")",
"ti_status",
".",
"not_ready",
".",
"clear",
"(",
")",
"# we need to execute the tasks bottom to top",
"# or leaf to root, as otherwise tasks might be",
"# determined deadlocked while they are actually",
"# waiting for their upstream to finish",
"@",
"provide_session",
"def",
"_per_task_process",
"(",
"task",
",",
"key",
",",
"ti",
",",
"session",
"=",
"None",
")",
":",
"ti",
".",
"refresh_from_db",
"(",
")",
"task",
"=",
"self",
".",
"dag",
".",
"get_task",
"(",
"ti",
".",
"task_id",
")",
"ti",
".",
"task",
"=",
"task",
"ignore_depends_on_past",
"=",
"(",
"self",
".",
"ignore_first_depends_on_past",
"and",
"ti",
".",
"execution_date",
"==",
"(",
"start_date",
"or",
"ti",
".",
"start_date",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task instance to run %s state %s\"",
",",
"ti",
",",
"ti",
".",
"state",
")",
"# The task was already marked successful or skipped by a",
"# different Job. Don't rerun it.",
"if",
"ti",
".",
"state",
"==",
"State",
".",
"SUCCESS",
":",
"ti_status",
".",
"succeeded",
".",
"add",
"(",
"key",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task instance %s succeeded. Don't rerun.\"",
",",
"ti",
")",
"ti_status",
".",
"to_run",
".",
"pop",
"(",
"key",
")",
"if",
"key",
"in",
"ti_status",
".",
"running",
":",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"return",
"elif",
"ti",
".",
"state",
"==",
"State",
".",
"SKIPPED",
":",
"ti_status",
".",
"skipped",
".",
"add",
"(",
"key",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task instance %s skipped. Don't rerun.\"",
",",
"ti",
")",
"ti_status",
".",
"to_run",
".",
"pop",
"(",
"key",
")",
"if",
"key",
"in",
"ti_status",
".",
"running",
":",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"return",
"# guard against externally modified tasks instances or",
"# in case max concurrency has been reached at task runtime",
"elif",
"ti",
".",
"state",
"==",
"State",
".",
"NONE",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"FIXME: task instance {} state was set to None \"",
"\"externally. This should not happen\"",
")",
"ti",
".",
"set_state",
"(",
"State",
".",
"SCHEDULED",
",",
"session",
"=",
"session",
")",
"if",
"self",
".",
"rerun_failed_tasks",
":",
"# Rerun failed tasks or upstreamed failed tasks",
"if",
"ti",
".",
"state",
"in",
"(",
"State",
".",
"FAILED",
",",
"State",
".",
"UPSTREAM_FAILED",
")",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Task instance {ti} \"",
"\"with state {state}\"",
".",
"format",
"(",
"ti",
"=",
"ti",
",",
"state",
"=",
"ti",
".",
"state",
")",
")",
"if",
"key",
"in",
"ti_status",
".",
"running",
":",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"# Reset the failed task in backfill to scheduled state",
"ti",
".",
"set_state",
"(",
"State",
".",
"SCHEDULED",
",",
"session",
"=",
"session",
")",
"else",
":",
"# Default behaviour which works for subdag.",
"if",
"ti",
".",
"state",
"in",
"(",
"State",
".",
"FAILED",
",",
"State",
".",
"UPSTREAM_FAILED",
")",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Task instance {ti} \"",
"\"with {state} state\"",
".",
"format",
"(",
"ti",
"=",
"ti",
",",
"state",
"=",
"ti",
".",
"state",
")",
")",
"ti_status",
".",
"failed",
".",
"add",
"(",
"key",
")",
"ti_status",
".",
"to_run",
".",
"pop",
"(",
"key",
")",
"if",
"key",
"in",
"ti_status",
".",
"running",
":",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"return",
"backfill_context",
"=",
"DepContext",
"(",
"deps",
"=",
"RUN_DEPS",
",",
"ignore_depends_on_past",
"=",
"ignore_depends_on_past",
",",
"ignore_task_deps",
"=",
"self",
".",
"ignore_task_deps",
",",
"flag_upstream_failed",
"=",
"True",
")",
"# Is the task runnable? -- then run it",
"# the dependency checker can change states of tis",
"if",
"ti",
".",
"are_dependencies_met",
"(",
"dep_context",
"=",
"backfill_context",
",",
"session",
"=",
"session",
",",
"verbose",
"=",
"self",
".",
"verbose",
")",
":",
"ti",
".",
"refresh_from_db",
"(",
"lock_for_update",
"=",
"True",
",",
"session",
"=",
"session",
")",
"if",
"ti",
".",
"state",
"in",
"(",
"State",
".",
"SCHEDULED",
",",
"State",
".",
"UP_FOR_RETRY",
",",
"State",
".",
"UP_FOR_RESCHEDULE",
")",
":",
"if",
"executor",
".",
"has_task",
"(",
"ti",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task Instance %s already in executor \"",
"\"waiting for queue to clear\"",
",",
"ti",
")",
"else",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Sending %s to executor'",
",",
"ti",
")",
"# Skip scheduled state, we are executing immediately",
"ti",
".",
"state",
"=",
"State",
".",
"QUEUED",
"ti",
".",
"queued_dttm",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"if",
"not",
"ti",
".",
"queued_dttm",
"else",
"ti",
".",
"queued_dttm",
"session",
".",
"merge",
"(",
"ti",
")",
"cfg_path",
"=",
"None",
"if",
"executor",
".",
"__class__",
"in",
"(",
"executors",
".",
"LocalExecutor",
",",
"executors",
".",
"SequentialExecutor",
")",
":",
"cfg_path",
"=",
"tmp_configuration_copy",
"(",
")",
"executor",
".",
"queue_task_instance",
"(",
"ti",
",",
"mark_success",
"=",
"self",
".",
"mark_success",
",",
"pickle_id",
"=",
"pickle_id",
",",
"ignore_task_deps",
"=",
"self",
".",
"ignore_task_deps",
",",
"ignore_depends_on_past",
"=",
"ignore_depends_on_past",
",",
"pool",
"=",
"self",
".",
"pool",
",",
"cfg_path",
"=",
"cfg_path",
")",
"ti_status",
".",
"running",
"[",
"key",
"]",
"=",
"ti",
"ti_status",
".",
"to_run",
".",
"pop",
"(",
"key",
")",
"session",
".",
"commit",
"(",
")",
"return",
"if",
"ti",
".",
"state",
"==",
"State",
".",
"UPSTREAM_FAILED",
":",
"self",
".",
"log",
".",
"error",
"(",
"\"Task instance %s upstream failed\"",
",",
"ti",
")",
"ti_status",
".",
"failed",
".",
"add",
"(",
"key",
")",
"ti_status",
".",
"to_run",
".",
"pop",
"(",
"key",
")",
"if",
"key",
"in",
"ti_status",
".",
"running",
":",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"return",
"# special case",
"if",
"ti",
".",
"state",
"==",
"State",
".",
"UP_FOR_RETRY",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task instance %s retry period not \"",
"\"expired yet\"",
",",
"ti",
")",
"if",
"key",
"in",
"ti_status",
".",
"running",
":",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"ti_status",
".",
"to_run",
"[",
"key",
"]",
"=",
"ti",
"return",
"# special case",
"if",
"ti",
".",
"state",
"==",
"State",
".",
"UP_FOR_RESCHEDULE",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Task instance %s reschedule period not \"",
"\"expired yet\"",
",",
"ti",
")",
"if",
"key",
"in",
"ti_status",
".",
"running",
":",
"ti_status",
".",
"running",
".",
"pop",
"(",
"key",
")",
"ti_status",
".",
"to_run",
"[",
"key",
"]",
"=",
"ti",
"return",
"# all remaining tasks",
"self",
".",
"log",
".",
"debug",
"(",
"'Adding %s to not_ready'",
",",
"ti",
")",
"ti_status",
".",
"not_ready",
".",
"add",
"(",
"key",
")",
"non_pool_slots",
"=",
"conf",
".",
"getint",
"(",
"'core'",
",",
"'non_pooled_backfill_task_slot_count'",
")",
"try",
":",
"for",
"task",
"in",
"self",
".",
"dag",
".",
"topological_sort",
"(",
")",
":",
"for",
"key",
",",
"ti",
"in",
"list",
"(",
"ti_status",
".",
"to_run",
".",
"items",
"(",
")",
")",
":",
"if",
"task",
".",
"task_id",
"!=",
"ti",
".",
"task_id",
":",
"continue",
"if",
"task",
".",
"pool",
":",
"pool",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Pool",
")",
".",
"filter",
"(",
"models",
".",
"Pool",
".",
"pool",
"==",
"task",
".",
"pool",
")",
".",
"first",
"(",
")",
"if",
"not",
"pool",
":",
"raise",
"PoolNotFound",
"(",
"'Unknown pool: {}'",
".",
"format",
"(",
"task",
".",
"pool",
")",
")",
"open_slots",
"=",
"pool",
".",
"open_slots",
"(",
"session",
"=",
"session",
")",
"if",
"open_slots",
"<=",
"0",
":",
"raise",
"NoAvailablePoolSlot",
"(",
"\"Not scheduling since there are \"",
"\"%s open slots in pool %s\"",
".",
"format",
"(",
"open_slots",
",",
"task",
".",
"pool",
")",
")",
"else",
":",
"if",
"non_pool_slots",
"<=",
"0",
":",
"raise",
"NoAvailablePoolSlot",
"(",
"\"Not scheduling since there are no \"",
"\"non_pooled_backfill_task_slot_count.\"",
")",
"non_pool_slots",
"-=",
"1",
"num_running_tasks",
"=",
"DAG",
".",
"get_num_task_instances",
"(",
"self",
".",
"dag_id",
",",
"states",
"=",
"(",
"State",
".",
"QUEUED",
",",
"State",
".",
"RUNNING",
")",
")",
"if",
"num_running_tasks",
">=",
"self",
".",
"dag",
".",
"concurrency",
":",
"raise",
"DagConcurrencyLimitReached",
"(",
"\"Not scheduling since concurrency limit \"",
"\"is reached.\"",
")",
"_per_task_process",
"(",
"task",
",",
"key",
",",
"ti",
")",
"except",
"(",
"NoAvailablePoolSlot",
",",
"DagConcurrencyLimitReached",
")",
"as",
"e",
":",
"self",
".",
"log",
".",
"debug",
"(",
"e",
")",
"# execute the tasks in the queue",
"self",
".",
"heartbeat",
"(",
")",
"executor",
".",
"heartbeat",
"(",
")",
"# If the set of tasks that aren't ready ever equals the set of",
"# tasks to run and there are no running tasks then the backfill",
"# is deadlocked",
"if",
"(",
"ti_status",
".",
"not_ready",
"and",
"ti_status",
".",
"not_ready",
"==",
"set",
"(",
"ti_status",
".",
"to_run",
")",
"and",
"len",
"(",
"ti_status",
".",
"running",
")",
"==",
"0",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Deadlock discovered for ti_status.to_run=%s\"",
",",
"ti_status",
".",
"to_run",
".",
"values",
"(",
")",
")",
"ti_status",
".",
"deadlocked",
".",
"update",
"(",
"ti_status",
".",
"to_run",
".",
"values",
"(",
")",
")",
"ti_status",
".",
"to_run",
".",
"clear",
"(",
")",
"# check executor state",
"self",
".",
"_manage_executor_state",
"(",
"ti_status",
".",
"running",
")",
"# update the task counters",
"self",
".",
"_update_counters",
"(",
"ti_status",
"=",
"ti_status",
")",
"# update dag run state",
"_dag_runs",
"=",
"ti_status",
".",
"active_runs",
"[",
":",
"]",
"for",
"run",
"in",
"_dag_runs",
":",
"run",
".",
"update_state",
"(",
"session",
"=",
"session",
")",
"if",
"run",
".",
"state",
"in",
"State",
".",
"finished",
"(",
")",
":",
"ti_status",
".",
"finished_runs",
"+=",
"1",
"ti_status",
".",
"active_runs",
".",
"remove",
"(",
"run",
")",
"executed_run_dates",
".",
"append",
"(",
"run",
".",
"execution_date",
")",
"self",
".",
"_log_progress",
"(",
"ti_status",
")",
"# return updated status",
"return",
"executed_run_dates"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
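The deadlock check in the record above is easy to miss among the per-state handling. The following standalone sketch (all names are illustrative, not part of airflow/jobs.py) shows the exact condition the loop uses: a backfill is declared deadlocked only when every task still in to_run was also marked not_ready in the same pass and nothing is currently running.

def is_deadlocked(to_run, not_ready, running):
    # mirrors: ti_status.not_ready and not_ready == set(to_run) and len(running) == 0
    return bool(not_ready) and not_ready == set(to_run) and len(running) == 0

assert is_deadlocked(to_run={'task_a': 'ti_a'}, not_ready={'task_a'}, running={})
assert not is_deadlocked(to_run={'task_a': 'ti_a'}, not_ready=set(), running={})
assert not is_deadlocked(to_run={'task_a': 'ti_a'}, not_ready={'task_a'}, running={'task_b': 'ti_b'})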
|
test
|
BackfillJob._execute_for_run_dates
|
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
:param ti_status: internal BackfillJob status structure used to track task instance progress
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
|
airflow/jobs.py
|
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
:param ti_status: internal BackfillJob status structure used to track task instance progress
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
"""
for next_run_date in run_dates:
dag_run = self._get_dag_run(next_run_date, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
session=session)
if dag_run is None:
continue
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
|
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
:param ti_status: internal BackfillJob status structure used to track task instance progress
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime.datetime
:param session: the current session object
:type session: sqlalchemy.orm.session.Session
"""
for next_run_date in run_dates:
dag_run = self._get_dag_run(next_run_date, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
session=session)
if dag_run is None:
continue
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
|
[
"Computes",
"the",
"dag",
"runs",
"and",
"their",
"respective",
"task",
"instances",
"for",
"the",
"given",
"run",
"dates",
"and",
"executes",
"the",
"task",
"instances",
".",
"Returns",
"a",
"list",
"of",
"execution",
"dates",
"of",
"the",
"dag",
"runs",
"that",
"were",
"executed",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2405-L2442
|
[
"def",
"_execute_for_run_dates",
"(",
"self",
",",
"run_dates",
",",
"ti_status",
",",
"executor",
",",
"pickle_id",
",",
"start_date",
",",
"session",
"=",
"None",
")",
":",
"for",
"next_run_date",
"in",
"run_dates",
":",
"dag_run",
"=",
"self",
".",
"_get_dag_run",
"(",
"next_run_date",
",",
"session",
"=",
"session",
")",
"tis_map",
"=",
"self",
".",
"_task_instances_for_dag_run",
"(",
"dag_run",
",",
"session",
"=",
"session",
")",
"if",
"dag_run",
"is",
"None",
":",
"continue",
"ti_status",
".",
"active_runs",
".",
"append",
"(",
"dag_run",
")",
"ti_status",
".",
"to_run",
".",
"update",
"(",
"tis_map",
"or",
"{",
"}",
")",
"processed_dag_run_dates",
"=",
"self",
".",
"_process_backfill_task_instances",
"(",
"ti_status",
"=",
"ti_status",
",",
"executor",
"=",
"executor",
",",
"pickle_id",
"=",
"pickle_id",
",",
"start_date",
"=",
"start_date",
",",
"session",
"=",
"session",
")",
"ti_status",
".",
"executed_dag_run_dates",
".",
"update",
"(",
"processed_dag_run_dates",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
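A minimal sketch of how this method interacts with its caller, with placeholder strings standing in for real dag run execution dates: the caller repeatedly passes only the not-yet-executed dates, and the executed set grows until it covers every requested run date.

requested = ['2019-01-01', '2019-01-02', '2019-01-03']   # placeholder run dates
executed = set()                                          # plays ti_status.executed_dag_run_dates
while len(executed) < len(requested):
    to_process = [d for d in requested if d not in executed]
    # the real method builds dag runs and task instances here,
    # then delegates to _process_backfill_task_instances
    executed.update(to_process)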
|
test
|
BackfillJob._set_unfinished_dag_runs_to_failed
|
Go through the dag_runs and update the state based on the task_instance state.
Then set DAG runs that are not finished to failed.
:param dag_runs: DAG runs
:param session: session
:return: None
|
airflow/jobs.py
|
def _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None):
"""
Go through the dag_runs and update the state based on the task_instance state.
Then set DAG runs that are not finished to failed.
:param dag_runs: DAG runs
:param session: session
:return: None
"""
for dag_run in dag_runs:
dag_run.update_state()
if dag_run.state not in State.finished():
dag_run.set_state(State.FAILED)
session.merge(dag_run)
|
def _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None):
"""
Go through the dag_runs and update the state based on the task_instance state.
Then set DAG runs that are not finished to failed.
:param dag_runs: DAG runs
:param session: session
:return: None
"""
for dag_run in dag_runs:
dag_run.update_state()
if dag_run.state not in State.finished():
dag_run.set_state(State.FAILED)
session.merge(dag_run)
|
[
"Go",
"through",
"the",
"dag_runs",
"and",
"update",
"the",
"state",
"based",
"on",
"the",
"task_instance",
"state",
".",
"Then",
"set",
"DAG",
"runs",
"that",
"are",
"not",
"finished",
"to",
"failed",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2445-L2458
|
[
"def",
"_set_unfinished_dag_runs_to_failed",
"(",
"self",
",",
"dag_runs",
",",
"session",
"=",
"None",
")",
":",
"for",
"dag_run",
"in",
"dag_runs",
":",
"dag_run",
".",
"update_state",
"(",
")",
"if",
"dag_run",
".",
"state",
"not",
"in",
"State",
".",
"finished",
"(",
")",
":",
"dag_run",
".",
"set_state",
"(",
"State",
".",
"FAILED",
")",
"session",
".",
"merge",
"(",
"dag_run",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
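Purely illustrative restatement of the record above with plain dictionaries (the terminal states are hard-coded here; the real code asks State.finished()): any dag run still outside the terminal set after a user abort is forced to failed.

terminal_states = {'success', 'failed'}                  # stand-in for State.finished()
dag_runs = [{'state': 'success'}, {'state': 'running'}]
for run in dag_runs:
    if run['state'] not in terminal_states:
        run['state'] = 'failed'
assert [r['state'] for r in dag_runs] == ['success', 'failed']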
|
test
|
BackfillJob._execute
|
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
|
airflow/jobs.py
|
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]
if tasks_that_depend_on_past:
raise AirflowException(
'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
",".join(tasks_that_depend_on_past)))
run_dates = run_dates[::-1]
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs)
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done. Exiting.")
|
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if self.run_backwards:
tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]
if tasks_that_depend_on_past:
raise AirflowException(
'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
",".join(tasks_that_depend_on_past)))
run_dates = run_dates[::-1]
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs)
except (KeyboardInterrupt, SystemExit):
self.log.warning("Backfill terminated by user.")
# TODO: we will need to terminate running task instances and set the
# state to failed.
self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)
finally:
session.commit()
executor.end()
self.log.info("Backfill done. Exiting.")
|
[
"Initializes",
"all",
"components",
"required",
"to",
"run",
"a",
"dag",
"for",
"a",
"specified",
"date",
"range",
"and",
"calls",
"helper",
"method",
"to",
"execute",
"the",
"tasks",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2461-L2536
|
[
"def",
"_execute",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"ti_status",
"=",
"BackfillJob",
".",
"_DagRunTaskStatus",
"(",
")",
"start_date",
"=",
"self",
".",
"bf_start_date",
"# Get intervals between the start/end dates, which will turn into dag runs",
"run_dates",
"=",
"self",
".",
"dag",
".",
"get_run_dates",
"(",
"start_date",
"=",
"start_date",
",",
"end_date",
"=",
"self",
".",
"bf_end_date",
")",
"if",
"self",
".",
"run_backwards",
":",
"tasks_that_depend_on_past",
"=",
"[",
"t",
".",
"task_id",
"for",
"t",
"in",
"self",
".",
"dag",
".",
"task_dict",
".",
"values",
"(",
")",
"if",
"t",
".",
"depends_on_past",
"]",
"if",
"tasks_that_depend_on_past",
":",
"raise",
"AirflowException",
"(",
"'You cannot backfill backwards because one or more tasks depend_on_past: {}'",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"tasks_that_depend_on_past",
")",
")",
")",
"run_dates",
"=",
"run_dates",
"[",
":",
":",
"-",
"1",
"]",
"if",
"len",
"(",
"run_dates",
")",
"==",
"0",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"No run dates were found for the given dates and dag interval.\"",
")",
"return",
"# picklin'",
"pickle_id",
"=",
"None",
"if",
"not",
"self",
".",
"donot_pickle",
"and",
"self",
".",
"executor",
".",
"__class__",
"not",
"in",
"(",
"executors",
".",
"LocalExecutor",
",",
"executors",
".",
"SequentialExecutor",
")",
":",
"pickle",
"=",
"DagPickle",
"(",
"self",
".",
"dag",
")",
"session",
".",
"add",
"(",
"pickle",
")",
"session",
".",
"commit",
"(",
")",
"pickle_id",
"=",
"pickle",
".",
"id",
"executor",
"=",
"self",
".",
"executor",
"executor",
".",
"start",
"(",
")",
"ti_status",
".",
"total_runs",
"=",
"len",
"(",
"run_dates",
")",
"# total dag runs in backfill",
"try",
":",
"remaining_dates",
"=",
"ti_status",
".",
"total_runs",
"while",
"remaining_dates",
">",
"0",
":",
"dates_to_process",
"=",
"[",
"run_date",
"for",
"run_date",
"in",
"run_dates",
"if",
"run_date",
"not",
"in",
"ti_status",
".",
"executed_dag_run_dates",
"]",
"self",
".",
"_execute_for_run_dates",
"(",
"run_dates",
"=",
"dates_to_process",
",",
"ti_status",
"=",
"ti_status",
",",
"executor",
"=",
"executor",
",",
"pickle_id",
"=",
"pickle_id",
",",
"start_date",
"=",
"start_date",
",",
"session",
"=",
"session",
")",
"remaining_dates",
"=",
"(",
"ti_status",
".",
"total_runs",
"-",
"len",
"(",
"ti_status",
".",
"executed_dag_run_dates",
")",
")",
"err",
"=",
"self",
".",
"_collect_errors",
"(",
"ti_status",
"=",
"ti_status",
",",
"session",
"=",
"session",
")",
"if",
"err",
":",
"raise",
"AirflowException",
"(",
"err",
")",
"if",
"remaining_dates",
">",
"0",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"max_active_runs limit for dag %s has been reached \"",
"\" - waiting for other dag runs to finish\"",
",",
"self",
".",
"dag_id",
")",
"time",
".",
"sleep",
"(",
"self",
".",
"delay_on_limit_secs",
")",
"except",
"(",
"KeyboardInterrupt",
",",
"SystemExit",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"Backfill terminated by user.\"",
")",
"# TODO: we will need to terminate running task instances and set the",
"# state to failed.",
"self",
".",
"_set_unfinished_dag_runs_to_failed",
"(",
"ti_status",
".",
"active_runs",
")",
"finally",
":",
"session",
".",
"commit",
"(",
")",
"executor",
".",
"end",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Backfill done. Exiting.\"",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
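A hedged usage sketch for the record above: a backfill is normally started from the CLI (for example: airflow backfill -s 2019-01-01 -e 2019-01-07 DAG_ID) or programmatically via DAG.run(), which constructs a BackfillJob whose _execute() is shown here. The dag id and dates below are placeholders.

from datetime import datetime
from airflow.models import DagBag

dag = DagBag().get_dag('example_bash_operator')          # placeholder dag id
dag.run(start_date=datetime(2019, 1, 1),
        end_date=datetime(2019, 1, 7))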
|
test
|
LocalTaskJob.heartbeat_callback
|
Self destruct task if state has been moved away from running externally
|
airflow/jobs.py
|
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
same_process = ti.pid == os.getpid()
if ti.state == State.RUNNING:
if not same_hostname:
self.log.warning("The recorded hostname %s "
"does not match this instance's hostname "
"%s", ti.hostname, fqdn)
raise AirflowException("Hostname of job runner does not match")
elif not same_process:
current_pid = os.getpid()
self.log.warning("Recorded pid %s does not match "
"the current pid %s", ti.pid, current_pid)
raise AirflowException("PID of job runner does not match")
elif (
self.task_runner.return_code() is None and
hasattr(self.task_runner, 'process')
):
self.log.warning(
"State of this instance has been externally set to %s. "
"Taking the poison pill.",
ti.state
)
self.task_runner.terminate()
self.terminating = True
|
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
same_process = ti.pid == os.getpid()
if ti.state == State.RUNNING:
if not same_hostname:
self.log.warning("The recorded hostname %s "
"does not match this instance's hostname "
"%s", ti.hostname, fqdn)
raise AirflowException("Hostname of job runner does not match")
elif not same_process:
current_pid = os.getpid()
self.log.warning("Recorded pid %s does not match "
"the current pid %s", ti.pid, current_pid)
raise AirflowException("PID of job runner does not match")
elif (
self.task_runner.return_code() is None and
hasattr(self.task_runner, 'process')
):
self.log.warning(
"State of this instance has been externally set to %s. "
"Taking the poison pill.",
ti.state
)
self.task_runner.terminate()
self.terminating = True
|
[
"Self",
"destruct",
"task",
"if",
"state",
"has",
"been",
"moved",
"away",
"from",
"running",
"externally"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2637-L2673
|
[
"def",
"heartbeat_callback",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"if",
"self",
".",
"terminating",
":",
"# ensure termination if processes are created later",
"self",
".",
"task_runner",
".",
"terminate",
"(",
")",
"return",
"self",
".",
"task_instance",
".",
"refresh_from_db",
"(",
")",
"ti",
"=",
"self",
".",
"task_instance",
"fqdn",
"=",
"get_hostname",
"(",
")",
"same_hostname",
"=",
"fqdn",
"==",
"ti",
".",
"hostname",
"same_process",
"=",
"ti",
".",
"pid",
"==",
"os",
".",
"getpid",
"(",
")",
"if",
"ti",
".",
"state",
"==",
"State",
".",
"RUNNING",
":",
"if",
"not",
"same_hostname",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"The recorded hostname %s \"",
"\"does not match this instance's hostname \"",
"\"%s\"",
",",
"ti",
".",
"hostname",
",",
"fqdn",
")",
"raise",
"AirflowException",
"(",
"\"Hostname of job runner does not match\"",
")",
"elif",
"not",
"same_process",
":",
"current_pid",
"=",
"os",
".",
"getpid",
"(",
")",
"self",
".",
"log",
".",
"warning",
"(",
"\"Recorded pid %s does not match \"",
"\"the current pid %s\"",
",",
"ti",
".",
"pid",
",",
"current_pid",
")",
"raise",
"AirflowException",
"(",
"\"PID of job runner does not match\"",
")",
"elif",
"(",
"self",
".",
"task_runner",
".",
"return_code",
"(",
")",
"is",
"None",
"and",
"hasattr",
"(",
"self",
".",
"task_runner",
",",
"'process'",
")",
")",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"State of this instance has been externally set to %s. \"",
"\"Taking the poison pill.\"",
",",
"ti",
".",
"state",
")",
"self",
".",
"task_runner",
".",
"terminate",
"(",
")",
"self",
".",
"terminating",
"=",
"True"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
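A simplified decision table for the callback above (the function name and return strings are invented for illustration): hostname or pid mismatches while the task is still RUNNING raise an error, whereas a state moved away from RUNNING under a still-live runner triggers the poison pill.

def heartbeat_decision(state, same_hostname, same_process, runner_still_alive):
    if state == 'running':
        if not same_hostname:
            return 'raise AirflowException (hostname mismatch)'
        if not same_process:
            return 'raise AirflowException (pid mismatch)'
        return 'continue'
    if runner_still_alive:
        return 'terminate task runner (poison pill)'
    return 'continue'

assert heartbeat_decision('running', True, True, True) == 'continue'
assert heartbeat_decision('failed', True, True, True).startswith('terminate')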
|
test
|
CloudSpannerHook._get_client
|
Provides a client for interacting with the Cloud Spanner API.
:param project_id: The ID of the GCP project.
:type project_id: str
:return: google.cloud.spanner_v1.client.Client
:rtype: object
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def _get_client(self, project_id):
"""
Provides a client for interacting with the Cloud Spanner API.
:param project_id: The ID of the GCP project.
:type project_id: str
:return: google.cloud.spanner_v1.client.Client
:rtype: object
"""
if not self._client:
self._client = Client(project=project_id, credentials=self._get_credentials())
return self._client
|
def _get_client(self, project_id):
"""
Provides a client for interacting with the Cloud Spanner API.
:param project_id: The ID of the GCP project.
:type project_id: str
:return: google.cloud.spanner_v1.client.Client
:rtype: object
"""
if not self._client:
self._client = Client(project=project_id, credentials=self._get_credentials())
return self._client
|
[
"Provides",
"a",
"client",
"for",
"interacting",
"with",
"the",
"Cloud",
"Spanner",
"API",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L41-L52
|
[
"def",
"_get_client",
"(",
"self",
",",
"project_id",
")",
":",
"if",
"not",
"self",
".",
"_client",
":",
"self",
".",
"_client",
"=",
"Client",
"(",
"project",
"=",
"project_id",
",",
"credentials",
"=",
"self",
".",
"_get_credentials",
"(",
")",
")",
"return",
"self",
".",
"_client"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
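A hedged usage sketch (the connection id and project id are placeholders and assume a configured GCP connection in Airflow): the hook builds the Spanner Client lazily on first use and then reuses the cached object for subsequent calls.

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')
client = hook._get_client(project_id='my-gcp-project')
assert client is hook._get_client(project_id='my-gcp-project')   # cached, not rebuilt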
|
test
|
CloudSpannerHook.get_instance
|
Gets information about a particular instance.
:param project_id: Optional, The ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:return: google.cloud.spanner_v1.instance.Instance
:rtype: object
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def get_instance(self, instance_id, project_id=None):
"""
Gets information about a particular instance.
:param project_id: Optional, The ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:return: google.cloud.spanner_v1.instance.Instance
:rtype: object
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
return None
return instance
|
def get_instance(self, instance_id, project_id=None):
"""
Gets information about a particular instance.
:param project_id: Optional, The ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:return: google.cloud.spanner_v1.instance.Instance
:rtype: object
"""
instance = self._get_client(project_id=project_id).instance(instance_id=instance_id)
if not instance.exists():
return None
return instance
|
[
"Gets",
"information",
"about",
"a",
"particular",
"instance",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L55-L70
|
[
"def",
"get_instance",
"(",
"self",
",",
"instance_id",
",",
"project_id",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
".",
"instance",
"(",
"instance_id",
"=",
"instance_id",
")",
"if",
"not",
"instance",
".",
"exists",
"(",
")",
":",
"return",
"None",
"return",
"instance"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
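Hypothetical usage of get_instance (all ids are placeholders): because the method returns None for a missing instance, an existence check reads naturally.

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')
instance = hook.get_instance(instance_id='test-instance', project_id='my-gcp-project')
if instance is None:
    print('Cloud Spanner instance test-instance does not exist')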
|
test
|
CloudSpannerHook._apply_to_instance
|
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the GCP project that owns the Cloud Spanner
database.
:type project_id: str
:param instance_id: The ID of the instance.
:type instance_id: str
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:type configuration_name: str
:param node_count: (Optional) Number of nodes allocated to the instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:type display_name: str
:param func: Method of the instance to be called.
:type func: Callable
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def _apply_to_instance(self, project_id, instance_id, configuration_name, node_count,
display_name, func):
"""
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the GCP project that owns the Cloud Spanner
database.
:type project_id: str
:param instance_id: The ID of the instance.
:type instance_id: str
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:type configuration_name: str
:param node_count: (Optional) Number of nodes allocated to the instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:type display_name: str
:param func: Method of the instance to be called.
:type func: Callable
"""
# noinspection PyUnresolvedReferences
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id, configuration_name=configuration_name,
node_count=node_count, display_name=display_name)
try:
operation = func(instance) # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
|
def _apply_to_instance(self, project_id, instance_id, configuration_name, node_count,
display_name, func):
"""
Invokes a method on a given instance by applying a specified Callable.
:param project_id: The ID of the GCP project that owns the Cloud Spanner
database.
:type project_id: str
:param instance_id: The ID of the instance.
:type instance_id: str
:param configuration_name: Name of the instance configuration defining how the
instance will be created. Required for instances which do not yet exist.
:type configuration_name: str
:param node_count: (Optional) Number of nodes allocated to the instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the Cloud
Console UI. (Must be between 4 and 30 characters.) If this value is not set
in the constructor, will fall back to the instance ID.
:type display_name: str
:param func: Method of the instance to be called.
:type func: Callable
"""
# noinspection PyUnresolvedReferences
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id, configuration_name=configuration_name,
node_count=node_count, display_name=display_name)
try:
operation = func(instance) # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
|
[
"Invokes",
"a",
"method",
"on",
"a",
"given",
"instance",
"by",
"applying",
"a",
"specified",
"Callable",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L72-L106
|
[
"def",
"_apply_to_instance",
"(",
"self",
",",
"project_id",
",",
"instance_id",
",",
"configuration_name",
",",
"node_count",
",",
"display_name",
",",
"func",
")",
":",
"# noinspection PyUnresolvedReferences",
"instance",
"=",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
".",
"instance",
"(",
"instance_id",
"=",
"instance_id",
",",
"configuration_name",
"=",
"configuration_name",
",",
"node_count",
"=",
"node_count",
",",
"display_name",
"=",
"display_name",
")",
"try",
":",
"operation",
"=",
"func",
"(",
"instance",
")",
"# type: Operation",
"except",
"GoogleAPICallError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"'An error occurred: %s. Exiting.'",
",",
"e",
".",
"message",
")",
"raise",
"e",
"if",
"operation",
":",
"result",
"=",
"operation",
".",
"result",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"result",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
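The create/update methods that follow differ only in the callable they hand to _apply_to_instance. A standalone illustration of that dispatch pattern (FakeInstance is invented for the example):

class FakeInstance:
    def create(self):
        return 'create-operation'

    def update(self):
        return 'update-operation'

def apply_to_instance(instance, func):
    return func(instance)            # same shape as the lambda dispatch above

assert apply_to_instance(FakeInstance(), lambda x: x.create()) == 'create-operation'
assert apply_to_instance(FakeInstance(), lambda x: x.update()) == 'update-operation'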
|
test
|
CloudSpannerHook.create_instance
|
Creates a new Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def create_instance(self, instance_id, configuration_name, node_count,
display_name, project_id=None):
"""
Creates a new Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
self._apply_to_instance(project_id, instance_id, configuration_name,
node_count, display_name, lambda x: x.create())
|
def create_instance(self, instance_id, configuration_name, node_count,
display_name, project_id=None):
"""
Creates a new Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
self._apply_to_instance(project_id, instance_id, configuration_name,
node_count, display_name, lambda x: x.create())
|
[
"Creates",
"a",
"new",
"Cloud",
"Spanner",
"instance",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L109-L133
|
[
"def",
"create_instance",
"(",
"self",
",",
"instance_id",
",",
"configuration_name",
",",
"node_count",
",",
"display_name",
",",
"project_id",
"=",
"None",
")",
":",
"self",
".",
"_apply_to_instance",
"(",
"project_id",
",",
"instance_id",
",",
"configuration_name",
",",
"node_count",
",",
"display_name",
",",
"lambda",
"x",
":",
"x",
".",
"create",
"(",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
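Hypothetical call matching the signature above (project, instance and configuration names are placeholders; the configuration string follows the projects/PROJECT/instanceConfigs/CONFIG form used by Cloud Spanner):

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')
hook.create_instance(
    instance_id='test-instance',
    configuration_name='projects/my-gcp-project/instanceConfigs/regional-europe-west1',
    node_count=1,
    display_name='Test instance',
    project_id='my-gcp-project')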
|
test
|
CloudSpannerHook.update_instance
|
Updates an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def update_instance(self, instance_id, configuration_name, node_count,
display_name, project_id=None):
"""
Updates an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
return self._apply_to_instance(project_id, instance_id, configuration_name,
node_count, display_name, lambda x: x.update())
|
def update_instance(self, instance_id, configuration_name, node_count,
display_name, project_id=None):
"""
Updates an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param configuration_name: The name of the instance configuration defining how the
instance will be created. Possible configuration values can be retrieved via
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list
:type configuration_name: str
:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner
instance.
:type node_count: int
:param display_name: (Optional) The display name for the instance in the GCP
Console. Must be between 4 and 30 characters. If this value is not set in
the constructor, the name falls back to the instance ID.
:type display_name: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
return self._apply_to_instance(project_id, instance_id, configuration_name,
node_count, display_name, lambda x: x.update())
|
[
"Updates",
"an",
"existing",
"Cloud",
"Spanner",
"instance",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L136-L160
|
[
"def",
"update_instance",
"(",
"self",
",",
"instance_id",
",",
"configuration_name",
",",
"node_count",
",",
"display_name",
",",
"project_id",
"=",
"None",
")",
":",
"return",
"self",
".",
"_apply_to_instance",
"(",
"project_id",
",",
"instance_id",
",",
"configuration_name",
",",
"node_count",
",",
"display_name",
",",
"lambda",
"x",
":",
"x",
".",
"update",
"(",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
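Hypothetical call showing the most common use of update_instance, scaling the node count (ids are placeholders; the other fields are passed again because the hook rebuilds the Instance object from all of them):

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')
hook.update_instance(
    instance_id='test-instance',
    configuration_name='projects/my-gcp-project/instanceConfigs/regional-europe-west1',
    node_count=3,                                  # scale from 1 to 3 nodes
    display_name='Test instance',
    project_id='my-gcp-project')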
|
test
|
CloudSpannerHook.delete_instance
|
Deletes an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def delete_instance(self, instance_id, project_id=None):
"""
Deletes an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id)
try:
instance.delete()
return
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
|
def delete_instance(self, instance_id, project_id=None):
"""
Deletes an existing Cloud Spanner instance.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(instance_id)
try:
instance.delete()
return
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
|
[
"Deletes",
"an",
"existing",
"Cloud",
"Spanner",
"instance",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L163-L181
|
[
"def",
"delete_instance",
"(",
"self",
",",
"instance_id",
",",
"project_id",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
".",
"instance",
"(",
"instance_id",
")",
"try",
":",
"instance",
".",
"delete",
"(",
")",
"return",
"except",
"GoogleAPICallError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"'An error occurred: %s. Exiting.'",
",",
"e",
".",
"message",
")",
"raise",
"e"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
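Hypothetical teardown call (ids are placeholders); a GoogleAPICallError from the API is logged and re-raised, so callers can let it propagate or wrap it themselves.

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')
hook.delete_instance(instance_id='test-instance', project_id='my-gcp-project')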
|
test
|
CloudSpannerHook.get_database
|
Retrieves a database in Cloud Spanner. If the database does not exist
in the specified instance, it returns None.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: Database object or None if database does not exist
:rtype: google.cloud.spanner_v1.database.Database or None
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def get_database(self, instance_id, database_id, project_id=None):
"""
Retrieves a database in Cloud Spanner. If the database does not exist
in the specified instance, it returns None.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: Database object or None if database does not exist
:rtype: google.cloud.spanner_v1.database.Database or None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
if not database.exists():
return None
else:
return database
|
def get_database(self, instance_id, database_id, project_id=None):
"""
Retrieves a database in Cloud Spanner. If the database does not exist
in the specified instance, it returns None.
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: Database object or None if database does not exist
:rtype: google.cloud.spanner_v1.database.Database or None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
if not database.exists():
return None
else:
return database
|
[
"Retrieves",
"a",
"database",
"in",
"Cloud",
"Spanner",
".",
"If",
"the",
"database",
"does",
"not",
"exist",
"in",
"the",
"specified",
"instance",
"it",
"returns",
"None",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L184-L209
|
[
"def",
"get_database",
"(",
"self",
",",
"instance_id",
",",
"database_id",
",",
"project_id",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
".",
"instance",
"(",
"instance_id",
"=",
"instance_id",
")",
"if",
"not",
"instance",
".",
"exists",
"(",
")",
":",
"raise",
"AirflowException",
"(",
"\"The instance {} does not exist in project {} !\"",
".",
"format",
"(",
"instance_id",
",",
"project_id",
")",
")",
"database",
"=",
"instance",
".",
"database",
"(",
"database_id",
"=",
"database_id",
")",
"if",
"not",
"database",
".",
"exists",
"(",
")",
":",
"return",
"None",
"else",
":",
"return",
"database"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
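Hypothetical create-if-missing pattern combining get_database with the create_database method documented in the next record (ids and the DDL statement are placeholders):

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')
database = hook.get_database(instance_id='test-instance',
                             database_id='test-db',
                             project_id='my-gcp-project')
if database is None:
    hook.create_database(
        instance_id='test-instance',
        database_id='test-db',
        ddl_statements=['CREATE TABLE users (id INT64 NOT NULL) PRIMARY KEY (id)'],
        project_id='my-gcp-project')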
|
test
|
CloudSpannerHook.create_database
|
Creates a new database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database to create in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:return: None
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def create_database(self, instance_id, database_id, ddl_statements, project_id=None):
"""
Creates a new database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database to create in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:return: None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id,
ddl_statements=ddl_statements)
try:
operation = database.create() # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
return
|
def create_database(self, instance_id, database_id, ddl_statements, project_id=None):
"""
Creates a new database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database to create in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:return: None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id,
ddl_statements=ddl_statements)
try:
operation = database.create() # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
return
|
[
"Creates",
"a",
"new",
"database",
"in",
"Cloud",
"Spanner",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L212-L244
|
[
"def",
"create_database",
"(",
"self",
",",
"instance_id",
",",
"database_id",
",",
"ddl_statements",
",",
"project_id",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
".",
"instance",
"(",
"instance_id",
"=",
"instance_id",
")",
"if",
"not",
"instance",
".",
"exists",
"(",
")",
":",
"raise",
"AirflowException",
"(",
"\"The instance {} does not exist in project {} !\"",
".",
"format",
"(",
"instance_id",
",",
"project_id",
")",
")",
"database",
"=",
"instance",
".",
"database",
"(",
"database_id",
"=",
"database_id",
",",
"ddl_statements",
"=",
"ddl_statements",
")",
"try",
":",
"operation",
"=",
"database",
".",
"create",
"(",
")",
"# type: Operation",
"except",
"GoogleAPICallError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"'An error occurred: %s. Exiting.'",
",",
"e",
".",
"message",
")",
"raise",
"e",
"if",
"operation",
":",
"result",
"=",
"operation",
".",
"result",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"result",
")",
"return"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
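A hedged sketch of calling create_database from the record above. The IDs and the DDL statement are illustrative only, and gcp_conn_id is an assumed constructor keyword.

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')  # conn id is a placeholder
hook.create_database(
    instance_id='my-instance',
    database_id='my-new-db',
    ddl_statements=[
        "CREATE TABLE users (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id)",
    ],
)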
|
test
|
CloudSpannerHook.update_database
|
Updates DDL of a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:type operation_id: str
:return: None
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def update_database(self, instance_id, database_id, ddl_statements,
project_id=None,
operation_id=None):
"""
Updates DDL of a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:type operation_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
try:
operation = database.update_ddl(
ddl_statements=ddl_statements, operation_id=operation_id)
if operation:
result = operation.result()
self.log.info(result)
return
except AlreadyExists as e:
if e.code == 409 and operation_id in e.message:
self.log.info("Replayed update_ddl message - the operation id %s "
"was already done before.", operation_id)
return
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
|
def update_database(self, instance_id, database_id, ddl_statements,
project_id=None,
operation_id=None):
"""
Updates DDL of a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param ddl_statements: The string list containing DDL for the new database.
:type ddl_statements: list[str]
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:param operation_id: (Optional) The unique per database operation ID that can be
specified to implement idempotency check.
:type operation_id: str
:return: None
"""
instance = self._get_client(project_id=project_id).instance(
instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
try:
operation = database.update_ddl(
ddl_statements=ddl_statements, operation_id=operation_id)
if operation:
result = operation.result()
self.log.info(result)
return
except AlreadyExists as e:
if e.code == 409 and operation_id in e.message:
self.log.info("Replayed update_ddl message - the operation id %s "
"was already done before.", operation_id)
return
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
|
[
"Updates",
"DDL",
"of",
"a",
"database",
"in",
"Cloud",
"Spanner",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L247-L288
|
[
"def",
"update_database",
"(",
"self",
",",
"instance_id",
",",
"database_id",
",",
"ddl_statements",
",",
"project_id",
"=",
"None",
",",
"operation_id",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
".",
"instance",
"(",
"instance_id",
"=",
"instance_id",
")",
"if",
"not",
"instance",
".",
"exists",
"(",
")",
":",
"raise",
"AirflowException",
"(",
"\"The instance {} does not exist in project {} !\"",
".",
"format",
"(",
"instance_id",
",",
"project_id",
")",
")",
"database",
"=",
"instance",
".",
"database",
"(",
"database_id",
"=",
"database_id",
")",
"try",
":",
"operation",
"=",
"database",
".",
"update_ddl",
"(",
"ddl_statements",
"=",
"ddl_statements",
",",
"operation_id",
"=",
"operation_id",
")",
"if",
"operation",
":",
"result",
"=",
"operation",
".",
"result",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"result",
")",
"return",
"except",
"AlreadyExists",
"as",
"e",
":",
"if",
"e",
".",
"code",
"==",
"409",
"and",
"operation_id",
"in",
"e",
".",
"message",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Replayed update_ddl message - the operation id %s \"",
"\"was already done before.\"",
",",
"operation_id",
")",
"return",
"except",
"GoogleAPICallError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"'An error occurred: %s. Exiting.'",
",",
"e",
".",
"message",
")",
"raise",
"e"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
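A sketch of update_database from the record above, showing the optional operation_id: per the code, a replayed request that raises AlreadyExists with a matching operation id is logged and treated as already done. All identifiers below are placeholders, and gcp_conn_id is an assumed constructor keyword.

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')  # conn id is a placeholder
hook.update_database(
    instance_id='my-instance',
    database_id='my-db',
    ddl_statements=["ALTER TABLE users ADD COLUMN email STRING(255)"],
    operation_id='add-email-column-001',  # enables the idempotency check shown above
)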
|
test
|
CloudSpannerHook.delete_database
|
Drops a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:return: True if everything succeeded
:rtype: bool
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def delete_database(self, instance_id, database_id, project_id=None):
"""
Drops a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:return: True if everything succeeded
:rtype: bool
"""
instance = self._get_client(project_id=project_id).\
instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
if not database.exists():
self.log.info("The database {} is already deleted from instance {}. "
"Exiting.".format(database_id, instance_id))
return
try:
operation = database.drop() # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
return
|
def delete_database(self, instance_id, database_id, project_id=None):
"""
Drops a database in Cloud Spanner.
:type project_id: str
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:return: True if everything succeeded
:rtype: bool
"""
instance = self._get_client(project_id=project_id).\
instance(instance_id=instance_id)
if not instance.exists():
raise AirflowException("The instance {} does not exist in project {} !".
format(instance_id, project_id))
database = instance.database(database_id=database_id)
if not database.exists():
self.log.info("The database {} is already deleted from instance {}. "
"Exiting.".format(database_id, instance_id))
return
try:
operation = database.drop() # type: Operation
except GoogleAPICallError as e:
self.log.error('An error occurred: %s. Exiting.', e.message)
raise e
if operation:
result = operation.result()
self.log.info(result)
return
|
[
"Drops",
"a",
"database",
"in",
"Cloud",
"Spanner",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L291-L325
|
[
"def",
"delete_database",
"(",
"self",
",",
"instance_id",
",",
"database_id",
",",
"project_id",
"=",
"None",
")",
":",
"instance",
"=",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
".",
"instance",
"(",
"instance_id",
"=",
"instance_id",
")",
"if",
"not",
"instance",
".",
"exists",
"(",
")",
":",
"raise",
"AirflowException",
"(",
"\"The instance {} does not exist in project {} !\"",
".",
"format",
"(",
"instance_id",
",",
"project_id",
")",
")",
"database",
"=",
"instance",
".",
"database",
"(",
"database_id",
"=",
"database_id",
")",
"if",
"not",
"database",
".",
"exists",
"(",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"The database {} is already deleted from instance {}. \"",
"\"Exiting.\"",
".",
"format",
"(",
"database_id",
",",
"instance_id",
")",
")",
"return",
"try",
":",
"operation",
"=",
"database",
".",
"drop",
"(",
")",
"# type: Operation",
"except",
"GoogleAPICallError",
"as",
"e",
":",
"self",
".",
"log",
".",
"error",
"(",
"'An error occurred: %s. Exiting.'",
",",
"e",
".",
"message",
")",
"raise",
"e",
"if",
"operation",
":",
"result",
"=",
"operation",
".",
"result",
"(",
")",
"self",
".",
"log",
".",
"info",
"(",
"result",
")",
"return"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
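A sketch of delete_database from the record above. Note that although the docstring advertises a bool return, every branch of the code returns None, so the call is best treated as fire-and-forget. Identifiers are placeholders and gcp_conn_id is an assumed constructor keyword.

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')  # conn id is a placeholder
# Dropping a database that is already gone only logs a message and returns.
hook.delete_database(instance_id='my-instance', database_id='my-db')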
|
test
|
CloudSpannerHook.execute_dml
|
Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param queries: The queries to execute.
:type queries: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
|
airflow/contrib/hooks/gcp_spanner_hook.py
|
def execute_dml(self, instance_id, database_id, queries, project_id=None):
"""
Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param queries: The queries to execute.
:type queries: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
"""
self._get_client(project_id=project_id).instance(instance_id=instance_id).\
database(database_id=database_id).run_in_transaction(
lambda transaction: self._execute_sql_in_transaction(transaction, queries))
|
def execute_dml(self, instance_id, database_id, queries, project_id=None):
"""
Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
:param instance_id: The ID of the Cloud Spanner instance.
:type instance_id: str
:param database_id: The ID of the database in Cloud Spanner.
:type database_id: str
:param queries: The queries to execute.
:type queries: str
:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner
database. If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
"""
self._get_client(project_id=project_id).instance(instance_id=instance_id).\
database(database_id=database_id).run_in_transaction(
lambda transaction: self._execute_sql_in_transaction(transaction, queries))
|
[
"Executes",
"an",
"arbitrary",
"DML",
"query",
"(",
"INSERT",
"UPDATE",
"DELETE",
")",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L328-L344
|
[
"def",
"execute_dml",
"(",
"self",
",",
"instance_id",
",",
"database_id",
",",
"queries",
",",
"project_id",
"=",
"None",
")",
":",
"self",
".",
"_get_client",
"(",
"project_id",
"=",
"project_id",
")",
".",
"instance",
"(",
"instance_id",
"=",
"instance_id",
")",
".",
"database",
"(",
"database_id",
"=",
"database_id",
")",
".",
"run_in_transaction",
"(",
"lambda",
"transaction",
":",
"self",
".",
"_execute_sql_in_transaction",
"(",
"transaction",
",",
"queries",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
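A sketch of execute_dml from the record above. The docstring types queries as str, but the value is handed straight to _execute_sql_in_transaction inside run_in_transaction; passing a list of DML statements, as below, is an assumption here rather than something the record states. Identifiers are placeholders and gcp_conn_id is an assumed constructor keyword.

from airflow.contrib.hooks.gcp_spanner_hook import CloudSpannerHook

hook = CloudSpannerHook(gcp_conn_id='google_cloud_default')  # conn id is a placeholder
hook.execute_dml(
    instance_id='my-instance',
    database_id='my-db',
    queries=[
        "INSERT INTO users (id, name) VALUES (1, 'alice')",
        "UPDATE users SET name = 'bob' WHERE id = 1",
    ],
)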
|
test
|
ImapAttachmentSensor.poke
|
Pokes for a mail attachment on the mail server.
:param context: The context that is being provided when poking.
:type context: dict
:return: True if attachment with the given name is present and False if not.
:rtype: bool
|
airflow/contrib/sensors/imap_attachment_sensor.py
|
def poke(self, context):
"""
Pokes for a mail attachment on the mail server.
:param context: The context that is being provided when poking.
:type context: dict
:return: True if attachment with the given name is present and False if not.
:rtype: bool
"""
self.log.info('Poking for %s', self.attachment_name)
with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
return imap_hook.has_mail_attachment(
name=self.attachment_name,
mail_folder=self.mail_folder,
check_regex=self.check_regex
)
|
def poke(self, context):
"""
Pokes for a mail attachment on the mail server.
:param context: The context that is being provided when poking.
:type context: dict
:return: True if attachment with the given name is present and False if not.
:rtype: bool
"""
self.log.info('Poking for %s', self.attachment_name)
with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
return imap_hook.has_mail_attachment(
name=self.attachment_name,
mail_folder=self.mail_folder,
check_regex=self.check_regex
)
|
[
"Pokes",
"for",
"a",
"mail",
"attachment",
"on",
"the",
"mail",
"server",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/imap_attachment_sensor.py#L60-L76
|
[
"def",
"poke",
"(",
"self",
",",
"context",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Poking for %s'",
",",
"self",
".",
"attachment_name",
")",
"with",
"ImapHook",
"(",
"imap_conn_id",
"=",
"self",
".",
"conn_id",
")",
"as",
"imap_hook",
":",
"return",
"imap_hook",
".",
"has_mail_attachment",
"(",
"name",
"=",
"self",
".",
"attachment_name",
",",
"mail_folder",
"=",
"self",
".",
"mail_folder",
",",
"check_regex",
"=",
"self",
".",
"check_regex",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
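A sketch that mirrors what poke does in the record above, calling ImapHook.has_mail_attachment directly with the same keyword arguments. The connection id, folder and attachment name are placeholders.

from airflow.contrib.hooks.imap_hook import ImapHook

with ImapHook(imap_conn_id='imap_default') as imap_hook:
    found = imap_hook.has_mail_attachment(
        name='report.csv',
        mail_folder='INBOX',
        check_regex=False,
    )
print("Attachment present: {}".format(found))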
|
test
|
prepare_additional_parameters
|
Creates additional_properties parameter based on language_hints, web_detection_params and
additional_properties parameters specified by the user
|
airflow/contrib/operators/gcp_vision_operator.py
|
def prepare_additional_parameters(additional_properties, language_hints, web_detection_params):
"""
Creates additional_properties parameter based on language_hints, web_detection_params and
additional_properties parameters specified by the user
"""
if language_hints is None and web_detection_params is None:
return additional_properties
if additional_properties is None:
return {}
merged_additional_parameters = deepcopy(additional_properties)
if 'image_context' not in merged_additional_parameters:
merged_additional_parameters['image_context'] = {}
merged_additional_parameters['image_context']['language_hints'] = merged_additional_parameters[
'image_context'
].get('language_hints', language_hints)
merged_additional_parameters['image_context']['web_detection_params'] = merged_additional_parameters[
'image_context'
].get('web_detection_params', web_detection_params)
return merged_additional_parameters
|
def prepare_additional_parameters(additional_properties, language_hints, web_detection_params):
"""
Creates additional_properties parameter based on language_hints, web_detection_params and
additional_properties parameters specified by the user
"""
if language_hints is None and web_detection_params is None:
return additional_properties
if additional_properties is None:
return {}
merged_additional_parameters = deepcopy(additional_properties)
if 'image_context' not in merged_additional_parameters:
merged_additional_parameters['image_context'] = {}
merged_additional_parameters['image_context']['language_hints'] = merged_additional_parameters[
'image_context'
].get('language_hints', language_hints)
merged_additional_parameters['image_context']['web_detection_params'] = merged_additional_parameters[
'image_context'
].get('web_detection_params', web_detection_params)
return merged_additional_parameters
|
[
"Creates",
"additional_properties",
"parameter",
"based",
"on",
"language_hints",
"web_detection_params",
"and",
"additional_properties",
"parameters",
"specified",
"by",
"the",
"user"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_vision_operator.py#L1221-L1244
|
[
"def",
"prepare_additional_parameters",
"(",
"additional_properties",
",",
"language_hints",
",",
"web_detection_params",
")",
":",
"if",
"language_hints",
"is",
"None",
"and",
"web_detection_params",
"is",
"None",
":",
"return",
"additional_properties",
"if",
"additional_properties",
"is",
"None",
":",
"return",
"{",
"}",
"merged_additional_parameters",
"=",
"deepcopy",
"(",
"additional_properties",
")",
"if",
"'image_context'",
"not",
"in",
"merged_additional_parameters",
":",
"merged_additional_parameters",
"[",
"'image_context'",
"]",
"=",
"{",
"}",
"merged_additional_parameters",
"[",
"'image_context'",
"]",
"[",
"'language_hints'",
"]",
"=",
"merged_additional_parameters",
"[",
"'image_context'",
"]",
".",
"get",
"(",
"'language_hints'",
",",
"language_hints",
")",
"merged_additional_parameters",
"[",
"'image_context'",
"]",
"[",
"'web_detection_params'",
"]",
"=",
"merged_additional_parameters",
"[",
"'image_context'",
"]",
".",
"get",
"(",
"'web_detection_params'",
",",
"web_detection_params",
")",
"return",
"merged_additional_parameters"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
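A standalone sketch of prepare_additional_parameters from the record above. It also illustrates a quirk visible in the code: when additional_properties is None but hints are given, an empty dict is returned and the hints are dropped. The dict contents are illustrative.

from airflow.contrib.operators.gcp_vision_operator import prepare_additional_parameters

merged = prepare_additional_parameters(
    additional_properties={'image_context': {'crop_hints_params': {'aspect_ratios': [1.0]}}},
    language_hints=['en'],
    web_detection_params={'include_geo_results': True},
)
# User-supplied image_context keys are kept; language_hints and
# web_detection_params are only added when not already present.
print(merged)

print(prepare_additional_parameters(None, ['en'], None))  # {} - the hints are dropped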
|
test
|
CassandraHook.get_conn
|
Returns a cassandra Session object
|
airflow/contrib/hooks/cassandra_hook.py
|
def get_conn(self):
"""
Returns a cassandra Session object
"""
if self.session and not self.session.is_shutdown:
return self.session
self.session = self.cluster.connect(self.keyspace)
return self.session
|
def get_conn(self):
"""
Returns a cassandra Session object
"""
if self.session and not self.session.is_shutdown:
return self.session
self.session = self.cluster.connect(self.keyspace)
return self.session
|
[
"Returns",
"a",
"cassandra",
"Session",
"object"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/cassandra_hook.py#L108-L115
|
[
"def",
"get_conn",
"(",
"self",
")",
":",
"if",
"self",
".",
"session",
"and",
"not",
"self",
".",
"session",
".",
"is_shutdown",
":",
"return",
"self",
".",
"session",
"self",
".",
"session",
"=",
"self",
".",
"cluster",
".",
"connect",
"(",
"self",
".",
"keyspace",
")",
"return",
"self",
".",
"session"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
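A minimal sketch of get_conn from the record above: the session is created on first use and reused while it is not shut down. The connection id is a placeholder and the cassandra_conn_id constructor keyword is an assumption based on the hook's usual convention; the query is a trivial system table read.

from airflow.contrib.hooks.cassandra_hook import CassandraHook

hook = CassandraHook(cassandra_conn_id='cassandra_default')  # assumed constructor keyword
session = hook.get_conn()       # connects on first call
same_session = hook.get_conn()  # reused while the session is not shut down
print(session is same_session)
print(session.execute("SELECT release_version FROM system.local").one())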
|
test
|
CassandraHook.table_exists
|
Checks if a table exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
|
airflow/contrib/hooks/cassandra_hook.py
|
def table_exists(self, table):
"""
Checks if a table exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
"""
keyspace = self.keyspace
if '.' in table:
keyspace, table = table.split('.', 1)
cluster_metadata = self.get_conn().cluster.metadata
return (keyspace in cluster_metadata.keyspaces and
table in cluster_metadata.keyspaces[keyspace].tables)
|
def table_exists(self, table):
"""
Checks if a table exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
"""
keyspace = self.keyspace
if '.' in table:
keyspace, table = table.split('.', 1)
cluster_metadata = self.get_conn().cluster.metadata
return (keyspace in cluster_metadata.keyspaces and
table in cluster_metadata.keyspaces[keyspace].tables)
|
[
"Checks",
"if",
"a",
"table",
"exists",
"in",
"Cassandra"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/cassandra_hook.py#L164-L177
|
[
"def",
"table_exists",
"(",
"self",
",",
"table",
")",
":",
"keyspace",
"=",
"self",
".",
"keyspace",
"if",
"'.'",
"in",
"table",
":",
"keyspace",
",",
"table",
"=",
"table",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"cluster_metadata",
"=",
"self",
".",
"get_conn",
"(",
")",
".",
"cluster",
".",
"metadata",
"return",
"(",
"keyspace",
"in",
"cluster_metadata",
".",
"keyspaces",
"and",
"table",
"in",
"cluster_metadata",
".",
"keyspaces",
"[",
"keyspace",
"]",
".",
"tables",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
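A sketch of table_exists from the record above: dot notation targets an explicit keyspace, while a bare table name falls back to the hook's default keyspace. Names are placeholders and the constructor keyword is an assumption.

from airflow.contrib.hooks.cassandra_hook import CassandraHook

hook = CassandraHook(cassandra_conn_id='cassandra_default')  # assumed constructor keyword
print(hook.table_exists('mykeyspace.users'))  # explicit keyspace via dot notation
print(hook.table_exists('users'))             # uses the hook's default keyspace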
|
test
|
CassandraHook.record_exists
|
Checks if a record exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
:param keys: The keys and their values to check the existence.
:type keys: dict
|
airflow/contrib/hooks/cassandra_hook.py
|
def record_exists(self, table, keys):
"""
Checks if a record exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
:param keys: The keys and their values to check the existence.
:type keys: dict
"""
keyspace = self.keyspace
if '.' in table:
keyspace, table = table.split('.', 1)
ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())
cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format(
keyspace=keyspace, table=table, keys=ks)
try:
rs = self.get_conn().execute(cql, keys)
return rs.one() is not None
except Exception:
return False
|
def record_exists(self, table, keys):
"""
Checks if a record exists in Cassandra
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:type table: str
:param keys: The keys and their values to check the existence.
:type keys: dict
"""
keyspace = self.keyspace
if '.' in table:
keyspace, table = table.split('.', 1)
ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())
cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format(
keyspace=keyspace, table=table, keys=ks)
try:
rs = self.get_conn().execute(cql, keys)
return rs.one() is not None
except Exception:
return False
|
[
"Checks",
"if",
"a",
"record",
"exists",
"in",
"Cassandra"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/cassandra_hook.py#L179-L200
|
[
"def",
"record_exists",
"(",
"self",
",",
"table",
",",
"keys",
")",
":",
"keyspace",
"=",
"self",
".",
"keyspace",
"if",
"'.'",
"in",
"table",
":",
"keyspace",
",",
"table",
"=",
"table",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"ks",
"=",
"\" AND \"",
".",
"join",
"(",
"\"{}=%({})s\"",
".",
"format",
"(",
"key",
",",
"key",
")",
"for",
"key",
"in",
"keys",
".",
"keys",
"(",
")",
")",
"cql",
"=",
"\"SELECT * FROM {keyspace}.{table} WHERE {keys}\"",
".",
"format",
"(",
"keyspace",
"=",
"keyspace",
",",
"table",
"=",
"table",
",",
"keys",
"=",
"ks",
")",
"try",
":",
"rs",
"=",
"self",
".",
"get_conn",
"(",
")",
".",
"execute",
"(",
"cql",
",",
"keys",
")",
"return",
"rs",
".",
"one",
"(",
")",
"is",
"not",
"None",
"except",
"Exception",
":",
"return",
"False"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
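A sketch of record_exists from the record above. The keys dict is rendered into a parameterised WHERE clause ("id=%(id)s AND name=%(name)s") and, per the code, any exception during execution is swallowed and reported as False. Names and values are placeholders and the constructor keyword is an assumption.

from airflow.contrib.hooks.cassandra_hook import CassandraHook

hook = CassandraHook(cassandra_conn_id='cassandra_default')  # assumed constructor keyword
exists = hook.record_exists('mykeyspace.users', keys={'id': 1, 'name': 'alice'})
print(exists)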
|
test
|
SparkSubmitHook._build_spark_submit_command
|
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
|
airflow/contrib/hooks/spark_submit_hook.py
|
def _build_spark_submit_command(self, application):
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
        # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
if self._conf:
for key in self._conf:
connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
if self._env_vars and (self._is_kubernetes or self._is_yarn):
if self._is_yarn:
tmpl = "spark.yarn.appMasterEnv.{}={}"
else:
tmpl = "spark.kubernetes.driverEnv.{}={}"
for key in self._env_vars:
connection_cmd += [
"--conf",
tmpl.format(key, str(self._env_vars[key]))]
elif self._env_vars and self._connection['deploy_mode'] != "cluster":
self._env = self._env_vars # Do it on Popen of the process
elif self._env_vars and self._connection['deploy_mode'] == "cluster":
raise AirflowException(
"SparkSubmitHook env_vars is not supported in standalone-cluster mode.")
if self._is_kubernetes:
connection_cmd += ["--conf", "spark.kubernetes.namespace={}".format(
self._connection['namespace'])]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._archives:
connection_cmd += ["--archives", self._archives]
if self._driver_class_path:
connection_cmd += ["--driver-class-path", self._driver_class_path]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._packages:
connection_cmd += ["--packages", self._packages]
if self._exclude_packages:
connection_cmd += ["--exclude-packages", self._exclude_packages]
if self._repositories:
connection_cmd += ["--repositories", self._repositories]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection['queue']:
connection_cmd += ["--queue", self._connection['queue']]
if self._connection['deploy_mode']:
connection_cmd += ["--deploy-mode", self._connection['deploy_mode']]
# The actual script to execute
connection_cmd += [application]
# Append any application arguments
if self._application_args:
connection_cmd += self._application_args
self.log.info("Spark-Submit cmd: %s", connection_cmd)
return connection_cmd
|
def _build_spark_submit_command(self, application):
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
        # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
if self._conf:
for key in self._conf:
connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
if self._env_vars and (self._is_kubernetes or self._is_yarn):
if self._is_yarn:
tmpl = "spark.yarn.appMasterEnv.{}={}"
else:
tmpl = "spark.kubernetes.driverEnv.{}={}"
for key in self._env_vars:
connection_cmd += [
"--conf",
tmpl.format(key, str(self._env_vars[key]))]
elif self._env_vars and self._connection['deploy_mode'] != "cluster":
self._env = self._env_vars # Do it on Popen of the process
elif self._env_vars and self._connection['deploy_mode'] == "cluster":
raise AirflowException(
"SparkSubmitHook env_vars is not supported in standalone-cluster mode.")
if self._is_kubernetes:
connection_cmd += ["--conf", "spark.kubernetes.namespace={}".format(
self._connection['namespace'])]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._archives:
connection_cmd += ["--archives", self._archives]
if self._driver_class_path:
connection_cmd += ["--driver-class-path", self._driver_class_path]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._packages:
connection_cmd += ["--packages", self._packages]
if self._exclude_packages:
connection_cmd += ["--exclude-packages", self._exclude_packages]
if self._repositories:
connection_cmd += ["--repositories", self._repositories]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection['queue']:
connection_cmd += ["--queue", self._connection['queue']]
if self._connection['deploy_mode']:
connection_cmd += ["--deploy-mode", self._connection['deploy_mode']]
# The actual script to execute
connection_cmd += [application]
# Append any application arguments
if self._application_args:
connection_cmd += self._application_args
self.log.info("Spark-Submit cmd: %s", connection_cmd)
return connection_cmd
|
[
"Construct",
"the",
"spark",
"-",
"submit",
"command",
"to",
"execute",
".",
":",
"param",
"application",
":",
"command",
"to",
"append",
"to",
"the",
"spark",
"-",
"submit",
"command",
":",
"type",
"application",
":",
"str",
":",
"return",
":",
"full",
"command",
"to",
"be",
"executed"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L215-L297
|
[
"def",
"_build_spark_submit_command",
"(",
"self",
",",
"application",
")",
":",
"connection_cmd",
"=",
"self",
".",
"_get_spark_binary_path",
"(",
")",
"# The url ot the spark master",
"connection_cmd",
"+=",
"[",
"\"--master\"",
",",
"self",
".",
"_connection",
"[",
"'master'",
"]",
"]",
"if",
"self",
".",
"_conf",
":",
"for",
"key",
"in",
"self",
".",
"_conf",
":",
"connection_cmd",
"+=",
"[",
"\"--conf\"",
",",
"\"{}={}\"",
".",
"format",
"(",
"key",
",",
"str",
"(",
"self",
".",
"_conf",
"[",
"key",
"]",
")",
")",
"]",
"if",
"self",
".",
"_env_vars",
"and",
"(",
"self",
".",
"_is_kubernetes",
"or",
"self",
".",
"_is_yarn",
")",
":",
"if",
"self",
".",
"_is_yarn",
":",
"tmpl",
"=",
"\"spark.yarn.appMasterEnv.{}={}\"",
"else",
":",
"tmpl",
"=",
"\"spark.kubernetes.driverEnv.{}={}\"",
"for",
"key",
"in",
"self",
".",
"_env_vars",
":",
"connection_cmd",
"+=",
"[",
"\"--conf\"",
",",
"tmpl",
".",
"format",
"(",
"key",
",",
"str",
"(",
"self",
".",
"_env_vars",
"[",
"key",
"]",
")",
")",
"]",
"elif",
"self",
".",
"_env_vars",
"and",
"self",
".",
"_connection",
"[",
"'deploy_mode'",
"]",
"!=",
"\"cluster\"",
":",
"self",
".",
"_env",
"=",
"self",
".",
"_env_vars",
"# Do it on Popen of the process",
"elif",
"self",
".",
"_env_vars",
"and",
"self",
".",
"_connection",
"[",
"'deploy_mode'",
"]",
"==",
"\"cluster\"",
":",
"raise",
"AirflowException",
"(",
"\"SparkSubmitHook env_vars is not supported in standalone-cluster mode.\"",
")",
"if",
"self",
".",
"_is_kubernetes",
":",
"connection_cmd",
"+=",
"[",
"\"--conf\"",
",",
"\"spark.kubernetes.namespace={}\"",
".",
"format",
"(",
"self",
".",
"_connection",
"[",
"'namespace'",
"]",
")",
"]",
"if",
"self",
".",
"_files",
":",
"connection_cmd",
"+=",
"[",
"\"--files\"",
",",
"self",
".",
"_files",
"]",
"if",
"self",
".",
"_py_files",
":",
"connection_cmd",
"+=",
"[",
"\"--py-files\"",
",",
"self",
".",
"_py_files",
"]",
"if",
"self",
".",
"_archives",
":",
"connection_cmd",
"+=",
"[",
"\"--archives\"",
",",
"self",
".",
"_archives",
"]",
"if",
"self",
".",
"_driver_class_path",
":",
"connection_cmd",
"+=",
"[",
"\"--driver-class-path\"",
",",
"self",
".",
"_driver_class_path",
"]",
"if",
"self",
".",
"_jars",
":",
"connection_cmd",
"+=",
"[",
"\"--jars\"",
",",
"self",
".",
"_jars",
"]",
"if",
"self",
".",
"_packages",
":",
"connection_cmd",
"+=",
"[",
"\"--packages\"",
",",
"self",
".",
"_packages",
"]",
"if",
"self",
".",
"_exclude_packages",
":",
"connection_cmd",
"+=",
"[",
"\"--exclude-packages\"",
",",
"self",
".",
"_exclude_packages",
"]",
"if",
"self",
".",
"_repositories",
":",
"connection_cmd",
"+=",
"[",
"\"--repositories\"",
",",
"self",
".",
"_repositories",
"]",
"if",
"self",
".",
"_num_executors",
":",
"connection_cmd",
"+=",
"[",
"\"--num-executors\"",
",",
"str",
"(",
"self",
".",
"_num_executors",
")",
"]",
"if",
"self",
".",
"_total_executor_cores",
":",
"connection_cmd",
"+=",
"[",
"\"--total-executor-cores\"",
",",
"str",
"(",
"self",
".",
"_total_executor_cores",
")",
"]",
"if",
"self",
".",
"_executor_cores",
":",
"connection_cmd",
"+=",
"[",
"\"--executor-cores\"",
",",
"str",
"(",
"self",
".",
"_executor_cores",
")",
"]",
"if",
"self",
".",
"_executor_memory",
":",
"connection_cmd",
"+=",
"[",
"\"--executor-memory\"",
",",
"self",
".",
"_executor_memory",
"]",
"if",
"self",
".",
"_driver_memory",
":",
"connection_cmd",
"+=",
"[",
"\"--driver-memory\"",
",",
"self",
".",
"_driver_memory",
"]",
"if",
"self",
".",
"_keytab",
":",
"connection_cmd",
"+=",
"[",
"\"--keytab\"",
",",
"self",
".",
"_keytab",
"]",
"if",
"self",
".",
"_principal",
":",
"connection_cmd",
"+=",
"[",
"\"--principal\"",
",",
"self",
".",
"_principal",
"]",
"if",
"self",
".",
"_name",
":",
"connection_cmd",
"+=",
"[",
"\"--name\"",
",",
"self",
".",
"_name",
"]",
"if",
"self",
".",
"_java_class",
":",
"connection_cmd",
"+=",
"[",
"\"--class\"",
",",
"self",
".",
"_java_class",
"]",
"if",
"self",
".",
"_verbose",
":",
"connection_cmd",
"+=",
"[",
"\"--verbose\"",
"]",
"if",
"self",
".",
"_connection",
"[",
"'queue'",
"]",
":",
"connection_cmd",
"+=",
"[",
"\"--queue\"",
",",
"self",
".",
"_connection",
"[",
"'queue'",
"]",
"]",
"if",
"self",
".",
"_connection",
"[",
"'deploy_mode'",
"]",
":",
"connection_cmd",
"+=",
"[",
"\"--deploy-mode\"",
",",
"self",
".",
"_connection",
"[",
"'deploy_mode'",
"]",
"]",
"# The actual script to execute",
"connection_cmd",
"+=",
"[",
"application",
"]",
"# Append any application arguments",
"if",
"self",
".",
"_application_args",
":",
"connection_cmd",
"+=",
"self",
".",
"_application_args",
"self",
".",
"log",
".",
"info",
"(",
"\"Spark-Submit cmd: %s\"",
",",
"connection_cmd",
")",
"return",
"connection_cmd"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
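A standalone sketch of the least obvious branch in _build_spark_submit_command above: environment variables are rewritten as --conf entries, using spark.yarn.appMasterEnv.* on YARN and spark.kubernetes.driverEnv.* on Kubernetes. The values are illustrative and no hook is instantiated.

# Mirrors the env_vars branch of the method above.
env_vars = {"PYSPARK_PYTHON": "/usr/bin/python3"}
is_yarn = True

tmpl = "spark.yarn.appMasterEnv.{}={}" if is_yarn else "spark.kubernetes.driverEnv.{}={}"
connection_cmd = ["spark-submit", "--master", "yarn"]
for key in env_vars:
    connection_cmd += ["--conf", tmpl.format(key, str(env_vars[key]))]
print(connection_cmd)
# ['spark-submit', '--master', 'yarn', '--conf',
#  'spark.yarn.appMasterEnv.PYSPARK_PYTHON=/usr/bin/python3']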
|
test
|
SparkSubmitHook._build_track_driver_status_command
|
Construct the command to poll the driver status.
:return: full command to be executed
|
airflow/contrib/hooks/spark_submit_hook.py
|
def _build_track_driver_status_command(self):
"""
Construct the command to poll the driver status.
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
        # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
# The driver id so we can poll for its status
if self._driver_id:
connection_cmd += ["--status", self._driver_id]
else:
raise AirflowException(
"Invalid status: attempted to poll driver " +
"status but no driver id is known. Giving up.")
self.log.debug("Poll driver status cmd: %s", connection_cmd)
return connection_cmd
|
def _build_track_driver_status_command(self):
"""
Construct the command to poll the driver status.
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
        # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
# The driver id so we can poll for its status
if self._driver_id:
connection_cmd += ["--status", self._driver_id]
else:
raise AirflowException(
"Invalid status: attempted to poll driver " +
"status but no driver id is known. Giving up.")
self.log.debug("Poll driver status cmd: %s", connection_cmd)
return connection_cmd
|
[
"Construct",
"the",
"command",
"to",
"poll",
"the",
"driver",
"status",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L299-L320
|
[
"def",
"_build_track_driver_status_command",
"(",
"self",
")",
":",
"connection_cmd",
"=",
"self",
".",
"_get_spark_binary_path",
"(",
")",
"# The url ot the spark master",
"connection_cmd",
"+=",
"[",
"\"--master\"",
",",
"self",
".",
"_connection",
"[",
"'master'",
"]",
"]",
"# The driver id so we can poll for its status",
"if",
"self",
".",
"_driver_id",
":",
"connection_cmd",
"+=",
"[",
"\"--status\"",
",",
"self",
".",
"_driver_id",
"]",
"else",
":",
"raise",
"AirflowException",
"(",
"\"Invalid status: attempted to poll driver \"",
"+",
"\"status but no driver id is known. Giving up.\"",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Poll driver status cmd: %s\"",
",",
"connection_cmd",
")",
"return",
"connection_cmd"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
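For reference, the shape of the polling command built above; the standalone master URL and driver id below are placeholders only.

# Illustrative only; both values are placeholders.
poll_driver_status_cmd = ["spark-submit",
                          "--master", "spark://spark-master:6066",
                          "--status", "driver-20240101000000-0001"]
print(" ".join(poll_driver_status_cmd))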
|
test
|
SparkSubmitHook.submit
|
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
|
airflow/contrib/hooks/spark_submit_hook.py
|
def submit(self, application="", **kwargs):
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_spark_submit_command(application)
if hasattr(self, '_env'):
env = os.environ.copy()
env.update(self._env)
kwargs["env"] = env
self._submit_sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
**kwargs)
self._process_spark_submit_log(iter(self._submit_sp.stdout.readline, ''))
returncode = self._submit_sp.wait()
# Check spark-submit return code. In Kubernetes mode, also check the value
# of exit code in the log, as it may differ.
if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
raise AirflowException(
"Cannot execute: {}. Error code is: {}.".format(
spark_submit_cmd, returncode
)
)
self.log.debug("Should track driver: {}".format(self._should_track_driver_status))
# We want the Airflow job to wait until the Spark driver is finished
if self._should_track_driver_status:
if self._driver_id is None:
raise AirflowException(
"No driver id is known: something went wrong when executing " +
"the spark submit command"
)
# We start with the SUBMITTED status as initial status
self._driver_status = "SUBMITTED"
# Start tracking the driver status (blocking function)
self._start_driver_status_tracking()
if self._driver_status != "FINISHED":
raise AirflowException(
"ERROR : Driver {} badly exited with status {}"
.format(self._driver_id, self._driver_status)
)
|
def submit(self, application="", **kwargs):
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_spark_submit_command(application)
if hasattr(self, '_env'):
env = os.environ.copy()
env.update(self._env)
kwargs["env"] = env
self._submit_sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
**kwargs)
self._process_spark_submit_log(iter(self._submit_sp.stdout.readline, ''))
returncode = self._submit_sp.wait()
# Check spark-submit return code. In Kubernetes mode, also check the value
# of exit code in the log, as it may differ.
if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
raise AirflowException(
"Cannot execute: {}. Error code is: {}.".format(
spark_submit_cmd, returncode
)
)
self.log.debug("Should track driver: {}".format(self._should_track_driver_status))
# We want the Airflow job to wait until the Spark driver is finished
if self._should_track_driver_status:
if self._driver_id is None:
raise AirflowException(
"No driver id is known: something went wrong when executing " +
"the spark submit command"
)
# We start with the SUBMITTED status as initial status
self._driver_status = "SUBMITTED"
# Start tracking the driver status (blocking function)
self._start_driver_status_tracking()
if self._driver_status != "FINISHED":
raise AirflowException(
"ERROR : Driver {} badly exited with status {}"
.format(self._driver_id, self._driver_status)
)
|
[
"Remote",
"Popen",
"to",
"execute",
"the",
"spark",
"-",
"submit",
"job"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L322-L376
|
[
"def",
"submit",
"(",
"self",
",",
"application",
"=",
"\"\"",
",",
"*",
"*",
"kwargs",
")",
":",
"spark_submit_cmd",
"=",
"self",
".",
"_build_spark_submit_command",
"(",
"application",
")",
"if",
"hasattr",
"(",
"self",
",",
"'_env'",
")",
":",
"env",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"env",
".",
"update",
"(",
"self",
".",
"_env",
")",
"kwargs",
"[",
"\"env\"",
"]",
"=",
"env",
"self",
".",
"_submit_sp",
"=",
"subprocess",
".",
"Popen",
"(",
"spark_submit_cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"bufsize",
"=",
"-",
"1",
",",
"universal_newlines",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"_process_spark_submit_log",
"(",
"iter",
"(",
"self",
".",
"_submit_sp",
".",
"stdout",
".",
"readline",
",",
"''",
")",
")",
"returncode",
"=",
"self",
".",
"_submit_sp",
".",
"wait",
"(",
")",
"# Check spark-submit return code. In Kubernetes mode, also check the value",
"# of exit code in the log, as it may differ.",
"if",
"returncode",
"or",
"(",
"self",
".",
"_is_kubernetes",
"and",
"self",
".",
"_spark_exit_code",
"!=",
"0",
")",
":",
"raise",
"AirflowException",
"(",
"\"Cannot execute: {}. Error code is: {}.\"",
".",
"format",
"(",
"spark_submit_cmd",
",",
"returncode",
")",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"Should track driver: {}\"",
".",
"format",
"(",
"self",
".",
"_should_track_driver_status",
")",
")",
"# We want the Airflow job to wait until the Spark driver is finished",
"if",
"self",
".",
"_should_track_driver_status",
":",
"if",
"self",
".",
"_driver_id",
"is",
"None",
":",
"raise",
"AirflowException",
"(",
"\"No driver id is known: something went wrong when executing \"",
"+",
"\"the spark submit command\"",
")",
"# We start with the SUBMITTED status as initial status",
"self",
".",
"_driver_status",
"=",
"\"SUBMITTED\"",
"# Start tracking the driver status (blocking function)",
"self",
".",
"_start_driver_status_tracking",
"(",
")",
"if",
"self",
".",
"_driver_status",
"!=",
"\"FINISHED\"",
":",
"raise",
"AirflowException",
"(",
"\"ERROR : Driver {} badly exited with status {}\"",
".",
"format",
"(",
"self",
".",
"_driver_id",
",",
"self",
".",
"_driver_status",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
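A hedged sketch of driving submit from the record above. The conn_id, name and verbose constructor keywords are assumptions inferred from the attributes the hook reads (self._connection, self._name, self._verbose); the application path is a placeholder.

from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook

hook = SparkSubmitHook(conn_id='spark_default',  # assumed constructor keywords
                       name='example-job',
                       verbose=True)
hook.submit(application='/path/to/app.py')  # blocks until spark-submit returns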
|
test
|
SparkSubmitHook._process_spark_submit_log
|
Processes the log files and extracts useful information out of it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
|
airflow/contrib/hooks/spark_submit_hook.py
|
def _process_spark_submit_log(self, itr):
"""
Processes the log files and extracts useful information out of it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
self.log.info("Identified spark driver id: %s",
self._yarn_application_id)
# If we run Kubernetes cluster mode, we want to extract the driver pod id
# from the logs so we can kill the application when we stop it unexpectedly
elif self._is_kubernetes:
match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line)
if match:
self._kubernetes_driver_pod = match.groups()[0]
self.log.info("Identified spark driver pod: %s",
self._kubernetes_driver_pod)
# Store the Spark Exit code
match_exit_code = re.search(r'\s*Exit code: (\d+)', line)
if match_exit_code:
self._spark_exit_code = int(match_exit_code.groups()[0])
# if we run in standalone cluster mode and we want to track the driver status
# we need to extract the driver id from the logs. This allows us to poll for
# the status using the driver id. Also, we can kill the driver when needed.
elif self._should_track_driver_status and not self._driver_id:
match_driver_id = re.search(r'(driver-[0-9\-]+)', line)
if match_driver_id:
self._driver_id = match_driver_id.groups()[0]
self.log.info("identified spark driver id: {}"
.format(self._driver_id))
else:
self.log.info(line)
self.log.debug("spark submit log: {}".format(line))
|
def _process_spark_submit_log(self, itr):
"""
Processes the log files and extracts useful information out of it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
self.log.info("Identified spark driver id: %s",
self._yarn_application_id)
# If we run Kubernetes cluster mode, we want to extract the driver pod id
# from the logs so we can kill the application when we stop it unexpectedly
elif self._is_kubernetes:
match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line)
if match:
self._kubernetes_driver_pod = match.groups()[0]
self.log.info("Identified spark driver pod: %s",
self._kubernetes_driver_pod)
# Store the Spark Exit code
match_exit_code = re.search(r'\s*Exit code: (\d+)', line)
if match_exit_code:
self._spark_exit_code = int(match_exit_code.groups()[0])
# if we run in standalone cluster mode and we want to track the driver status
# we need to extract the driver id from the logs. This allows us to poll for
# the status using the driver id. Also, we can kill the driver when needed.
elif self._should_track_driver_status and not self._driver_id:
match_driver_id = re.search(r'(driver-[0-9\-]+)', line)
if match_driver_id:
self._driver_id = match_driver_id.groups()[0]
self.log.info("identified spark driver id: {}"
.format(self._driver_id))
else:
self.log.info(line)
self.log.debug("spark submit log: {}".format(line))
|
[
"Processes",
"the",
"log",
"files",
"and",
"extracts",
"useful",
"information",
"out",
"of",
"it",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L378-L429
|
[
"def",
"_process_spark_submit_log",
"(",
"self",
",",
"itr",
")",
":",
"# Consume the iterator",
"for",
"line",
"in",
"itr",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"# If we run yarn cluster mode, we want to extract the application id from",
"# the logs so we can kill the application when we stop it unexpectedly",
"if",
"self",
".",
"_is_yarn",
"and",
"self",
".",
"_connection",
"[",
"'deploy_mode'",
"]",
"==",
"'cluster'",
":",
"match",
"=",
"re",
".",
"search",
"(",
"'(application[0-9_]+)'",
",",
"line",
")",
"if",
"match",
":",
"self",
".",
"_yarn_application_id",
"=",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"self",
".",
"log",
".",
"info",
"(",
"\"Identified spark driver id: %s\"",
",",
"self",
".",
"_yarn_application_id",
")",
"# If we run Kubernetes cluster mode, we want to extract the driver pod id",
"# from the logs so we can kill the application when we stop it unexpectedly",
"elif",
"self",
".",
"_is_kubernetes",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'\\s*pod name: ((.+?)-([a-z0-9]+)-driver)'",
",",
"line",
")",
"if",
"match",
":",
"self",
".",
"_kubernetes_driver_pod",
"=",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"self",
".",
"log",
".",
"info",
"(",
"\"Identified spark driver pod: %s\"",
",",
"self",
".",
"_kubernetes_driver_pod",
")",
"# Store the Spark Exit code",
"match_exit_code",
"=",
"re",
".",
"search",
"(",
"r'\\s*Exit code: (\\d+)'",
",",
"line",
")",
"if",
"match_exit_code",
":",
"self",
".",
"_spark_exit_code",
"=",
"int",
"(",
"match_exit_code",
".",
"groups",
"(",
")",
"[",
"0",
"]",
")",
"# if we run in standalone cluster mode and we want to track the driver status",
"# we need to extract the driver id from the logs. This allows us to poll for",
"# the status using the driver id. Also, we can kill the driver when needed.",
"elif",
"self",
".",
"_should_track_driver_status",
"and",
"not",
"self",
".",
"_driver_id",
":",
"match_driver_id",
"=",
"re",
".",
"search",
"(",
"r'(driver-[0-9\\-]+)'",
",",
"line",
")",
"if",
"match_driver_id",
":",
"self",
".",
"_driver_id",
"=",
"match_driver_id",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"self",
".",
"log",
".",
"info",
"(",
"\"identified spark driver id: {}\"",
".",
"format",
"(",
"self",
".",
"_driver_id",
")",
")",
"else",
":",
"self",
".",
"log",
".",
"info",
"(",
"line",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"spark submit log: {}\"",
".",
"format",
"(",
"line",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
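A standalone sketch of the regular expressions used above, applied to representative log lines; the ids in the sample lines are made up.

import re

yarn_line = "INFO Client: Submitted application application_1718000000000_0042"
k8s_line = "pod name: spark-pi-1718000000000-driver"
exit_line = "Exit code: 0"
driver_line = "Driver successfully submitted as driver-20240101000000-0001"

print(re.search('(application[0-9_]+)', yarn_line).groups()[0])
print(re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', k8s_line).groups()[0])
print(int(re.search(r'\s*Exit code: (\d+)', exit_line).groups()[0]))
print(re.search(r'(driver-[0-9\-]+)', driver_line).groups()[0])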
|
test
|
SparkSubmitHook._process_spark_status_log
|
parses the logs of the spark driver status query process
:param itr: An iterator which iterates over the input of the subprocess
|
airflow/contrib/hooks/spark_submit_hook.py
|
def _process_spark_status_log(self, itr):
"""
parses the logs of the spark driver status query process
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# Check if the log line is about the driver status and extract the status.
if "driverState" in line:
self._driver_status = line.split(' : ')[1] \
.replace(',', '').replace('\"', '').strip()
self.log.debug("spark driver status log: {}".format(line))
|
def _process_spark_status_log(self, itr):
"""
parses the logs of the spark driver status query process
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# Check if the log line is about the driver status and extract the status.
if "driverState" in line:
self._driver_status = line.split(' : ')[1] \
.replace(',', '').replace('\"', '').strip()
self.log.debug("spark driver status log: {}".format(line))
|
[
"parses",
"the",
"logs",
"of",
"the",
"spark",
"driver",
"status",
"query",
"process"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L431-L446
|
[
"def",
"_process_spark_status_log",
"(",
"self",
",",
"itr",
")",
":",
"# Consume the iterator",
"for",
"line",
"in",
"itr",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"# Check if the log line is about the driver status and extract the status.",
"if",
"\"driverState\"",
"in",
"line",
":",
"self",
".",
"_driver_status",
"=",
"line",
".",
"split",
"(",
"' : '",
")",
"[",
"1",
"]",
".",
"replace",
"(",
"','",
",",
"''",
")",
".",
"replace",
"(",
"'\\\"'",
",",
"''",
")",
".",
"strip",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"spark driver status log: {}\"",
".",
"format",
"(",
"line",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
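For a concrete feel of the parsing, here is the same split/replace/strip chain applied to one hypothetical line of spark-submit --status output (the line itself is invented):

# Hypothetical status line; real input would come from the subprocess stdout iterator.
line = '  "driverState" : "RUNNING",'.strip()
if "driverState" in line:
    driver_status = line.split(' : ')[1].replace(',', '').replace('"', '').strip()
    print(driver_status)   # RUNNING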
|
test
|
SparkSubmitHook._start_driver_status_tracking
|
Polls the driver based on self._driver_id to get the status.
Finish successfully when the status is FINISHED.
Finish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.
Possible status:
SUBMITTED
Submitted but not yet scheduled on a worker
RUNNING
Has been allocated to a worker to run
FINISHED
Previously ran and exited cleanly
RELAUNCHING
Exited non-zero or due to worker failure, but has not yet
started running again
UNKNOWN
The status of the driver is temporarily not known due to
master failure recovery
KILLED
A user manually killed this driver
FAILED
The driver exited non-zero and was not supervised
ERROR
Unable to run or restart due to an unrecoverable error
(e.g. missing jar file)
|
airflow/contrib/hooks/spark_submit_hook.py
|
def _start_driver_status_tracking(self):
"""
Polls the driver based on self._driver_id to get the status.
Finish successfully when the status is FINISHED.
Finish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.
Possible status:
SUBMITTED
Submitted but not yet scheduled on a worker
RUNNING
Has been allocated to a worker to run
FINISHED
Previously ran and exited cleanly
RELAUNCHING
Exited non-zero or due to worker failure, but has not yet
started running again
UNKNOWN
The status of the driver is temporarily not known due to
master failure recovery
KILLED
A user manually killed this driver
FAILED
The driver exited non-zero and was not supervised
ERROR
Unable to run or restart due to an unrecoverable error
(e.g. missing jar file)
"""
# When your Spark Standalone cluster is not performing well
    # due to misconfiguration or heavy loads,
    # it is possible that the polling request will time out.
# Therefore we use a simple retry mechanism.
missed_job_status_reports = 0
max_missed_job_status_reports = 10
# Keep polling as long as the driver is processing
while self._driver_status not in ["FINISHED", "UNKNOWN",
"KILLED", "FAILED", "ERROR"]:
# Sleep for 1 second as we do not want to spam the cluster
time.sleep(1)
self.log.debug("polling status of spark driver with id {}"
.format(self._driver_id))
poll_drive_status_cmd = self._build_track_driver_status_command()
status_process = subprocess.Popen(poll_drive_status_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True)
self._process_spark_status_log(iter(status_process.stdout.readline, ''))
returncode = status_process.wait()
if returncode:
if missed_job_status_reports < max_missed_job_status_reports:
missed_job_status_reports = missed_job_status_reports + 1
else:
raise AirflowException(
"Failed to poll for the driver status {} times: returncode = {}"
.format(max_missed_job_status_reports, returncode)
)
|
def _start_driver_status_tracking(self):
"""
Polls the driver based on self._driver_id to get the status.
Finish successfully when the status is FINISHED.
Finish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.
Possible status:
SUBMITTED
Submitted but not yet scheduled on a worker
RUNNING
Has been allocated to a worker to run
FINISHED
Previously ran and exited cleanly
RELAUNCHING
Exited non-zero or due to worker failure, but has not yet
started running again
UNKNOWN
The status of the driver is temporarily not known due to
master failure recovery
KILLED
A user manually killed this driver
FAILED
The driver exited non-zero and was not supervised
ERROR
Unable to run or restart due to an unrecoverable error
(e.g. missing jar file)
"""
# When your Spark Standalone cluster is not performing well
    # due to misconfiguration or heavy loads,
    # it is possible that the polling request will time out.
# Therefore we use a simple retry mechanism.
missed_job_status_reports = 0
max_missed_job_status_reports = 10
# Keep polling as long as the driver is processing
while self._driver_status not in ["FINISHED", "UNKNOWN",
"KILLED", "FAILED", "ERROR"]:
# Sleep for 1 second as we do not want to spam the cluster
time.sleep(1)
self.log.debug("polling status of spark driver with id {}"
.format(self._driver_id))
poll_drive_status_cmd = self._build_track_driver_status_command()
status_process = subprocess.Popen(poll_drive_status_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True)
self._process_spark_status_log(iter(status_process.stdout.readline, ''))
returncode = status_process.wait()
if returncode:
if missed_job_status_reports < max_missed_job_status_reports:
missed_job_status_reports = missed_job_status_reports + 1
else:
raise AirflowException(
"Failed to poll for the driver status {} times: returncode = {}"
.format(max_missed_job_status_reports, returncode)
)
|
[
"Polls",
"the",
"driver",
"based",
"on",
"self",
".",
"_driver_id",
"to",
"get",
"the",
"status",
".",
"Finish",
"successfully",
"when",
"the",
"status",
"is",
"FINISHED",
".",
"Finish",
"failed",
"when",
"the",
"status",
"is",
"ERROR",
"/",
"UNKNOWN",
"/",
"KILLED",
"/",
"FAILED",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L448-L511
|
[
"def",
"_start_driver_status_tracking",
"(",
"self",
")",
":",
"# When your Spark Standalone cluster is not performing well",
"# due to misconfiguration or heavy loads.",
"# it is possible that the polling request will timeout.",
"# Therefore we use a simple retry mechanism.",
"missed_job_status_reports",
"=",
"0",
"max_missed_job_status_reports",
"=",
"10",
"# Keep polling as long as the driver is processing",
"while",
"self",
".",
"_driver_status",
"not",
"in",
"[",
"\"FINISHED\"",
",",
"\"UNKNOWN\"",
",",
"\"KILLED\"",
",",
"\"FAILED\"",
",",
"\"ERROR\"",
"]",
":",
"# Sleep for 1 second as we do not want to spam the cluster",
"time",
".",
"sleep",
"(",
"1",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"polling status of spark driver with id {}\"",
".",
"format",
"(",
"self",
".",
"_driver_id",
")",
")",
"poll_drive_status_cmd",
"=",
"self",
".",
"_build_track_driver_status_command",
"(",
")",
"status_process",
"=",
"subprocess",
".",
"Popen",
"(",
"poll_drive_status_cmd",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"bufsize",
"=",
"-",
"1",
",",
"universal_newlines",
"=",
"True",
")",
"self",
".",
"_process_spark_status_log",
"(",
"iter",
"(",
"status_process",
".",
"stdout",
".",
"readline",
",",
"''",
")",
")",
"returncode",
"=",
"status_process",
".",
"wait",
"(",
")",
"if",
"returncode",
":",
"if",
"missed_job_status_reports",
"<",
"max_missed_job_status_reports",
":",
"missed_job_status_reports",
"=",
"missed_job_status_reports",
"+",
"1",
"else",
":",
"raise",
"AirflowException",
"(",
"\"Failed to poll for the driver status {} times: returncode = {}\"",
".",
"format",
"(",
"max_missed_job_status_reports",
",",
"returncode",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
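A stripped-down sketch of the retry accounting above, with the spark-submit --status subprocess replaced by a stub that fails its first few calls; the stub, its failure count and the shortened sleep are all invented for the example.

import time

def poll_driver_status(attempt, fail_first_n=3):
    # Stand-in for launching spark-submit --status; non-zero means the poll failed.
    return 1 if attempt < fail_first_n else 0

missed = 0
max_missed = 10
attempt = 0
driver_status = "RUNNING"
while driver_status not in ["FINISHED", "UNKNOWN", "KILLED", "FAILED", "ERROR"]:
    time.sleep(0.01)                      # shortened from 1 second for the demo
    returncode = poll_driver_status(attempt)
    attempt += 1
    if returncode:
        if missed < max_missed:
            missed += 1                   # tolerate up to max_missed failed polls
        else:
            raise RuntimeError("Failed to poll for the driver status %d times" % max_missed)
    else:
        driver_status = "FINISHED"        # pretend the parsed status reached a terminal state
print("polled %d times, missed %d" % (attempt, missed))   # polled 4 times, missed 3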
|
test
|
SparkSubmitHook._build_spark_driver_kill_command
|
Construct the spark-submit command to kill a driver.
:return: full command to kill a driver
|
airflow/contrib/hooks/spark_submit_hook.py
|
def _build_spark_driver_kill_command(self):
"""
Construct the spark-submit command to kill a driver.
:return: full command to kill a driver
"""
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'],
'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
    # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
# The actual kill command
connection_cmd += ["--kill", self._driver_id]
self.log.debug("Spark-Kill cmd: %s", connection_cmd)
return connection_cmd
|
def _build_spark_driver_kill_command(self):
"""
Construct the spark-submit command to kill a driver.
:return: full command to kill a driver
"""
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'],
'bin',
self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
    # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
# The actual kill command
connection_cmd += ["--kill", self._driver_id]
self.log.debug("Spark-Kill cmd: %s", connection_cmd)
return connection_cmd
|
[
"Construct",
"the",
"spark",
"-",
"submit",
"command",
"to",
"kill",
"a",
"driver",
".",
":",
"return",
":",
"full",
"command",
"to",
"kill",
"a",
"driver"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L513-L537
|
[
"def",
"_build_spark_driver_kill_command",
"(",
"self",
")",
":",
"# If the spark_home is passed then build the spark-submit executable path using",
"# the spark_home; otherwise assume that spark-submit is present in the path to",
"# the executing user",
"if",
"self",
".",
"_connection",
"[",
"'spark_home'",
"]",
":",
"connection_cmd",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_connection",
"[",
"'spark_home'",
"]",
",",
"'bin'",
",",
"self",
".",
"_connection",
"[",
"'spark_binary'",
"]",
")",
"]",
"else",
":",
"connection_cmd",
"=",
"[",
"self",
".",
"_connection",
"[",
"'spark_binary'",
"]",
"]",
"# The url ot the spark master",
"connection_cmd",
"+=",
"[",
"\"--master\"",
",",
"self",
".",
"_connection",
"[",
"'master'",
"]",
"]",
"# The actual kill command",
"connection_cmd",
"+=",
"[",
"\"--kill\"",
",",
"self",
".",
"_driver_id",
"]",
"self",
".",
"log",
".",
"debug",
"(",
"\"Spark-Kill cmd: %s\"",
",",
"connection_cmd",
")",
"return",
"connection_cmd"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
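Running the same branching over a hypothetical connection dict and driver id shows the argument list the method builds; the paths, master URL and id below are invented.

import os

connection = {'spark_home': '/opt/spark',
              'spark_binary': 'spark-submit',
              'master': 'spark://spark-master:7077'}        # hypothetical connection values
driver_id = 'driver-20190101123456-0001'                    # hypothetical driver id

if connection['spark_home']:
    cmd = [os.path.join(connection['spark_home'], 'bin', connection['spark_binary'])]
else:
    cmd = [connection['spark_binary']]
cmd += ["--master", connection['master']]
cmd += ["--kill", driver_id]
print(cmd)
# ['/opt/spark/bin/spark-submit', '--master', 'spark://spark-master:7077', '--kill', 'driver-20190101123456-0001']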
|
test
|
get_task_runner
|
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
|
airflow/task/task_runner/__init__.py
|
def get_task_runner(local_task_job):
"""
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
"""
if _TASK_RUNNER == "StandardTaskRunner":
return StandardTaskRunner(local_task_job)
elif _TASK_RUNNER == "CgroupTaskRunner":
from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner
return CgroupTaskRunner(local_task_job)
else:
raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
|
def get_task_runner(local_task_job):
"""
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
"""
if _TASK_RUNNER == "StandardTaskRunner":
return StandardTaskRunner(local_task_job)
elif _TASK_RUNNER == "CgroupTaskRunner":
from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner
return CgroupTaskRunner(local_task_job)
else:
raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
|
[
"Get",
"the",
"task",
"runner",
"that",
"can",
"be",
"used",
"to",
"run",
"the",
"given",
"job",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/task/task_runner/__init__.py#L27-L43
|
[
"def",
"get_task_runner",
"(",
"local_task_job",
")",
":",
"if",
"_TASK_RUNNER",
"==",
"\"StandardTaskRunner\"",
":",
"return",
"StandardTaskRunner",
"(",
"local_task_job",
")",
"elif",
"_TASK_RUNNER",
"==",
"\"CgroupTaskRunner\"",
":",
"from",
"airflow",
".",
"contrib",
".",
"task_runner",
".",
"cgroup_task_runner",
"import",
"CgroupTaskRunner",
"return",
"CgroupTaskRunner",
"(",
"local_task_job",
")",
"else",
":",
"raise",
"AirflowException",
"(",
"\"Unknown task runner type {}\"",
".",
"format",
"(",
"_TASK_RUNNER",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AWSBatchOperator._wait_for_task_ended
|
Try to use a waiter from the below pull request
* https://github.com/boto/botocore/pull/1307
If the waiter is not available, apply an exponential backoff
* docs.aws.amazon.com/general/latest/gr/api-retries.html
|
airflow/contrib/operators/awsbatch_operator.py
|
def _wait_for_task_ended(self):
"""
Try to use a waiter from the below pull request
* https://github.com/boto/botocore/pull/1307
    If the waiter is not available, apply an exponential backoff
* docs.aws.amazon.com/general/latest/gr/api-retries.html
"""
try:
waiter = self.client.get_waiter('job_execution_complete')
waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
waiter.wait(jobs=[self.jobId])
except ValueError:
# If waiter not available use expo
retry = True
retries = 0
while retries < self.max_retries and retry:
self.log.info('AWS Batch retry in the next %s seconds', retries)
response = self.client.describe_jobs(
jobs=[self.jobId]
)
if response['jobs'][-1]['status'] in ['SUCCEEDED', 'FAILED']:
retry = False
sleep(1 + pow(retries * 0.1, 2))
retries += 1
|
def _wait_for_task_ended(self):
"""
Try to use a waiter from the below pull request
* https://github.com/boto/botocore/pull/1307
    If the waiter is not available, apply an exponential backoff
* docs.aws.amazon.com/general/latest/gr/api-retries.html
"""
try:
waiter = self.client.get_waiter('job_execution_complete')
waiter.config.max_attempts = sys.maxsize # timeout is managed by airflow
waiter.wait(jobs=[self.jobId])
except ValueError:
# If waiter not available use expo
retry = True
retries = 0
while retries < self.max_retries and retry:
self.log.info('AWS Batch retry in the next %s seconds', retries)
response = self.client.describe_jobs(
jobs=[self.jobId]
)
if response['jobs'][-1]['status'] in ['SUCCEEDED', 'FAILED']:
retry = False
sleep(1 + pow(retries * 0.1, 2))
retries += 1
|
[
"Try",
"to",
"use",
"a",
"waiter",
"from",
"the",
"below",
"pull",
"request"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/awsbatch_operator.py#L117-L145
|
[
"def",
"_wait_for_task_ended",
"(",
"self",
")",
":",
"try",
":",
"waiter",
"=",
"self",
".",
"client",
".",
"get_waiter",
"(",
"'job_execution_complete'",
")",
"waiter",
".",
"config",
".",
"max_attempts",
"=",
"sys",
".",
"maxsize",
"# timeout is managed by airflow",
"waiter",
".",
"wait",
"(",
"jobs",
"=",
"[",
"self",
".",
"jobId",
"]",
")",
"except",
"ValueError",
":",
"# If waiter not available use expo",
"retry",
"=",
"True",
"retries",
"=",
"0",
"while",
"retries",
"<",
"self",
".",
"max_retries",
"and",
"retry",
":",
"self",
".",
"log",
".",
"info",
"(",
"'AWS Batch retry in the next %s seconds'",
",",
"retries",
")",
"response",
"=",
"self",
".",
"client",
".",
"describe_jobs",
"(",
"jobs",
"=",
"[",
"self",
".",
"jobId",
"]",
")",
"if",
"response",
"[",
"'jobs'",
"]",
"[",
"-",
"1",
"]",
"[",
"'status'",
"]",
"in",
"[",
"'SUCCEEDED'",
",",
"'FAILED'",
"]",
":",
"retry",
"=",
"False",
"sleep",
"(",
"1",
"+",
"pow",
"(",
"retries",
"*",
"0.1",
",",
"2",
")",
")",
"retries",
"+=",
"1"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
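The fallback branch ramps roughly quadratically rather than as a classic exponential backoff; printing the sleep durations it would use for a hypothetical max_retries makes the pacing concrete.

max_retries = 10                                             # hypothetical operator setting
delays = [1 + pow(retries * 0.1, 2) for retries in range(max_retries)]
print([round(d, 2) for d in delays])
# [1.0, 1.01, 1.04, 1.09, 1.16, 1.25, 1.36, 1.49, 1.64, 1.81]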
|
test
|
MySqlToGoogleCloudStorageOperator._query_mysql
|
Queries mysql and returns a cursor to the results.
|
airflow/contrib/operators/mysql_to_gcs.py
|
def _query_mysql(self):
"""
Queries mysql and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
|
def _query_mysql(self):
"""
Queries mysql and returns a cursor to the results.
"""
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
conn = mysql.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
return cursor
|
[
"Queries",
"mysql",
"and",
"returns",
"a",
"cursor",
"to",
"the",
"results",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L134-L142
|
[
"def",
"_query_mysql",
"(",
"self",
")",
":",
"mysql",
"=",
"MySqlHook",
"(",
"mysql_conn_id",
"=",
"self",
".",
"mysql_conn_id",
")",
"conn",
"=",
"mysql",
".",
"get_conn",
"(",
")",
"cursor",
"=",
"conn",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"self",
".",
"sql",
")",
"return",
"cursor"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
MySqlToGoogleCloudStorageOperator._write_local_data_files
|
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
|
airflow/contrib/operators/mysql_to_gcs.py
|
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if self.export_format == 'csv':
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
}]
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats.
# Convert binary type object to string encoded with base64.
row = self._convert_types(schema, col_type_dict, row)
if self.export_format == 'csv':
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
s = json.dumps(row_dict, sort_keys=True).encode('utf-8')
tmp_file_handle.write(s)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
})
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
|
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if self.export_format == 'csv':
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
}]
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats.
# Convert binary type object to string encoded with base64.
row = self._convert_types(schema, col_type_dict, row)
if self.export_format == 'csv':
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
s = json.dumps(row_dict, sort_keys=True).encode('utf-8')
tmp_file_handle.write(s)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
})
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
|
[
"Takes",
"a",
"cursor",
"and",
"writes",
"results",
"to",
"a",
"local",
"file",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L144-L199
|
[
"def",
"_write_local_data_files",
"(",
"self",
",",
"cursor",
")",
":",
"schema",
"=",
"list",
"(",
"map",
"(",
"lambda",
"schema_tuple",
":",
"schema_tuple",
"[",
"0",
"]",
",",
"cursor",
".",
"description",
")",
")",
"col_type_dict",
"=",
"self",
".",
"_get_col_type_dict",
"(",
")",
"file_no",
"=",
"0",
"tmp_file_handle",
"=",
"NamedTemporaryFile",
"(",
"delete",
"=",
"True",
")",
"if",
"self",
".",
"export_format",
"==",
"'csv'",
":",
"file_mime_type",
"=",
"'text/csv'",
"else",
":",
"file_mime_type",
"=",
"'application/json'",
"files_to_upload",
"=",
"[",
"{",
"'file_name'",
":",
"self",
".",
"filename",
".",
"format",
"(",
"file_no",
")",
",",
"'file_handle'",
":",
"tmp_file_handle",
",",
"'file_mime_type'",
":",
"file_mime_type",
"}",
"]",
"if",
"self",
".",
"export_format",
"==",
"'csv'",
":",
"csv_writer",
"=",
"self",
".",
"_configure_csv_file",
"(",
"tmp_file_handle",
",",
"schema",
")",
"for",
"row",
"in",
"cursor",
":",
"# Convert datetime objects to utc seconds, and decimals to floats.",
"# Convert binary type object to string encoded with base64.",
"row",
"=",
"self",
".",
"_convert_types",
"(",
"schema",
",",
"col_type_dict",
",",
"row",
")",
"if",
"self",
".",
"export_format",
"==",
"'csv'",
":",
"csv_writer",
".",
"writerow",
"(",
"row",
")",
"else",
":",
"row_dict",
"=",
"dict",
"(",
"zip",
"(",
"schema",
",",
"row",
")",
")",
"# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.",
"s",
"=",
"json",
".",
"dumps",
"(",
"row_dict",
",",
"sort_keys",
"=",
"True",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"tmp_file_handle",
".",
"write",
"(",
"s",
")",
"# Append newline to make dumps BigQuery compatible.",
"tmp_file_handle",
".",
"write",
"(",
"b'\\n'",
")",
"# Stop if the file exceeds the file size limit.",
"if",
"tmp_file_handle",
".",
"tell",
"(",
")",
">=",
"self",
".",
"approx_max_file_size_bytes",
":",
"file_no",
"+=",
"1",
"tmp_file_handle",
"=",
"NamedTemporaryFile",
"(",
"delete",
"=",
"True",
")",
"files_to_upload",
".",
"append",
"(",
"{",
"'file_name'",
":",
"self",
".",
"filename",
".",
"format",
"(",
"file_no",
")",
",",
"'file_handle'",
":",
"tmp_file_handle",
",",
"'file_mime_type'",
":",
"file_mime_type",
"}",
")",
"if",
"self",
".",
"export_format",
"==",
"'csv'",
":",
"csv_writer",
"=",
"self",
".",
"_configure_csv_file",
"(",
"tmp_file_handle",
",",
"schema",
")",
"return",
"files_to_upload"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
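A reduced sketch of the rollover behaviour, with plain dict rows instead of a MySQL cursor and a tiny size threshold so the split is visible; every value and name below is invented.

import json
from tempfile import NamedTemporaryFile

approx_max_file_size_bytes = 64                      # tiny threshold for the demo
filename = 'export_{}.json'                          # hypothetical object name pattern
rows = [{'id': i, 'name': 'row-%d' % i} for i in range(10)]

file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload = [{'file_name': filename.format(file_no), 'file_handle': tmp_file_handle}]
for row in rows:
    tmp_file_handle.write(json.dumps(row, sort_keys=True).encode('utf-8'))
    tmp_file_handle.write(b'\n')                     # newline-delimited JSON
    if tmp_file_handle.tell() >= approx_max_file_size_bytes:
        file_no += 1                                 # start a new split once the limit is hit
        tmp_file_handle = NamedTemporaryFile(delete=True)
        files_to_upload.append({'file_name': filename.format(file_no),
                                'file_handle': tmp_file_handle})
print([f['file_name'] for f in files_to_upload])     # e.g. ['export_0.json', ..., 'export_3.json']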
|
test
|
MySqlToGoogleCloudStorageOperator._configure_csv_file
|
Configure a csv writer with the file_handle and write schema
as headers for the new file.
|
airflow/contrib/operators/mysql_to_gcs.py
|
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
|
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
|
[
"Configure",
"a",
"csv",
"writer",
"with",
"the",
"file_handle",
"and",
"write",
"schema",
"as",
"headers",
"for",
"the",
"new",
"file",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L201-L208
|
[
"def",
"_configure_csv_file",
"(",
"self",
",",
"file_handle",
",",
"schema",
")",
":",
"csv_writer",
"=",
"csv",
".",
"writer",
"(",
"file_handle",
",",
"encoding",
"=",
"'utf-8'",
",",
"delimiter",
"=",
"self",
".",
"field_delimiter",
")",
"csv_writer",
".",
"writerow",
"(",
"schema",
")",
"return",
"csv_writer"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
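One detail worth noting: the standard library's csv.writer does not accept an encoding keyword, so the csv name here is presumably a drop-in replacement such as unicodecsv. With the standard library, the equivalent header write looks like this (column names invented):

import csv
import io

schema = ['id', 'name', 'created_at']        # hypothetical column names
buf = io.StringIO()                          # stands in for the temporary file handle
writer = csv.writer(buf, delimiter=',')
writer.writerow(schema)
print(buf.getvalue().strip())                # id,name,created_at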
|
test
|
MySqlToGoogleCloudStorageOperator._write_local_schema_file
|
Takes a cursor, and writes the BigQuery schema in .json format for the
results to a local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
|
airflow/contrib/operators/mysql_to_gcs.py
|
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema in .json format for the
results to a local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema_str = None
schema_file_mime_type = 'application/json'
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
if self.schema is not None and isinstance(self.schema, string_types):
schema_str = self.schema.encode('utf-8')
elif self.schema is not None and isinstance(self.schema, list):
schema_str = json.dumps(self.schema).encode('utf-8')
else:
schema = []
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
if field[6] or field_type == 'TIMESTAMP':
field_mode = 'NULLABLE'
else:
field_mode = 'REQUIRED'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
schema_str = json.dumps(schema, sort_keys=True).encode('utf-8')
tmp_schema_file_handle.write(schema_str)
self.log.info('Using schema for %s: %s', self.schema_filename, schema_str)
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': schema_file_mime_type
}
return schema_file_to_upload
|
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema in .json format for the
results to a local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema_str = None
schema_file_mime_type = 'application/json'
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
if self.schema is not None and isinstance(self.schema, string_types):
schema_str = self.schema.encode('utf-8')
elif self.schema is not None and isinstance(self.schema, list):
schema_str = json.dumps(self.schema).encode('utf-8')
else:
schema = []
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
# Always allow TIMESTAMP to be nullable. MySQLdb returns None types
# for required fields because some MySQL timestamps can't be
# represented by Python's datetime (e.g. 0000-00-00 00:00:00).
if field[6] or field_type == 'TIMESTAMP':
field_mode = 'NULLABLE'
else:
field_mode = 'REQUIRED'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
schema_str = json.dumps(schema, sort_keys=True).encode('utf-8')
tmp_schema_file_handle.write(schema_str)
self.log.info('Using schema for %s: %s', self.schema_filename, schema_str)
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': schema_file_mime_type
}
return schema_file_to_upload
|
[
"Takes",
"a",
"cursor",
"and",
"writes",
"the",
"BigQuery",
"schema",
"in",
".",
"json",
"format",
"for",
"the",
"results",
"to",
"a",
"local",
"file",
"system",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L210-L253
|
[
"def",
"_write_local_schema_file",
"(",
"self",
",",
"cursor",
")",
":",
"schema_str",
"=",
"None",
"schema_file_mime_type",
"=",
"'application/json'",
"tmp_schema_file_handle",
"=",
"NamedTemporaryFile",
"(",
"delete",
"=",
"True",
")",
"if",
"self",
".",
"schema",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"self",
".",
"schema",
",",
"string_types",
")",
":",
"schema_str",
"=",
"self",
".",
"schema",
".",
"encode",
"(",
"'utf-8'",
")",
"elif",
"self",
".",
"schema",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"self",
".",
"schema",
",",
"list",
")",
":",
"schema_str",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"schema",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"else",
":",
"schema",
"=",
"[",
"]",
"for",
"field",
"in",
"cursor",
".",
"description",
":",
"# See PEP 249 for details about the description tuple.",
"field_name",
"=",
"field",
"[",
"0",
"]",
"field_type",
"=",
"self",
".",
"type_map",
"(",
"field",
"[",
"1",
"]",
")",
"# Always allow TIMESTAMP to be nullable. MySQLdb returns None types",
"# for required fields because some MySQL timestamps can't be",
"# represented by Python's datetime (e.g. 0000-00-00 00:00:00).",
"if",
"field",
"[",
"6",
"]",
"or",
"field_type",
"==",
"'TIMESTAMP'",
":",
"field_mode",
"=",
"'NULLABLE'",
"else",
":",
"field_mode",
"=",
"'REQUIRED'",
"schema",
".",
"append",
"(",
"{",
"'name'",
":",
"field_name",
",",
"'type'",
":",
"field_type",
",",
"'mode'",
":",
"field_mode",
",",
"}",
")",
"schema_str",
"=",
"json",
".",
"dumps",
"(",
"schema",
",",
"sort_keys",
"=",
"True",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"tmp_schema_file_handle",
".",
"write",
"(",
"schema_str",
")",
"self",
".",
"log",
".",
"info",
"(",
"'Using schema for %s: %s'",
",",
"self",
".",
"schema_filename",
",",
"schema_str",
")",
"schema_file_to_upload",
"=",
"{",
"'file_name'",
":",
"self",
".",
"schema_filename",
",",
"'file_handle'",
":",
"tmp_schema_file_handle",
",",
"'file_mime_type'",
":",
"schema_file_mime_type",
"}",
"return",
"schema_file_to_upload"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
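As an illustration, applying the same branch to a hand-written stand-in for cursor.description produces a BigQuery-style schema; the PEP 249 7-tuples and the type-code mapping below are invented and only replace type_map for the demo.

import json

# (name, type_code, display_size, internal_size, precision, scale, null_ok)
description = [
    ('id',         3,   None, None, None, None, 0),   # hypothetical integer column, NOT NULL
    ('created_at', 12,  None, None, None, None, 0),   # hypothetical datetime column
    ('comment',    253, None, None, None, None, 1),   # hypothetical varchar column, nullable
]
type_codes = {3: 'INTEGER', 12: 'TIMESTAMP'}           # stand-in for type_map; unknown -> STRING

schema = []
for field in description:
    field_type = type_codes.get(field[1], 'STRING')
    field_mode = 'NULLABLE' if field[6] or field_type == 'TIMESTAMP' else 'REQUIRED'
    schema.append({'name': field[0], 'type': field_type, 'mode': field_mode})
print(json.dumps(schema, sort_keys=True))
# [{"mode": "REQUIRED", "name": "id", "type": "INTEGER"}, {"mode": "NULLABLE", "name": "created_at", ...}, ...]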
|
test
|
MySqlToGoogleCloudStorageOperator._upload_to_gcs
|
Upload all of the file splits (and optionally the schema .json file) to
Google cloud storage.
|
airflow/contrib/operators/mysql_to_gcs.py
|
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google cloud storage.
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'),
tmp_file.get('file_handle').name,
mime_type=tmp_file.get('file_mime_type'))
|
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google cloud storage.
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'),
tmp_file.get('file_handle').name,
mime_type=tmp_file.get('file_mime_type'))
|
[
"Upload",
"all",
"of",
"the",
"file",
"splits",
"(",
"and",
"optionally",
"the",
"schema",
".",
"json",
"file",
")",
"to",
"Google",
"cloud",
"storage",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L255-L266
|
[
"def",
"_upload_to_gcs",
"(",
"self",
",",
"files_to_upload",
")",
":",
"hook",
"=",
"GoogleCloudStorageHook",
"(",
"google_cloud_storage_conn_id",
"=",
"self",
".",
"google_cloud_storage_conn_id",
",",
"delegate_to",
"=",
"self",
".",
"delegate_to",
")",
"for",
"tmp_file",
"in",
"files_to_upload",
":",
"hook",
".",
"upload",
"(",
"self",
".",
"bucket",
",",
"tmp_file",
".",
"get",
"(",
"'file_name'",
")",
",",
"tmp_file",
".",
"get",
"(",
"'file_handle'",
")",
".",
"name",
",",
"mime_type",
"=",
"tmp_file",
".",
"get",
"(",
"'file_mime_type'",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
MySqlToGoogleCloudStorageOperator._convert_types
|
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats. Binary type fields are encoded with base64,
as imported BYTES data must be base64-encoded according to Bigquery SQL
data type documentation: https://cloud.google.com/bigquery/data-types
|
airflow/contrib/operators/mysql_to_gcs.py
|
def _convert_types(schema, col_type_dict, row):
"""
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats. Binary type fields are encoded with base64,
as imported BYTES data must be base64-encoded according to Bigquery SQL
    data type documentation: https://cloud.google.com/bigquery/data-types
"""
converted_row = []
for col_name, col_val in zip(schema, row):
if type(col_val) in (datetime, date):
col_val = time.mktime(col_val.timetuple())
elif isinstance(col_val, Decimal):
col_val = float(col_val)
elif col_type_dict.get(col_name) == "BYTES":
col_val = base64.standard_b64encode(col_val).decode('ascii')
else:
col_val = col_val
converted_row.append(col_val)
return converted_row
|
def _convert_types(schema, col_type_dict, row):
"""
Takes a value from MySQLdb, and converts it to a value that's safe for
JSON/Google cloud storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats. Binary type fields are encoded with base64,
as imported BYTES data must be base64-encoded according to Bigquery SQL
    data type documentation: https://cloud.google.com/bigquery/data-types
"""
converted_row = []
for col_name, col_val in zip(schema, row):
if type(col_val) in (datetime, date):
col_val = time.mktime(col_val.timetuple())
elif isinstance(col_val, Decimal):
col_val = float(col_val)
elif col_type_dict.get(col_name) == "BYTES":
col_val = base64.standard_b64encode(col_val).decode('ascii')
else:
col_val = col_val
converted_row.append(col_val)
return converted_row
|
[
"Takes",
"a",
"value",
"from",
"MySQLdb",
"and",
"converts",
"it",
"to",
"a",
"value",
"that",
"s",
"safe",
"for",
"JSON",
"/",
"Google",
"cloud",
"storage",
"/",
"BigQuery",
".",
"Dates",
"are",
"converted",
"to",
"UTC",
"seconds",
".",
"Decimals",
"are",
"converted",
"to",
"floats",
".",
"Binary",
"type",
"fields",
"are",
"encoded",
"with",
"base64",
"as",
"imported",
"BYTES",
"data",
"must",
"be",
"base64",
"-",
"encoded",
"according",
"to",
"Bigquery",
"SQL",
"date",
"type",
"documentation",
":",
"https",
":",
"//",
"cloud",
".",
"google",
".",
"com",
"/",
"bigquery",
"/",
"data",
"-",
"types"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L269-L288
|
[
"def",
"_convert_types",
"(",
"schema",
",",
"col_type_dict",
",",
"row",
")",
":",
"converted_row",
"=",
"[",
"]",
"for",
"col_name",
",",
"col_val",
"in",
"zip",
"(",
"schema",
",",
"row",
")",
":",
"if",
"type",
"(",
"col_val",
")",
"in",
"(",
"datetime",
",",
"date",
")",
":",
"col_val",
"=",
"time",
".",
"mktime",
"(",
"col_val",
".",
"timetuple",
"(",
")",
")",
"elif",
"isinstance",
"(",
"col_val",
",",
"Decimal",
")",
":",
"col_val",
"=",
"float",
"(",
"col_val",
")",
"elif",
"col_type_dict",
".",
"get",
"(",
"col_name",
")",
"==",
"\"BYTES\"",
":",
"col_val",
"=",
"base64",
".",
"standard_b64encode",
"(",
"col_val",
")",
".",
"decode",
"(",
"'ascii'",
")",
"else",
":",
"col_val",
"=",
"col_val",
"converted_row",
".",
"append",
"(",
"col_val",
")",
"return",
"converted_row"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
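The conversions are easiest to see on a single hand-made row; the column names, the BYTES marker and the values are all invented, only the branch logic mirrors the function above.

import base64
import time
from datetime import datetime, date
from decimal import Decimal

schema = ['created_at', 'price', 'payload', 'name']
col_type_dict = {'payload': 'BYTES'}                          # hypothetical schema info
row = (datetime(2019, 1, 1, 12, 0, 0), Decimal('9.99'), b'\x00\x01', 'abc')

converted_row = []
for col_name, col_val in zip(schema, row):
    if type(col_val) in (datetime, date):
        col_val = time.mktime(col_val.timetuple())            # seconds since epoch (local time)
    elif isinstance(col_val, Decimal):
        col_val = float(col_val)
    elif col_type_dict.get(col_name) == "BYTES":
        col_val = base64.standard_b64encode(col_val).decode('ascii')
    converted_row.append(col_val)
print(converted_row)
# e.g. [1546344000.0, 9.99, 'AAE=', 'abc'] when the local timezone is UTC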
|
test
|
MySqlToGoogleCloudStorageOperator._get_col_type_dict
|
Return a dict of column name and column type based on self.schema if not None.
|
airflow/contrib/operators/mysql_to_gcs.py
|
def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, string_types):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
        self.log.warn('Using default schema due to unexpected type. '
'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warn('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict
|
def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, string_types):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
        self.log.warn('Using default schema due to unexpected type. '
'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warn('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict
|
[
"Return",
"a",
"dict",
"of",
"column",
"name",
"and",
"column",
"type",
"based",
"on",
"self",
".",
"schema",
"if",
"not",
"None",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L290-L310
|
[
"def",
"_get_col_type_dict",
"(",
"self",
")",
":",
"schema",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"self",
".",
"schema",
",",
"string_types",
")",
":",
"schema",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"schema",
")",
"elif",
"isinstance",
"(",
"self",
".",
"schema",
",",
"list",
")",
":",
"schema",
"=",
"self",
".",
"schema",
"elif",
"self",
".",
"schema",
"is",
"not",
"None",
":",
"self",
".",
"log",
".",
"warn",
"(",
"'Using default schema due to unexpected type.'",
"'Should be a string or list.'",
")",
"col_type_dict",
"=",
"{",
"}",
"try",
":",
"col_type_dict",
"=",
"{",
"col",
"[",
"'name'",
"]",
":",
"col",
"[",
"'type'",
"]",
"for",
"col",
"in",
"schema",
"}",
"except",
"KeyError",
":",
"self",
".",
"log",
".",
"warn",
"(",
"'Using default schema due to missing name or type. Please '",
"'refer to: https://cloud.google.com/bigquery/docs/schemas'",
"'#specifying_a_json_schema_file'",
")",
"return",
"col_type_dict"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
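For a quick check, a hand-written schema list (hypothetical fields) collapses to the name-to-type mapping consumed by _convert_types:

schema = [{'name': 'id', 'type': 'INTEGER', 'mode': 'REQUIRED'},
          {'name': 'payload', 'type': 'BYTES', 'mode': 'NULLABLE'}]   # hypothetical schema
col_type_dict = {col['name']: col['type'] for col in schema}
print(col_type_dict)                                                  # {'id': 'INTEGER', 'payload': 'BYTES'}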
|
test
|
MySqlToGoogleCloudStorageOperator.type_map
|
Helper function that maps from MySQL fields to BigQuery fields. Used
when a schema_filename is set.
|
airflow/contrib/operators/mysql_to_gcs.py
|
def type_map(cls, mysql_type):
"""
Helper function that maps from MySQL fields to BigQuery fields. Used
when a schema_filename is set.
"""
d = {
FIELD_TYPE.INT24: 'INTEGER',
FIELD_TYPE.TINY: 'INTEGER',
FIELD_TYPE.BIT: 'INTEGER',
FIELD_TYPE.DATETIME: 'TIMESTAMP',
FIELD_TYPE.DATE: 'TIMESTAMP',
FIELD_TYPE.DECIMAL: 'FLOAT',
FIELD_TYPE.NEWDECIMAL: 'FLOAT',
FIELD_TYPE.DOUBLE: 'FLOAT',
FIELD_TYPE.FLOAT: 'FLOAT',
FIELD_TYPE.LONG: 'INTEGER',
FIELD_TYPE.LONGLONG: 'INTEGER',
FIELD_TYPE.SHORT: 'INTEGER',
FIELD_TYPE.TIMESTAMP: 'TIMESTAMP',
FIELD_TYPE.YEAR: 'INTEGER',
}
return d[mysql_type] if mysql_type in d else 'STRING'
|
def type_map(cls, mysql_type):
"""
Helper function that maps from MySQL fields to BigQuery fields. Used
when a schema_filename is set.
"""
d = {
FIELD_TYPE.INT24: 'INTEGER',
FIELD_TYPE.TINY: 'INTEGER',
FIELD_TYPE.BIT: 'INTEGER',
FIELD_TYPE.DATETIME: 'TIMESTAMP',
FIELD_TYPE.DATE: 'TIMESTAMP',
FIELD_TYPE.DECIMAL: 'FLOAT',
FIELD_TYPE.NEWDECIMAL: 'FLOAT',
FIELD_TYPE.DOUBLE: 'FLOAT',
FIELD_TYPE.FLOAT: 'FLOAT',
FIELD_TYPE.LONG: 'INTEGER',
FIELD_TYPE.LONGLONG: 'INTEGER',
FIELD_TYPE.SHORT: 'INTEGER',
FIELD_TYPE.TIMESTAMP: 'TIMESTAMP',
FIELD_TYPE.YEAR: 'INTEGER',
}
return d[mysql_type] if mysql_type in d else 'STRING'
|
[
"Helper",
"function",
"that",
"maps",
"from",
"MySQL",
"fields",
"to",
"BigQuery",
"fields",
".",
"Used",
"when",
"a",
"schema_filename",
"is",
"set",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L313-L334
|
[
"def",
"type_map",
"(",
"cls",
",",
"mysql_type",
")",
":",
"d",
"=",
"{",
"FIELD_TYPE",
".",
"INT24",
":",
"'INTEGER'",
",",
"FIELD_TYPE",
".",
"TINY",
":",
"'INTEGER'",
",",
"FIELD_TYPE",
".",
"BIT",
":",
"'INTEGER'",
",",
"FIELD_TYPE",
".",
"DATETIME",
":",
"'TIMESTAMP'",
",",
"FIELD_TYPE",
".",
"DATE",
":",
"'TIMESTAMP'",
",",
"FIELD_TYPE",
".",
"DECIMAL",
":",
"'FLOAT'",
",",
"FIELD_TYPE",
".",
"NEWDECIMAL",
":",
"'FLOAT'",
",",
"FIELD_TYPE",
".",
"DOUBLE",
":",
"'FLOAT'",
",",
"FIELD_TYPE",
".",
"FLOAT",
":",
"'FLOAT'",
",",
"FIELD_TYPE",
".",
"LONG",
":",
"'INTEGER'",
",",
"FIELD_TYPE",
".",
"LONGLONG",
":",
"'INTEGER'",
",",
"FIELD_TYPE",
".",
"SHORT",
":",
"'INTEGER'",
",",
"FIELD_TYPE",
".",
"TIMESTAMP",
":",
"'TIMESTAMP'",
",",
"FIELD_TYPE",
".",
"YEAR",
":",
"'INTEGER'",
",",
"}",
"return",
"d",
"[",
"mysql_type",
"]",
"if",
"mysql_type",
"in",
"d",
"else",
"'STRING'"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
authenticate
|
Authenticate a PasswordUser with the specified
username/password.
:param session: An active SQLAlchemy session
:param username: The username
:param password: The password
:raise AuthenticationError: if an error occurred
:return: a PasswordUser
|
airflow/contrib/auth/backends/password_auth.py
|
def authenticate(session, username, password):
"""
Authenticate a PasswordUser with the specified
username/password.
:param session: An active SQLAlchemy session
:param username: The username
:param password: The password
:raise AuthenticationError: if an error occurred
:return: a PasswordUser
"""
if not username or not password:
raise AuthenticationError()
user = session.query(PasswordUser).filter(
PasswordUser.username == username).first()
if not user:
raise AuthenticationError()
if not user.authenticate(password):
raise AuthenticationError()
log.info("User %s successfully authenticated", username)
return user
|
def authenticate(session, username, password):
"""
Authenticate a PasswordUser with the specified
username/password.
:param session: An active SQLAlchemy session
:param username: The username
:param password: The password
:raise AuthenticationError: if an error occurred
:return: a PasswordUser
"""
if not username or not password:
raise AuthenticationError()
user = session.query(PasswordUser).filter(
PasswordUser.username == username).first()
if not user:
raise AuthenticationError()
if not user.authenticate(password):
raise AuthenticationError()
log.info("User %s successfully authenticated", username)
return user
|
[
"Authenticate",
"a",
"PasswordUser",
"with",
"the",
"specified",
"username",
"/",
"password",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/auth/backends/password_auth.py#L107-L132
|
[
"def",
"authenticate",
"(",
"session",
",",
"username",
",",
"password",
")",
":",
"if",
"not",
"username",
"or",
"not",
"password",
":",
"raise",
"AuthenticationError",
"(",
")",
"user",
"=",
"session",
".",
"query",
"(",
"PasswordUser",
")",
".",
"filter",
"(",
"PasswordUser",
".",
"username",
"==",
"username",
")",
".",
"first",
"(",
")",
"if",
"not",
"user",
":",
"raise",
"AuthenticationError",
"(",
")",
"if",
"not",
"user",
".",
"authenticate",
"(",
"password",
")",
":",
"raise",
"AuthenticationError",
"(",
")",
"log",
".",
"info",
"(",
"\"User %s successfully authenticated\"",
",",
"username",
")",
"return",
"user"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
SqoopOperator.execute
|
Execute sqoop job
|
airflow/contrib/operators/sqoop_operator.py
|
def execute(self, context):
"""
Execute sqoop job
"""
self.hook = SqoopHook(
conn_id=self.conn_id,
verbose=self.verbose,
num_mappers=self.num_mappers,
hcatalog_database=self.hcatalog_database,
hcatalog_table=self.hcatalog_table,
properties=self.properties
)
if self.cmd_type == 'export':
self.hook.export_table(
table=self.table,
export_dir=self.export_dir,
input_null_string=self.input_null_string,
input_null_non_string=self.input_null_non_string,
staging_table=self.staging_table,
clear_staging_table=self.clear_staging_table,
enclosed_by=self.enclosed_by,
escaped_by=self.escaped_by,
input_fields_terminated_by=self.input_fields_terminated_by,
input_lines_terminated_by=self.input_lines_terminated_by,
input_optionally_enclosed_by=self.input_optionally_enclosed_by,
batch=self.batch,
relaxed_isolation=self.relaxed_isolation,
extra_export_options=self.extra_export_options)
elif self.cmd_type == 'import':
# add create hcatalog table to extra import options if option passed
# if new params are added to constructor can pass them in here
# so don't modify sqoop_hook for each param
if self.create_hcatalog_table:
self.extra_import_options['create-hcatalog-table'] = ''
if self.table and self.query:
raise AirflowException(
'Cannot specify query and table together. Need to specify either or.'
)
if self.table:
self.hook.import_table(
table=self.table,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
columns=self.columns,
split_by=self.split_by,
where=self.where,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options)
elif self.query:
self.hook.import_query(
query=self.query,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
split_by=self.split_by,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options)
else:
raise AirflowException(
"Provide query or table parameter to import using Sqoop"
)
else:
raise AirflowException("cmd_type should be 'import' or 'export'")
|
def execute(self, context):
"""
Execute sqoop job
"""
self.hook = SqoopHook(
conn_id=self.conn_id,
verbose=self.verbose,
num_mappers=self.num_mappers,
hcatalog_database=self.hcatalog_database,
hcatalog_table=self.hcatalog_table,
properties=self.properties
)
if self.cmd_type == 'export':
self.hook.export_table(
table=self.table,
export_dir=self.export_dir,
input_null_string=self.input_null_string,
input_null_non_string=self.input_null_non_string,
staging_table=self.staging_table,
clear_staging_table=self.clear_staging_table,
enclosed_by=self.enclosed_by,
escaped_by=self.escaped_by,
input_fields_terminated_by=self.input_fields_terminated_by,
input_lines_terminated_by=self.input_lines_terminated_by,
input_optionally_enclosed_by=self.input_optionally_enclosed_by,
batch=self.batch,
relaxed_isolation=self.relaxed_isolation,
extra_export_options=self.extra_export_options)
elif self.cmd_type == 'import':
# add create hcatalog table to extra import options if option passed
# if new params are added to constructor can pass them in here
# so don't modify sqoop_hook for each param
if self.create_hcatalog_table:
self.extra_import_options['create-hcatalog-table'] = ''
if self.table and self.query:
raise AirflowException(
'Cannot specify query and table together. Need to specify either or.'
)
if self.table:
self.hook.import_table(
table=self.table,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
columns=self.columns,
split_by=self.split_by,
where=self.where,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options)
elif self.query:
self.hook.import_query(
query=self.query,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
split_by=self.split_by,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options)
else:
raise AirflowException(
"Provide query or table parameter to import using Sqoop"
)
else:
raise AirflowException("cmd_type should be 'import' or 'export'")
|
[
"Execute",
"sqoop",
"job"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/sqoop_operator.py#L166-L234
|
[
"def",
"execute",
"(",
"self",
",",
"context",
")",
":",
"self",
".",
"hook",
"=",
"SqoopHook",
"(",
"conn_id",
"=",
"self",
".",
"conn_id",
",",
"verbose",
"=",
"self",
".",
"verbose",
",",
"num_mappers",
"=",
"self",
".",
"num_mappers",
",",
"hcatalog_database",
"=",
"self",
".",
"hcatalog_database",
",",
"hcatalog_table",
"=",
"self",
".",
"hcatalog_table",
",",
"properties",
"=",
"self",
".",
"properties",
")",
"if",
"self",
".",
"cmd_type",
"==",
"'export'",
":",
"self",
".",
"hook",
".",
"export_table",
"(",
"table",
"=",
"self",
".",
"table",
",",
"export_dir",
"=",
"self",
".",
"export_dir",
",",
"input_null_string",
"=",
"self",
".",
"input_null_string",
",",
"input_null_non_string",
"=",
"self",
".",
"input_null_non_string",
",",
"staging_table",
"=",
"self",
".",
"staging_table",
",",
"clear_staging_table",
"=",
"self",
".",
"clear_staging_table",
",",
"enclosed_by",
"=",
"self",
".",
"enclosed_by",
",",
"escaped_by",
"=",
"self",
".",
"escaped_by",
",",
"input_fields_terminated_by",
"=",
"self",
".",
"input_fields_terminated_by",
",",
"input_lines_terminated_by",
"=",
"self",
".",
"input_lines_terminated_by",
",",
"input_optionally_enclosed_by",
"=",
"self",
".",
"input_optionally_enclosed_by",
",",
"batch",
"=",
"self",
".",
"batch",
",",
"relaxed_isolation",
"=",
"self",
".",
"relaxed_isolation",
",",
"extra_export_options",
"=",
"self",
".",
"extra_export_options",
")",
"elif",
"self",
".",
"cmd_type",
"==",
"'import'",
":",
"# add create hcatalog table to extra import options if option passed",
"# if new params are added to constructor can pass them in here",
"# so don't modify sqoop_hook for each param",
"if",
"self",
".",
"create_hcatalog_table",
":",
"self",
".",
"extra_import_options",
"[",
"'create-hcatalog-table'",
"]",
"=",
"''",
"if",
"self",
".",
"table",
"and",
"self",
".",
"query",
":",
"raise",
"AirflowException",
"(",
"'Cannot specify query and table together. Need to specify either or.'",
")",
"if",
"self",
".",
"table",
":",
"self",
".",
"hook",
".",
"import_table",
"(",
"table",
"=",
"self",
".",
"table",
",",
"target_dir",
"=",
"self",
".",
"target_dir",
",",
"append",
"=",
"self",
".",
"append",
",",
"file_type",
"=",
"self",
".",
"file_type",
",",
"columns",
"=",
"self",
".",
"columns",
",",
"split_by",
"=",
"self",
".",
"split_by",
",",
"where",
"=",
"self",
".",
"where",
",",
"direct",
"=",
"self",
".",
"direct",
",",
"driver",
"=",
"self",
".",
"driver",
",",
"extra_import_options",
"=",
"self",
".",
"extra_import_options",
")",
"elif",
"self",
".",
"query",
":",
"self",
".",
"hook",
".",
"import_query",
"(",
"query",
"=",
"self",
".",
"query",
",",
"target_dir",
"=",
"self",
".",
"target_dir",
",",
"append",
"=",
"self",
".",
"append",
",",
"file_type",
"=",
"self",
".",
"file_type",
",",
"split_by",
"=",
"self",
".",
"split_by",
",",
"direct",
"=",
"self",
".",
"direct",
",",
"driver",
"=",
"self",
".",
"driver",
",",
"extra_import_options",
"=",
"self",
".",
"extra_import_options",
")",
"else",
":",
"raise",
"AirflowException",
"(",
"\"Provide query or table parameter to import using Sqoop\"",
")",
"else",
":",
"raise",
"AirflowException",
"(",
"\"cmd_type should be 'import' or 'export'\"",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
apply_lineage
|
Saves the lineage to XCom and if configured to do so sends it
to the backend.
|
airflow/lineage/__init__.py
|
def apply_lineage(func):
"""
Saves the lineage to XCom and if configured to do so sends it
to the backend.
"""
backend = _get_backend()
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Backend: %s, Lineage called with inlets: %s, outlets: %s",
backend, self.inlets, self.outlets)
ret_val = func(self, context, *args, **kwargs)
outlets = [x.as_dict() for x in self.outlets]
inlets = [x.as_dict() for x in self.inlets]
if len(self.outlets) > 0:
self.xcom_push(context,
key=PIPELINE_OUTLETS,
value=outlets,
execution_date=context['ti'].execution_date)
if len(self.inlets) > 0:
self.xcom_push(context,
key=PIPELINE_INLETS,
value=inlets,
execution_date=context['ti'].execution_date)
if backend:
backend.send_lineage(operator=self, inlets=self.inlets,
outlets=self.outlets, context=context)
return ret_val
return wrapper
|
def apply_lineage(func):
"""
Saves the lineage to XCom and if configured to do so sends it
to the backend.
"""
backend = _get_backend()
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Backend: %s, Lineage called with inlets: %s, outlets: %s",
backend, self.inlets, self.outlets)
ret_val = func(self, context, *args, **kwargs)
outlets = [x.as_dict() for x in self.outlets]
inlets = [x.as_dict() for x in self.inlets]
if len(self.outlets) > 0:
self.xcom_push(context,
key=PIPELINE_OUTLETS,
value=outlets,
execution_date=context['ti'].execution_date)
if len(self.inlets) > 0:
self.xcom_push(context,
key=PIPELINE_INLETS,
value=inlets,
execution_date=context['ti'].execution_date)
if backend:
backend.send_lineage(operator=self, inlets=self.inlets,
outlets=self.outlets, context=context)
return ret_val
return wrapper
|
[
"Saves",
"the",
"lineage",
"to",
"XCom",
"and",
"if",
"configured",
"to",
"do",
"so",
"sends",
"it",
"to",
"the",
"backend",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/lineage/__init__.py#L48-L82
|
[
"def",
"apply_lineage",
"(",
"func",
")",
":",
"backend",
"=",
"_get_backend",
"(",
")",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"context",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Backend: %s, Lineage called with inlets: %s, outlets: %s\"",
",",
"backend",
",",
"self",
".",
"inlets",
",",
"self",
".",
"outlets",
")",
"ret_val",
"=",
"func",
"(",
"self",
",",
"context",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"outlets",
"=",
"[",
"x",
".",
"as_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"outlets",
"]",
"inlets",
"=",
"[",
"x",
".",
"as_dict",
"(",
")",
"for",
"x",
"in",
"self",
".",
"inlets",
"]",
"if",
"len",
"(",
"self",
".",
"outlets",
")",
">",
"0",
":",
"self",
".",
"xcom_push",
"(",
"context",
",",
"key",
"=",
"PIPELINE_OUTLETS",
",",
"value",
"=",
"outlets",
",",
"execution_date",
"=",
"context",
"[",
"'ti'",
"]",
".",
"execution_date",
")",
"if",
"len",
"(",
"self",
".",
"inlets",
")",
">",
"0",
":",
"self",
".",
"xcom_push",
"(",
"context",
",",
"key",
"=",
"PIPELINE_INLETS",
",",
"value",
"=",
"inlets",
",",
"execution_date",
"=",
"context",
"[",
"'ti'",
"]",
".",
"execution_date",
")",
"if",
"backend",
":",
"backend",
".",
"send_lineage",
"(",
"operator",
"=",
"self",
",",
"inlets",
"=",
"self",
".",
"inlets",
",",
"outlets",
"=",
"self",
".",
"outlets",
",",
"context",
"=",
"context",
")",
"return",
"ret_val",
"return",
"wrapper"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
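Stripped of the Airflow objects, the decorator is a plain post-execution hook; the sketch below keeps the wrap-call-then-publish shape but replaces xcom_push and the backend with a made-up publish function and a fake operator.

from functools import wraps

def publish(key, value):
    # Stand-in for xcom_push / backend.send_lineage in the real decorator.
    print("publish", key, value)

def apply_lineage_like(func):
    @wraps(func)
    def wrapper(self, context, *args, **kwargs):
        ret_val = func(self, context, *args, **kwargs)   # run the wrapped execute first
        if self.outlets:
            publish("pipeline_outlets", self.outlets)
        if self.inlets:
            publish("pipeline_inlets", self.inlets)
        return ret_val
    return wrapper

class FakeOperator:
    inlets = ['raw_table']        # hypothetical dataset names
    outlets = ['clean_table']

    @apply_lineage_like
    def execute(self, context):
        return "done"

print(FakeOperator().execute({}))  # publishes outlets and inlets, then prints "done"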
|
test
|
prepare_lineage
|
Prepares the lineage inlets and outlets. Inlets can be:
* "auto" -> picks up any outlets from direct upstream tasks that have outlets defined, as such that
if A -> B -> C and B does not have outlets but A does, these are provided as inlets.
* "list of task_ids" -> picks up outlets from the upstream task_ids
* "list of datasets" -> manually defined list of DataSet
|
airflow/lineage/__init__.py
|
def prepare_lineage(func):
"""
Prepares the lineage inlets and outlets. Inlets can be:
* "auto" -> picks up any outlets from direct upstream tasks that have outlets defined, as such that
if A -> B -> C and B does not have outlets but A does, these are provided as inlets.
* "list of task_ids" -> picks up outlets from the upstream task_ids
* "list of datasets" -> manually defined list of DataSet
"""
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Preparing lineage inlets and outlets")
task_ids = set(self._inlets['task_ids']).intersection(
self.get_flat_relative_ids(upstream=True)
)
if task_ids:
inlets = self.xcom_pull(context,
task_ids=task_ids,
dag_id=self.dag_id,
key=PIPELINE_OUTLETS)
inlets = [item for sublist in inlets if sublist for item in sublist]
inlets = [DataSet.map_type(i['typeName'])(data=i['attributes'])
for i in inlets]
self.inlets.extend(inlets)
if self._inlets['auto']:
# dont append twice
task_ids = set(self._inlets['task_ids']).symmetric_difference(
self.upstream_task_ids
)
inlets = self.xcom_pull(context,
task_ids=task_ids,
dag_id=self.dag_id,
key=PIPELINE_OUTLETS)
inlets = [item for sublist in inlets if sublist for item in sublist]
inlets = [DataSet.map_type(i['typeName'])(data=i['attributes'])
for i in inlets]
self.inlets.extend(inlets)
if len(self._inlets['datasets']) > 0:
self.inlets.extend(self._inlets['datasets'])
# outlets
if len(self._outlets['datasets']) > 0:
self.outlets.extend(self._outlets['datasets'])
self.log.debug("inlets: %s, outlets: %s", self.inlets, self.outlets)
for dataset in chain(self.inlets, self.outlets):
dataset.set_context(context)
return func(self, context, *args, **kwargs)
return wrapper
|
def prepare_lineage(func):
"""
Prepares the lineage inlets and outlets. Inlets can be:
* "auto" -> picks up any outlets from direct upstream tasks that have outlets defined, as such that
if A -> B -> C and B does not have outlets but A does, these are provided as inlets.
* "list of task_ids" -> picks up outlets from the upstream task_ids
* "list of datasets" -> manually defined list of DataSet
"""
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Preparing lineage inlets and outlets")
task_ids = set(self._inlets['task_ids']).intersection(
self.get_flat_relative_ids(upstream=True)
)
if task_ids:
inlets = self.xcom_pull(context,
task_ids=task_ids,
dag_id=self.dag_id,
key=PIPELINE_OUTLETS)
inlets = [item for sublist in inlets if sublist for item in sublist]
inlets = [DataSet.map_type(i['typeName'])(data=i['attributes'])
for i in inlets]
self.inlets.extend(inlets)
if self._inlets['auto']:
# dont append twice
task_ids = set(self._inlets['task_ids']).symmetric_difference(
self.upstream_task_ids
)
inlets = self.xcom_pull(context,
task_ids=task_ids,
dag_id=self.dag_id,
key=PIPELINE_OUTLETS)
inlets = [item for sublist in inlets if sublist for item in sublist]
inlets = [DataSet.map_type(i['typeName'])(data=i['attributes'])
for i in inlets]
self.inlets.extend(inlets)
if len(self._inlets['datasets']) > 0:
self.inlets.extend(self._inlets['datasets'])
# outlets
if len(self._outlets['datasets']) > 0:
self.outlets.extend(self._outlets['datasets'])
self.log.debug("inlets: %s, outlets: %s", self.inlets, self.outlets)
for dataset in chain(self.inlets, self.outlets):
dataset.set_context(context)
return func(self, context, *args, **kwargs)
return wrapper
|
[
"Prepares",
"the",
"lineage",
"inlets",
"and",
"outlets",
".",
"Inlets",
"can",
"be",
":"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/lineage/__init__.py#L85-L140
|
[
"def",
"prepare_lineage",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"context",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"\"Preparing lineage inlets and outlets\"",
")",
"task_ids",
"=",
"set",
"(",
"self",
".",
"_inlets",
"[",
"'task_ids'",
"]",
")",
".",
"intersection",
"(",
"self",
".",
"get_flat_relative_ids",
"(",
"upstream",
"=",
"True",
")",
")",
"if",
"task_ids",
":",
"inlets",
"=",
"self",
".",
"xcom_pull",
"(",
"context",
",",
"task_ids",
"=",
"task_ids",
",",
"dag_id",
"=",
"self",
".",
"dag_id",
",",
"key",
"=",
"PIPELINE_OUTLETS",
")",
"inlets",
"=",
"[",
"item",
"for",
"sublist",
"in",
"inlets",
"if",
"sublist",
"for",
"item",
"in",
"sublist",
"]",
"inlets",
"=",
"[",
"DataSet",
".",
"map_type",
"(",
"i",
"[",
"'typeName'",
"]",
")",
"(",
"data",
"=",
"i",
"[",
"'attributes'",
"]",
")",
"for",
"i",
"in",
"inlets",
"]",
"self",
".",
"inlets",
".",
"extend",
"(",
"inlets",
")",
"if",
"self",
".",
"_inlets",
"[",
"'auto'",
"]",
":",
"# dont append twice",
"task_ids",
"=",
"set",
"(",
"self",
".",
"_inlets",
"[",
"'task_ids'",
"]",
")",
".",
"symmetric_difference",
"(",
"self",
".",
"upstream_task_ids",
")",
"inlets",
"=",
"self",
".",
"xcom_pull",
"(",
"context",
",",
"task_ids",
"=",
"task_ids",
",",
"dag_id",
"=",
"self",
".",
"dag_id",
",",
"key",
"=",
"PIPELINE_OUTLETS",
")",
"inlets",
"=",
"[",
"item",
"for",
"sublist",
"in",
"inlets",
"if",
"sublist",
"for",
"item",
"in",
"sublist",
"]",
"inlets",
"=",
"[",
"DataSet",
".",
"map_type",
"(",
"i",
"[",
"'typeName'",
"]",
")",
"(",
"data",
"=",
"i",
"[",
"'attributes'",
"]",
")",
"for",
"i",
"in",
"inlets",
"]",
"self",
".",
"inlets",
".",
"extend",
"(",
"inlets",
")",
"if",
"len",
"(",
"self",
".",
"_inlets",
"[",
"'datasets'",
"]",
")",
">",
"0",
":",
"self",
".",
"inlets",
".",
"extend",
"(",
"self",
".",
"_inlets",
"[",
"'datasets'",
"]",
")",
"# outlets",
"if",
"len",
"(",
"self",
".",
"_outlets",
"[",
"'datasets'",
"]",
")",
">",
"0",
":",
"self",
".",
"outlets",
".",
"extend",
"(",
"self",
".",
"_outlets",
"[",
"'datasets'",
"]",
")",
"self",
".",
"log",
".",
"debug",
"(",
"\"inlets: %s, outlets: %s\"",
",",
"self",
".",
"inlets",
",",
"self",
".",
"outlets",
")",
"for",
"dataset",
"in",
"chain",
"(",
"self",
".",
"inlets",
",",
"self",
".",
"outlets",
")",
":",
"dataset",
".",
"set_context",
"(",
"context",
")",
"return",
"func",
"(",
"self",
",",
"context",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
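Editor's note: in the prepare_lineage record above, the "auto" mode combines explicitly listed task_ids with the remaining direct upstream tasks; the symmetric_difference step is what prevents the same task's outlets from being pulled twice. A plain-Python sketch of that set logic follows; the task names and pushed_outlets mapping are made up and only stand in for the XCom pulls.

# Outlets previously "pushed" by upstream tasks (stand-in for XCom); names are hypothetical.
pushed_outlets = {
    "extract_a": [{"typeName": "Table", "name": "a"}],
    "extract_b": [{"typeName": "Table", "name": "b"}],
}

upstream_task_ids = {"extract_a", "extract_b"}
explicit_task_ids = {"extract_a"}   # e.g. inlets={"task_ids": ["extract_a"], "auto": True}

# 1) pull outlets for the explicitly requested upstream tasks
inlets = [o for tid in explicit_task_ids & upstream_task_ids
          for o in pushed_outlets.get(tid, [])]

# 2) "auto": pull the remaining upstream tasks exactly once
auto_ids = explicit_task_ids.symmetric_difference(upstream_task_ids)
inlets += [o for tid in auto_ids for o in pushed_outlets.get(tid, [])]

print(inlets)   # both "a" and "b" appear exactly once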
|
test
|
Connection.extra_dejson
|
Returns the extra property by deserializing json.
|
airflow/models/connection.py
|
def extra_dejson(self):
"""Returns the extra property by deserializing json."""
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except Exception as e:
self.log.exception(e)
self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
return obj
|
def extra_dejson(self):
"""Returns the extra property by deserializing json."""
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except Exception as e:
self.log.exception(e)
self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
return obj
|
[
"Returns",
"the",
"extra",
"property",
"by",
"deserializing",
"json",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/connection.py#L286-L296
|
[
"def",
"extra_dejson",
"(",
"self",
")",
":",
"obj",
"=",
"{",
"}",
"if",
"self",
".",
"extra",
":",
"try",
":",
"obj",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"extra",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"log",
".",
"exception",
"(",
"e",
")",
"self",
".",
"log",
".",
"error",
"(",
"\"Failed parsing the json for conn_id %s\"",
",",
"self",
".",
"conn_id",
")",
"return",
"obj"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
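Editor's note: the defensive JSON parsing in extra_dejson can be reproduced with the standard library alone. The helper below is an illustrative sketch, not Airflow code; the connection values are invented.

import json
import logging

def parse_extra(extra, conn_id="demo_conn"):
    """Return the deserialized `extra` JSON, or {} if it is empty or invalid."""
    obj = {}
    if extra:
        try:
            obj = json.loads(extra)
        except Exception:
            logging.exception("Failed parsing the json for conn_id %s", conn_id)
    return obj

print(parse_extra('{"region": "eu-west-1"}'))  # -> {'region': 'eu-west-1'}
print(parse_extra("not-json"))                 # -> {} (the error is logged)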
|
test
|
date_range
|
Get a set of dates as a list based on a start, an end and a delta. The delta
can be something that can be added to `datetime.datetime`
or a cron expression as a `str`.
:Example::
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),
datetime.datetime(2016, 3, 1, 0, 0)]
:param start_date: anchor date to start the series from
:type start_date: datetime.datetime
:param end_date: right boundary for the date range
:type end_date: datetime.datetime
:param num: alternatively to end_date, you can specify the number of
    entries you want in the range. This number can be negative; the
    output will always be sorted regardless
:type num: int
|
airflow/utils/dates.py
|
def date_range(start_date, end_date=None, num=None, delta=None):
"""
Get a set of dates as a list based on a start, end and delta, delta
can be something that can be added to `datetime.datetime`
or a cron expression as a `str`
:Example::
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),
datetime.datetime(2016, 3, 1, 0, 0)]
:param start_date: anchor date to start the series from
:type start_date: datetime.datetime
:param end_date: right boundary for the date range
:type end_date: datetime.datetime
:param num: alternatively to end_date, you can specify the number of
number of entries you want in the range. This number can be negative,
output will always be sorted regardless
:type num: int
"""
if not delta:
return []
if end_date and start_date > end_date:
raise Exception("Wait. start_date needs to be before end_date")
if end_date and num:
raise Exception("Wait. Either specify end_date OR num")
if not end_date and not num:
end_date = timezone.utcnow()
delta_iscron = False
tz = start_date.tzinfo
if isinstance(delta, six.string_types):
delta_iscron = True
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
elif isinstance(delta, timedelta):
delta = abs(delta)
dates = []
if end_date:
if timezone.is_naive(start_date):
end_date = timezone.make_naive(end_date, tz)
while start_date <= end_date:
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
start_date = cron.get_next(datetime)
else:
start_date += delta
else:
for _ in range(abs(num)):
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
if num > 0:
start_date = cron.get_next(datetime)
else:
start_date = cron.get_prev(datetime)
else:
if num > 0:
start_date += delta
else:
start_date -= delta
return sorted(dates)
|
def date_range(start_date, end_date=None, num=None, delta=None):
"""
Get a set of dates as a list based on a start, end and delta, delta
can be something that can be added to `datetime.datetime`
or a cron expression as a `str`
:Example::
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),
datetime.datetime(2016, 3, 1, 0, 0)]
:param start_date: anchor date to start the series from
:type start_date: datetime.datetime
:param end_date: right boundary for the date range
:type end_date: datetime.datetime
:param num: alternatively to end_date, you can specify the number of
number of entries you want in the range. This number can be negative,
output will always be sorted regardless
:type num: int
"""
if not delta:
return []
if end_date and start_date > end_date:
raise Exception("Wait. start_date needs to be before end_date")
if end_date and num:
raise Exception("Wait. Either specify end_date OR num")
if not end_date and not num:
end_date = timezone.utcnow()
delta_iscron = False
tz = start_date.tzinfo
if isinstance(delta, six.string_types):
delta_iscron = True
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
elif isinstance(delta, timedelta):
delta = abs(delta)
dates = []
if end_date:
if timezone.is_naive(start_date):
end_date = timezone.make_naive(end_date, tz)
while start_date <= end_date:
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
start_date = cron.get_next(datetime)
else:
start_date += delta
else:
for _ in range(abs(num)):
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
if num > 0:
start_date = cron.get_next(datetime)
else:
start_date = cron.get_prev(datetime)
else:
if num > 0:
start_date += delta
else:
start_date -= delta
return sorted(dates)
|
[
"Get",
"a",
"set",
"of",
"dates",
"as",
"a",
"list",
"based",
"on",
"a",
"start",
"end",
"and",
"delta",
"delta",
"can",
"be",
"something",
"that",
"can",
"be",
"added",
"to",
"datetime",
".",
"datetime",
"or",
"a",
"cron",
"expression",
"as",
"a",
"str"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L36-L111
|
[
"def",
"date_range",
"(",
"start_date",
",",
"end_date",
"=",
"None",
",",
"num",
"=",
"None",
",",
"delta",
"=",
"None",
")",
":",
"if",
"not",
"delta",
":",
"return",
"[",
"]",
"if",
"end_date",
"and",
"start_date",
">",
"end_date",
":",
"raise",
"Exception",
"(",
"\"Wait. start_date needs to be before end_date\"",
")",
"if",
"end_date",
"and",
"num",
":",
"raise",
"Exception",
"(",
"\"Wait. Either specify end_date OR num\"",
")",
"if",
"not",
"end_date",
"and",
"not",
"num",
":",
"end_date",
"=",
"timezone",
".",
"utcnow",
"(",
")",
"delta_iscron",
"=",
"False",
"tz",
"=",
"start_date",
".",
"tzinfo",
"if",
"isinstance",
"(",
"delta",
",",
"six",
".",
"string_types",
")",
":",
"delta_iscron",
"=",
"True",
"start_date",
"=",
"timezone",
".",
"make_naive",
"(",
"start_date",
",",
"tz",
")",
"cron",
"=",
"croniter",
"(",
"delta",
",",
"start_date",
")",
"elif",
"isinstance",
"(",
"delta",
",",
"timedelta",
")",
":",
"delta",
"=",
"abs",
"(",
"delta",
")",
"dates",
"=",
"[",
"]",
"if",
"end_date",
":",
"if",
"timezone",
".",
"is_naive",
"(",
"start_date",
")",
":",
"end_date",
"=",
"timezone",
".",
"make_naive",
"(",
"end_date",
",",
"tz",
")",
"while",
"start_date",
"<=",
"end_date",
":",
"if",
"timezone",
".",
"is_naive",
"(",
"start_date",
")",
":",
"dates",
".",
"append",
"(",
"timezone",
".",
"make_aware",
"(",
"start_date",
",",
"tz",
")",
")",
"else",
":",
"dates",
".",
"append",
"(",
"start_date",
")",
"if",
"delta_iscron",
":",
"start_date",
"=",
"cron",
".",
"get_next",
"(",
"datetime",
")",
"else",
":",
"start_date",
"+=",
"delta",
"else",
":",
"for",
"_",
"in",
"range",
"(",
"abs",
"(",
"num",
")",
")",
":",
"if",
"timezone",
".",
"is_naive",
"(",
"start_date",
")",
":",
"dates",
".",
"append",
"(",
"timezone",
".",
"make_aware",
"(",
"start_date",
",",
"tz",
")",
")",
"else",
":",
"dates",
".",
"append",
"(",
"start_date",
")",
"if",
"delta_iscron",
":",
"if",
"num",
">",
"0",
":",
"start_date",
"=",
"cron",
".",
"get_next",
"(",
"datetime",
")",
"else",
":",
"start_date",
"=",
"cron",
".",
"get_prev",
"(",
"datetime",
")",
"else",
":",
"if",
"num",
">",
"0",
":",
"start_date",
"+=",
"delta",
"else",
":",
"start_date",
"-=",
"delta",
"return",
"sorted",
"(",
"dates",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
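Editor's note: a self-contained sketch of the timedelta branch of date_range (the cron branch needs the third-party croniter package and timezone handling, so it is omitted here). This is an illustrative re-implementation, not the Airflow function; the dates are only examples.

from datetime import datetime, timedelta

def simple_date_range(start_date, end_date, delta):
    """Timedelta-only sketch of the date_range() loop shown above."""
    delta = abs(delta)
    dates = []
    while start_date <= end_date:
        dates.append(start_date)
        start_date += delta
    return sorted(dates)

print(simple_date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), timedelta(days=1)))
# [datetime(2016, 1, 1, 0, 0), datetime(2016, 1, 2, 0, 0), datetime(2016, 1, 3, 0, 0)]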
|
test
|
round_time
|
Returns the datetime of the form start_date + i * delta
which is closest to dt for any non-negative integer i.
Note that delta may be a datetime.timedelta or a dateutil.relativedelta
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
|
airflow/utils/dates.py
|
def round_time(dt, delta, start_date=timezone.make_aware(datetime.min)):
"""
Returns the datetime of the form start_date + i * delta
which is closest to dt for any non-negative integer i.
Note that delta may be a datetime.timedelta or a dateutil.relativedelta
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
"""
if isinstance(delta, six.string_types):
# It's cron based, so it's easy
tz = start_date.tzinfo
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
prev = cron.get_prev(datetime)
if prev == start_date:
return timezone.make_aware(start_date, tz)
else:
return timezone.make_aware(prev, tz)
# Ignore the microseconds of dt
dt -= timedelta(microseconds=dt.microsecond)
# We are looking for a datetime in the form start_date + i * delta
# which is as close as possible to dt. Since delta could be a relative
# delta we don't know its exact length in seconds so we cannot rely on
# division to find i. Instead we employ a binary search algorithm, first
# finding an upper and lower limit and then disecting the interval until
# we have found the closest match.
# We first search an upper limit for i for which start_date + upper * delta
# exceeds dt.
upper = 1
while start_date + upper * delta < dt:
# To speed up finding an upper limit we grow this exponentially by a
# factor of 2
upper *= 2
# Since upper is the first value for which start_date + upper * delta
# exceeds dt, upper // 2 is below dt and therefore forms a lower limited
# for the i we are looking for
lower = upper // 2
# We now continue to intersect the interval between
# start_date + lower * delta and start_date + upper * delta
# until we find the closest value
while True:
# Invariant: start + lower * delta < dt <= start + upper * delta
# If start_date + (lower + 1)*delta exceeds dt, then either lower or
# lower+1 has to be the solution we are searching for
if start_date + (lower + 1) * delta >= dt:
# Check if start_date + (lower + 1)*delta or
# start_date + lower*delta is closer to dt and return the solution
if (start_date + (lower + 1) * delta) - dt <= dt - (start_date + lower * delta):
return start_date + (lower + 1) * delta
else:
return start_date + lower * delta
# We intersect the interval and either replace the lower or upper
# limit with the candidate
candidate = lower + (upper - lower) // 2
if start_date + candidate * delta >= dt:
upper = candidate
else:
lower = candidate
|
def round_time(dt, delta, start_date=timezone.make_aware(datetime.min)):
"""
Returns the datetime of the form start_date + i * delta
which is closest to dt for any non-negative integer i.
Note that delta may be a datetime.timedelta or a dateutil.relativedelta
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
"""
if isinstance(delta, six.string_types):
# It's cron based, so it's easy
tz = start_date.tzinfo
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
prev = cron.get_prev(datetime)
if prev == start_date:
return timezone.make_aware(start_date, tz)
else:
return timezone.make_aware(prev, tz)
# Ignore the microseconds of dt
dt -= timedelta(microseconds=dt.microsecond)
# We are looking for a datetime in the form start_date + i * delta
# which is as close as possible to dt. Since delta could be a relative
# delta we don't know its exact length in seconds so we cannot rely on
# division to find i. Instead we employ a binary search algorithm, first
# finding an upper and lower limit and then disecting the interval until
# we have found the closest match.
# We first search an upper limit for i for which start_date + upper * delta
# exceeds dt.
upper = 1
while start_date + upper * delta < dt:
# To speed up finding an upper limit we grow this exponentially by a
# factor of 2
upper *= 2
# Since upper is the first value for which start_date + upper * delta
# exceeds dt, upper // 2 is below dt and therefore forms a lower limited
# for the i we are looking for
lower = upper // 2
# We now continue to intersect the interval between
# start_date + lower * delta and start_date + upper * delta
# until we find the closest value
while True:
# Invariant: start + lower * delta < dt <= start + upper * delta
# If start_date + (lower + 1)*delta exceeds dt, then either lower or
# lower+1 has to be the solution we are searching for
if start_date + (lower + 1) * delta >= dt:
# Check if start_date + (lower + 1)*delta or
# start_date + lower*delta is closer to dt and return the solution
if (start_date + (lower + 1) * delta) - dt <= dt - (start_date + lower * delta):
return start_date + (lower + 1) * delta
else:
return start_date + lower * delta
# We intersect the interval and either replace the lower or upper
# limit with the candidate
candidate = lower + (upper - lower) // 2
if start_date + candidate * delta >= dt:
upper = candidate
else:
lower = candidate
|
[
"Returns",
"the",
"datetime",
"of",
"the",
"form",
"start_date",
"+",
"i",
"*",
"delta",
"which",
"is",
"closest",
"to",
"dt",
"for",
"any",
"non",
"-",
"negative",
"integer",
"i",
".",
"Note",
"that",
"delta",
"may",
"be",
"a",
"datetime",
".",
"timedelta",
"or",
"a",
"dateutil",
".",
"relativedelta",
">>>",
"round_time",
"(",
"datetime",
"(",
"2015",
"1",
"1",
"6",
")",
"timedelta",
"(",
"days",
"=",
"1",
"))",
"datetime",
".",
"datetime",
"(",
"2015",
"1",
"1",
"0",
"0",
")",
">>>",
"round_time",
"(",
"datetime",
"(",
"2015",
"1",
"2",
")",
"relativedelta",
"(",
"months",
"=",
"1",
"))",
"datetime",
".",
"datetime",
"(",
"2015",
"1",
"1",
"0",
"0",
")",
">>>",
"round_time",
"(",
"datetime",
"(",
"2015",
"9",
"16",
"0",
"0",
")",
"timedelta",
"(",
"1",
")",
"datetime",
"(",
"2015",
"9",
"14",
"0",
"0",
"))",
"datetime",
".",
"datetime",
"(",
"2015",
"9",
"16",
"0",
"0",
")",
">>>",
"round_time",
"(",
"datetime",
"(",
"2015",
"9",
"15",
"0",
"0",
")",
"timedelta",
"(",
"1",
")",
"datetime",
"(",
"2015",
"9",
"14",
"0",
"0",
"))",
"datetime",
".",
"datetime",
"(",
"2015",
"9",
"15",
"0",
"0",
")",
">>>",
"round_time",
"(",
"datetime",
"(",
"2015",
"9",
"14",
"0",
"0",
")",
"timedelta",
"(",
"1",
")",
"datetime",
"(",
"2015",
"9",
"14",
"0",
"0",
"))",
"datetime",
".",
"datetime",
"(",
"2015",
"9",
"14",
"0",
"0",
")",
">>>",
"round_time",
"(",
"datetime",
"(",
"2015",
"9",
"13",
"0",
"0",
")",
"timedelta",
"(",
"1",
")",
"datetime",
"(",
"2015",
"9",
"14",
"0",
"0",
"))",
"datetime",
".",
"datetime",
"(",
"2015",
"9",
"14",
"0",
"0",
")"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L114-L188
|
[
"def",
"round_time",
"(",
"dt",
",",
"delta",
",",
"start_date",
"=",
"timezone",
".",
"make_aware",
"(",
"datetime",
".",
"min",
")",
")",
":",
"if",
"isinstance",
"(",
"delta",
",",
"six",
".",
"string_types",
")",
":",
"# It's cron based, so it's easy",
"tz",
"=",
"start_date",
".",
"tzinfo",
"start_date",
"=",
"timezone",
".",
"make_naive",
"(",
"start_date",
",",
"tz",
")",
"cron",
"=",
"croniter",
"(",
"delta",
",",
"start_date",
")",
"prev",
"=",
"cron",
".",
"get_prev",
"(",
"datetime",
")",
"if",
"prev",
"==",
"start_date",
":",
"return",
"timezone",
".",
"make_aware",
"(",
"start_date",
",",
"tz",
")",
"else",
":",
"return",
"timezone",
".",
"make_aware",
"(",
"prev",
",",
"tz",
")",
"# Ignore the microseconds of dt",
"dt",
"-=",
"timedelta",
"(",
"microseconds",
"=",
"dt",
".",
"microsecond",
")",
"# We are looking for a datetime in the form start_date + i * delta",
"# which is as close as possible to dt. Since delta could be a relative",
"# delta we don't know its exact length in seconds so we cannot rely on",
"# division to find i. Instead we employ a binary search algorithm, first",
"# finding an upper and lower limit and then disecting the interval until",
"# we have found the closest match.",
"# We first search an upper limit for i for which start_date + upper * delta",
"# exceeds dt.",
"upper",
"=",
"1",
"while",
"start_date",
"+",
"upper",
"*",
"delta",
"<",
"dt",
":",
"# To speed up finding an upper limit we grow this exponentially by a",
"# factor of 2",
"upper",
"*=",
"2",
"# Since upper is the first value for which start_date + upper * delta",
"# exceeds dt, upper // 2 is below dt and therefore forms a lower limited",
"# for the i we are looking for",
"lower",
"=",
"upper",
"//",
"2",
"# We now continue to intersect the interval between",
"# start_date + lower * delta and start_date + upper * delta",
"# until we find the closest value",
"while",
"True",
":",
"# Invariant: start + lower * delta < dt <= start + upper * delta",
"# If start_date + (lower + 1)*delta exceeds dt, then either lower or",
"# lower+1 has to be the solution we are searching for",
"if",
"start_date",
"+",
"(",
"lower",
"+",
"1",
")",
"*",
"delta",
">=",
"dt",
":",
"# Check if start_date + (lower + 1)*delta or",
"# start_date + lower*delta is closer to dt and return the solution",
"if",
"(",
"start_date",
"+",
"(",
"lower",
"+",
"1",
")",
"*",
"delta",
")",
"-",
"dt",
"<=",
"dt",
"-",
"(",
"start_date",
"+",
"lower",
"*",
"delta",
")",
":",
"return",
"start_date",
"+",
"(",
"lower",
"+",
"1",
")",
"*",
"delta",
"else",
":",
"return",
"start_date",
"+",
"lower",
"*",
"delta",
"# We intersect the interval and either replace the lower or upper",
"# limit with the candidate",
"candidate",
"=",
"lower",
"+",
"(",
"upper",
"-",
"lower",
")",
"//",
"2",
"if",
"start_date",
"+",
"candidate",
"*",
"delta",
">=",
"dt",
":",
"upper",
"=",
"candidate",
"else",
":",
"lower",
"=",
"candidate"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
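Editor's note: the binary-search idea in round_time (grow an upper bound exponentially, then bisect) works with any step that supports start + i * delta. The condensed sketch below keeps only the timedelta path and drops the cron and timezone handling; it is an illustration, not the Airflow implementation.

from datetime import datetime, timedelta

def round_time_sketch(dt, delta, start_date):
    """Return the start_date + i * delta closest to dt (timedelta-only sketch)."""
    dt -= timedelta(microseconds=dt.microsecond)
    upper = 1
    while start_date + upper * delta < dt:
        upper *= 2                      # exponential search for an upper bound
    lower = upper // 2
    while True:
        if start_date + (lower + 1) * delta >= dt:
            above = (start_date + (lower + 1) * delta) - dt
            below = dt - (start_date + lower * delta)
            return start_date + (lower + 1) * delta if above <= below else start_date + lower * delta
        candidate = lower + (upper - lower) // 2
        if start_date + candidate * delta >= dt:
            upper = candidate
        else:
            lower = candidate

print(round_time_sketch(datetime(2015, 9, 13), timedelta(days=1), datetime(2015, 9, 14)))
# datetime.datetime(2015, 9, 14, 0, 0), matching the docstring example above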
|
test
|
infer_time_unit
|
Determine the most appropriate time unit for an array of time durations
specified in seconds.
e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours'
|
airflow/utils/dates.py
|
def infer_time_unit(time_seconds_arr):
"""
Determine the most appropriate time unit for an array of time durations
specified in seconds.
e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours'
"""
if len(time_seconds_arr) == 0:
return 'hours'
max_time_seconds = max(time_seconds_arr)
if max_time_seconds <= 60 * 2:
return 'seconds'
elif max_time_seconds <= 60 * 60 * 2:
return 'minutes'
elif max_time_seconds <= 24 * 60 * 60 * 2:
return 'hours'
else:
return 'days'
|
def infer_time_unit(time_seconds_arr):
"""
Determine the most appropriate time unit for an array of time durations
specified in seconds.
e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours'
"""
if len(time_seconds_arr) == 0:
return 'hours'
max_time_seconds = max(time_seconds_arr)
if max_time_seconds <= 60 * 2:
return 'seconds'
elif max_time_seconds <= 60 * 60 * 2:
return 'minutes'
elif max_time_seconds <= 24 * 60 * 60 * 2:
return 'hours'
else:
return 'days'
|
[
"Determine",
"the",
"most",
"appropriate",
"time",
"unit",
"for",
"an",
"array",
"of",
"time",
"durations",
"specified",
"in",
"seconds",
".",
"e",
".",
"g",
".",
"5400",
"seconds",
"=",
">",
"minutes",
"36000",
"seconds",
"=",
">",
"hours"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L195-L211
|
[
"def",
"infer_time_unit",
"(",
"time_seconds_arr",
")",
":",
"if",
"len",
"(",
"time_seconds_arr",
")",
"==",
"0",
":",
"return",
"'hours'",
"max_time_seconds",
"=",
"max",
"(",
"time_seconds_arr",
")",
"if",
"max_time_seconds",
"<=",
"60",
"*",
"2",
":",
"return",
"'seconds'",
"elif",
"max_time_seconds",
"<=",
"60",
"*",
"60",
"*",
"2",
":",
"return",
"'minutes'",
"elif",
"max_time_seconds",
"<=",
"24",
"*",
"60",
"*",
"60",
"*",
"2",
":",
"return",
"'hours'",
"else",
":",
"return",
"'days'"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
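Editor's note: the thresholds in infer_time_unit mean that durations up to two minutes report in seconds, up to two hours in minutes, up to two days in hours, and anything longer in days. A quick check, assuming an environment where airflow.utils.dates is importable:

from airflow.utils.dates import infer_time_unit

print(infer_time_unit([5, 20, 90]))        # 'seconds'  (max 90 s <= 2 min)
print(infer_time_unit([5400]))             # 'minutes'  (1.5 h    <= 2 h)
print(infer_time_unit([36000]))            # 'hours'    (10 h     <= 2 days)
print(infer_time_unit([10 * 24 * 3600]))   # 'days'
print(infer_time_unit([]))                 # 'hours' for an empty input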
|
test
|
scale_time_units
|
Convert an array of time durations in seconds to the specified time unit.
|
airflow/utils/dates.py
|
def scale_time_units(time_seconds_arr, unit):
"""
Convert an array of time durations in seconds to the specified time unit.
"""
if unit == 'minutes':
return list(map(lambda x: x * 1.0 / 60, time_seconds_arr))
elif unit == 'hours':
return list(map(lambda x: x * 1.0 / (60 * 60), time_seconds_arr))
elif unit == 'days':
return list(map(lambda x: x * 1.0 / (24 * 60 * 60), time_seconds_arr))
return time_seconds_arr
|
def scale_time_units(time_seconds_arr, unit):
"""
Convert an array of time durations in seconds to the specified time unit.
"""
if unit == 'minutes':
return list(map(lambda x: x * 1.0 / 60, time_seconds_arr))
elif unit == 'hours':
return list(map(lambda x: x * 1.0 / (60 * 60), time_seconds_arr))
elif unit == 'days':
return list(map(lambda x: x * 1.0 / (24 * 60 * 60), time_seconds_arr))
return time_seconds_arr
|
[
"Convert",
"an",
"array",
"of",
"time",
"durations",
"in",
"seconds",
"to",
"the",
"specified",
"time",
"unit",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L214-L224
|
[
"def",
"scale_time_units",
"(",
"time_seconds_arr",
",",
"unit",
")",
":",
"if",
"unit",
"==",
"'minutes'",
":",
"return",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
"*",
"1.0",
"/",
"60",
",",
"time_seconds_arr",
")",
")",
"elif",
"unit",
"==",
"'hours'",
":",
"return",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
"*",
"1.0",
"/",
"(",
"60",
"*",
"60",
")",
",",
"time_seconds_arr",
")",
")",
"elif",
"unit",
"==",
"'days'",
":",
"return",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
"*",
"1.0",
"/",
"(",
"24",
"*",
"60",
"*",
"60",
")",
",",
"time_seconds_arr",
")",
")",
"return",
"time_seconds_arr"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
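Editor's note: scale_time_units pairs naturally with infer_time_unit above. A short usage check, again assuming airflow.utils.dates is importable; note that an unknown unit falls through and the input is returned unchanged.

from airflow.utils.dates import scale_time_units

durations = [3600, 5400, 86400]                  # seconds
print(scale_time_units(durations, 'minutes'))    # [60.0, 90.0, 1440.0]
print(scale_time_units(durations, 'hours'))      # [1.0, 1.5, 24.0]
print(scale_time_units(durations, 'weeks'))      # unknown unit -> [3600, 5400, 86400]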
|
test
|
days_ago
|
Get a datetime object representing `n` days ago. By default the time is
set to midnight.
|
airflow/utils/dates.py
|
def days_ago(n, hour=0, minute=0, second=0, microsecond=0):
"""
Get a datetime object representing `n` days ago. By default the time is
set to midnight.
"""
today = timezone.utcnow().replace(
hour=hour,
minute=minute,
second=second,
microsecond=microsecond)
return today - timedelta(days=n)
|
def days_ago(n, hour=0, minute=0, second=0, microsecond=0):
"""
Get a datetime object representing `n` days ago. By default the time is
set to midnight.
"""
today = timezone.utcnow().replace(
hour=hour,
minute=minute,
second=second,
microsecond=microsecond)
return today - timedelta(days=n)
|
[
"Get",
"a",
"datetime",
"object",
"representing",
"n",
"days",
"ago",
".",
"By",
"default",
"the",
"time",
"is",
"set",
"to",
"midnight",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L227-L237
|
[
"def",
"days_ago",
"(",
"n",
",",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
",",
"microsecond",
"=",
"0",
")",
":",
"today",
"=",
"timezone",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"hour",
"=",
"hour",
",",
"minute",
"=",
"minute",
",",
"second",
"=",
"second",
",",
"microsecond",
"=",
"microsecond",
")",
"return",
"today",
"-",
"timedelta",
"(",
"days",
"=",
"n",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
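Editor's note: days_ago truncates to midnight UTC by default. The stdlib-only sketch below reproduces that behaviour without Airflow's timezone helper; it is an illustration, not the Airflow function.

from datetime import datetime, timedelta, timezone

def days_ago_sketch(n, hour=0, minute=0, second=0, microsecond=0):
    """n days before now (UTC), with the time-of-day fields overridden."""
    today = datetime.now(timezone.utc).replace(
        hour=hour, minute=minute, second=second, microsecond=microsecond)
    return today - timedelta(days=n)

print(days_ago_sketch(2))            # midnight UTC, two days ago
print(days_ago_sketch(0, hour=6))    # today at 06:00 UTC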
|
test
|
get_dag_runs
|
Returns a list of Dag Runs for a specific DAG ID.
:param dag_id: String identifier of a DAG
:param state: queued|running|success...
:return: List of DAG runs of a DAG with requested state,
or all runs if the state is not specified
|
airflow/api/common/experimental/get_dag_runs.py
|
def get_dag_runs(dag_id, state=None):
"""
Returns a list of Dag Runs for a specific DAG ID.
:param dag_id: String identifier of a DAG
:param state: queued|running|success...
:return: List of DAG runs of a DAG with requested state,
or all runs if the state is not specified
"""
dagbag = DagBag()
# Check DAG exists.
if dag_id not in dagbag.dags:
error_message = "Dag id {} not found".format(dag_id)
raise AirflowException(error_message)
dag_runs = list()
state = state.lower() if state else None
for run in DagRun.find(dag_id=dag_id, state=state):
dag_runs.append({
'id': run.id,
'run_id': run.run_id,
'state': run.state,
'dag_id': run.dag_id,
'execution_date': run.execution_date.isoformat(),
'start_date': ((run.start_date or '') and
run.start_date.isoformat()),
'dag_run_url': url_for('Airflow.graph', dag_id=run.dag_id,
execution_date=run.execution_date)
})
return dag_runs
|
def get_dag_runs(dag_id, state=None):
"""
Returns a list of Dag Runs for a specific DAG ID.
:param dag_id: String identifier of a DAG
:param state: queued|running|success...
:return: List of DAG runs of a DAG with requested state,
or all runs if the state is not specified
"""
dagbag = DagBag()
# Check DAG exists.
if dag_id not in dagbag.dags:
error_message = "Dag id {} not found".format(dag_id)
raise AirflowException(error_message)
dag_runs = list()
state = state.lower() if state else None
for run in DagRun.find(dag_id=dag_id, state=state):
dag_runs.append({
'id': run.id,
'run_id': run.run_id,
'state': run.state,
'dag_id': run.dag_id,
'execution_date': run.execution_date.isoformat(),
'start_date': ((run.start_date or '') and
run.start_date.isoformat()),
'dag_run_url': url_for('Airflow.graph', dag_id=run.dag_id,
execution_date=run.execution_date)
})
return dag_runs
|
[
"Returns",
"a",
"list",
"of",
"Dag",
"Runs",
"for",
"a",
"specific",
"DAG",
"ID",
".",
":",
"param",
"dag_id",
":",
"String",
"identifier",
"of",
"a",
"DAG",
":",
"param",
"state",
":",
"queued|running|success",
"...",
":",
"return",
":",
"List",
"of",
"DAG",
"runs",
"of",
"a",
"DAG",
"with",
"requested",
"state",
"or",
"all",
"runs",
"if",
"the",
"state",
"is",
"not",
"specified"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/get_dag_runs.py#L25-L55
|
[
"def",
"get_dag_runs",
"(",
"dag_id",
",",
"state",
"=",
"None",
")",
":",
"dagbag",
"=",
"DagBag",
"(",
")",
"# Check DAG exists.",
"if",
"dag_id",
"not",
"in",
"dagbag",
".",
"dags",
":",
"error_message",
"=",
"\"Dag id {} not found\"",
".",
"format",
"(",
"dag_id",
")",
"raise",
"AirflowException",
"(",
"error_message",
")",
"dag_runs",
"=",
"list",
"(",
")",
"state",
"=",
"state",
".",
"lower",
"(",
")",
"if",
"state",
"else",
"None",
"for",
"run",
"in",
"DagRun",
".",
"find",
"(",
"dag_id",
"=",
"dag_id",
",",
"state",
"=",
"state",
")",
":",
"dag_runs",
".",
"append",
"(",
"{",
"'id'",
":",
"run",
".",
"id",
",",
"'run_id'",
":",
"run",
".",
"run_id",
",",
"'state'",
":",
"run",
".",
"state",
",",
"'dag_id'",
":",
"run",
".",
"dag_id",
",",
"'execution_date'",
":",
"run",
".",
"execution_date",
".",
"isoformat",
"(",
")",
",",
"'start_date'",
":",
"(",
"(",
"run",
".",
"start_date",
"or",
"''",
")",
"and",
"run",
".",
"start_date",
".",
"isoformat",
"(",
")",
")",
",",
"'dag_run_url'",
":",
"url_for",
"(",
"'Airflow.graph'",
",",
"dag_id",
"=",
"run",
".",
"dag_id",
",",
"execution_date",
"=",
"run",
".",
"execution_date",
")",
"}",
")",
"return",
"dag_runs"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
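Editor's note: running get_dag_runs requires a configured Airflow metadata database and a Flask app (for url_for), so a standalone example has to fake the DagRun rows and drop the dag_run_url field. The sketch below only demonstrates the dict shaping, including the start_date guard; the run data is invented.

from datetime import datetime
from types import SimpleNamespace

# Hypothetical stand-ins for DagRun rows; only the attributes used above are set.
runs = [
    SimpleNamespace(id=1, run_id="scheduled__2019-01-01", state="success",
                    dag_id="example_dag", execution_date=datetime(2019, 1, 1),
                    start_date=datetime(2019, 1, 1, 0, 5)),
    SimpleNamespace(id=2, run_id="manual__2019-01-02", state="running",
                    dag_id="example_dag", execution_date=datetime(2019, 1, 2),
                    start_date=None),
]

dag_runs = [{
    'id': run.id,
    'run_id': run.run_id,
    'state': run.state,
    'dag_id': run.dag_id,
    'execution_date': run.execution_date.isoformat(),
    # same trick as above: '' when start_date is unset, isoformat() otherwise
    'start_date': ((run.start_date or '') and run.start_date.isoformat()),
} for run in runs]

print(dag_runs[1]['start_date'])   # -> '' because start_date is None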
|
test
|
AirflowSecurityManager.init_role
|
Initialize the role with the permissions and related view-menus.
:param role_name:
:param role_vms:
:param role_perms:
:return:
|
airflow/www/security.py
|
def init_role(self, role_name, role_vms, role_perms):
"""
Initialize the role with the permissions and related view-menus.
:param role_name:
:param role_vms:
:param role_perms:
:return:
"""
pvms = self.get_session.query(sqla_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.find_role(role_name)
if not role:
role = self.add_role(role_name)
if len(role.permissions) == 0:
self.log.info('Initializing permissions for role:%s in the database.', role_name)
role_pvms = set()
for pvm in pvms:
if pvm.view_menu.name in role_vms and pvm.permission.name in role_perms:
role_pvms.add(pvm)
role.permissions = list(role_pvms)
self.get_session.merge(role)
self.get_session.commit()
else:
self.log.debug('Existing permissions for the role:%s '
'within the database will persist.', role_name)
|
def init_role(self, role_name, role_vms, role_perms):
"""
Initialize the role with the permissions and related view-menus.
:param role_name:
:param role_vms:
:param role_perms:
:return:
"""
pvms = self.get_session.query(sqla_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
role = self.find_role(role_name)
if not role:
role = self.add_role(role_name)
if len(role.permissions) == 0:
self.log.info('Initializing permissions for role:%s in the database.', role_name)
role_pvms = set()
for pvm in pvms:
if pvm.view_menu.name in role_vms and pvm.permission.name in role_perms:
role_pvms.add(pvm)
role.permissions = list(role_pvms)
self.get_session.merge(role)
self.get_session.commit()
else:
self.log.debug('Existing permissions for the role:%s '
'within the database will persist.', role_name)
|
[
"Initialize",
"the",
"role",
"with",
"the",
"permissions",
"and",
"related",
"view",
"-",
"menus",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L175-L202
|
[
"def",
"init_role",
"(",
"self",
",",
"role_name",
",",
"role_vms",
",",
"role_perms",
")",
":",
"pvms",
"=",
"self",
".",
"get_session",
".",
"query",
"(",
"sqla_models",
".",
"PermissionView",
")",
".",
"all",
"(",
")",
"pvms",
"=",
"[",
"p",
"for",
"p",
"in",
"pvms",
"if",
"p",
".",
"permission",
"and",
"p",
".",
"view_menu",
"]",
"role",
"=",
"self",
".",
"find_role",
"(",
"role_name",
")",
"if",
"not",
"role",
":",
"role",
"=",
"self",
".",
"add_role",
"(",
"role_name",
")",
"if",
"len",
"(",
"role",
".",
"permissions",
")",
"==",
"0",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Initializing permissions for role:%s in the database.'",
",",
"role_name",
")",
"role_pvms",
"=",
"set",
"(",
")",
"for",
"pvm",
"in",
"pvms",
":",
"if",
"pvm",
".",
"view_menu",
".",
"name",
"in",
"role_vms",
"and",
"pvm",
".",
"permission",
".",
"name",
"in",
"role_perms",
":",
"role_pvms",
".",
"add",
"(",
"pvm",
")",
"role",
".",
"permissions",
"=",
"list",
"(",
"role_pvms",
")",
"self",
".",
"get_session",
".",
"merge",
"(",
"role",
")",
"self",
".",
"get_session",
".",
"commit",
"(",
")",
"else",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Existing permissions for the role:%s '",
"'within the database will persist.'",
",",
"role_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AirflowSecurityManager.delete_role
|
Delete the given Role
:param role_name: the name of a role in the ab_role table
|
airflow/www/security.py
|
def delete_role(self, role_name):
"""Delete the given Role
:param role_name: the name of a role in the ab_role table
"""
session = self.get_session
role = session.query(sqla_models.Role)\
.filter(sqla_models.Role.name == role_name)\
.first()
if role:
self.log.info("Deleting role '%s'", role_name)
session.delete(role)
session.commit()
else:
raise AirflowException("Role named '{}' does not exist".format(
role_name))
|
def delete_role(self, role_name):
"""Delete the given Role
:param role_name: the name of a role in the ab_role table
"""
session = self.get_session
role = session.query(sqla_models.Role)\
.filter(sqla_models.Role.name == role_name)\
.first()
if role:
self.log.info("Deleting role '%s'", role_name)
session.delete(role)
session.commit()
else:
raise AirflowException("Role named '{}' does not exist".format(
role_name))
|
[
"Delete",
"the",
"given",
"Role"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L204-L219
|
[
"def",
"delete_role",
"(",
"self",
",",
"role_name",
")",
":",
"session",
"=",
"self",
".",
"get_session",
"role",
"=",
"session",
".",
"query",
"(",
"sqla_models",
".",
"Role",
")",
".",
"filter",
"(",
"sqla_models",
".",
"Role",
".",
"name",
"==",
"role_name",
")",
".",
"first",
"(",
")",
"if",
"role",
":",
"self",
".",
"log",
".",
"info",
"(",
"\"Deleting role '%s'\"",
",",
"role_name",
")",
"session",
".",
"delete",
"(",
"role",
")",
"session",
".",
"commit",
"(",
")",
"else",
":",
"raise",
"AirflowException",
"(",
"\"Role named '{}' does not exist\"",
".",
"format",
"(",
"role_name",
")",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AirflowSecurityManager.get_user_roles
|
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
|
airflow/www/security.py
|
def get_user_roles(self, user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = appbuilder.config.get('AUTH_ROLE_PUBLIC')
return [appbuilder.security_manager.find_role(public_role)] \
if public_role else []
return user.roles
|
def get_user_roles(self, user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = appbuilder.config.get('AUTH_ROLE_PUBLIC')
return [appbuilder.security_manager.find_role(public_role)] \
if public_role else []
return user.roles
|
[
"Get",
"all",
"the",
"roles",
"associated",
"with",
"the",
"user",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L221-L234
|
[
"def",
"get_user_roles",
"(",
"self",
",",
"user",
"=",
"None",
")",
":",
"if",
"user",
"is",
"None",
":",
"user",
"=",
"g",
".",
"user",
"if",
"user",
".",
"is_anonymous",
":",
"public_role",
"=",
"appbuilder",
".",
"config",
".",
"get",
"(",
"'AUTH_ROLE_PUBLIC'",
")",
"return",
"[",
"appbuilder",
".",
"security_manager",
".",
"find_role",
"(",
"public_role",
")",
"]",
"if",
"public_role",
"else",
"[",
"]",
"return",
"user",
".",
"roles"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AirflowSecurityManager.get_all_permissions_views
|
Returns a set of tuples with the perm name and view menu name
|
airflow/www/security.py
|
def get_all_permissions_views(self):
"""
Returns a set of tuples with the perm name and view menu name
"""
perms_views = set()
for role in self.get_user_roles():
perms_views.update({(perm_view.permission.name, perm_view.view_menu.name)
for perm_view in role.permissions})
return perms_views
|
def get_all_permissions_views(self):
"""
Returns a set of tuples with the perm name and view menu name
"""
perms_views = set()
for role in self.get_user_roles():
perms_views.update({(perm_view.permission.name, perm_view.view_menu.name)
for perm_view in role.permissions})
return perms_views
|
[
"Returns",
"a",
"set",
"of",
"tuples",
"with",
"the",
"perm",
"name",
"and",
"view",
"menu",
"name"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L236-L244
|
[
"def",
"get_all_permissions_views",
"(",
"self",
")",
":",
"perms_views",
"=",
"set",
"(",
")",
"for",
"role",
"in",
"self",
".",
"get_user_roles",
"(",
")",
":",
"perms_views",
".",
"update",
"(",
"{",
"(",
"perm_view",
".",
"permission",
".",
"name",
",",
"perm_view",
".",
"view_menu",
".",
"name",
")",
"for",
"perm_view",
"in",
"role",
".",
"permissions",
"}",
")",
"return",
"perms_views"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AirflowSecurityManager.get_accessible_dag_ids
|
Return a set of dags that the user has access to (either read or write).
:param username: Name of the user.
:return: A set of dag ids that the user could access.
|
airflow/www/security.py
|
def get_accessible_dag_ids(self, username=None):
"""
Return a set of dags that user has access to(either read or write).
:param username: Name of the user.
:return: A set of dag ids that the user could access.
"""
if not username:
username = g.user
if username.is_anonymous or 'Public' in username.roles:
# return an empty set if the role is public
return set()
roles = {role.name for role in username.roles}
if {'Admin', 'Viewer', 'User', 'Op'} & roles:
return self.DAG_VMS
user_perms_views = self.get_all_permissions_views()
# return a set of all dags that the user could access
return set([view for perm, view in user_perms_views if perm in self.DAG_PERMS])
|
def get_accessible_dag_ids(self, username=None):
"""
Return a set of dags that user has access to(either read or write).
:param username: Name of the user.
:return: A set of dag ids that the user could access.
"""
if not username:
username = g.user
if username.is_anonymous or 'Public' in username.roles:
# return an empty set if the role is public
return set()
roles = {role.name for role in username.roles}
if {'Admin', 'Viewer', 'User', 'Op'} & roles:
return self.DAG_VMS
user_perms_views = self.get_all_permissions_views()
# return a set of all dags that the user could access
return set([view for perm, view in user_perms_views if perm in self.DAG_PERMS])
|
[
"Return",
"a",
"set",
"of",
"dags",
"that",
"user",
"has",
"access",
"to",
"(",
"either",
"read",
"or",
"write",
")",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L246-L266
|
[
"def",
"get_accessible_dag_ids",
"(",
"self",
",",
"username",
"=",
"None",
")",
":",
"if",
"not",
"username",
":",
"username",
"=",
"g",
".",
"user",
"if",
"username",
".",
"is_anonymous",
"or",
"'Public'",
"in",
"username",
".",
"roles",
":",
"# return an empty set if the role is public",
"return",
"set",
"(",
")",
"roles",
"=",
"{",
"role",
".",
"name",
"for",
"role",
"in",
"username",
".",
"roles",
"}",
"if",
"{",
"'Admin'",
",",
"'Viewer'",
",",
"'User'",
",",
"'Op'",
"}",
"&",
"roles",
":",
"return",
"self",
".",
"DAG_VMS",
"user_perms_views",
"=",
"self",
".",
"get_all_permissions_views",
"(",
")",
"# return a set of all dags that the user could access",
"return",
"set",
"(",
"[",
"view",
"for",
"perm",
",",
"view",
"in",
"user_perms_views",
"if",
"perm",
"in",
"self",
".",
"DAG_PERMS",
"]",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
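Editor's note: the role gate in get_accessible_dag_ids (built-in roles see every DAG view-menu, custom roles only the view-menus their permissions reference) can be shown with plain sets. The role and dag names below are invented; DAG_VMS and DAG_PERMS mirror the class attributes referenced above.

DAG_VMS = {"all_dags"}
DAG_PERMS = {"can_dag_read", "can_dag_edit"}
BUILTIN = {"Admin", "Viewer", "User", "Op"}

def accessible_dag_ids(user_roles, user_perm_views):
    """user_perm_views is a set of (permission, view_menu) tuples, as above."""
    if "Public" in user_roles or not user_roles:
        return set()
    if BUILTIN & user_roles:
        return DAG_VMS
    return {view for perm, view in user_perm_views if perm in DAG_PERMS}

print(accessible_dag_ids({"Op"}, set()))                                  # {'all_dags'}
print(accessible_dag_ids({"team_a"}, {("can_dag_read", "team_a_dag"),
                                      ("can_menu_access", "Browse")}))    # {'team_a_dag'}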
|
test
|
AirflowSecurityManager.has_access
|
Verify whether a given user can perform a certain permission
(e.g. can_read, can_write) on the given dag_id.
:param permission: permission on the dag_id (e.g. can_read, can_edit).
:type permission: str
:param view_name: name of the view-menu (e.g. a dag id is a view-menu as well).
:type view_name: str
:param user: user name
:type user: str
:return: a bool indicating whether the user can perform the permission on the dag_id.
:rtype: bool
|
airflow/www/security.py
|
def has_access(self, permission, view_name, user=None):
"""
Verify whether a given user could perform certain permission
(e.g can_read, can_write) on the given dag_id.
:param permission: permission on dag_id(e.g can_read, can_edit).
:type permission: str
:param view_name: name of view-menu(e.g dag id is a view-menu as well).
:type view_name: str
:param user: user name
:type user: str
:return: a bool whether user could perform certain permission on the dag_id.
:rtype bool
"""
if not user:
user = g.user
if user.is_anonymous:
return self.is_item_public(permission, view_name)
return self._has_view_access(user, permission, view_name)
|
def has_access(self, permission, view_name, user=None):
"""
Verify whether a given user could perform certain permission
(e.g can_read, can_write) on the given dag_id.
:param permission: permission on dag_id(e.g can_read, can_edit).
:type permission: str
:param view_name: name of view-menu(e.g dag id is a view-menu as well).
:type view_name: str
:param user: user name
:type user: str
:return: a bool whether user could perform certain permission on the dag_id.
:rtype bool
"""
if not user:
user = g.user
if user.is_anonymous:
return self.is_item_public(permission, view_name)
return self._has_view_access(user, permission, view_name)
|
[
"Verify",
"whether",
"a",
"given",
"user",
"could",
"perform",
"certain",
"permission",
"(",
"e",
".",
"g",
"can_read",
"can_write",
")",
"on",
"the",
"given",
"dag_id",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L268-L286
|
[
"def",
"has_access",
"(",
"self",
",",
"permission",
",",
"view_name",
",",
"user",
"=",
"None",
")",
":",
"if",
"not",
"user",
":",
"user",
"=",
"g",
".",
"user",
"if",
"user",
".",
"is_anonymous",
":",
"return",
"self",
".",
"is_item_public",
"(",
"permission",
",",
"view_name",
")",
"return",
"self",
".",
"_has_view_access",
"(",
"user",
",",
"permission",
",",
"view_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AirflowSecurityManager._has_role
|
Whether the user has this role name
|
airflow/www/security.py
|
def _has_role(self, role_name_or_list):
"""
Whether the user has this role name
"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(
[r.name in role_name_or_list for r in self.get_user_roles()])
|
def _has_role(self, role_name_or_list):
"""
Whether the user has this role name
"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(
[r.name in role_name_or_list for r in self.get_user_roles()])
|
[
"Whether",
"the",
"user",
"has",
"this",
"role",
"name"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L294-L301
|
[
"def",
"_has_role",
"(",
"self",
",",
"role_name_or_list",
")",
":",
"if",
"not",
"isinstance",
"(",
"role_name_or_list",
",",
"list",
")",
":",
"role_name_or_list",
"=",
"[",
"role_name_or_list",
"]",
"return",
"any",
"(",
"[",
"r",
".",
"name",
"in",
"role_name_or_list",
"for",
"r",
"in",
"self",
".",
"get_user_roles",
"(",
")",
"]",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AirflowSecurityManager._has_perm
|
Whether the user has this perm
|
airflow/www/security.py
|
def _has_perm(self, permission_name, view_menu_name):
"""
Whether the user has this perm
"""
if hasattr(self, 'perms'):
if (permission_name, view_menu_name) in self.perms:
return True
# rebuild the permissions set
self._get_and_cache_perms()
return (permission_name, view_menu_name) in self.perms
|
def _has_perm(self, permission_name, view_menu_name):
"""
Whether the user has this perm
"""
if hasattr(self, 'perms'):
if (permission_name, view_menu_name) in self.perms:
return True
# rebuild the permissions set
self._get_and_cache_perms()
return (permission_name, view_menu_name) in self.perms
|
[
"Whether",
"the",
"user",
"has",
"this",
"perm"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L303-L312
|
[
"def",
"_has_perm",
"(",
"self",
",",
"permission_name",
",",
"view_menu_name",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'perms'",
")",
":",
"if",
"(",
"permission_name",
",",
"view_menu_name",
")",
"in",
"self",
".",
"perms",
":",
"return",
"True",
"# rebuild the permissions set",
"self",
".",
"_get_and_cache_perms",
"(",
")",
"return",
"(",
"permission_name",
",",
"view_menu_name",
")",
"in",
"self",
".",
"perms"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
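Editor's note: _has_perm is a small read-through cache — check a cached set first, rebuild it only on a miss, then check once more. The standalone sketch below mirrors that pattern; the PermCache class and its _load_perms loader are invented (in Airflow the rebuild queries the FAB permission tables).

class PermCache:
    """Read-through cache mirroring the _has_perm()/_get_and_cache_perms() pair above."""

    def _load_perms(self):
        # Hypothetical loader; in Airflow this would query the permission tables.
        self.perms = {("can_dag_read", "example_dag")}

    def has_perm(self, permission_name, view_menu_name):
        if hasattr(self, "perms"):
            if (permission_name, view_menu_name) in self.perms:
                return True
        # miss (or no cache yet): rebuild the set and check once more
        self._load_perms()
        return (permission_name, view_menu_name) in self.perms

cache = PermCache()
print(cache.has_perm("can_dag_read", "example_dag"))   # True (cache built on first miss)
print(cache.has_perm("can_dag_edit", "example_dag"))   # False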
|
test
|
AirflowSecurityManager.clean_perms
|
FAB leaves faulty permissions that need to be cleaned up
|
airflow/www/security.py
|
def clean_perms(self):
"""
FAB leaves faulty permissions that need to be cleaned up
"""
self.log.debug('Cleaning faulty perms')
sesh = self.get_session
pvms = (
sesh.query(sqla_models.PermissionView)
.filter(or_(
sqla_models.PermissionView.permission == None, # NOQA
sqla_models.PermissionView.view_menu == None, # NOQA
))
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
|
def clean_perms(self):
"""
FAB leaves faulty permissions that need to be cleaned up
"""
self.log.debug('Cleaning faulty perms')
sesh = self.get_session
pvms = (
sesh.query(sqla_models.PermissionView)
.filter(or_(
sqla_models.PermissionView.permission == None, # NOQA
sqla_models.PermissionView.view_menu == None, # NOQA
))
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
|
[
"FAB",
"leaves",
"faulty",
"permissions",
"that",
"need",
"to",
"be",
"cleaned",
"up"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L326-L342
|
[
"def",
"clean_perms",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Cleaning faulty perms'",
")",
"sesh",
"=",
"self",
".",
"get_session",
"pvms",
"=",
"(",
"sesh",
".",
"query",
"(",
"sqla_models",
".",
"PermissionView",
")",
".",
"filter",
"(",
"or_",
"(",
"sqla_models",
".",
"PermissionView",
".",
"permission",
"==",
"None",
",",
"# NOQA",
"sqla_models",
".",
"PermissionView",
".",
"view_menu",
"==",
"None",
",",
"# NOQA",
")",
")",
")",
"deleted_count",
"=",
"pvms",
".",
"delete",
"(",
")",
"sesh",
".",
"commit",
"(",
")",
"if",
"deleted_count",
":",
"self",
".",
"log",
".",
"info",
"(",
"'Deleted %s faulty permissions'",
",",
"deleted_count",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
test
|
AirflowSecurityManager._merge_perm
|
Add the new (permission, view_menu) pair to ab_permission_view_role if it does not exist.
It will also add the related entries to the ab_permission
and ab_view_menu meta tables.
:param permission_name: Name of the permission.
:type permission_name: str
:param view_menu_name: Name of the view-menu.
:type view_menu_name: str
:return:
|
airflow/www/security.py
|
def _merge_perm(self, permission_name, view_menu_name):
"""
Add the new permission , view_menu to ab_permission_view_role if not exists.
It will add the related entry to ab_permission
and ab_view_menu two meta tables as well.
:param permission_name: Name of the permission.
:type permission_name: str
:param view_menu_name: Name of the view-menu
:type view_menu_name: str
:return:
"""
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
if permission and view_menu:
pv = self.get_session.query(self.permissionview_model).filter_by(
permission=permission, view_menu=view_menu).first()
if not pv and permission_name and view_menu_name:
self.add_permission_view_menu(permission_name, view_menu_name)
|
def _merge_perm(self, permission_name, view_menu_name):
"""
Add the new permission , view_menu to ab_permission_view_role if not exists.
It will add the related entry to ab_permission
and ab_view_menu two meta tables as well.
:param permission_name: Name of the permission.
:type permission_name: str
:param view_menu_name: Name of the view-menu
:type view_menu_name: str
:return:
"""
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
if permission and view_menu:
pv = self.get_session.query(self.permissionview_model).filter_by(
permission=permission, view_menu=view_menu).first()
if not pv and permission_name and view_menu_name:
self.add_permission_view_menu(permission_name, view_menu_name)
|
[
"Add",
"the",
"new",
"permission",
"view_menu",
"to",
"ab_permission_view_role",
"if",
"not",
"exists",
".",
"It",
"will",
"add",
"the",
"related",
"entry",
"to",
"ab_permission",
"and",
"ab_view_menu",
"two",
"meta",
"tables",
"as",
"well",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L344-L363
|
[
"def",
"_merge_perm",
"(",
"self",
",",
"permission_name",
",",
"view_menu_name",
")",
":",
"permission",
"=",
"self",
".",
"find_permission",
"(",
"permission_name",
")",
"view_menu",
"=",
"self",
".",
"find_view_menu",
"(",
"view_menu_name",
")",
"pv",
"=",
"None",
"if",
"permission",
"and",
"view_menu",
":",
"pv",
"=",
"self",
".",
"get_session",
".",
"query",
"(",
"self",
".",
"permissionview_model",
")",
".",
"filter_by",
"(",
"permission",
"=",
"permission",
",",
"view_menu",
"=",
"view_menu",
")",
".",
"first",
"(",
")",
"if",
"not",
"pv",
"and",
"permission_name",
"and",
"view_menu_name",
":",
"self",
".",
"add_permission_view_menu",
"(",
"permission_name",
",",
"view_menu_name",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
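The _merge_perm record above guards against duplicate (permission, view_menu) pairs before creating them. The short sketch below shows the same merge-if-absent idea with a plain in-memory set standing in for the FAB tables; the names are illustrative only, not Airflow code.

# Minimal sketch of the "merge if absent" pattern _merge_perm implements,
# using a set as a stand-in for the ab_permission_view rows.
permission_views = set()

def merge_perm(permission_name, view_menu_name):
    key = (permission_name, view_menu_name)
    if permission_name and view_menu_name and key not in permission_views:
        permission_views.add(key)   # stand-in for add_permission_view_menu(...)

merge_perm('can_dag_read', 'example_dag')
merge_perm('can_dag_read', 'example_dag')   # duplicate call changes nothing
print(permission_views)                     # {('can_dag_read', 'example_dag')}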
test
|
AirflowSecurityManager.create_custom_dag_permission_view
|
Workflow:
1. Fetch all the existing (permission, view-menu) pairs from the Airflow DB.
2. Fetch all the existing dag models that are either active or paused. Exclude the subdags.
3. Create both read and write permission view-menu relations for every dag from step 2.
4. Find all the dag-specific roles (excluding public, admin, viewer, op, user).
5. Get all the permission-vm pairs owned by the user role.
6. Grant all the user role's permission-vm pairs, except the all-dag view-menus, to the dag roles.
7. Commit the updated permission-vm-role rows into the db.
:return: None.
|
airflow/www/security.py
|
def create_custom_dag_permission_view(self, session=None):
"""
Workflow:
1. Fetch all the existing (permissions, view-menu) from Airflow DB.
2. Fetch all the existing dag models that are either active or paused. Exclude the subdags.
3. Create both read and write permission view-menus relation for every dags from step 2
4. Find out all the dag specific roles(excluded pubic, admin, viewer, op, user)
5. Get all the permission-vm owned by the user role.
6. Grant all the user role's permission-vm except the all-dag view-menus to the dag roles.
7. Commit the updated permission-vm-role into db
:return: None.
"""
self.log.debug('Fetching a set of all permission, view_menu from FAB meta-table')
def merge_pv(perm, view_menu):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self._merge_perm(perm, view_menu)
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
# Get all the active / paused dags and insert them into a set
all_dags_models = session.query(models.DagModel)\
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))\
.filter(~models.DagModel.is_subdag).all()
# create can_dag_edit and can_dag_read permissions for every dag(vm)
for dag in all_dags_models:
for perm in self.DAG_PERMS:
merge_pv(perm, dag.dag_id)
# for all the dag-level role, add the permission of viewer
# with the dag view to ab_permission_view
all_roles = self.get_all_roles()
user_role = self.find_role('User')
dag_role = [role for role in all_roles if role.name not in EXISTING_ROLES]
update_perm_views = []
# need to remove all_dag vm from all the existing view-menus
dag_vm = self.find_view_menu('all_dags')
ab_perm_view_role = sqla_models.assoc_permissionview_role
perm_view = self.permissionview_model
view_menu = self.viewmenu_model
all_perm_view_by_user = session.query(ab_perm_view_role)\
.join(perm_view, perm_view.id == ab_perm_view_role
.columns.permission_view_id)\
.filter(ab_perm_view_role.columns.role_id == user_role.id)\
.join(view_menu)\
.filter(perm_view.view_menu_id != dag_vm.id)
all_perm_views = set([role.permission_view_id for role in all_perm_view_by_user])
for role in dag_role:
# Get all the perm-view of the role
existing_perm_view_by_user = self.get_session.query(ab_perm_view_role)\
.filter(ab_perm_view_role.columns.role_id == role.id)
existing_perms_views = set([pv.permission_view_id
for pv in existing_perm_view_by_user])
missing_perm_views = all_perm_views - existing_perms_views
for perm_view_id in missing_perm_views:
update_perm_views.append({'permission_view_id': perm_view_id,
'role_id': role.id})
if update_perm_views:
self.get_session.execute(ab_perm_view_role.insert(), update_perm_views)
self.get_session.commit()
|
def create_custom_dag_permission_view(self, session=None):
"""
Workflow:
1. Fetch all the existing (permissions, view-menu) from Airflow DB.
2. Fetch all the existing dag models that are either active or paused. Exclude the subdags.
3. Create both read and write permission view-menus relation for every dags from step 2
4. Find out all the dag specific roles(excluded pubic, admin, viewer, op, user)
5. Get all the permission-vm owned by the user role.
6. Grant all the user role's permission-vm except the all-dag view-menus to the dag roles.
7. Commit the updated permission-vm-role into db
:return: None.
"""
self.log.debug('Fetching a set of all permission, view_menu from FAB meta-table')
def merge_pv(perm, view_menu):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self._merge_perm(perm, view_menu)
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
# Get all the active / paused dags and insert them into a set
all_dags_models = session.query(models.DagModel)\
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))\
.filter(~models.DagModel.is_subdag).all()
# create can_dag_edit and can_dag_read permissions for every dag(vm)
for dag in all_dags_models:
for perm in self.DAG_PERMS:
merge_pv(perm, dag.dag_id)
# for all the dag-level role, add the permission of viewer
# with the dag view to ab_permission_view
all_roles = self.get_all_roles()
user_role = self.find_role('User')
dag_role = [role for role in all_roles if role.name not in EXISTING_ROLES]
update_perm_views = []
# need to remove all_dag vm from all the existing view-menus
dag_vm = self.find_view_menu('all_dags')
ab_perm_view_role = sqla_models.assoc_permissionview_role
perm_view = self.permissionview_model
view_menu = self.viewmenu_model
all_perm_view_by_user = session.query(ab_perm_view_role)\
.join(perm_view, perm_view.id == ab_perm_view_role
.columns.permission_view_id)\
.filter(ab_perm_view_role.columns.role_id == user_role.id)\
.join(view_menu)\
.filter(perm_view.view_menu_id != dag_vm.id)
all_perm_views = set([role.permission_view_id for role in all_perm_view_by_user])
for role in dag_role:
# Get all the perm-view of the role
existing_perm_view_by_user = self.get_session.query(ab_perm_view_role)\
.filter(ab_perm_view_role.columns.role_id == role.id)
existing_perms_views = set([pv.permission_view_id
for pv in existing_perm_view_by_user])
missing_perm_views = all_perm_views - existing_perms_views
for perm_view_id in missing_perm_views:
update_perm_views.append({'permission_view_id': perm_view_id,
'role_id': role.id})
if update_perm_views:
self.get_session.execute(ab_perm_view_role.insert(), update_perm_views)
self.get_session.commit()
|
[
"Workflow",
":",
"1",
".",
"Fetch",
"all",
"the",
"existing",
"(",
"permissions",
"view",
"-",
"menu",
")",
"from",
"Airflow",
"DB",
".",
"2",
".",
"Fetch",
"all",
"the",
"existing",
"dag",
"models",
"that",
"are",
"either",
"active",
"or",
"paused",
".",
"Exclude",
"the",
"subdags",
".",
"3",
".",
"Create",
"both",
"read",
"and",
"write",
"permission",
"view",
"-",
"menus",
"relation",
"for",
"every",
"dags",
"from",
"step",
"2",
"4",
".",
"Find",
"out",
"all",
"the",
"dag",
"specific",
"roles",
"(",
"excluded",
"pubic",
"admin",
"viewer",
"op",
"user",
")",
"5",
".",
"Get",
"all",
"the",
"permission",
"-",
"vm",
"owned",
"by",
"the",
"user",
"role",
".",
"6",
".",
"Grant",
"all",
"the",
"user",
"role",
"s",
"permission",
"-",
"vm",
"except",
"the",
"all",
"-",
"dag",
"view",
"-",
"menus",
"to",
"the",
"dag",
"roles",
".",
"7",
".",
"Commit",
"the",
"updated",
"permission",
"-",
"vm",
"-",
"role",
"into",
"db"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L366-L438
|
[
"def",
"create_custom_dag_permission_view",
"(",
"self",
",",
"session",
"=",
"None",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Fetching a set of all permission, view_menu from FAB meta-table'",
")",
"def",
"merge_pv",
"(",
"perm",
",",
"view_menu",
")",
":",
"\"\"\"Create permission view menu only if it doesn't exist\"\"\"",
"if",
"view_menu",
"and",
"perm",
"and",
"(",
"view_menu",
",",
"perm",
")",
"not",
"in",
"all_pvs",
":",
"self",
".",
"_merge_perm",
"(",
"perm",
",",
"view_menu",
")",
"all_pvs",
"=",
"set",
"(",
")",
"for",
"pv",
"in",
"self",
".",
"get_session",
".",
"query",
"(",
"self",
".",
"permissionview_model",
")",
".",
"all",
"(",
")",
":",
"if",
"pv",
".",
"permission",
"and",
"pv",
".",
"view_menu",
":",
"all_pvs",
".",
"add",
"(",
"(",
"pv",
".",
"permission",
".",
"name",
",",
"pv",
".",
"view_menu",
".",
"name",
")",
")",
"# Get all the active / paused dags and insert them into a set",
"all_dags_models",
"=",
"session",
".",
"query",
"(",
"models",
".",
"DagModel",
")",
".",
"filter",
"(",
"or_",
"(",
"models",
".",
"DagModel",
".",
"is_active",
",",
"models",
".",
"DagModel",
".",
"is_paused",
")",
")",
".",
"filter",
"(",
"~",
"models",
".",
"DagModel",
".",
"is_subdag",
")",
".",
"all",
"(",
")",
"# create can_dag_edit and can_dag_read permissions for every dag(vm)",
"for",
"dag",
"in",
"all_dags_models",
":",
"for",
"perm",
"in",
"self",
".",
"DAG_PERMS",
":",
"merge_pv",
"(",
"perm",
",",
"dag",
".",
"dag_id",
")",
"# for all the dag-level role, add the permission of viewer",
"# with the dag view to ab_permission_view",
"all_roles",
"=",
"self",
".",
"get_all_roles",
"(",
")",
"user_role",
"=",
"self",
".",
"find_role",
"(",
"'User'",
")",
"dag_role",
"=",
"[",
"role",
"for",
"role",
"in",
"all_roles",
"if",
"role",
".",
"name",
"not",
"in",
"EXISTING_ROLES",
"]",
"update_perm_views",
"=",
"[",
"]",
"# need to remove all_dag vm from all the existing view-menus",
"dag_vm",
"=",
"self",
".",
"find_view_menu",
"(",
"'all_dags'",
")",
"ab_perm_view_role",
"=",
"sqla_models",
".",
"assoc_permissionview_role",
"perm_view",
"=",
"self",
".",
"permissionview_model",
"view_menu",
"=",
"self",
".",
"viewmenu_model",
"all_perm_view_by_user",
"=",
"session",
".",
"query",
"(",
"ab_perm_view_role",
")",
".",
"join",
"(",
"perm_view",
",",
"perm_view",
".",
"id",
"==",
"ab_perm_view_role",
".",
"columns",
".",
"permission_view_id",
")",
".",
"filter",
"(",
"ab_perm_view_role",
".",
"columns",
".",
"role_id",
"==",
"user_role",
".",
"id",
")",
".",
"join",
"(",
"view_menu",
")",
".",
"filter",
"(",
"perm_view",
".",
"view_menu_id",
"!=",
"dag_vm",
".",
"id",
")",
"all_perm_views",
"=",
"set",
"(",
"[",
"role",
".",
"permission_view_id",
"for",
"role",
"in",
"all_perm_view_by_user",
"]",
")",
"for",
"role",
"in",
"dag_role",
":",
"# Get all the perm-view of the role",
"existing_perm_view_by_user",
"=",
"self",
".",
"get_session",
".",
"query",
"(",
"ab_perm_view_role",
")",
".",
"filter",
"(",
"ab_perm_view_role",
".",
"columns",
".",
"role_id",
"==",
"role",
".",
"id",
")",
"existing_perms_views",
"=",
"set",
"(",
"[",
"pv",
".",
"permission_view_id",
"for",
"pv",
"in",
"existing_perm_view_by_user",
"]",
")",
"missing_perm_views",
"=",
"all_perm_views",
"-",
"existing_perms_views",
"for",
"perm_view_id",
"in",
"missing_perm_views",
":",
"update_perm_views",
".",
"append",
"(",
"{",
"'permission_view_id'",
":",
"perm_view_id",
",",
"'role_id'",
":",
"role",
".",
"id",
"}",
")",
"if",
"update_perm_views",
":",
"self",
".",
"get_session",
".",
"execute",
"(",
"ab_perm_view_role",
".",
"insert",
"(",
")",
",",
"update_perm_views",
")",
"self",
".",
"get_session",
".",
"commit",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
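Steps 5-7 of the workflow above reduce to a set difference followed by a bulk-insert payload. The sketch below isolates that computation with made-up permission-view ids; it is not Airflow code, just the shape of the grant step.

# Sketch of the grant step: find which permission-view ids a dag-level role is
# missing relative to the 'User' role, then build the bulk-insert payload.
user_role_perm_view_ids = {11, 12, 13, 14}        # perms held by 'User' (minus all_dags)
dag_role = {'id': 42, 'perm_view_ids': {11, 13}}  # a custom dag-level role (made up)

missing = user_role_perm_view_ids - dag_role['perm_view_ids']
update_perm_views = [{'permission_view_id': pv_id, 'role_id': dag_role['id']}
                     for pv_id in sorted(missing)]
# In the real method this list feeds a single bulk INSERT into the
# ab_permission_view_role association table, followed by a commit.
print(update_perm_views)
# [{'permission_view_id': 12, 'role_id': 42}, {'permission_view_id': 14, 'role_id': 42}]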
test
|
AirflowSecurityManager.update_admin_perm_view
|
Admin should have all the permission-views.
Add the missing ones to the table for admin.
:return: None.
|
airflow/www/security.py
|
def update_admin_perm_view(self):
"""
Admin should have all the permission-views.
Add the missing ones to the table for admin.
:return: None.
"""
pvms = self.get_session.query(sqla_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(pvms))
self.get_session.commit()
|
def update_admin_perm_view(self):
"""
Admin should have all the permission-views.
Add the missing ones to the table for admin.
:return: None.
"""
pvms = self.get_session.query(sqla_models.PermissionView).all()
pvms = [p for p in pvms if p.permission and p.view_menu]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(pvms))
self.get_session.commit()
|
[
"Admin",
"should",
"have",
"all",
"the",
"permission",
"-",
"views",
".",
"Add",
"the",
"missing",
"ones",
"to",
"the",
"table",
"for",
"admin",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L440-L453
|
[
"def",
"update_admin_perm_view",
"(",
"self",
")",
":",
"pvms",
"=",
"self",
".",
"get_session",
".",
"query",
"(",
"sqla_models",
".",
"PermissionView",
")",
".",
"all",
"(",
")",
"pvms",
"=",
"[",
"p",
"for",
"p",
"in",
"pvms",
"if",
"p",
".",
"permission",
"and",
"p",
".",
"view_menu",
"]",
"admin",
"=",
"self",
".",
"find_role",
"(",
"'Admin'",
")",
"admin",
".",
"permissions",
"=",
"list",
"(",
"set",
"(",
"admin",
".",
"permissions",
")",
"|",
"set",
"(",
"pvms",
")",
")",
"self",
".",
"get_session",
".",
"commit",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
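The union assignment in update_admin_perm_view is the whole trick: Admin ends up holding every valid permission-view exactly once. A tiny sketch with string stand-ins for the PermissionView ORM objects:

# Plain-Python sketch of the union step; strings stand in for ORM objects.
admin_permissions = ['can_dag_read on example_dag']
all_permission_views = ['can_dag_read on example_dag',
                        'can_dag_edit on example_dag',
                        'menu_access on Browse']

admin_permissions = list(set(admin_permissions) | set(all_permission_views))
# Admin now has every permission-view, de-duplicated; order is not guaranteed.
print(sorted(admin_permissions))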
test
|
AirflowSecurityManager.sync_roles
|
1. Init the default roles (Admin, Viewer, User, Op, public)
with their related permissions.
2. Init the custom roles (dag-user) with their related permissions.
:return: None.
|
airflow/www/security.py
|
def sync_roles(self):
"""
1. Init the default role(Admin, Viewer, User, Op, public)
with related permissions.
2. Init the custom role(dag-user) with related permissions.
:return: None.
"""
self.log.debug('Start syncing user roles.')
# Create global all-dag VM
self.create_perm_vm_for_all_dag()
# Create default user role.
for config in self.ROLE_CONFIGS:
role = config['role']
vms = config['vms']
perms = config['perms']
self.init_role(role, vms, perms)
self.create_custom_dag_permission_view()
# init existing roles, the rest role could be created through UI.
self.update_admin_perm_view()
self.clean_perms()
|
def sync_roles(self):
"""
1. Init the default role(Admin, Viewer, User, Op, public)
with related permissions.
2. Init the custom role(dag-user) with related permissions.
:return: None.
"""
self.log.debug('Start syncing user roles.')
# Create global all-dag VM
self.create_perm_vm_for_all_dag()
# Create default user role.
for config in self.ROLE_CONFIGS:
role = config['role']
vms = config['vms']
perms = config['perms']
self.init_role(role, vms, perms)
self.create_custom_dag_permission_view()
# init existing roles, the rest role could be created through UI.
self.update_admin_perm_view()
self.clean_perms()
|
[
"1",
".",
"Init",
"the",
"default",
"role",
"(",
"Admin",
"Viewer",
"User",
"Op",
"public",
")",
"with",
"related",
"permissions",
".",
"2",
".",
"Init",
"the",
"custom",
"role",
"(",
"dag",
"-",
"user",
")",
"with",
"related",
"permissions",
"."
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L455-L477
|
[
"def",
"sync_roles",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Start syncing user roles.'",
")",
"# Create global all-dag VM",
"self",
".",
"create_perm_vm_for_all_dag",
"(",
")",
"# Create default user role.",
"for",
"config",
"in",
"self",
".",
"ROLE_CONFIGS",
":",
"role",
"=",
"config",
"[",
"'role'",
"]",
"vms",
"=",
"config",
"[",
"'vms'",
"]",
"perms",
"=",
"config",
"[",
"'perms'",
"]",
"self",
".",
"init_role",
"(",
"role",
",",
"vms",
",",
"perms",
")",
"self",
".",
"create_custom_dag_permission_view",
"(",
")",
"# init existing roles, the rest role could be created through UI.",
"self",
".",
"update_admin_perm_view",
"(",
")",
"self",
".",
"clean_perms",
"(",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
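For context, a hedged sketch of how sync_roles is usually reached from a configured webserver process. The cached_appbuilder import path is an assumption that may differ across Airflow versions; the call sequence in the comment comes from the record above.

# Hedged usage sketch, not the project's canonical entry point.
from airflow.www.app import cached_appbuilder   # assumption: RBAC webserver app factory

appbuilder = cached_appbuilder()
security_manager = appbuilder.sm                 # AirflowSecurityManager instance

# One call performs: create_perm_vm_for_all_dag -> init_role(...) per ROLE_CONFIGS
# -> create_custom_dag_permission_view -> update_admin_perm_view -> clean_perms.
security_manager.sync_roles()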
test
|
AirflowSecurityManager.sync_perm_for_dag
|
Sync permissions for the given dag id. The dag id is assumed to exist in the dag bag,
since only the / refresh button or cli.sync_perm calls this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a role name and
each value is a set() of permission names (e.g.,
{'can_dag_read'})
:type access_control: dict
:return: None.
|
airflow/www/security.py
|
def sync_perm_for_dag(self, dag_id, access_control=None):
"""
Sync permissions for given dag id. The dag id surely exists in our dag bag
as only / refresh button or cli.sync_perm will call this function
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
{'can_dag_read'}
:type access_control: dict
:return:
"""
for dag_perm in self.DAG_PERMS:
perm_on_dag = self.find_permission_view_menu(dag_perm, dag_id)
if perm_on_dag is None:
self.add_permission_view_menu(dag_perm, dag_id)
if access_control:
self._sync_dag_view_permissions(dag_id, access_control)
|
def sync_perm_for_dag(self, dag_id, access_control=None):
"""
Sync permissions for given dag id. The dag id surely exists in our dag bag
as only / refresh button or cli.sync_perm will call this function
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
{'can_dag_read'}
:type access_control: dict
:return:
"""
for dag_perm in self.DAG_PERMS:
perm_on_dag = self.find_permission_view_menu(dag_perm, dag_id)
if perm_on_dag is None:
self.add_permission_view_menu(dag_perm, dag_id)
if access_control:
self._sync_dag_view_permissions(dag_id, access_control)
|
[
"Sync",
"permissions",
"for",
"given",
"dag",
"id",
".",
"The",
"dag",
"id",
"surely",
"exists",
"in",
"our",
"dag",
"bag",
"as",
"only",
"/",
"refresh",
"button",
"or",
"cli",
".",
"sync_perm",
"will",
"call",
"this",
"function"
] |
apache/airflow
|
python
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L479-L498
|
[
"def",
"sync_perm_for_dag",
"(",
"self",
",",
"dag_id",
",",
"access_control",
"=",
"None",
")",
":",
"for",
"dag_perm",
"in",
"self",
".",
"DAG_PERMS",
":",
"perm_on_dag",
"=",
"self",
".",
"find_permission_view_menu",
"(",
"dag_perm",
",",
"dag_id",
")",
"if",
"perm_on_dag",
"is",
"None",
":",
"self",
".",
"add_permission_view_menu",
"(",
"dag_perm",
",",
"dag_id",
")",
"if",
"access_control",
":",
"self",
".",
"_sync_dag_view_permissions",
"(",
"dag_id",
",",
"access_control",
")"
] |
b69c686ad8a0c89b9136bb4b31767257eb7b2597
|
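A hedged usage sketch for sync_perm_for_dag, reusing the same assumed app-factory setup as the sync_roles sketch above; the role names and access_control mapping are made up for illustration.

# Hedged usage sketch; the import path and role names are assumptions.
from airflow.www.app import cached_appbuilder   # assumption: RBAC webserver app factory

security_manager = cached_appbuilder().sm        # AirflowSecurityManager instance

access_control = {
    'data-team': {'can_dag_read'},                       # role -> DAG-level permission names
    'pipeline-admins': {'can_dag_read', 'can_dag_edit'},
}

# Ensures can_dag_read / can_dag_edit permission-views exist for this dag_id,
# then reconciles the per-role grants against the access_control mapping.
security_manager.sync_perm_for_dag('example_dag_id', access_control=access_control)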